From 9af28fc1d426697f90bba2f46b3957ec4f03bdcb Mon Sep 17 00:00:00 2001 From: MyonKeminta <9948422+MyonKeminta@users.noreply.github.com> Date: Mon, 29 Jul 2024 16:13:17 +0800 Subject: [PATCH 1/9] client: Merge the two tsoStream types to reuse the same error handling and metrics reporting code (#8433) ref tikv/pd#8432 client: Merge the two tsoStream types to reuse the same error handling and metrics reporting code This commit merges the two `xxxTSOStream` types so that the error handling and metrics reporting logic for PD server deployment and TSO service deployment can be reused. Signed-off-by: MyonKeminta Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com> --- client/tso_client.go | 6 +- client/tso_dispatcher.go | 4 +- client/tso_stream.go | 138 +++++++++++++++++++-------------------- 3 files changed, 72 insertions(+), 76 deletions(-) diff --git a/client/tso_client.go b/client/tso_client.go index 5e221eae478..2f3b949f017 100644 --- a/client/tso_client.go +++ b/client/tso_client.go @@ -350,9 +350,7 @@ type tsoConnectionContext struct { // Current URL of the stream connection. streamURL string // Current stream to send gRPC requests. - // - `pdpb.PD_TsoClient` for a leader/follower in the PD cluster. - // - `tsopb.TSO_TsoClient` for a primary/secondary in the TSO cluster. - stream tsoStream + stream *tsoStream } // updateConnectionCtxs will choose the proper way to update the connections for the given dc-location. @@ -382,7 +380,7 @@ func (c *tsoClient) tryConnectToTSO( var ( networkErrNum uint64 err error - stream tsoStream + stream *tsoStream url string cc *grpc.ClientConn updateAndClear = func(newURL string, connectionCtx *tsoConnectionContext) { diff --git a/client/tso_dispatcher.go b/client/tso_dispatcher.go index 0919fd84744..a7c99057275 100644 --- a/client/tso_dispatcher.go +++ b/client/tso_dispatcher.go @@ -186,7 +186,7 @@ func (td *tsoDispatcher) handleDispatcher(wg *sync.WaitGroup) { streamCtx context.Context cancel context.CancelFunc streamURL string - stream tsoStream + stream *tsoStream ) // Loop through each batch of TSO requests and send them for processing. streamLoopTimer := time.NewTimer(option.timeout) @@ -393,7 +393,7 @@ func chooseStream(connectionCtxs *sync.Map) (connectionCtx *tsoConnectionContext } func (td *tsoDispatcher) processRequests( - stream tsoStream, dcLocation string, tbc *tsoBatchController, + stream *tsoStream, dcLocation string, tbc *tsoBatchController, ) error { var ( requests = tbc.getCollectedRequests() diff --git a/client/tso_stream.go b/client/tso_stream.go index 9c4d78dfe18..da9cab95ba0 100644 --- a/client/tso_stream.go +++ b/client/tso_stream.go @@ -47,7 +47,7 @@ func (*tsoTSOStreamBuilderFactory) makeBuilder(cc *grpc.ClientConn) tsoStreamBui // TSO Stream Builder type tsoStreamBuilder interface { - build(context.Context, context.CancelFunc, time.Duration) (tsoStream, error) + build(context.Context, context.CancelFunc, time.Duration) (*tsoStream, error) } type pdTSOStreamBuilder struct { @@ -55,14 +55,14 @@ type pdTSOStreamBuilder struct { client pdpb.PDClient } -func (b *pdTSOStreamBuilder) build(ctx context.Context, cancel context.CancelFunc, timeout time.Duration) (tsoStream, error) { +func (b *pdTSOStreamBuilder) build(ctx context.Context, cancel context.CancelFunc, timeout time.Duration) (*tsoStream, error) { done := make(chan struct{}) // TODO: we need to handle a conner case that this goroutine is timeout while the stream is successfully created. 
go checkStreamTimeout(ctx, cancel, done, timeout) stream, err := b.client.Tso(ctx) done <- struct{}{} if err == nil { - return &pdTSOStream{stream: stream, serverURL: b.serverURL}, nil + return &tsoStream{stream: pdTSOStreamAdapter{stream}, serverURL: b.serverURL}, nil } return nil, err } @@ -74,14 +74,14 @@ type tsoTSOStreamBuilder struct { func (b *tsoTSOStreamBuilder) build( ctx context.Context, cancel context.CancelFunc, timeout time.Duration, -) (tsoStream, error) { +) (*tsoStream, error) { done := make(chan struct{}) // TODO: we need to handle a conner case that this goroutine is timeout while the stream is successfully created. go checkStreamTimeout(ctx, cancel, done, timeout) stream, err := b.client.Tso(ctx) done <- struct{}{} if err == nil { - return &tsoTSOStream{stream: stream, serverURL: b.serverURL}, nil + return &tsoStream{stream: tsoTSOStreamAdapter{stream}, serverURL: b.serverURL}, nil } return nil, err } @@ -99,30 +99,24 @@ func checkStreamTimeout(ctx context.Context, cancel context.CancelFunc, done cha <-done } -// TSO Stream - -type tsoStream interface { - getServerURL() string - // processRequests processes TSO requests in streaming mode to get timestamps - processRequests( - clusterID uint64, keyspaceID, keyspaceGroupID uint32, dcLocation string, - count int64, batchStartTime time.Time, - ) (respKeyspaceGroupID uint32, physical, logical int64, suffixBits uint32, err error) +type tsoRequestResult struct { + physical, logical int64 + count uint32 + suffixBits uint32 + respKeyspaceGroupID uint32 } -type pdTSOStream struct { - serverURL string - stream pdpb.PD_TsoClient +type grpcTSOStreamAdapter interface { + Send(clusterID uint64, keyspaceID, keyspaceGroupID uint32, dcLocation string, + count int64) error + Recv() (tsoRequestResult, error) } -func (s *pdTSOStream) getServerURL() string { - return s.serverURL +type pdTSOStreamAdapter struct { + stream pdpb.PD_TsoClient } -func (s *pdTSOStream) processRequests( - clusterID uint64, _, _ uint32, dcLocation string, count int64, batchStartTime time.Time, -) (respKeyspaceGroupID uint32, physical, logical int64, suffixBits uint32, err error) { - start := time.Now() +func (s pdTSOStreamAdapter) Send(clusterID uint64, _, _ uint32, dcLocation string, count int64) error { req := &pdpb.TsoRequest{ Header: &pdpb.RequestHeader{ ClusterId: clusterID, @@ -130,55 +124,28 @@ func (s *pdTSOStream) processRequests( Count: uint32(count), DcLocation: dcLocation, } + return s.stream.Send(req) +} - if err = s.stream.Send(req); err != nil { - if err == io.EOF { - err = errs.ErrClientTSOStreamClosed - } else { - err = errors.WithStack(err) - } - return - } - tsoBatchSendLatency.Observe(time.Since(batchStartTime).Seconds()) +func (s pdTSOStreamAdapter) Recv() (tsoRequestResult, error) { resp, err := s.stream.Recv() - duration := time.Since(start).Seconds() if err != nil { - requestFailedDurationTSO.Observe(duration) - if err == io.EOF { - err = errs.ErrClientTSOStreamClosed - } else { - err = errors.WithStack(err) - } - return + return tsoRequestResult{}, err } - requestDurationTSO.Observe(duration) - tsoBatchSize.Observe(float64(count)) - - if resp.GetCount() != uint32(count) { - err = errors.WithStack(errTSOLength) - return - } - - ts := resp.GetTimestamp() - respKeyspaceGroupID = defaultKeySpaceGroupID - physical, logical, suffixBits = ts.GetPhysical(), ts.GetLogical(), ts.GetSuffixBits() - return + return tsoRequestResult{ + physical: resp.GetTimestamp().GetPhysical(), + logical: resp.GetTimestamp().GetLogical(), + count: resp.GetCount(), + 
suffixBits: resp.GetTimestamp().GetSuffixBits(), + respKeyspaceGroupID: defaultKeySpaceGroupID, + }, nil } -type tsoTSOStream struct { - serverURL string - stream tsopb.TSO_TsoClient +type tsoTSOStreamAdapter struct { + stream tsopb.TSO_TsoClient } -func (s *tsoTSOStream) getServerURL() string { - return s.serverURL -} - -func (s *tsoTSOStream) processRequests( - clusterID uint64, keyspaceID, keyspaceGroupID uint32, dcLocation string, - count int64, batchStartTime time.Time, -) (respKeyspaceGroupID uint32, physical, logical int64, suffixBits uint32, err error) { - start := time.Now() +func (s tsoTSOStreamAdapter) Send(clusterID uint64, keyspaceID, keyspaceGroupID uint32, dcLocation string, count int64) error { req := &tsopb.TsoRequest{ Header: &tsopb.RequestHeader{ ClusterId: clusterID, @@ -188,8 +155,40 @@ func (s *tsoTSOStream) processRequests( Count: uint32(count), DcLocation: dcLocation, } + return s.stream.Send(req) +} - if err = s.stream.Send(req); err != nil { +func (s tsoTSOStreamAdapter) Recv() (tsoRequestResult, error) { + resp, err := s.stream.Recv() + if err != nil { + return tsoRequestResult{}, err + } + return tsoRequestResult{ + physical: resp.GetTimestamp().GetPhysical(), + logical: resp.GetTimestamp().GetLogical(), + count: resp.GetCount(), + suffixBits: resp.GetTimestamp().GetSuffixBits(), + respKeyspaceGroupID: resp.GetHeader().GetKeyspaceGroupId(), + }, nil +} + +type tsoStream struct { + serverURL string + // The internal gRPC stream. + // - `pdpb.PD_TsoClient` for a leader/follower in the PD cluster. + // - `tsopb.TSO_TsoClient` for a primary/secondary in the TSO cluster. + stream grpcTSOStreamAdapter +} + +func (s *tsoStream) getServerURL() string { + return s.serverURL +} + +func (s *tsoStream) processRequests( + clusterID uint64, keyspaceID, keyspaceGroupID uint32, dcLocation string, count int64, batchStartTime time.Time, +) (respKeyspaceGroupID uint32, physical, logical int64, suffixBits uint32, err error) { + start := time.Now() + if err = s.stream.Send(clusterID, keyspaceID, keyspaceGroupID, dcLocation, count); err != nil { if err == io.EOF { err = errs.ErrClientTSOStreamClosed } else { @@ -198,7 +197,7 @@ func (s *tsoTSOStream) processRequests( return } tsoBatchSendLatency.Observe(time.Since(batchStartTime).Seconds()) - resp, err := s.stream.Recv() + res, err := s.stream.Recv() duration := time.Since(start).Seconds() if err != nil { requestFailedDurationTSO.Observe(duration) @@ -212,13 +211,12 @@ func (s *tsoTSOStream) processRequests( requestDurationTSO.Observe(duration) tsoBatchSize.Observe(float64(count)) - if resp.GetCount() != uint32(count) { + if res.count != uint32(count) { err = errors.WithStack(errTSOLength) return } - ts := resp.GetTimestamp() - respKeyspaceGroupID = resp.GetHeader().GetKeyspaceGroupId() - physical, logical, suffixBits = ts.GetPhysical(), ts.GetLogical(), ts.GetSuffixBits() + respKeyspaceGroupID = res.respKeyspaceGroupID + physical, logical, suffixBits = res.physical, res.logical, res.suffixBits return } From 84f90f464ae58c26b7eb97a4518e7de97242730f Mon Sep 17 00:00:00 2001 From: okJiang <819421878@qq.com> Date: Wed, 31 Jul 2024 14:43:18 +0800 Subject: [PATCH 2/9] *: add some comments to exported function, part of enable revive.exported (#8459) ref tikv/pd#8458 Signed-off-by: okJiang <819421878@qq.com> Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com> --- .golangci.yml | 3 + pkg/core/metrics.go | 77 ++++++++++++++++++++---- pkg/core/region_tree.go | 3 + pkg/core/storelimit/store_limit.go | 1 + 
pkg/id/id.go | 1 + pkg/ratelimit/metrics.go | 20 +++--- pkg/ratelimit/runner.go | 12 ++-- pkg/schedule/filter/counter.go | 1 + pkg/schedule/filter/filters.go | 35 +++++++++++ pkg/schedule/filter/region_filters.go | 2 + pkg/schedule/operator/builder.go | 1 + pkg/schedule/operator/operator_queue.go | 5 ++ pkg/schedule/schedulers/split_bucket.go | 8 +-- pkg/schedule/splitter/region_splitter.go | 2 + pkg/schedule/type/type.go | 7 ++- pkg/statistics/collector.go | 24 ++++---- pkg/statistics/hot_peer.go | 10 +-- pkg/statistics/hot_peer_cache.go | 7 ++- pkg/statistics/store_collection.go | 11 ++-- pkg/statistics/store_hot_peers_infos.go | 6 +- pkg/statistics/utils/topn.go | 46 +++++++------- pkg/storage/kv/etcd_kv.go | 4 ++ pkg/storage/kv/mem_kv.go | 5 ++ pkg/syncer/client.go | 12 ++-- pkg/syncer/history_buffer.go | 8 +-- pkg/syncer/history_buffer_test.go | 12 ++-- pkg/syncer/server.go | 14 ++--- pkg/window/counter.go | 10 +++ plugin/scheduler_example/evict_leader.go | 14 +++++ 29 files changed, 253 insertions(+), 108 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index d5b2e4e7f5a..bc1ba393f39 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -214,3 +214,6 @@ issues: - path: (pd-analysis|pd-api-bench|pd-backup|pd-ctl|pd-heartbeat-bench|pd-recover|pd-simulator|pd-tso-bench|pd-ut|regions-dump|stores-dump) linters: - errcheck + include: + # remove the comment after the path is ready + # - EXC0012 diff --git a/pkg/core/metrics.go b/pkg/core/metrics.go index 7d2c904f319..65cc8be861e 100644 --- a/pkg/core/metrics.go +++ b/pkg/core/metrics.go @@ -108,19 +108,33 @@ type saveCacheStats struct { // RegionHeartbeatProcessTracer is used to trace the process of handling region heartbeat. type RegionHeartbeatProcessTracer interface { + // Begin starts the tracing. Begin() + // OnPreCheckFinished will be called when the pre-check is finished. OnPreCheckFinished() + // OnAsyncHotStatsFinished will be called when the async hot stats is finished. OnAsyncHotStatsFinished() + // OnRegionGuideFinished will be called when the region guide is finished. OnRegionGuideFinished() + // OnSaveCacheBegin will be called when the save cache begins. OnSaveCacheBegin() + // OnSaveCacheFinished will be called when the save cache is finished. OnSaveCacheFinished() + // OnCheckOverlapsFinished will be called when the check overlaps is finished. OnCheckOverlapsFinished() + // OnValidateRegionFinished will be called when the validate region is finished. OnValidateRegionFinished() + // OnSetRegionFinished will be called when the set region is finished. OnSetRegionFinished() + // OnUpdateSubTreeFinished will be called when the update sub tree is finished. OnUpdateSubTreeFinished() + // OnCollectRegionStatsFinished will be called when the collect region stats is finished. OnCollectRegionStatsFinished() + // OnAllStageFinished will be called when all stages are finished. OnAllStageFinished() + // LogFields returns the log fields. LogFields() []zap.Field + // Release releases the tracer. 
Release() } @@ -131,21 +145,48 @@ func NewNoopHeartbeatProcessTracer() RegionHeartbeatProcessTracer { return &noopHeartbeatProcessTracer{} } -func (*noopHeartbeatProcessTracer) Begin() {} -func (*noopHeartbeatProcessTracer) OnPreCheckFinished() {} -func (*noopHeartbeatProcessTracer) OnAsyncHotStatsFinished() {} -func (*noopHeartbeatProcessTracer) OnRegionGuideFinished() {} -func (*noopHeartbeatProcessTracer) OnSaveCacheBegin() {} -func (*noopHeartbeatProcessTracer) OnSaveCacheFinished() {} -func (*noopHeartbeatProcessTracer) OnCheckOverlapsFinished() {} -func (*noopHeartbeatProcessTracer) OnValidateRegionFinished() {} -func (*noopHeartbeatProcessTracer) OnSetRegionFinished() {} -func (*noopHeartbeatProcessTracer) OnUpdateSubTreeFinished() {} +// Begin implements the RegionHeartbeatProcessTracer interface. +func (*noopHeartbeatProcessTracer) Begin() {} + +// OnPreCheckFinished implements the RegionHeartbeatProcessTracer interface. +func (*noopHeartbeatProcessTracer) OnPreCheckFinished() {} + +// OnAsyncHotStatsFinished implements the RegionHeartbeatProcessTracer interface. +func (*noopHeartbeatProcessTracer) OnAsyncHotStatsFinished() {} + +// OnRegionGuideFinished implements the RegionHeartbeatProcessTracer interface. +func (*noopHeartbeatProcessTracer) OnRegionGuideFinished() {} + +// OnSaveCacheBegin implements the RegionHeartbeatProcessTracer interface. +func (*noopHeartbeatProcessTracer) OnSaveCacheBegin() {} + +// OnSaveCacheFinished implements the RegionHeartbeatProcessTracer interface. +func (*noopHeartbeatProcessTracer) OnSaveCacheFinished() {} + +// OnCheckOverlapsFinished implements the RegionHeartbeatProcessTracer interface. +func (*noopHeartbeatProcessTracer) OnCheckOverlapsFinished() {} + +// OnValidateRegionFinished implements the RegionHeartbeatProcessTracer interface. +func (*noopHeartbeatProcessTracer) OnValidateRegionFinished() {} + +// OnSetRegionFinished implements the RegionHeartbeatProcessTracer interface. +func (*noopHeartbeatProcessTracer) OnSetRegionFinished() {} + +// OnUpdateSubTreeFinished implements the RegionHeartbeatProcessTracer interface. +func (*noopHeartbeatProcessTracer) OnUpdateSubTreeFinished() {} + +// OnCollectRegionStatsFinished implements the RegionHeartbeatProcessTracer interface. func (*noopHeartbeatProcessTracer) OnCollectRegionStatsFinished() {} -func (*noopHeartbeatProcessTracer) OnAllStageFinished() {} + +// OnAllStageFinished implements the RegionHeartbeatProcessTracer interface. +func (*noopHeartbeatProcessTracer) OnAllStageFinished() {} + +// LogFields implements the RegionHeartbeatProcessTracer interface. func (*noopHeartbeatProcessTracer) LogFields() []zap.Field { return nil } + +// Release implements the RegionHeartbeatProcessTracer interface. func (*noopHeartbeatProcessTracer) Release() {} type regionHeartbeatProcessTracer struct { @@ -163,12 +204,14 @@ func NewHeartbeatProcessTracer() RegionHeartbeatProcessTracer { return tracerPool.Get().(*regionHeartbeatProcessTracer) } +// Begin implements the RegionHeartbeatProcessTracer interface. func (h *regionHeartbeatProcessTracer) Begin() { now := time.Now() h.startTime = now h.lastCheckTime = now } +// OnPreCheckFinished implements the RegionHeartbeatProcessTracer interface. func (h *regionHeartbeatProcessTracer) OnPreCheckFinished() { now := time.Now() h.preCheckDuration = now.Sub(h.lastCheckTime) @@ -177,6 +220,7 @@ func (h *regionHeartbeatProcessTracer) OnPreCheckFinished() { preCheckCount.Inc() } +// OnAsyncHotStatsFinished implements the RegionHeartbeatProcessTracer interface. 
func (h *regionHeartbeatProcessTracer) OnAsyncHotStatsFinished() { now := time.Now() h.asyncHotStatsDuration = now.Sub(h.lastCheckTime) @@ -185,6 +229,7 @@ func (h *regionHeartbeatProcessTracer) OnAsyncHotStatsFinished() { asyncHotStatsCount.Inc() } +// OnRegionGuideFinished implements the RegionHeartbeatProcessTracer interface. func (h *regionHeartbeatProcessTracer) OnRegionGuideFinished() { now := time.Now() h.regionGuideDuration = now.Sub(h.lastCheckTime) @@ -193,6 +238,7 @@ func (h *regionHeartbeatProcessTracer) OnRegionGuideFinished() { regionGuideCount.Inc() } +// OnSaveCacheBegin implements the RegionHeartbeatProcessTracer interface. func (h *regionHeartbeatProcessTracer) OnSaveCacheBegin() { now := time.Now() h.saveCacheStats.startTime = now @@ -200,11 +246,13 @@ func (h *regionHeartbeatProcessTracer) OnSaveCacheBegin() { h.lastCheckTime = now } +// OnSaveCacheFinished implements the RegionHeartbeatProcessTracer interface. func (h *regionHeartbeatProcessTracer) OnSaveCacheFinished() { // update the outer checkpoint time h.lastCheckTime = time.Now() } +// OnCollectRegionStatsFinished implements the RegionHeartbeatProcessTracer interface. func (h *regionHeartbeatProcessTracer) OnCollectRegionStatsFinished() { now := time.Now() regionCollectDurationSum.Add(now.Sub(h.lastCheckTime).Seconds()) @@ -212,6 +260,7 @@ func (h *regionHeartbeatProcessTracer) OnCollectRegionStatsFinished() { h.lastCheckTime = now } +// OnCheckOverlapsFinished implements the RegionHeartbeatProcessTracer interface. func (h *regionHeartbeatProcessTracer) OnCheckOverlapsFinished() { now := time.Now() h.saveCacheStats.checkOverlapsDuration = now.Sub(h.lastCheckTime) @@ -220,6 +269,7 @@ func (h *regionHeartbeatProcessTracer) OnCheckOverlapsFinished() { checkOverlapsCount.Inc() } +// OnValidateRegionFinished implements the RegionHeartbeatProcessTracer interface. func (h *regionHeartbeatProcessTracer) OnValidateRegionFinished() { now := time.Now() h.saveCacheStats.validateRegionDuration = now.Sub(h.saveCacheStats.lastCheckTime) @@ -228,6 +278,7 @@ func (h *regionHeartbeatProcessTracer) OnValidateRegionFinished() { validateRegionCount.Inc() } +// OnSetRegionFinished implements the RegionHeartbeatProcessTracer interface. func (h *regionHeartbeatProcessTracer) OnSetRegionFinished() { now := time.Now() h.saveCacheStats.setRegionDuration = now.Sub(h.saveCacheStats.lastCheckTime) @@ -236,6 +287,7 @@ func (h *regionHeartbeatProcessTracer) OnSetRegionFinished() { setRegionCount.Inc() } +// OnUpdateSubTreeFinished implements the RegionHeartbeatProcessTracer interface. func (h *regionHeartbeatProcessTracer) OnUpdateSubTreeFinished() { now := time.Now() h.saveCacheStats.updateSubTreeDuration = now.Sub(h.saveCacheStats.lastCheckTime) @@ -244,6 +296,7 @@ func (h *regionHeartbeatProcessTracer) OnUpdateSubTreeFinished() { updateSubTreeCount.Inc() } +// OnAllStageFinished implements the RegionHeartbeatProcessTracer interface. func (h *regionHeartbeatProcessTracer) OnAllStageFinished() { now := time.Now() h.OtherDuration = now.Sub(h.lastCheckTime) @@ -251,6 +304,7 @@ func (h *regionHeartbeatProcessTracer) OnAllStageFinished() { otherCount.Inc() } +// LogFields implements the RegionHeartbeatProcessTracer interface. func (h *regionHeartbeatProcessTracer) LogFields() []zap.Field { return []zap.Field{ zap.Duration("pre-check-duration", h.preCheckDuration), @@ -264,6 +318,7 @@ func (h *regionHeartbeatProcessTracer) LogFields() []zap.Field { } } +// Release implements the RegionHeartbeatProcessTracer interface. 
// Release puts the tracer back into the pool. func (h *regionHeartbeatProcessTracer) Release() { // Reset the fields of h to their zero values. diff --git a/pkg/core/region_tree.go b/pkg/core/region_tree.go index 9a148eeed18..0be207d515d 100644 --- a/pkg/core/region_tree.go +++ b/pkg/core/region_tree.go @@ -437,6 +437,7 @@ func (t *regionTree) RandomRegions(n int, ranges []KeyRange) []*RegionInfo { return regions } +// TotalSize returns the total size of all regions. func (t *regionTree) TotalSize() int64 { if t.length() == 0 { return 0 @@ -444,6 +445,8 @@ func (t *regionTree) TotalSize() int64 { return t.totalSize } +// TotalWriteRate returns the total write bytes rate and the total write keys +// rate of all regions. func (t *regionTree) TotalWriteRate() (bytesRate, keysRate float64) { if t.length() == 0 { return 0, 0 diff --git a/pkg/core/storelimit/store_limit.go b/pkg/core/storelimit/store_limit.go index e35ec773d80..fb7cad442bb 100644 --- a/pkg/core/storelimit/store_limit.go +++ b/pkg/core/storelimit/store_limit.go @@ -178,6 +178,7 @@ func (l *limit) Take(count int64) bool { return l.limiter.AllowN(int(count)) } +// GetRatePerSec returns the rate per second. func (l *limit) GetRatePerSec() float64 { l.ratePerSecMutex.RLock() defer l.ratePerSecMutex.RUnlock() diff --git a/pkg/id/id.go b/pkg/id/id.go index d0889717242..ea4a2a54341 100644 --- a/pkg/id/id.go +++ b/pkg/id/id.go @@ -105,6 +105,7 @@ func (alloc *allocatorImpl) Alloc() (uint64, error) { return alloc.base, nil } +// SetBase sets the base. func (alloc *allocatorImpl) SetBase(newBase uint64) error { alloc.mu.Lock() defer alloc.mu.Unlock() diff --git a/pkg/ratelimit/metrics.go b/pkg/ratelimit/metrics.go index c5510e66b26..0096a76de4c 100644 --- a/pkg/ratelimit/metrics.go +++ b/pkg/ratelimit/metrics.go @@ -24,35 +24,35 @@ const ( ) var ( - RunnerTaskMaxWaitingDuration = prometheus.NewGaugeVec( + runnerTaskMaxWaitingDuration = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "pd", Subsystem: "ratelimit", Name: "runner_task_max_waiting_duration_seconds", Help: "The duration of tasks waiting in the runner.", }, []string{nameStr}) - RunnerPendingTasks = prometheus.NewGaugeVec( + runnerPendingTasks = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "pd", Subsystem: "ratelimit", Name: "runner_pending_tasks", Help: "The number of pending tasks in the runner.", }, []string{nameStr, taskStr}) - RunnerFailedTasks = prometheus.NewCounterVec( + runnerFailedTasks = prometheus.NewCounterVec( prometheus.CounterOpts{ Namespace: "pd", Subsystem: "ratelimit", Name: "runner_failed_tasks_total", Help: "The number of failed tasks in the runner.", }, []string{nameStr, taskStr}) - RunnerSucceededTasks = prometheus.NewCounterVec( + runnerSucceededTasks = prometheus.NewCounterVec( prometheus.CounterOpts{ Namespace: "pd", Subsystem: "ratelimit", Name: "runner_success_tasks_total", Help: "The number of tasks in the runner.", }, []string{nameStr, taskStr}) - RunnerTaskExecutionDuration = prometheus.NewHistogramVec( + runnerTaskExecutionDuration = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Namespace: "pd", Subsystem: "ratelimit", @@ -63,9 +63,9 @@ var ( ) func init() { - prometheus.MustRegister(RunnerTaskMaxWaitingDuration) - prometheus.MustRegister(RunnerPendingTasks) - prometheus.MustRegister(RunnerFailedTasks) - prometheus.MustRegister(RunnerTaskExecutionDuration) - prometheus.MustRegister(RunnerSucceededTasks) + prometheus.MustRegister(runnerTaskMaxWaitingDuration) + prometheus.MustRegister(runnerPendingTasks) + 
prometheus.MustRegister(runnerFailedTasks) + prometheus.MustRegister(runnerTaskExecutionDuration) + prometheus.MustRegister(runnerSucceededTasks) } diff --git a/pkg/ratelimit/runner.go b/pkg/ratelimit/runner.go index a230177ac73..4b1b51f1768 100644 --- a/pkg/ratelimit/runner.go +++ b/pkg/ratelimit/runner.go @@ -90,7 +90,7 @@ func NewConcurrentRunner(name string, limiter *ConcurrencyLimiter, maxPendingDur pendingTasks: make([]*Task, 0, initialCapacity), pendingTaskCount: make(map[string]int), existTasks: make(map[taskID]*Task), - maxWaitingDuration: RunnerTaskMaxWaitingDuration.WithLabelValues(name), + maxWaitingDuration: runnerTaskMaxWaitingDuration.WithLabelValues(name), } return s } @@ -136,7 +136,7 @@ func (cr *ConcurrentRunner) Start(ctx context.Context) { maxDuration = time.Since(cr.pendingTasks[0].submittedAt) } for taskName, cnt := range cr.pendingTaskCount { - RunnerPendingTasks.WithLabelValues(cr.name, taskName).Set(float64(cnt)) + runnerPendingTasks.WithLabelValues(cr.name, taskName).Set(float64(cnt)) } cr.pendingMu.Unlock() cr.maxWaitingDuration.Set(maxDuration.Seconds()) @@ -157,8 +157,8 @@ func (cr *ConcurrentRunner) run(ctx context.Context, task *Task, token *TaskToke cr.limiter.ReleaseToken(token) cr.processPendingTasks() } - RunnerTaskExecutionDuration.WithLabelValues(cr.name, task.name).Observe(time.Since(start).Seconds()) - RunnerSucceededTasks.WithLabelValues(cr.name, task.name).Inc() + runnerTaskExecutionDuration.WithLabelValues(cr.name, task.name).Observe(time.Since(start).Seconds()) + runnerSucceededTasks.WithLabelValues(cr.name, task.name).Inc() } func (cr *ConcurrentRunner) processPendingTasks() { @@ -214,12 +214,12 @@ func (cr *ConcurrentRunner) RunTask(id uint64, name string, f func(context.Conte if !task.retained { maxWait := time.Since(cr.pendingTasks[0].submittedAt) if maxWait > cr.maxPendingDuration { - RunnerFailedTasks.WithLabelValues(cr.name, task.name).Inc() + runnerFailedTasks.WithLabelValues(cr.name, task.name).Inc() return ErrMaxWaitingTasksExceeded } } if pendingTaskNum > maxPendingTaskNum { - RunnerFailedTasks.WithLabelValues(cr.name, task.name).Inc() + runnerFailedTasks.WithLabelValues(cr.name, task.name).Inc() return ErrMaxWaitingTasksExceeded } } diff --git a/pkg/schedule/filter/counter.go b/pkg/schedule/filter/counter.go index 9742d2d0c9d..41211c1acce 100644 --- a/pkg/schedule/filter/counter.go +++ b/pkg/schedule/filter/counter.go @@ -128,6 +128,7 @@ func NewCounter(scope string) *Counter { return &Counter{counter: counter, scope: scope} } +// SetScope sets the scope for the counter. func (c *Counter) SetScope(scope string) { c.scope = scope } diff --git a/pkg/schedule/filter/filters.go b/pkg/schedule/filter/filters.go index 1838f0104f4..6c5dd748d17 100644 --- a/pkg/schedule/filter/filters.go +++ b/pkg/schedule/filter/filters.go @@ -181,14 +181,17 @@ func NewExcludedFilter(scope string, sources, targets map[uint64]struct{}) Filte } } +// Scope returns the scheduler or the checker which the filter acts on. func (f *excludedFilter) Scope() string { return f.scope } +// Type returns the type of the filter. func (*excludedFilter) Type() filterType { return excluded } +// Source filters stores when select them as schedule source. 
func (f *excludedFilter) Source(_ config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { if _, ok := f.sources[store.GetID()]; ok { return statusStoreAlreadyHasPeer @@ -196,6 +199,7 @@ func (f *excludedFilter) Source(_ config.SharedConfigProvider, store *core.Store return statusOK } +// Target filters stores when select them as schedule target. func (f *excludedFilter) Target(_ config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { if _, ok := f.targets[store.GetID()]; ok { return statusStoreAlreadyHasPeer @@ -211,18 +215,22 @@ func NewStorageThresholdFilter(scope string) Filter { return &storageThresholdFilter{scope: scope} } +// Scope returns the scheduler or the checker which the filter acts on. func (f *storageThresholdFilter) Scope() string { return f.scope } +// Type returns the name of the filter. func (*storageThresholdFilter) Type() filterType { return storageThreshold } +// Source filters stores when select them as schedule source. func (*storageThresholdFilter) Source(config.SharedConfigProvider, *core.StoreInfo) *plan.Status { return statusOK } +// Target filters stores when select them as schedule target. func (*storageThresholdFilter) Target(conf config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { if !store.IsLowSpace(conf.GetLowSpaceRatio()) { return statusOK @@ -279,18 +287,22 @@ func newDistinctScoreFilter(scope string, labels []string, stores []*core.StoreI } } +// Scope returns the scheduler or the checker which the filter acts on. func (f *distinctScoreFilter) Scope() string { return f.scope } +// Type returns the type of the filter. func (*distinctScoreFilter) Type() filterType { return distinctScore } +// Source filters stores when select them as schedule source. func (*distinctScoreFilter) Source(config.SharedConfigProvider, *core.StoreInfo) *plan.Status { return statusOK } +// Target filters stores when select them as schedule target. func (f *distinctScoreFilter) Target(_ config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { score := core.DistinctScore(f.labels, f.stores, store) switch f.policy { @@ -630,14 +642,17 @@ func newRuleFitFilter(scope string, cluster *core.BasicCluster, ruleManager *pla } } +// Scope returns the scheduler or the checker which the filter acts on. func (f *ruleFitFilter) Scope() string { return f.scope } +// Type returns the name of the filter. func (*ruleFitFilter) Type() filterType { return ruleFit } +// Source filters stores when select them as schedule source. func (*ruleFitFilter) Source(config.SharedConfigProvider, *core.StoreInfo) *plan.Status { return statusOK } @@ -683,18 +698,22 @@ func newRuleLeaderFitFilter(scope string, cluster *core.BasicCluster, ruleManage } } +// Scope returns the scheduler or the checker which the filter acts on. func (f *ruleLeaderFitFilter) Scope() string { return f.scope } +// Type returns the name of the filter. func (*ruleLeaderFitFilter) Type() filterType { return ruleLeader } +// Source filters stores when select them as schedule source. func (*ruleLeaderFitFilter) Source(config.SharedConfigProvider, *core.StoreInfo) *plan.Status { return statusOK } +// Target filters stores when select them as schedule target. 
func (f *ruleLeaderFitFilter) Target(_ config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { targetStoreID := store.GetID() targetPeer := f.region.GetStorePeer(targetStoreID) @@ -739,18 +758,22 @@ func newRuleWitnessFitFilter(scope string, cluster *core.BasicCluster, ruleManag } } +// Scope returns the scheduler or the checker which the filter acts on. func (f *ruleWitnessFitFilter) Scope() string { return f.scope } +// Type returns the name of the filter. func (*ruleWitnessFitFilter) Type() filterType { return ruleFit } +// Source filters stores when select them as schedule source. func (*ruleWitnessFitFilter) Source(config.SharedConfigProvider, *core.StoreInfo) *plan.Status { return statusOK } +// Target filters stores when select them as schedule target. func (f *ruleWitnessFitFilter) Target(_ config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { targetStoreID := store.GetID() targetPeer := f.region.GetStorePeer(targetStoreID) @@ -811,14 +834,17 @@ func NewEngineFilter(scope string, constraint placement.LabelConstraint) Filter } } +// Scope returns the scheduler or the checker which the filter acts on. func (f *engineFilter) Scope() string { return f.scope } +// Type returns the name of the filter. func (*engineFilter) Type() filterType { return engine } +// Source filters stores when select them as schedule source. func (f *engineFilter) Source(_ config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { if f.constraint.MatchStore(store) { return statusOK @@ -826,6 +852,7 @@ func (f *engineFilter) Source(_ config.SharedConfigProvider, store *core.StoreIn return statusStoreNotMatchRule } +// Target filters stores when select them as schedule target. func (f *engineFilter) Target(_ config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { if f.constraint.MatchStore(store) { return statusOK @@ -854,14 +881,17 @@ func NewSpecialUseFilter(scope string, allowUses ...string) Filter { } } +// Scope returns the scheduler or the checker which the filter acts on. func (f *specialUseFilter) Scope() string { return f.scope } +// Type returns the name of the filter. func (*specialUseFilter) Type() filterType { return specialUse } +// Source filters stores when select them as schedule source. func (f *specialUseFilter) Source(conf config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { if store.IsLowSpace(conf.GetLowSpaceRatio()) || !f.constraint.MatchStore(store) { return statusOK @@ -869,6 +899,7 @@ func (f *specialUseFilter) Source(conf config.SharedConfigProvider, store *core. return statusStoreNotMatchRule } +// Target filters stores when select them as schedule target. func (f *specialUseFilter) Target(_ config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { if !f.constraint.MatchStore(store) { return statusOK @@ -928,18 +959,22 @@ func NewIsolationFilter(scope, isolationLevel string, locationLabels []string, r return isolationFilter } +// Scope returns the scheduler or the checker which the filter acts on. func (f *isolationFilter) Scope() string { return f.scope } +// Type returns the name of the filter. func (*isolationFilter) Type() filterType { return isolation } +// Source filters stores when select them as schedule source. func (*isolationFilter) Source(config.SharedConfigProvider, *core.StoreInfo) *plan.Status { return statusOK } +// Target filters stores when select them as schedule target. 
func (f *isolationFilter) Target(_ config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { // No isolation constraint to fit if len(f.constraintSet) == 0 { diff --git a/pkg/schedule/filter/region_filters.go b/pkg/schedule/filter/region_filters.go index 7cd015412c2..e233ec75973 100644 --- a/pkg/schedule/filter/region_filters.go +++ b/pkg/schedule/filter/region_filters.go @@ -76,6 +76,7 @@ func NewRegionPendingFilter() RegionFilter { return ®ionPendingFilter{} } +// Select implements the RegionFilter interface. func (*regionPendingFilter) Select(region *core.RegionInfo) *plan.Status { if hasPendingPeers(region) { return statusRegionPendingPeer @@ -91,6 +92,7 @@ func NewRegionDownFilter() RegionFilter { return ®ionDownFilter{} } +// Select implements the RegionFilter interface. func (*regionDownFilter) Select(region *core.RegionInfo) *plan.Status { if hasDownPeers(region) { return statusRegionDownPeer diff --git a/pkg/schedule/operator/builder.go b/pkg/schedule/operator/builder.go index 1852f292db0..e28e7de973a 100644 --- a/pkg/schedule/operator/builder.go +++ b/pkg/schedule/operator/builder.go @@ -959,6 +959,7 @@ func (p stepPlan) String() string { p.leaderBeforeAdd, p.add, p.promote, p.leaderBeforeRemove, p.demote, p.remove, p.nonWitness, p.promoteNonWitness, p.witness) } +// IsEmpty checks if the plan is empty. func (p stepPlan) IsEmpty() bool { return p.promote == nil && p.demote == nil && p.add == nil && p.remove == nil && p.nonWitness == nil && p.promoteNonWitness == nil && p.witness == nil } diff --git a/pkg/schedule/operator/operator_queue.go b/pkg/schedule/operator/operator_queue.go index 2233845724e..8643717d5ad 100644 --- a/pkg/schedule/operator/operator_queue.go +++ b/pkg/schedule/operator/operator_queue.go @@ -27,21 +27,26 @@ type operatorWithTime struct { type operatorQueue []*operatorWithTime +// Len implements heap.Interface. func (opn operatorQueue) Len() int { return len(opn) } +// Less implements heap.Interface. func (opn operatorQueue) Less(i, j int) bool { return opn[i].time.Before(opn[j].time) } +// Swap implements heap.Interface. func (opn operatorQueue) Swap(i, j int) { opn[i], opn[j] = opn[j], opn[i] } +// Push implements heap.Interface. func (opn *operatorQueue) Push(x any) { item := x.(*operatorWithTime) *opn = append(*opn, item) } +// Pop implements heap.Interface. 
func (opn *operatorQueue) Pop() any { old := *opn n := len(old) diff --git a/pkg/schedule/schedulers/split_bucket.go b/pkg/schedule/schedulers/split_bucket.go index 4516dfe4433..7df3ee8f552 100644 --- a/pkg/schedule/schedulers/split_bucket.go +++ b/pkg/schedule/schedulers/split_bucket.go @@ -98,12 +98,12 @@ type splitBucketHandler struct { rd *render.Render } -func (h *splitBucketHandler) ListConfig(w http.ResponseWriter, _ *http.Request) { +func (h *splitBucketHandler) listConfig(w http.ResponseWriter, _ *http.Request) { conf := h.conf.Clone() h.rd.JSON(w, http.StatusOK, conf) } -func (h *splitBucketHandler) UpdateConfig(w http.ResponseWriter, r *http.Request) { +func (h *splitBucketHandler) updateConfig(w http.ResponseWriter, r *http.Request) { h.conf.Lock() defer h.conf.Unlock() rd := render.New(render.Options{IndentJSON: true}) @@ -148,8 +148,8 @@ func newSplitBucketHandler(conf *splitBucketSchedulerConfig) http.Handler { rd: render.New(render.Options{IndentJSON: true}), } router := mux.NewRouter() - router.HandleFunc("/list", h.ListConfig).Methods(http.MethodGet) - router.HandleFunc("/config", h.UpdateConfig).Methods(http.MethodPost) + router.HandleFunc("/list", h.listConfig).Methods(http.MethodGet) + router.HandleFunc("/config", h.updateConfig).Methods(http.MethodPost) return router } diff --git a/pkg/schedule/splitter/region_splitter.go b/pkg/schedule/splitter/region_splitter.go index aeab4b70cf0..124ad935655 100644 --- a/pkg/schedule/splitter/region_splitter.go +++ b/pkg/schedule/splitter/region_splitter.go @@ -187,6 +187,7 @@ type splitRegionsHandler struct { oc *operator.Controller } +// SplitRegionByKeys split region by keys. func (h *splitRegionsHandler) SplitRegionByKeys(region *core.RegionInfo, splitKeys [][]byte) error { op, err := operator.CreateSplitRegionOperator("region-splitter", region, 0, pdpb.CheckPolicy_USEKEY, splitKeys) if err != nil { @@ -200,6 +201,7 @@ func (h *splitRegionsHandler) SplitRegionByKeys(region *core.RegionInfo, splitKe return nil } +// ScanRegionsByKeyRange scans regions by key range. func (h *splitRegionsHandler) ScanRegionsByKeyRange(groupKeys *regionGroupKeys, results *splitKeyResults) { splitKeys := groupKeys.keys startKey, endKey := groupKeys.region.GetStartKey(), groupKeys.region.GetEndKey() diff --git a/pkg/schedule/type/type.go b/pkg/schedule/type/type.go index 26e1b6a737a..16910c631fd 100644 --- a/pkg/schedule/type/type.go +++ b/pkg/schedule/type/type.go @@ -14,10 +14,12 @@ package types +// CheckerSchedulerType is the type of checker/scheduler. type CheckerSchedulerType string -func (n CheckerSchedulerType) String() string { - return string(n) +// String implements fmt.Stringer. +func (t CheckerSchedulerType) String() string { + return string(t) } const ( @@ -93,6 +95,7 @@ var SchedulerTypeCompatibleMap = map[CheckerSchedulerType]string{ LabelScheduler: "label", } +// SchedulerStr2Type is a map to convert the scheduler string to the CheckerSchedulerType. var SchedulerStr2Type = map[string]CheckerSchedulerType{ "balance-leader-scheduler": BalanceLeaderScheduler, "balance-region-scheduler": BalanceRegionScheduler, diff --git a/pkg/statistics/collector.go b/pkg/statistics/collector.go index 88986b93d4b..4e3e2fa2c7a 100644 --- a/pkg/statistics/collector.go +++ b/pkg/statistics/collector.go @@ -22,12 +22,12 @@ import ( // storeCollector define the behavior of different engines of stores. type storeCollector interface { - // Engine returns the type of Store. 
- Engine() string - // Filter determines whether the Store needs to be handled by itself. - Filter(info *StoreSummaryInfo, kind constant.ResourceKind) bool - // GetLoads obtains available loads from storeLoads and peerLoadSum according to rwTy and kind. - GetLoads(storeLoads, peerLoadSum []float64, rwTy utils.RWType, kind constant.ResourceKind) (loads []float64) + // engine returns the type of Store. + engine() string + // filter determines whether the Store needs to be handled by itself. + filter(info *StoreSummaryInfo, kind constant.ResourceKind) bool + // getLoads obtains available loads from storeLoads and peerLoadSum according to rwTy and kind. + getLoads(storeLoads, peerLoadSum []float64, rwTy utils.RWType, kind constant.ResourceKind) (loads []float64) } type tikvCollector struct{} @@ -36,11 +36,11 @@ func newTikvCollector() storeCollector { return tikvCollector{} } -func (tikvCollector) Engine() string { +func (tikvCollector) engine() string { return core.EngineTiKV } -func (tikvCollector) Filter(info *StoreSummaryInfo, kind constant.ResourceKind) bool { +func (tikvCollector) filter(info *StoreSummaryInfo, kind constant.ResourceKind) bool { if info.IsTiFlash() { return false } @@ -53,7 +53,7 @@ func (tikvCollector) Filter(info *StoreSummaryInfo, kind constant.ResourceKind) return false } -func (tikvCollector) GetLoads(storeLoads, peerLoadSum []float64, rwTy utils.RWType, kind constant.ResourceKind) (loads []float64) { +func (tikvCollector) getLoads(storeLoads, peerLoadSum []float64, rwTy utils.RWType, kind constant.ResourceKind) (loads []float64) { loads = make([]float64, utils.DimLen) switch rwTy { case utils.Read: @@ -87,11 +87,11 @@ func newTiFlashCollector(isTraceRegionFlow bool) storeCollector { return tiflashCollector{isTraceRegionFlow: isTraceRegionFlow} } -func (tiflashCollector) Engine() string { +func (tiflashCollector) engine() string { return core.EngineTiFlash } -func (tiflashCollector) Filter(info *StoreSummaryInfo, kind constant.ResourceKind) bool { +func (tiflashCollector) filter(info *StoreSummaryInfo, kind constant.ResourceKind) bool { switch kind { case constant.LeaderKind: return false @@ -101,7 +101,7 @@ func (tiflashCollector) Filter(info *StoreSummaryInfo, kind constant.ResourceKin return false } -func (c tiflashCollector) GetLoads(storeLoads, peerLoadSum []float64, rwTy utils.RWType, kind constant.ResourceKind) (loads []float64) { +func (c tiflashCollector) getLoads(storeLoads, peerLoadSum []float64, rwTy utils.RWType, kind constant.ResourceKind) (loads []float64) { loads = make([]float64, utils.DimLen) switch rwTy { case utils.Read: diff --git a/pkg/statistics/hot_peer.go b/pkg/statistics/hot_peer.go index 79757d6e27f..8f92fbff542 100644 --- a/pkg/statistics/hot_peer.go +++ b/pkg/statistics/hot_peer.go @@ -41,7 +41,7 @@ func newDimStat(reportInterval time.Duration) *dimStat { } } -func (d *dimStat) Add(delta float64, interval time.Duration) { +func (d *dimStat) add(delta float64, interval time.Duration) { d.Lock() defer d.Unlock() d.lastIntervalSum += int(interval.Seconds()) @@ -74,13 +74,13 @@ func (d *dimStat) clearLastAverage() { d.lastDelta = 0 } -func (d *dimStat) Get() float64 { +func (d *dimStat) get() float64 { d.RLock() defer d.RUnlock() return d.rolling.Get() } -func (d *dimStat) Clone() *dimStat { +func (d *dimStat) clone() *dimStat { d.RLock() defer d.RUnlock() return &dimStat{ @@ -162,7 +162,7 @@ func (stat *HotPeerStat) GetActionType() utils.ActionType { // GetLoad returns denoising load if possible. 
func (stat *HotPeerStat) GetLoad(dim int) float64 { if stat.rollingLoads != nil { - return math.Round(stat.rollingLoads[dim].Get()) + return math.Round(stat.rollingLoads[dim].get()) } return math.Round(stat.Loads[dim]) } @@ -172,7 +172,7 @@ func (stat *HotPeerStat) GetLoads() []float64 { if stat.rollingLoads != nil { ret := make([]float64, len(stat.rollingLoads)) for dim := range ret { - ret[dim] = math.Round(stat.rollingLoads[dim].Get()) + ret[dim] = math.Round(stat.rollingLoads[dim].get()) } return ret } diff --git a/pkg/statistics/hot_peer_cache.go b/pkg/statistics/hot_peer_cache.go index 4db0c304bb9..8d1f64ca540 100644 --- a/pkg/statistics/hot_peer_cache.go +++ b/pkg/statistics/hot_peer_cache.go @@ -102,6 +102,7 @@ func (f *HotPeerCache) RegionStats(minHotDegree int) map[uint64][]*HotPeerStat { return res } +// UpdateStat updates the stat cache. func (f *HotPeerCache) UpdateStat(item *HotPeerStat) { switch item.actionType { case utils.Remove: @@ -439,7 +440,7 @@ func (f *HotPeerCache) updateHotPeerStat(region *core.RegionInfo, newItem, oldIt if source == utils.Inherit { for _, dim := range oldItem.rollingLoads { - newItem.rollingLoads = append(newItem.rollingLoads, dim.Clone()) + newItem.rollingLoads = append(newItem.rollingLoads, dim.clone()) } newItem.allowInherited = false } else { @@ -462,7 +463,7 @@ func (f *HotPeerCache) updateHotPeerStat(region *core.RegionInfo, newItem, oldIt } for i, k := range regionStats { - newItem.rollingLoads[i].Add(deltaLoads[k], interval) + newItem.rollingLoads[i].add(deltaLoads[k], interval) } isFull := newItem.rollingLoads[0].isFull(f.interval()) // The intervals of dims are the same, so it is only necessary to determine whether any of them @@ -505,7 +506,7 @@ func (f *HotPeerCache) updateNewHotPeerStat(newItem *HotPeerStat, deltaLoads []f newItem.rollingLoads = make([]*dimStat, len(regionStats)) for i, k := range regionStats { ds := newDimStat(f.interval()) - ds.Add(deltaLoads[k], interval) + ds.add(deltaLoads[k], interval) if ds.isFull(f.interval()) { ds.clearLastAverage() } diff --git a/pkg/statistics/store_collection.go b/pkg/statistics/store_collection.go index 6d5df0bda62..f55c23b27b7 100644 --- a/pkg/statistics/store_collection.go +++ b/pkg/statistics/store_collection.go @@ -61,7 +61,7 @@ func newStoreStatistics(opt config.ConfProvider) *storeStatistics { } } -func (s *storeStatistics) Observe(store *core.StoreInfo) { +func (s *storeStatistics) observe(store *core.StoreInfo) { for _, k := range s.opt.GetLocationLabels() { v := store.GetLabelValue(k) if v == "" { @@ -147,6 +147,7 @@ func (s *storeStatistics) Observe(store *core.StoreInfo) { } } +// ObserveHotStat records the hot region metrics for the store. func ObserveHotStat(store *core.StoreInfo, stats *StoresStats) { // Store flows. storeAddress := store.GetAddress() @@ -178,7 +179,7 @@ func ObserveHotStat(store *core.StoreInfo, stats *StoresStats) { storeStatusGauge.WithLabelValues(storeAddress, id, "store_regions_write_rate_keys_instant").Set(storeFlowStats.GetInstantLoad(utils.StoreRegionsWriteKeys)) } -func (s *storeStatistics) Collect() { +func (s *storeStatistics) collect() { placementStatusGauge.Reset() metrics := make(map[string]float64) @@ -307,12 +308,14 @@ func NewStoreStatisticsMap(opt config.ConfProvider) *storeStatisticsMap { } } +// Observe observes the store. func (m *storeStatisticsMap) Observe(store *core.StoreInfo) { - m.stats.Observe(store) + m.stats.observe(store) } +// Collect collects the metrics. 
func (m *storeStatisticsMap) Collect() { - m.stats.Collect() + m.stats.collect() } // Reset resets the metrics. diff --git a/pkg/statistics/store_hot_peers_infos.go b/pkg/statistics/store_hot_peers_infos.go index f7873bdd744..f64c7743d16 100644 --- a/pkg/statistics/store_hot_peers_infos.go +++ b/pkg/statistics/store_hot_peers_infos.go @@ -158,7 +158,7 @@ func summaryStoresLoadByEngine( store := info.StoreInfo id := store.GetID() storeLoads, ok := storesLoads[id] - if !ok || !collector.Filter(info, kind) { + if !ok || !collector.filter(info, kind) { continue } @@ -172,7 +172,7 @@ func summaryStoresLoadByEngine( } hotPeers = append(hotPeers, peer.Clone()) } - currentLoads := collector.GetLoads(storeLoads, peerLoadSum, rwTy, kind) + currentLoads := collector.getLoads(storeLoads, peerLoadSum, rwTy, kind) var historyLoads [][]float64 if storesHistoryLoads != nil { @@ -240,7 +240,7 @@ func summaryStoresLoadByEngine( { // Metric for debug. - engine := collector.Engine() + engine := collector.engine() ty := "exp-byte-rate-" + rwTy.String() + "-" + kind.String() hotPeerSummary.WithLabelValues(ty, engine).Set(expectLoads[utils.ByteDim]) ty = "exp-key-rate-" + rwTy.String() + "-" + kind.String() diff --git a/pkg/statistics/utils/topn.go b/pkg/statistics/utils/topn.go index 7ab6c6eaf3e..cb97251edd9 100644 --- a/pkg/statistics/utils/topn.go +++ b/pkg/statistics/utils/topn.go @@ -58,35 +58,35 @@ func NewTopN(k, n int, ttl time.Duration) *TopN { func (tn *TopN) Len() int { tn.rw.RLock() defer tn.rw.RUnlock() - return tn.ttlLst.Len() + return tn.ttlLst.len() } // GetTopNMin returns the min item in top N of the `k`th dimension. func (tn *TopN) GetTopNMin(k int) TopNItem { tn.rw.RLock() defer tn.rw.RUnlock() - return tn.topns[k].GetTopNMin() + return tn.topns[k].getTopNMin() } // GetAllTopN returns the top N items of the `k`th dimension. func (tn *TopN) GetAllTopN(k int) []TopNItem { tn.rw.RLock() defer tn.rw.RUnlock() - return tn.topns[k].GetAllTopN() + return tn.topns[k].getAllTopN() } // GetAll returns all items. func (tn *TopN) GetAll() []TopNItem { tn.rw.RLock() defer tn.rw.RUnlock() - return tn.topns[0].GetAll() + return tn.topns[0].getAll() } // Get returns the item with given id, nil if there is no such item. func (tn *TopN) Get(id uint64) TopNItem { tn.rw.RLock() defer tn.rw.RUnlock() - return tn.topns[0].Get(id) + return tn.topns[0].get(id) } // Put inserts item or updates the old item if it exists. 
@@ -94,9 +94,9 @@ func (tn *TopN) Put(item TopNItem) (isUpdate bool) { tn.rw.Lock() defer tn.rw.Unlock() for _, stn := range tn.topns { - isUpdate = stn.Put(item) + isUpdate = stn.put(item) } - tn.ttlLst.Put(item.ID()) + tn.ttlLst.put(item.ID()) tn.maintain() return } @@ -113,17 +113,17 @@ func (tn *TopN) Remove(id uint64) (item TopNItem) { tn.rw.Lock() defer tn.rw.Unlock() for _, stn := range tn.topns { - item = stn.Remove(id) + item = stn.remove(id) } - _ = tn.ttlLst.Remove(id) + _ = tn.ttlLst.remove(id) tn.maintain() return } func (tn *TopN) maintain() { - for _, id := range tn.ttlLst.TakeExpired() { + for _, id := range tn.ttlLst.takeExpired() { for _, stn := range tn.topns { - stn.Remove(id) + stn.remove(id) } } } @@ -144,31 +144,27 @@ func newSingleTopN(k, n int) *singleTopN { } } -func (stn *singleTopN) Len() int { - return stn.topn.Len() + stn.rest.Len() -} - -func (stn *singleTopN) GetTopNMin() TopNItem { +func (stn *singleTopN) getTopNMin() TopNItem { return stn.topn.Top() } -func (stn *singleTopN) GetAllTopN() []TopNItem { +func (stn *singleTopN) getAllTopN() []TopNItem { return stn.topn.GetAll() } -func (stn *singleTopN) GetAll() []TopNItem { +func (stn *singleTopN) getAll() []TopNItem { topn := stn.topn.GetAll() return append(topn, stn.rest.GetAll()...) } -func (stn *singleTopN) Get(id uint64) TopNItem { +func (stn *singleTopN) get(id uint64) TopNItem { if item := stn.topn.Get(id); item != nil { return item } return stn.rest.Get(id) } -func (stn *singleTopN) Put(item TopNItem) (isUpdate bool) { +func (stn *singleTopN) put(item TopNItem) (isUpdate bool) { if stn.topn.Get(item.ID()) != nil { isUpdate = true stn.topn.Put(item) @@ -179,7 +175,7 @@ func (stn *singleTopN) Put(item TopNItem) (isUpdate bool) { return } -func (stn *singleTopN) Remove(id uint64) TopNItem { +func (stn *singleTopN) remove(id uint64) TopNItem { item := stn.topn.Remove(id) if item == nil { item = stn.rest.Remove(id) @@ -340,11 +336,11 @@ func newTTLList(ttl time.Duration) *ttlList { } } -func (tl *ttlList) Len() int { +func (tl *ttlList) len() int { return tl.lst.Len() } -func (tl *ttlList) TakeExpired() []uint64 { +func (tl *ttlList) takeExpired() []uint64 { expired := []uint64{} now := time.Now() for ele := tl.lst.Front(); ele != nil; ele = tl.lst.Front() { @@ -359,7 +355,7 @@ func (tl *ttlList) TakeExpired() []uint64 { return expired } -func (tl *ttlList) Put(id uint64) (isUpdate bool) { +func (tl *ttlList) put(id uint64) (isUpdate bool) { item := ttlItem{id: id} if ele, ok := tl.index[id]; ok { isUpdate = true @@ -370,7 +366,7 @@ func (tl *ttlList) Put(id uint64) (isUpdate bool) { return } -func (tl *ttlList) Remove(id uint64) (removed bool) { +func (tl *ttlList) remove(id uint64) (removed bool) { if ele, ok := tl.index[id]; ok { _ = tl.lst.Remove(ele) delete(tl.index, id) diff --git a/pkg/storage/kv/etcd_kv.go b/pkg/storage/kv/etcd_kv.go index 767aeff77a6..e2eb8c979eb 100644 --- a/pkg/storage/kv/etcd_kv.go +++ b/pkg/storage/kv/etcd_kv.go @@ -55,6 +55,7 @@ func NewEtcdKVBase(client *clientv3.Client, rootPath string) *etcdKVBase { } } +// NewEtcdKV creates a new etcd kv. func (kv *etcdKVBase) Load(key string) (string, error) { key = path.Join(kv.rootPath, key) @@ -70,6 +71,7 @@ func (kv *etcdKVBase) Load(key string) (string, error) { return string(resp.Kvs[0].Value), nil } +// LoadRange loads a range of keys [key, endKey) from etcd. 
func (kv *etcdKVBase) LoadRange(key, endKey string, limit int) ([]string, []string, error) { // Note: reason to use `strings.Join` instead of `path.Join` is that the latter will // removes suffix '/' of the joined string. @@ -99,6 +101,7 @@ func (kv *etcdKVBase) LoadRange(key, endKey string, limit int) ([]string, []stri return keys, values, nil } +// Save puts a key-value pair to etcd. func (kv *etcdKVBase) Save(key, value string) error { failpoint.Inject("etcdSaveFailed", func() { failpoint.Return(errors.New("save failed")) @@ -117,6 +120,7 @@ func (kv *etcdKVBase) Save(key, value string) error { return nil } +// Remove removes the key from etcd. func (kv *etcdKVBase) Remove(key string) error { key = path.Join(kv.rootPath, key) diff --git a/pkg/storage/kv/mem_kv.go b/pkg/storage/kv/mem_kv.go index 91d13c04e61..b97a3d6cfa1 100644 --- a/pkg/storage/kv/mem_kv.go +++ b/pkg/storage/kv/mem_kv.go @@ -41,10 +41,12 @@ type memoryKVItem struct { key, value string } +// Less compares two memoryKVItem. func (s *memoryKVItem) Less(than *memoryKVItem) bool { return s.key < than.key } +// Load loads the value for the key. func (kv *memoryKV) Load(key string) (string, error) { kv.RLock() defer kv.RUnlock() @@ -55,6 +57,7 @@ func (kv *memoryKV) Load(key string) (string, error) { return item.value, nil } +// LoadRange loads the keys in the range of [key, endKey). func (kv *memoryKV) LoadRange(key, endKey string, limit int) ([]string, []string, error) { failpoint.Inject("withRangeLimit", func(val failpoint.Value) { rangeLimit, ok := val.(int) @@ -77,6 +80,7 @@ func (kv *memoryKV) LoadRange(key, endKey string, limit int) ([]string, []string return keys, values, nil } +// Save saves the key-value pair. func (kv *memoryKV) Save(key, value string) error { kv.Lock() defer kv.Unlock() @@ -84,6 +88,7 @@ func (kv *memoryKV) Save(key, value string) error { return nil } +// Remove removes the key. 
func (kv *memoryKV) Remove(key string) error { kv.Lock() defer kv.Unlock() diff --git a/pkg/syncer/client.go b/pkg/syncer/client.go index 00fa8dc389b..a94f5c41f3f 100644 --- a/pkg/syncer/client.go +++ b/pkg/syncer/client.go @@ -69,7 +69,7 @@ func (s *RegionSyncer) syncRegion(ctx context.Context, conn *grpc.ClientConn) (C err = syncStream.Send(&pdpb.SyncRegionRequest{ Header: &pdpb.RequestHeader{ClusterId: s.server.ClusterID()}, Member: s.server.GetMemberInfo(), - StartIndex: s.history.GetNextIndex(), + StartIndex: s.history.getNextIndex(), }) if err != nil { return nil, err @@ -154,7 +154,7 @@ func (s *RegionSyncer) StartSyncWithLeader(addr string) { time.Sleep(time.Second) continue } - log.Info("server starts to synchronize with leader", zap.String("server", s.server.Name()), zap.String("leader", s.server.GetLeader().GetName()), zap.Uint64("request-index", s.history.GetNextIndex())) + log.Info("server starts to synchronize with leader", zap.String("server", s.server.Name()), zap.String("leader", s.server.GetLeader().GetName()), zap.Uint64("request-index", s.history.getNextIndex())) for { resp, err := stream.Recv() if err != nil { @@ -166,14 +166,14 @@ func (s *RegionSyncer) StartSyncWithLeader(addr string) { time.Sleep(time.Second) break } - if s.history.GetNextIndex() != resp.GetStartIndex() { + if s.history.getNextIndex() != resp.GetStartIndex() { log.Warn("server sync index not match the leader", zap.String("server", s.server.Name()), - zap.Uint64("own", s.history.GetNextIndex()), + zap.Uint64("own", s.history.getNextIndex()), zap.Uint64("leader", resp.GetStartIndex()), zap.Int("records-length", len(resp.GetRegions()))) // reset index - s.history.ResetWithIndex(resp.GetStartIndex()) + s.history.resetWithIndex(resp.GetStartIndex()) } stats := resp.GetRegionStats() regions := resp.GetRegions() @@ -224,7 +224,7 @@ func (s *RegionSyncer) StartSyncWithLeader(addr string) { err = regionStorage.SaveRegion(r) } if err == nil { - s.history.Record(region) + s.history.record(region) } for _, old := range overlaps { _ = regionStorage.DeleteRegion(old.GetMeta()) diff --git a/pkg/syncer/history_buffer.go b/pkg/syncer/history_buffer.go index 08fe85cc8c5..7ff6f202ad3 100644 --- a/pkg/syncer/history_buffer.go +++ b/pkg/syncer/history_buffer.go @@ -84,7 +84,7 @@ func (h *historyBuffer) firstIndex() uint64 { return h.index - uint64(h.len()) } -func (h *historyBuffer) Record(r *core.RegionInfo) { +func (h *historyBuffer) record(r *core.RegionInfo) { h.Lock() defer h.Unlock() syncIndexGauge.Set(float64(h.index)) @@ -101,7 +101,7 @@ func (h *historyBuffer) Record(r *core.RegionInfo) { } } -func (h *historyBuffer) RecordsFrom(index uint64) []*core.RegionInfo { +func (h *historyBuffer) recordsFrom(index uint64) []*core.RegionInfo { h.RLock() defer h.RUnlock() var pos int @@ -117,7 +117,7 @@ func (h *historyBuffer) RecordsFrom(index uint64) []*core.RegionInfo { return records } -func (h *historyBuffer) ResetWithIndex(index uint64) { +func (h *historyBuffer) resetWithIndex(index uint64) { h.Lock() defer h.Unlock() h.index = index @@ -126,7 +126,7 @@ func (h *historyBuffer) ResetWithIndex(index uint64) { h.flushCount = defaultFlushCount } -func (h *historyBuffer) GetNextIndex() uint64 { +func (h *historyBuffer) getNextIndex() uint64 { h.RLock() defer h.RUnlock() return h.index diff --git a/pkg/syncer/history_buffer_test.go b/pkg/syncer/history_buffer_test.go index 4bca5b7f603..70a1caf13dc 100644 --- a/pkg/syncer/history_buffer_test.go +++ b/pkg/syncer/history_buffer_test.go @@ -34,7 +34,7 @@ func 
TestBufferSize(t *testing.T) { h := newHistoryBuffer(1, kv.NewMemoryKV()) re.Equal(0, h.len()) for _, r := range regions { - h.Record(r) + h.record(r) } re.Equal(1, h.len()) re.Equal(regions[h.nextIndex()-1], h.get(100)) @@ -43,7 +43,7 @@ func TestBufferSize(t *testing.T) { // size equals 2 h = newHistoryBuffer(2, kv.NewMemoryKV()) for _, r := range regions { - h.Record(r) + h.record(r) } re.Equal(2, h.len()) re.Equal(regions[h.nextIndex()-1], h.get(100)) @@ -54,7 +54,7 @@ func TestBufferSize(t *testing.T) { kvMem := kv.NewMemoryKV() h1 := newHistoryBuffer(100, kvMem) for i := 0; i < 6; i++ { - h1.Record(regions[i]) + h1.record(regions[i]) } re.Equal(6, h1.len()) re.Equal(uint64(6), h1.nextIndex()) @@ -68,7 +68,7 @@ func TestBufferSize(t *testing.T) { re.Equal(0, h2.len()) for _, r := range regions { index := h2.nextIndex() - h2.Record(r) + h2.record(r) re.Equal(r, h2.get(index)) } @@ -79,9 +79,9 @@ func TestBufferSize(t *testing.T) { // flush in index 106 re.Equal("106", s) - histories := h2.RecordsFrom(uint64(1)) + histories := h2.recordsFrom(uint64(1)) re.Empty(histories) - histories = h2.RecordsFrom(h2.firstIndex()) + histories = h2.recordsFrom(h2.firstIndex()) re.Len(histories, 100) re.Equal(uint64(7), h2.firstIndex()) re.Equal(regions[1:], histories) diff --git a/pkg/syncer/server.go b/pkg/syncer/server.go index 132b06aec69..2cdc01053f6 100644 --- a/pkg/syncer/server.go +++ b/pkg/syncer/server.go @@ -136,8 +136,8 @@ func (s *RegionSyncer) RunServer(ctx context.Context, regionNotifier <-chan *cor } buckets = append(buckets, bucket) leaders = append(leaders, first.GetLeader()) - startIndex := s.history.GetNextIndex() - s.history.Record(first) + startIndex := s.history.getNextIndex() + s.history.record(first) pending := len(regionNotifier) for i := 0; i < pending && i < maxSyncRegionBatchSize; i++ { region := <-regionNotifier @@ -150,7 +150,7 @@ func (s *RegionSyncer) RunServer(ctx context.Context, regionNotifier <-chan *cor } buckets = append(buckets, bucket) leaders = append(leaders, region.GetLeader()) - s.history.Record(region) + s.history.record(region) } regions := &pdpb.SyncRegionResponse{ Header: &pdpb.ResponseHeader{ClusterId: s.server.ClusterID()}, @@ -164,7 +164,7 @@ func (s *RegionSyncer) RunServer(ctx context.Context, regionNotifier <-chan *cor case <-ticker.C: alive := &pdpb.SyncRegionResponse{ Header: &pdpb.ResponseHeader{ClusterId: s.server.ClusterID()}, - StartIndex: s.history.GetNextIndex(), + StartIndex: s.history.getNextIndex(), } s.broadcast(alive) } @@ -223,9 +223,9 @@ func (s *RegionSyncer) Sync(ctx context.Context, stream pdpb.PD_SyncRegionsServe func (s *RegionSyncer) syncHistoryRegion(ctx context.Context, request *pdpb.SyncRegionRequest, stream pdpb.PD_SyncRegionsServer) error { startIndex := request.GetStartIndex() name := request.GetMember().GetName() - records := s.history.RecordsFrom(startIndex) + records := s.history.recordsFrom(startIndex) if len(records) == 0 { - if s.history.GetNextIndex() == startIndex { + if s.history.getNextIndex() == startIndex { log.Info("requested server has already in sync with server", zap.String("requested-server", name), zap.String("server", s.server.Name()), zap.Uint64("last-index", startIndex)) // still send a response to follower to show the history region sync. 
@@ -306,7 +306,7 @@ func (s *RegionSyncer) syncHistoryRegion(ctx context.Context, request *pdpb.Sync log.Info("sync the history regions with server", zap.String("server", name), zap.Uint64("from-index", startIndex), - zap.Uint64("last-index", s.history.GetNextIndex()), + zap.Uint64("last-index", s.history.getNextIndex()), zap.Int("records-length", len(records))) regions := make([]*metapb.Region, len(records)) stats := make([]*pdpb.RegionStat, len(records)) diff --git a/pkg/window/counter.go b/pkg/window/counter.go index 8eaf164b7c0..84325cdc14b 100644 --- a/pkg/window/counter.go +++ b/pkg/window/counter.go @@ -76,34 +76,44 @@ func NewRollingCounter(opts RollingCounterOpts) RollingCounter { } } +// Add adds the given value to the counter. func (r *rollingCounter) Add(val int64) { r.policy.Add(float64(val)) } +// Reduce applies the reduction function to all buckets within the window. func (r *rollingCounter) Reduce(f func(Iterator) float64) float64 { return r.policy.Reduce(f) } +// Avg computes average value within the window. func (r *rollingCounter) Avg() float64 { return r.policy.Reduce(Avg) } +// Min finds the min value within the window. func (r *rollingCounter) Min() float64 { return r.policy.Reduce(Min) } +// Max finds the max value within the window. func (r *rollingCounter) Max() float64 { return r.policy.Reduce(Max) } +// Sum computes sum value within the window. func (r *rollingCounter) Sum() float64 { return r.policy.Reduce(Sum) } +// Value gets the current value. func (r *rollingCounter) Value() int64 { return int64(r.Sum()) } +// Timespan returns passed bucket number since lastAppendTime, +// if it is one bucket duration earlier than the last recorded +// time, it will return the size. func (r *rollingCounter) Timespan() int { r.policy.mu.RLock() defer r.policy.mu.RUnlock() diff --git a/plugin/scheduler_example/evict_leader.go b/plugin/scheduler_example/evict_leader.go index a37874a8461..9ad797e0ae4 100644 --- a/plugin/scheduler_example/evict_leader.go +++ b/plugin/scheduler_example/evict_leader.go @@ -96,6 +96,7 @@ type evictLeaderSchedulerConfig struct { cluster *core.BasicCluster } +// BuildWithArgs builds the config with the args. func (conf *evictLeaderSchedulerConfig) BuildWithArgs(args []string) error { if len(args) != 1 { return errors.New("should specify the store-id") @@ -115,6 +116,7 @@ func (conf *evictLeaderSchedulerConfig) BuildWithArgs(args []string) error { return nil } +// Clone clones the config. func (conf *evictLeaderSchedulerConfig) Clone() *evictLeaderSchedulerConfig { conf.mu.RLock() defer conf.mu.RUnlock() @@ -123,6 +125,7 @@ func (conf *evictLeaderSchedulerConfig) Clone() *evictLeaderSchedulerConfig { } } +// Persist saves the config. func (conf *evictLeaderSchedulerConfig) Persist() error { name := conf.getScheduleName() conf.mu.RLock() @@ -166,24 +169,29 @@ func newEvictLeaderScheduler(opController *operator.Controller, conf *evictLeade } } +// ServeHTTP implements the http.Handler interface. func (s *evictLeaderScheduler) ServeHTTP(w http.ResponseWriter, r *http.Request) { s.handler.ServeHTTP(w, r) } +// GetName returns the scheduler name. func (*evictLeaderScheduler) GetName() string { return EvictLeaderName } +// GetType returns the scheduler type. func (*evictLeaderScheduler) GetType() string { return EvictLeaderType } +// EncodeConfig serializes the config. 
func (s *evictLeaderScheduler) EncodeConfig() ([]byte, error) { s.conf.mu.RLock() defer s.conf.mu.RUnlock() return schedulers.EncodeConfig(s.conf) } +// PrepareConfig ensures the scheduler config is valid. func (s *evictLeaderScheduler) PrepareConfig(cluster sche.SchedulerCluster) error { s.conf.mu.RLock() defer s.conf.mu.RUnlock() @@ -196,6 +204,7 @@ func (s *evictLeaderScheduler) PrepareConfig(cluster sche.SchedulerCluster) erro return res } +// CleanConfig is used to clean the scheduler config. func (s *evictLeaderScheduler) CleanConfig(cluster sche.SchedulerCluster) { s.conf.mu.RLock() defer s.conf.mu.RUnlock() @@ -204,6 +213,7 @@ func (s *evictLeaderScheduler) CleanConfig(cluster sche.SchedulerCluster) { } } +// IsScheduleAllowed checks if the scheduler is allowed to schedule. func (s *evictLeaderScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetSchedulerConfig().GetLeaderScheduleLimit() if !allowed { @@ -212,6 +222,7 @@ func (s *evictLeaderScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) return allowed } +// Schedule schedules the evict leader operator. func (s *evictLeaderScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { ops := make([]*operator.Operator, 0, len(s.conf.StoreIDWitRanges)) s.conf.mu.RLock() @@ -246,6 +257,7 @@ type evictLeaderHandler struct { config *evictLeaderSchedulerConfig } +// UpdateConfig updates the config. func (handler *evictLeaderHandler) UpdateConfig(w http.ResponseWriter, r *http.Request) { var input map[string]any if err := apiutil.ReadJSONRespondError(handler.rd, w, r.Body, &input); err != nil { @@ -286,11 +298,13 @@ func (handler *evictLeaderHandler) UpdateConfig(w http.ResponseWriter, r *http.R handler.rd.JSON(w, http.StatusOK, nil) } +// ListConfig lists the config. func (handler *evictLeaderHandler) ListConfig(w http.ResponseWriter, _ *http.Request) { conf := handler.config.Clone() handler.rd.JSON(w, http.StatusOK, conf) } +// DeleteConfig deletes the config. func (handler *evictLeaderHandler) DeleteConfig(w http.ResponseWriter, r *http.Request) { idStr := mux.Vars(r)["store_id"] id, err := strconv.ParseUint(idStr, 10, 64) From 45dac53d65c3903618e6a2a741fbd0bec20e3b44 Mon Sep 17 00:00:00 2001 From: Jack Lyu <63168620+JackL9u@users.noreply.github.com> Date: Wed, 31 Jul 2024 16:22:21 +0800 Subject: [PATCH 3/9] server: fix inappropriate log level (#8462) ref tikv/pd#8453 changed log.Error to log.Warn, since it's a warning Signed-off-by: Boyang Lyu Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com> --- pkg/mcs/resourcemanager/server/manager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/mcs/resourcemanager/server/manager.go b/pkg/mcs/resourcemanager/server/manager.go index 13e46ea0bba..8588c22b9ae 100644 --- a/pkg/mcs/resourcemanager/server/manager.go +++ b/pkg/mcs/resourcemanager/server/manager.go @@ -121,7 +121,7 @@ func (m *Manager) Init(ctx context.Context) error { return err } if err = json.Unmarshal([]byte(v), &m.controllerConfig); err != nil { - log.Error("un-marshall controller config failed, fallback to default", zap.Error(err), zap.String("v", v)) + log.Warn("un-marshall controller config failed, fallback to default", zap.Error(err), zap.String("v", v)) } // re-save the config to make sure the config has been persisted. 
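The log-level change above reflects that a failed unmarshal of the persisted controller config is tolerated: the manager keeps the default configuration and re-saves it, so the event is a warning rather than an error. A minimal, self-contained sketch of that warn-and-fall-back pattern (hypothetical struct and field names, standard-library logging only; not the actual pkg/mcs/resourcemanager code):

package main

import (
	"encoding/json"
	"log"
)

// controllerConfig is a hypothetical stand-in for the resource manager's
// controller configuration; the real struct has different fields.
type controllerConfig struct {
	RequestUnit float64 `json:"request-unit"`
}

// loadControllerConfig decodes a persisted config value. A decode failure is
// tolerated: the caller keeps the defaults, so the event is logged as a
// warning rather than an error.
func loadControllerConfig(raw string) controllerConfig {
	cfg := controllerConfig{RequestUnit: 1.0} // defaults
	if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
		log.Printf("WARN: unmarshal controller config failed, falling back to default: %v (value=%q)", err, raw)
	}
	return cfg
}

func main() {
	log.Printf("%+v", loadControllerConfig(`{"request-unit": 2.5}`))
	log.Printf("%+v", loadControllerConfig(`not-json`)) // warns and keeps defaults
}

With this pattern a corrupt or stale persisted value degrades gracefully to defaults instead of surfacing as an error that suggests operator action, which is why the patch downgrades the log call.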
From 3ce31ef4b3e81cbf77271cc29384454f8b3d0a8a Mon Sep 17 00:00:00 2001 From: lhy1024 Date: Thu, 1 Aug 2024 11:56:51 +0800 Subject: [PATCH 4/9] schedule: move the logic of patrol region from coordinator to checkers (#8428) ref tikv/pd#7963 Signed-off-by: lhy1024 Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com> --- pkg/cache/cache_test.go | 10 +- pkg/cache/priority_queue.go | 32 +-- pkg/schedule/checker/checker_controller.go | 214 +++++++++++++++++- pkg/schedule/checker/metrics.go | 17 ++ pkg/schedule/checker/priority_inspector.go | 30 ++- .../checker/priority_inspector_test.go | 8 +- pkg/schedule/checker/rule_checker.go | 28 +-- pkg/schedule/coordinator.go | 188 +-------------- pkg/schedule/core/cluster_informer.go | 4 +- pkg/schedule/metrics.go | 18 -- server/cluster/cluster_test.go | 4 +- server/join/join.go | 2 +- tests/server/api/api_test.go | 2 +- tests/server/join/join_fail/join_fail_test.go | 4 +- 14 files changed, 305 insertions(+), 256 deletions(-) diff --git a/pkg/cache/cache_test.go b/pkg/cache/cache_test.go index 43e97dfa2b0..75f26cfed33 100644 --- a/pkg/cache/cache_test.go +++ b/pkg/cache/cache_test.go @@ -371,23 +371,23 @@ func TestPriorityQueue(t *testing.T) { pq.Remove(uint64(1)) re.Nil(pq.Get(1)) re.Equal(2, pq.Len()) - entry := pq.Peek() + entry := pq.peek() re.Equal(2, entry.Priority) re.Equal(testData[2], entry.Value) // case3 update 3's priority to highest pq.Put(-1, testData[3]) - entry = pq.Peek() + entry = pq.peek() re.Equal(-1, entry.Priority) re.Equal(testData[3], entry.Value) pq.Remove(entry.Value.ID()) - re.Equal(testData[2], pq.Peek().Value) + re.Equal(testData[2], pq.peek().Value) re.Equal(1, pq.Len()) // case4 remove all element pq.Remove(uint64(2)) re.Equal(0, pq.Len()) re.Empty(pq.items) - re.Nil(pq.Peek()) - re.Nil(pq.Tail()) + re.Nil(pq.peek()) + re.Nil(pq.tail()) } diff --git a/pkg/cache/priority_queue.go b/pkg/cache/priority_queue.go index a7ac79090b0..00ccf42c927 100644 --- a/pkg/cache/priority_queue.go +++ b/pkg/cache/priority_queue.go @@ -18,17 +18,17 @@ import ( "github.com/tikv/pd/pkg/btree" ) -// defaultDegree default btree degree, the depth is h Date: Thu, 1 Aug 2024 20:19:19 +0800 Subject: [PATCH 5/9] *: replace part of path with filepath (#8455) ref tikv/pd#8475 Signed-off-by: Ryan Leung Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com> --- pkg/encryption/master_key_test.go | 13 +++----- pkg/storage/hot_region_storage_test.go | 3 +- pkg/utils/grpcutil/grpcutil_test.go | 16 ++++----- pkg/utils/testutil/testutil.go | 2 +- server/config/config_test.go | 4 +-- server/join/join.go | 6 ++-- server/server_test.go | 2 +- server/testutil.go | 2 +- tests/config.go | 2 +- tests/integrations/client/client_tls_test.go | 24 +++++++------- tests/server/join/join_test.go | 6 ++-- tools/pd-backup/pdbackup/backup_test.go | 3 +- tools/pd-ctl/pdctl/command/global_test.go | 11 ++++--- tools/pd-ctl/tests/config/config_test.go | 6 ++-- tools/pd-ctl/tests/health/health_test.go | 10 +++--- tools/pd-ctl/tests/store/store_test.go | 10 +++--- tools/pd-simulator/simulator/config/config.go | 2 +- tools/pd-ut/coverProfile.go | 4 +-- tools/pd-ut/ut.go | 33 +++++++++---------- 19 files changed, 79 insertions(+), 80 deletions(-) diff --git a/pkg/encryption/master_key_test.go b/pkg/encryption/master_key_test.go index 31962e9e99d..d6d7845284a 100644 --- a/pkg/encryption/master_key_test.go +++ b/pkg/encryption/master_key_test.go @@ -17,6 +17,7 @@ package encryption import ( "encoding/hex" "os" + "path/filepath" 
"testing" "github.com/pingcap/kvproto/pkg/encryptionpb" @@ -94,8 +95,7 @@ func TestNewFileMasterKeyMissingPath(t *testing.T) { func TestNewFileMasterKeyMissingFile(t *testing.T) { re := require.New(t) - dir := t.TempDir() - path := dir + "/key" + path := filepath.Join(t.TempDir(), "key") config := &encryptionpb.MasterKey{ Backend: &encryptionpb.MasterKey_File{ File: &encryptionpb.MasterKeyFile{ @@ -109,8 +109,7 @@ func TestNewFileMasterKeyMissingFile(t *testing.T) { func TestNewFileMasterKeyNotHexString(t *testing.T) { re := require.New(t) - dir := t.TempDir() - path := dir + "/key" + path := filepath.Join(t.TempDir(), "key") os.WriteFile(path, []byte("not-a-hex-string"), 0600) config := &encryptionpb.MasterKey{ Backend: &encryptionpb.MasterKey_File{ @@ -125,8 +124,7 @@ func TestNewFileMasterKeyNotHexString(t *testing.T) { func TestNewFileMasterKeyLengthMismatch(t *testing.T) { re := require.New(t) - dir := t.TempDir() - path := dir + "/key" + path := filepath.Join(t.TempDir(), "key") os.WriteFile(path, []byte("2f07ec61e5a50284f47f2b402a962ec6"), 0600) config := &encryptionpb.MasterKey{ Backend: &encryptionpb.MasterKey_File{ @@ -142,8 +140,7 @@ func TestNewFileMasterKeyLengthMismatch(t *testing.T) { func TestNewFileMasterKey(t *testing.T) { re := require.New(t) key := "2f07ec61e5a50284f47f2b402a962ec672e500b26cb3aa568bb1531300c74806" // #nosec G101 - dir := t.TempDir() - path := dir + "/key" + path := filepath.Join(t.TempDir(), "key") os.WriteFile(path, []byte(key), 0600) config := &encryptionpb.MasterKey{ Backend: &encryptionpb.MasterKey_File{ diff --git a/pkg/storage/hot_region_storage_test.go b/pkg/storage/hot_region_storage_test.go index 1486fb8271d..4e98f2059d6 100644 --- a/pkg/storage/hot_region_storage_test.go +++ b/pkg/storage/hot_region_storage_test.go @@ -21,6 +21,7 @@ import ( "math/rand" "os" "path/filepath" + "strings" "testing" "time" @@ -287,7 +288,7 @@ func newTestHotRegionStorage(pullInterval time.Duration, packHotRegionInfo *MockPackHotRegionInfo) ( hotRegionStorage *HotRegionStorage, clear func(), err error) { - writePath := "./tmp" + writePath := strings.Join([]string{".", "tmp"}, string(filepath.Separator)) ctx := context.Background() packHotRegionInfo.pullInterval = pullInterval packHotRegionInfo.reservedDays = reservedDays diff --git a/pkg/utils/grpcutil/grpcutil_test.go b/pkg/utils/grpcutil/grpcutil_test.go index 99cbeae6cde..fbcfe59f02c 100644 --- a/pkg/utils/grpcutil/grpcutil_test.go +++ b/pkg/utils/grpcutil/grpcutil_test.go @@ -4,7 +4,7 @@ import ( "context" "os" "os/exec" - "path" + "path/filepath" "testing" "github.com/pingcap/errors" @@ -14,8 +14,8 @@ import ( ) var ( - certPath = "../../../tests/integrations/client/" - certScript = "cert_opt.sh" + certPath = filepath.Join("..", "..", "..", "tests", "integrations", "client") + string(filepath.Separator) + certScript = filepath.Join("..", "..", "..", "tests", "integrations", "client", "cert_opt.sh") ) func loadTLSContent(re *require.Assertions, caPath, certPath, keyPath string) (caData, certData, keyData []byte) { @@ -30,20 +30,20 @@ func loadTLSContent(re *require.Assertions, caPath, certPath, keyPath string) (c } func TestToTLSConfig(t *testing.T) { - if err := exec.Command(certPath+certScript, "generate", certPath).Run(); err != nil { + if err := exec.Command(certScript, "generate", certPath).Run(); err != nil { t.Fatal(err) } defer func() { - if err := exec.Command(certPath+certScript, "cleanup", certPath).Run(); err != nil { + if err := exec.Command(certScript, "cleanup", certPath).Run(); err != nil { 
t.Fatal(err) } }() re := require.New(t) tlsConfig := TLSConfig{ - KeyPath: path.Join(certPath, "pd-server-key.pem"), - CertPath: path.Join(certPath, "pd-server.pem"), - CAPath: path.Join(certPath, "ca.pem"), + KeyPath: filepath.Join(certPath, "pd-server-key.pem"), + CertPath: filepath.Join(certPath, "pd-server.pem"), + CAPath: filepath.Join(certPath, "ca.pem"), } // test without bytes _, err := tlsConfig.ToTLSConfig() diff --git a/pkg/utils/testutil/testutil.go b/pkg/utils/testutil/testutil.go index cef952353bc..76f7058738b 100644 --- a/pkg/utils/testutil/testutil.go +++ b/pkg/utils/testutil/testutil.go @@ -94,7 +94,7 @@ func CleanServer(dataDir string) { // InitTempFileLogger initializes the logger and redirects the log output to a temporary file. func InitTempFileLogger(level string) (fname string) { cfg := &log.Config{} - f, _ := os.CreateTemp("/tmp", "pd_tests") + f, _ := os.CreateTemp(os.TempDir(), "pd_tests") fname = f.Name() f.Close() cfg.File.Filename = fname diff --git a/server/config/config_test.go b/server/config/config_test.go index df23241b787..78d6d25b73e 100644 --- a/server/config/config_test.go +++ b/server/config/config_test.go @@ -19,7 +19,7 @@ import ( "fmt" "math" "os" - "path" + "path/filepath" "testing" "time" @@ -123,7 +123,7 @@ func TestValidation(t *testing.T) { cfg := NewConfig() re.NoError(cfg.Adjust(nil, false)) - cfg.Log.File.Filename = path.Join(cfg.DataDir, "test") + cfg.Log.File.Filename = filepath.Join(cfg.DataDir, "test") re.Error(cfg.Validate()) // check schedule config diff --git a/server/join/join.go b/server/join/join.go index 6bf111b9b45..8da90b7201b 100644 --- a/server/join/join.go +++ b/server/join/join.go @@ -17,7 +17,7 @@ package join import ( "fmt" "os" - "path" + "path/filepath" "strings" "time" @@ -90,7 +90,7 @@ func PrepareJoinCluster(cfg *config.Config) error { return errors.New("join self is forbidden") } - filePath := path.Join(cfg.DataDir, "join") + filePath := filepath.Join(cfg.DataDir, "join") // Read the persist join config if _, err := os.Stat(filePath); !os.IsNotExist(err) { s, err := os.ReadFile(filePath) @@ -104,7 +104,7 @@ func PrepareJoinCluster(cfg *config.Config) error { initialCluster := "" // Cases with data directory. 
- if isDataExist(path.Join(cfg.DataDir, "member")) { + if isDataExist(filepath.Join(cfg.DataDir, "member")) { cfg.InitialCluster = initialCluster cfg.InitialClusterState = embed.ClusterStateFlagExisting return nil diff --git a/server/server_test.go b/server/server_test.go index b2b15962fdc..410afda448d 100644 --- a/server/server_test.go +++ b/server/server_test.go @@ -280,7 +280,7 @@ func TestIsPathInDirectory(t *testing.T) { path := filepath.Join(directory, fileName) re.True(isPathInDirectory(path, directory)) - fileName = "../../test" + fileName = filepath.Join("..", "..", "test") path = filepath.Join(directory, fileName) re.False(isPathInDirectory(path, directory)) } diff --git a/server/testutil.go b/server/testutil.go index 5f817d47016..be6b2bbebb0 100644 --- a/server/testutil.go +++ b/server/testutil.go @@ -78,7 +78,7 @@ func NewTestSingleConfig(c *assertutil.Checker) *config.Config { cfg.AdvertiseClientUrls = cfg.ClientUrls cfg.AdvertisePeerUrls = cfg.PeerUrls - cfg.DataDir, _ = os.MkdirTemp("/tmp", "test_pd") + cfg.DataDir, _ = os.MkdirTemp(os.TempDir(), "test_pd") cfg.InitialCluster = fmt.Sprintf("pd=%s", cfg.PeerUrls) cfg.DisableStrictReconfigCheck = true cfg.TickInterval = typeutil.NewDuration(100 * time.Millisecond) diff --git a/tests/config.go b/tests/config.go index 4f653a3dc3c..a162a02009c 100644 --- a/tests/config.go +++ b/tests/config.go @@ -36,7 +36,7 @@ type serverConfig struct { } func newServerConfig(name string, cc *clusterConfig, join bool) *serverConfig { - tempDir, _ := os.MkdirTemp("/tmp", "pd-tests") + tempDir, _ := os.MkdirTemp(os.TempDir(), "pd-tests") return &serverConfig{ Name: name, DataDir: tempDir, diff --git a/tests/integrations/client/client_tls_test.go b/tests/integrations/client/client_tls_test.go index a5f0f5b200d..091fea2a4c8 100644 --- a/tests/integrations/client/client_tls_test.go +++ b/tests/integrations/client/client_tls_test.go @@ -37,25 +37,25 @@ import ( ) var ( - certPath = "./cert" - certExpiredPath = "./cert-expired" - certScript = "./cert_opt.sh" + certPath = strings.Join([]string{".", "cert"}, string(filepath.Separator)) + certExpiredPath = strings.Join([]string{".", "cert-expired"}, string(filepath.Separator)) + certScript = strings.Join([]string{".", "cert_opt.sh"}, string(filepath.Separator)) testTLSInfo = transport.TLSInfo{ - KeyFile: "./cert/pd-server-key.pem", - CertFile: "./cert/pd-server.pem", - TrustedCAFile: "./cert/ca.pem", + KeyFile: strings.Join([]string{".", "cert", "pd-server-key.pem"}, string(filepath.Separator)), + CertFile: strings.Join([]string{".", "cert", "pd-server.pem"}, string(filepath.Separator)), + TrustedCAFile: strings.Join([]string{".", "cert", "ca.pem"}, string(filepath.Separator)), } testClientTLSInfo = transport.TLSInfo{ - KeyFile: "./cert/client-key.pem", - CertFile: "./cert/client.pem", - TrustedCAFile: "./cert/ca.pem", + KeyFile: strings.Join([]string{".", "cert", "client-key.pem"}, string(filepath.Separator)), + CertFile: strings.Join([]string{".", "cert", "client.pem"}, string(filepath.Separator)), + TrustedCAFile: strings.Join([]string{".", "cert", "ca.pem"}, string(filepath.Separator)), } testTLSInfoExpired = transport.TLSInfo{ - KeyFile: "./cert-expired/pd-server-key.pem", - CertFile: "./cert-expired/pd-server.pem", - TrustedCAFile: "./cert-expired/ca.pem", + KeyFile: strings.Join([]string{".", "cert-expired", "pd-server-key.pem"}, string(filepath.Separator)), + CertFile: strings.Join([]string{".", "cert-expired", "pd-server.pem"}, string(filepath.Separator)), + TrustedCAFile: 
strings.Join([]string{".", "cert-expired", "ca.pem"}, string(filepath.Separator)), } ) diff --git a/tests/server/join/join_test.go b/tests/server/join/join_test.go index a9d2a69c5e4..1eaa59a4e2a 100644 --- a/tests/server/join/join_test.go +++ b/tests/server/join/join_test.go @@ -17,7 +17,7 @@ package join_test import ( "context" "os" - "path" + "path/filepath" "testing" "time" @@ -56,7 +56,7 @@ func TestSimpleJoin(t *testing.T) { re.NoError(err) err = pd2.Run() re.NoError(err) - _, err = os.Stat(path.Join(pd2.GetConfig().DataDir, "join")) + _, err = os.Stat(filepath.Join(pd2.GetConfig().DataDir, "join")) re.False(os.IsNotExist(err)) members, err = etcdutil.ListEtcdMembers(ctx, client) re.NoError(err) @@ -71,7 +71,7 @@ func TestSimpleJoin(t *testing.T) { re.NoError(err) err = pd3.Run() re.NoError(err) - _, err = os.Stat(path.Join(pd3.GetConfig().DataDir, "join")) + _, err = os.Stat(filepath.Join(pd3.GetConfig().DataDir, "join")) re.False(os.IsNotExist(err)) members, err = etcdutil.ListEtcdMembers(ctx, client) re.NoError(err) diff --git a/tools/pd-backup/pdbackup/backup_test.go b/tools/pd-backup/pdbackup/backup_test.go index 0ab9116ddbe..b67873baf8e 100644 --- a/tools/pd-backup/pdbackup/backup_test.go +++ b/tools/pd-backup/pdbackup/backup_test.go @@ -10,6 +10,7 @@ import ( "net/http/httptest" "os" "path" + "path/filepath" "strconv" "testing" "time" @@ -68,7 +69,7 @@ func setupServer() (*httptest.Server, *config.Config) { AdvertiseClientUrls: "example.com:2380", AdvertisePeerUrls: "example.com:2380", Name: "test-svc", - DataDir: "/data", + DataDir: string(filepath.Separator) + "data", ForceNewCluster: true, EnableGRPCGateway: true, InitialCluster: "pd1=http://127.0.0.1:10208", diff --git a/tools/pd-ctl/pdctl/command/global_test.go b/tools/pd-ctl/pdctl/command/global_test.go index 86eb4366d04..0d1cf74ac74 100644 --- a/tools/pd-ctl/pdctl/command/global_test.go +++ b/tools/pd-ctl/pdctl/command/global_test.go @@ -16,6 +16,7 @@ package command import ( "os" "os/exec" + "path/filepath" "testing" "github.com/spf13/cobra" @@ -30,16 +31,16 @@ func TestParseTLSConfig(t *testing.T) { Short: "Placement Driver control", SilenceErrors: true, } - certPath := "../../tests/cert" - rootCmd.Flags().String("cacert", certPath+"/ca.pem", "path of file that contains list of trusted SSL CAs") - rootCmd.Flags().String("cert", certPath+"/client.pem", "path of file that contains X509 certificate in PEM format") - rootCmd.Flags().String("key", certPath+"/client-key.pem", "path of file that contains X509 key in PEM format") + certPath := filepath.Join("..", "..", "tests", "cert") + rootCmd.Flags().String("cacert", filepath.Join(certPath, "ca.pem"), "path of file that contains list of trusted SSL CAs") + rootCmd.Flags().String("cert", filepath.Join(certPath, "client.pem"), "path of file that contains X509 certificate in PEM format") + rootCmd.Flags().String("key", filepath.Join(certPath, "client-key.pem"), "path of file that contains X509 key in PEM format") // generate certs if err := os.Mkdir(certPath, 0755); err != nil { t.Fatal(err) } - certScript := "../../tests/cert_opt.sh" + certScript := filepath.Join("..", "..", "tests", "cert_opt.sh") if err := exec.Command(certScript, "generate", certPath).Run(); err != nil { t.Fatal(err) } diff --git a/tools/pd-ctl/tests/config/config_test.go b/tools/pd-ctl/tests/config/config_test.go index cd77104f01f..2a9f7bb2353 100644 --- a/tools/pd-ctl/tests/config/config_test.go +++ b/tools/pd-ctl/tests/config/config_test.go @@ -357,7 +357,7 @@ func (suite *configTestSuite) 
checkConfigForwardControl(cluster *pdTests.TestClu leaderServer := cluster.GetLeaderServer() pdAddr := leaderServer.GetAddr() - f, _ := os.CreateTemp("/tmp", "pd_tests") + f, _ := os.CreateTemp(os.TempDir(), "pd_tests") fname := f.Name() f.Close() defer os.RemoveAll(fname) @@ -570,7 +570,7 @@ func (suite *configTestSuite) checkPlacementRules(cluster *pdTests.TestCluster) // test show checkShowRuleKey(re, pdAddr, [][2]string{{placement.DefaultGroupID, placement.DefaultRuleID}}) - f, _ := os.CreateTemp("/tmp", "pd_tests") + f, _ := os.CreateTemp(os.TempDir(), "pd_tests") fname := f.Name() f.Close() defer os.RemoveAll(fname) @@ -717,7 +717,7 @@ func (suite *configTestSuite) checkPlacementRuleBundle(cluster *pdTests.TestClus re.NoError(json.Unmarshal(output, &bundle)) re.Equal(placement.GroupBundle{ID: placement.DefaultGroupID, Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: placement.DefaultGroupID, ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}}}, bundle) - f, err := os.CreateTemp("/tmp", "pd_tests") + f, err := os.CreateTemp(os.TempDir(), "pd_tests") re.NoError(err) fname := f.Name() f.Close() diff --git a/tools/pd-ctl/tests/health/health_test.go b/tools/pd-ctl/tests/health/health_test.go index f1d3c7cfbf1..1d6cf884ccf 100644 --- a/tools/pd-ctl/tests/health/health_test.go +++ b/tools/pd-ctl/tests/health/health_test.go @@ -80,8 +80,8 @@ func TestHealthTLS(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - certPath := "../cert" - certScript := "../cert_opt.sh" + certPath := filepath.Join("..", "cert") + certScript := filepath.Join("..", "cert_opt.sh") // generate certs if err := os.Mkdir(certPath, 0755); err != nil { t.Fatal(err) @@ -143,9 +143,9 @@ func TestHealthTLS(t *testing.T) { pdAddr := tc.GetConfig().GetClientURL() pdAddr = strings.ReplaceAll(pdAddr, "http", "https") args := []string{"-u", pdAddr, "health", - "--cacert=../cert/ca.pem", - "--cert=../cert/client.pem", - "--key=../cert/client-key.pem"} + "--cacert=" + filepath.Join("..", "cert", "ca.pem"), + "--cert=" + filepath.Join("..", "cert", "client.pem"), + "--key=" + filepath.Join("..", "cert", "client-key.pem")} output, err := tests.ExecuteCommand(cmd, args...) re.NoError(err) h := make([]api.Health, len(healths)) diff --git a/tools/pd-ctl/tests/store/store_test.go b/tools/pd-ctl/tests/store/store_test.go index 2e1e7ac9444..ae35839837e 100644 --- a/tools/pd-ctl/tests/store/store_test.go +++ b/tools/pd-ctl/tests/store/store_test.go @@ -595,8 +595,8 @@ func TestStoreTLS(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - certPath := "../cert" - certScript := "../cert_opt.sh" + certPath := filepath.Join("..", "cert") + certScript := filepath.Join("..", "cert_opt.sh") // generate certs if err := os.Mkdir(certPath, 0755); err != nil { t.Fatal(err) @@ -674,9 +674,9 @@ func TestStoreTLS(t *testing.T) { pdAddr = strings.ReplaceAll(pdAddr, "http", "https") // store command args := []string{"-u", pdAddr, "store", - "--cacert=../cert/ca.pem", - "--cert=../cert/client.pem", - "--key=../cert/client-key.pem"} + "--cacert=" + filepath.Join("..", "cert", "ca.pem"), + "--cert=" + filepath.Join("..", "cert", "client.pem"), + "--key=" + filepath.Join("..", "cert", "client-key.pem")} output, err := tests.ExecuteCommand(cmd, args...) 
re.NoError(err) storesInfo := new(response.StoresInfo) diff --git a/tools/pd-simulator/simulator/config/config.go b/tools/pd-simulator/simulator/config/config.go index 030655bd3dc..4d182a2a03f 100644 --- a/tools/pd-simulator/simulator/config/config.go +++ b/tools/pd-simulator/simulator/config/config.go @@ -98,7 +98,7 @@ func NewSimConfig(serverLogLevel string) *SimConfig { cfg.AdvertiseClientUrls = cfg.ClientUrls cfg.AdvertisePeerUrls = cfg.PeerUrls - cfg.DataDir, _ = os.MkdirTemp("/tmp", "test_pd") + cfg.DataDir, _ = os.MkdirTemp(os.TempDir(), "test_pd") cfg.InitialCluster = fmt.Sprintf("pd=%s", cfg.PeerUrls) cfg.Log.Level = serverLogLevel return &SimConfig{ServerConfig: cfg} diff --git a/tools/pd-ut/coverProfile.go b/tools/pd-ut/coverProfile.go index 0ed1c3f3c61..75410f4b707 100644 --- a/tools/pd-ut/coverProfile.go +++ b/tools/pd-ut/coverProfile.go @@ -18,7 +18,7 @@ import ( "bufio" "fmt" "os" - "path" + "path/filepath" "sort" "golang.org/x/tools/cover" @@ -70,7 +70,7 @@ func collectCoverProfileFile() { } func collectOneCoverProfileFile(result map[string]*cover.Profile, file os.DirEntry) { - f, err := os.Open(path.Join(coverFileTempDir, file.Name())) + f, err := os.Open(filepath.Join(coverFileTempDir, file.Name())) if err != nil { fmt.Println("open temp cover file error:", err) os.Exit(-1) diff --git a/tools/pd-ut/ut.go b/tools/pd-ut/ut.go index dcf0c17c686..e0312272310 100644 --- a/tools/pd-ut/ut.go +++ b/tools/pd-ut/ut.go @@ -25,7 +25,7 @@ import ( "math/rand" "os" "os/exec" - "path" + "path/filepath" "regexp" "runtime" "strconv" @@ -93,7 +93,7 @@ go tool cover --func=xxx` var ( modulePath = "github.com/tikv/pd" - integrationsTestPath = "tests/integrations" + integrationsTestPath = filepath.Join("tests", "integrations") ) var ( @@ -171,8 +171,8 @@ func main() { case "it": // run integration tests if len(os.Args) >= 3 { - modulePath = path.Join(modulePath, integrationsTestPath) - workDir = path.Join(workDir, integrationsTestPath) + modulePath = filepath.Join(modulePath, integrationsTestPath) + workDir = filepath.Join(workDir, integrationsTestPath) switch os.Args[2] { case "run": isSucceed = cmdRun(os.Args[3:]...) @@ -576,7 +576,7 @@ type testResult struct { func (n *numa) runTestCase(pkg string, fn string) testResult { res := testResult{ JUnitTestCase: JUnitTestCase{ - ClassName: path.Join(modulePath, pkg), + ClassName: filepath.Join(modulePath, pkg), Name: fn, }, } @@ -586,7 +586,7 @@ func (n *numa) runTestCase(pkg string, fn string) testResult { var start time.Time for i := 0; i < 3; i++ { cmd := n.testCommand(pkg, fn) - cmd.Dir = path.Join(workDir, pkg) + cmd.Dir = filepath.Join(workDir, pkg) // Combine the test case output, so the run result for failed cases can be displayed. cmd.Stdout = &buf cmd.Stderr = &buf @@ -675,10 +675,10 @@ func (*numa) testCommand(pkg string, fn string) *exec.Cmd { args := make([]string, 0, 10) // let the test run in the verbose mode. args = append(args, "-test.v") - exe := "./" + testFileName(pkg) + exe := strings.Join([]string{".", testFileName(pkg)}, string(filepath.Separator)) if coverProfile != "" { fileName := strings.ReplaceAll(pkg, "/", "_") + "." 
+ fn - tmpFile := path.Join(coverFileTempDir, fileName) + tmpFile := filepath.Join(coverFileTempDir, fileName) args = append(args, "-test.coverprofile", tmpFile) } if strings.Contains(fn, "Suite") { @@ -746,13 +746,13 @@ func buildTestBinaryMulti(pkgs []string) ([]byte, error) { // go test --exec=xprog --tags=tso_function_test,deadlock -vet=off --count=0 $(pkgs) // workPath just like `/pd/tests/integrations` - xprogPath := path.Join(workDir, "bin/xprog") + xprogPath := filepath.Join(workDir, "bin", "xprog") if strings.Contains(workDir, integrationsTestPath) { - xprogPath = path.Join(workDir[:strings.LastIndex(workDir, integrationsTestPath)], "bin/xprog") + xprogPath = filepath.Join(workDir[:strings.LastIndex(workDir, integrationsTestPath)], "bin", "xprog") } packages := make([]string, 0, len(pkgs)) for _, pkg := range pkgs { - packages = append(packages, path.Join(modulePath, pkg)) + packages = append(packages, filepath.Join(modulePath, pkg)) } // We use 2 * parallel for `go build` to make it faster. @@ -799,7 +799,7 @@ func buildTestBinary(pkg string) error { if race { cmd.Args = append(cmd.Args, "-race") } - cmd.Dir = path.Join(workDir, pkg) + cmd.Dir = filepath.Join(workDir, pkg) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr if err := cmd.Run(); err != nil { @@ -820,20 +820,19 @@ func testBinaryExist(pkg string) (bool, error) { } func testFileName(pkg string) string { - _, file := path.Split(pkg) + _, file := filepath.Split(pkg) return file + ".test.bin" } func testFileFullPath(pkg string) string { - return path.Join(workDir, pkg, testFileName(pkg)) + return filepath.Join(workDir, pkg, testFileName(pkg)) } func listNewTestCases(pkg string) []string { - exe := "./" + testFileName(pkg) - + exe := strings.Join([]string{".", testFileName(pkg)}, string(filepath.Separator)) // core.test -test.list Test cmd := exec.Command(exe, "-test.list", "Test") - cmd.Dir = path.Join(workDir, pkg) + cmd.Dir = filepath.Join(workDir, pkg) var buf bytes.Buffer cmd.Stdout = &buf err := cmd.Run() From 87ec78819def9b0f31a93db48e9bf2aaed31fb1f Mon Sep 17 00:00:00 2001 From: okJiang <819421878@qq.com> Date: Thu, 1 Aug 2024 21:01:20 +0800 Subject: [PATCH 6/9] scheduler: GetType() returns types.CheckerSchedulerType directly (#8440) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ref tikv/pd#8379 - GetType() returns types.CheckerSchedulerType directly - keep the compatibility in `PersistOptions` and `PersistConfig“ - wrap `operator.OperatorLimitCounter.WithLabelValues().Inc()` to func `IncOperatorLimitCounter` Signed-off-by: okJiang <819421878@qq.com> Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com> --- pkg/mcs/scheduling/server/cluster.go | 4 ++- pkg/mcs/scheduling/server/config/config.go | 10 +++--- pkg/schedule/checker/checker_controller.go | 6 ++-- pkg/schedule/checker/merge_checker.go | 5 +-- pkg/schedule/checker/replica_checker.go | 5 +++ pkg/schedule/checker/rule_checker.go | 5 +++ pkg/schedule/config/config_provider.go | 7 +++-- pkg/schedule/operator/metrics.go | 15 ++++++--- pkg/schedule/schedulers/balance_leader.go | 24 ++------------ pkg/schedule/schedulers/balance_region.go | 27 ++++------------ pkg/schedule/schedulers/balance_witness.go | 22 ++----------- pkg/schedule/schedulers/base_scheduler.go | 19 ++++++++++-- pkg/schedule/schedulers/evict_leader.go | 31 ++++++------------- pkg/schedule/schedulers/evict_slow_store.go | 18 +++-------- .../schedulers/evict_slow_store_test.go | 3 +- pkg/schedule/schedulers/evict_slow_trend.go 
| 18 +++-------- .../schedulers/evict_slow_trend_test.go | 3 +- pkg/schedule/schedulers/grant_hot_region.go | 21 +++---------- pkg/schedule/schedulers/grant_leader.go | 20 +++--------- pkg/schedule/schedulers/hot_region.go | 13 ++------ pkg/schedule/schedulers/init.go | 4 --- pkg/schedule/schedulers/label.go | 14 ++------- pkg/schedule/schedulers/metrics.go | 2 +- pkg/schedule/schedulers/random_merge.go | 16 +++------- pkg/schedule/schedulers/scatter_range.go | 23 +++++--------- pkg/schedule/schedulers/scheduler.go | 3 +- pkg/schedule/schedulers/shuffle_hot_region.go | 25 ++++----------- pkg/schedule/schedulers/shuffle_leader.go | 13 ++------ pkg/schedule/schedulers/shuffle_region.go | 13 ++------ pkg/schedule/schedulers/split_bucket.go | 15 ++------- .../schedulers/transfer_witness_leader.go | 24 ++++++-------- pkg/schedule/type/type.go | 14 +++++---- plugin/scheduler_example/evict_leader.go | 25 ++++----------- server/config/persist_options.go | 22 +++++++------ server/handler.go | 7 +++-- 35 files changed, 175 insertions(+), 321 deletions(-) diff --git a/pkg/mcs/scheduling/server/cluster.go b/pkg/mcs/scheduling/server/cluster.go index c86c739f724..955af4b9b4a 100644 --- a/pkg/mcs/scheduling/server/cluster.go +++ b/pkg/mcs/scheduling/server/cluster.go @@ -27,6 +27,7 @@ import ( "github.com/tikv/pd/pkg/schedule/scatter" "github.com/tikv/pd/pkg/schedule/schedulers" "github.com/tikv/pd/pkg/schedule/splitter" + types "github.com/tikv/pd/pkg/schedule/type" "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/statistics/buckets" @@ -343,8 +344,9 @@ func (c *Cluster) updateScheduler() { // Remove the deleted schedulers. for _, name := range schedulersController.GetSchedulerNames() { scheduler := schedulersController.GetScheduler(name) + oldType := types.SchedulerTypeCompatibleMap[scheduler.GetType()] if slice.AnyOf(latestSchedulersConfig, func(i int) bool { - return latestSchedulersConfig[i].Type == scheduler.GetType() + return latestSchedulersConfig[i].Type == oldType }) { continue } diff --git a/pkg/mcs/scheduling/server/config/config.go b/pkg/mcs/scheduling/server/config/config.go index 2111aa3ddcc..ac59de5b97a 100644 --- a/pkg/mcs/scheduling/server/config/config.go +++ b/pkg/mcs/scheduling/server/config/config.go @@ -36,6 +36,7 @@ import ( "github.com/tikv/pd/pkg/core/storelimit" "github.com/tikv/pd/pkg/mcs/utils" sc "github.com/tikv/pd/pkg/schedule/config" + types "github.com/tikv/pd/pkg/schedule/type" "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/configutil" @@ -646,10 +647,11 @@ func (o *PersistConfig) SetMaxReplicas(replicas int) { } // IsSchedulerDisabled returns if the scheduler is disabled. -func (o *PersistConfig) IsSchedulerDisabled(t string) bool { +func (o *PersistConfig) IsSchedulerDisabled(tp types.CheckerSchedulerType) bool { + oldType := types.SchedulerTypeCompatibleMap[tp] schedulers := o.GetScheduleConfig().Schedulers for _, s := range schedulers { - if t == s.Type { + if oldType == s.Type { return s.Disable } } @@ -739,11 +741,11 @@ func (o *PersistConfig) IsRaftKV2() bool { // AddSchedulerCfg adds the scheduler configurations. // This method is a no-op since we only use configurations derived from one-way synchronization from API server now. -func (*PersistConfig) AddSchedulerCfg(string, []string) {} +func (*PersistConfig) AddSchedulerCfg(types.CheckerSchedulerType, []string) {} // RemoveSchedulerCfg removes the scheduler configurations. 
// This method is a no-op since we only use configurations derived from one-way synchronization from API server now. -func (*PersistConfig) RemoveSchedulerCfg(string) {} +func (*PersistConfig) RemoveSchedulerCfg(types.CheckerSchedulerType) {} // CheckLabelProperty checks if the label property is satisfied. func (*PersistConfig) CheckLabelProperty(string, []*metapb.StoreLabel) bool { diff --git a/pkg/schedule/checker/checker_controller.go b/pkg/schedule/checker/checker_controller.go index f772219558b..627408e6c43 100644 --- a/pkg/schedule/checker/checker_controller.go +++ b/pkg/schedule/checker/checker_controller.go @@ -241,7 +241,7 @@ func (c *Controller) CheckRegion(region *core.RegionInfo) []*operator.Operator { if opController.OperatorCount(operator.OpReplica) < c.conf.GetReplicaScheduleLimit() { return []*operator.Operator{op} } - operator.OperatorLimitCounter.WithLabelValues(c.ruleChecker.Name(), operator.OpReplica.String()).Inc() + operator.IncOperatorLimitCounter(c.ruleChecker.GetType(), operator.OpReplica) c.pendingProcessedRegions.Put(region.GetID(), nil) } } @@ -253,7 +253,7 @@ func (c *Controller) CheckRegion(region *core.RegionInfo) []*operator.Operator { if opController.OperatorCount(operator.OpReplica) < c.conf.GetReplicaScheduleLimit() { return []*operator.Operator{op} } - operator.OperatorLimitCounter.WithLabelValues(c.replicaChecker.Name(), operator.OpReplica.String()).Inc() + operator.IncOperatorLimitCounter(c.replicaChecker.GetType(), operator.OpReplica) c.pendingProcessedRegions.Put(region.GetID(), nil) } } @@ -270,7 +270,7 @@ func (c *Controller) CheckRegion(region *core.RegionInfo) []*operator.Operator { if c.mergeChecker != nil { allowed := opController.OperatorCount(operator.OpMerge) < c.conf.GetMergeScheduleLimit() if !allowed { - operator.OperatorLimitCounter.WithLabelValues(c.mergeChecker.GetType(), operator.OpMerge.String()).Inc() + operator.IncOperatorLimitCounter(c.mergeChecker.GetType(), operator.OpMerge) } else if ops := c.mergeChecker.Check(region); ops != nil { // It makes sure that two operators can be added successfully altogether. return ops diff --git a/pkg/schedule/checker/merge_checker.go b/pkg/schedule/checker/merge_checker.go index 1a7548a1084..65189d35c1d 100644 --- a/pkg/schedule/checker/merge_checker.go +++ b/pkg/schedule/checker/merge_checker.go @@ -31,6 +31,7 @@ import ( "github.com/tikv/pd/pkg/schedule/labeler" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/placement" + types "github.com/tikv/pd/pkg/schedule/type" "github.com/tikv/pd/pkg/utils/logutil" ) @@ -69,8 +70,8 @@ func NewMergeChecker(ctx context.Context, cluster sche.CheckerCluster, conf conf } // GetType return MergeChecker's type -func (*MergeChecker) GetType() string { - return "merge-checker" +func (*MergeChecker) GetType() types.CheckerSchedulerType { + return types.MergeChecker } // RecordRegionSplit put the recently split region into cache. MergeChecker diff --git a/pkg/schedule/checker/replica_checker.go b/pkg/schedule/checker/replica_checker.go index f75ffe7e882..6be5432125b 100644 --- a/pkg/schedule/checker/replica_checker.go +++ b/pkg/schedule/checker/replica_checker.go @@ -61,6 +61,11 @@ func (*ReplicaChecker) Name() string { return types.ReplicaChecker.String() } +// GetType return ReplicaChecker's type. +func (*ReplicaChecker) GetType() types.CheckerSchedulerType { + return types.ReplicaChecker +} + // Check verifies a region's replicas, creating an operator.Operator if need. 
func (r *ReplicaChecker) Check(region *core.RegionInfo) *operator.Operator { replicaCheckerCounter.Inc() diff --git a/pkg/schedule/checker/rule_checker.go b/pkg/schedule/checker/rule_checker.go index a90de0a58d4..a8acb002951 100644 --- a/pkg/schedule/checker/rule_checker.go +++ b/pkg/schedule/checker/rule_checker.go @@ -75,6 +75,11 @@ func (*RuleChecker) Name() string { return types.RuleChecker.String() } +// GetType returns RuleChecker's type. +func (*RuleChecker) GetType() types.CheckerSchedulerType { + return types.RuleChecker +} + // Check checks if the region matches placement rules and returns Operator to // fix it. func (c *RuleChecker) Check(region *core.RegionInfo) *operator.Operator { diff --git a/pkg/schedule/config/config_provider.go b/pkg/schedule/config/config_provider.go index 90e489f86f3..51ade0edb77 100644 --- a/pkg/schedule/config/config_provider.go +++ b/pkg/schedule/config/config_provider.go @@ -22,6 +22,7 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/tikv/pd/pkg/core/constant" "github.com/tikv/pd/pkg/core/storelimit" + types "github.com/tikv/pd/pkg/schedule/type" "github.com/tikv/pd/pkg/storage/endpoint" ) @@ -49,9 +50,9 @@ type SchedulerConfigProvider interface { SetSchedulingAllowanceStatus(bool, string) GetStoresLimit() map[uint64]StoreLimitConfig - IsSchedulerDisabled(string) bool - AddSchedulerCfg(string, []string) - RemoveSchedulerCfg(string) + IsSchedulerDisabled(types.CheckerSchedulerType) bool + AddSchedulerCfg(types.CheckerSchedulerType, []string) + RemoveSchedulerCfg(types.CheckerSchedulerType) Persist(endpoint.ConfigStorage) error GetRegionScheduleLimit() uint64 diff --git a/pkg/schedule/operator/metrics.go b/pkg/schedule/operator/metrics.go index 20bb4e6b7ca..74f9ddad0c7 100644 --- a/pkg/schedule/operator/metrics.go +++ b/pkg/schedule/operator/metrics.go @@ -14,7 +14,10 @@ package operator -import "github.com/prometheus/client_golang/prometheus" +import ( + "github.com/prometheus/client_golang/prometheus" + types "github.com/tikv/pd/pkg/schedule/type" +) var ( operatorStepDuration = prometheus.NewHistogramVec( @@ -26,8 +29,7 @@ var ( Buckets: []float64{0.5, 1, 2, 4, 8, 16, 20, 40, 60, 90, 120, 180, 240, 300, 480, 600, 720, 900, 1200, 1800, 3600}, }, []string{"type"}) - // OperatorLimitCounter exposes the counter when meeting limit. - OperatorLimitCounter = prometheus.NewCounterVec( + operatorLimitCounter = prometheus.NewCounterVec( prometheus.CounterOpts{ Namespace: "pd", Subsystem: "schedule", @@ -82,10 +84,15 @@ var ( func init() { prometheus.MustRegister(operatorStepDuration) - prometheus.MustRegister(OperatorLimitCounter) + prometheus.MustRegister(operatorLimitCounter) prometheus.MustRegister(OperatorExceededStoreLimitCounter) prometheus.MustRegister(operatorCounter) prometheus.MustRegister(operatorDuration) prometheus.MustRegister(operatorSizeHist) prometheus.MustRegister(storeLimitCostCounter) } + +// IncOperatorLimitCounter increases the counter of operator meeting limit. 
+func IncOperatorLimitCounter(typ types.CheckerSchedulerType, kind OpKind) { + operatorLimitCounter.WithLabelValues(typ.String(), kind.String()).Inc() +} diff --git a/pkg/schedule/schedulers/balance_leader.go b/pkg/schedule/schedulers/balance_leader.go index 899737536e2..6762c8751e4 100644 --- a/pkg/schedule/schedulers/balance_leader.go +++ b/pkg/schedule/schedulers/balance_leader.go @@ -166,7 +166,6 @@ func (handler *balanceLeaderHandler) ListConfig(w http.ResponseWriter, _ *http.R type balanceLeaderScheduler struct { *BaseScheduler *retryQuota - name string conf *balanceLeaderSchedulerConfig handler http.Handler filters []filter.Filter @@ -176,14 +175,11 @@ type balanceLeaderScheduler struct { // newBalanceLeaderScheduler creates a scheduler that tends to keep leaders on // each store balanced. func newBalanceLeaderScheduler(opController *operator.Controller, conf *balanceLeaderSchedulerConfig, options ...BalanceLeaderCreateOption) Scheduler { - base := NewBaseScheduler(opController) s := &balanceLeaderScheduler{ - BaseScheduler: base, + BaseScheduler: NewBaseScheduler(opController, types.BalanceLeaderScheduler), retryQuota: newRetryQuota(), - name: BalanceLeaderName, conf: conf, handler: newBalanceLeaderHandler(conf), - filterCounter: filter.NewCounter(types.BalanceLeaderScheduler.String()), } for _, option := range options { option(s) @@ -192,6 +188,7 @@ func newBalanceLeaderScheduler(opController *operator.Controller, conf *balanceL &filter.StoreStateFilter{ActionScope: s.GetName(), TransferLeader: true, OperatorLevel: constant.High}, filter.NewSpecialUseFilter(s.GetName()), } + s.filterCounter = filter.NewCounter(s.GetName()) return s } @@ -202,13 +199,6 @@ func (l *balanceLeaderScheduler) ServeHTTP(w http.ResponseWriter, r *http.Reques // BalanceLeaderCreateOption is used to create a scheduler with an option. type BalanceLeaderCreateOption func(s *balanceLeaderScheduler) -// WithBalanceLeaderFilterCounterName sets the filter counter name for the scheduler. -func WithBalanceLeaderFilterCounterName(name string) BalanceLeaderCreateOption { - return func(s *balanceLeaderScheduler) { - s.filterCounter.SetScope(name) - } -} - // WithBalanceLeaderName sets the name for the scheduler. 
func WithBalanceLeaderName(name string) BalanceLeaderCreateOption { return func(s *balanceLeaderScheduler) { @@ -216,14 +206,6 @@ func WithBalanceLeaderName(name string) BalanceLeaderCreateOption { } } -func (l *balanceLeaderScheduler) GetName() string { - return l.name -} - -func (*balanceLeaderScheduler) GetType() string { - return BalanceLeaderType -} - func (l *balanceLeaderScheduler) EncodeConfig() ([]byte, error) { l.conf.RLock() defer l.conf.RUnlock() @@ -252,7 +234,7 @@ func (l *balanceLeaderScheduler) ReloadConfig() error { func (l *balanceLeaderScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { allowed := l.OpController.OperatorCount(operator.OpLeader) < cluster.GetSchedulerConfig().GetLeaderScheduleLimit() if !allowed { - operator.OperatorLimitCounter.WithLabelValues(l.GetType(), operator.OpLeader.String()).Inc() + operator.IncOperatorLimitCounter(l.GetType(), operator.OpLeader) } return allowed } diff --git a/pkg/schedule/schedulers/balance_region.go b/pkg/schedule/schedulers/balance_region.go index b26830155b0..3ef01345aea 100644 --- a/pkg/schedule/schedulers/balance_region.go +++ b/pkg/schedule/schedulers/balance_region.go @@ -38,7 +38,6 @@ const ( ) type balanceRegionSchedulerConfig struct { - Name string `json:"name"` Ranges []core.KeyRange `json:"ranges"` // TODO: When we prepare to use Ranges, we will need to implement the ReloadConfig function for this scheduler. } @@ -46,6 +45,7 @@ type balanceRegionSchedulerConfig struct { type balanceRegionScheduler struct { *BaseScheduler *retryQuota + name string conf *balanceRegionSchedulerConfig filters []filter.Filter filterCounter *filter.Counter @@ -54,12 +54,11 @@ type balanceRegionScheduler struct { // newBalanceRegionScheduler creates a scheduler that tends to keep regions on // each store balanced. func newBalanceRegionScheduler(opController *operator.Controller, conf *balanceRegionSchedulerConfig, opts ...BalanceRegionCreateOption) Scheduler { - base := NewBaseScheduler(opController) scheduler := &balanceRegionScheduler{ - BaseScheduler: base, + BaseScheduler: NewBaseScheduler(opController, types.BalanceRegionScheduler), retryQuota: newRetryQuota(), + name: types.BalanceRegionScheduler.String(), conf: conf, - filterCounter: filter.NewCounter(types.BalanceRegionScheduler.String()), } for _, setOption := range opts { setOption(scheduler) @@ -68,6 +67,7 @@ func newBalanceRegionScheduler(opController *operator.Controller, conf *balanceR &filter.StoreStateFilter{ActionScope: scheduler.GetName(), MoveRegion: true, OperatorLevel: constant.Medium}, filter.NewSpecialUseFilter(scheduler.GetName()), } + scheduler.filterCounter = filter.NewCounter(scheduler.GetName()) return scheduler } @@ -77,25 +77,10 @@ type BalanceRegionCreateOption func(s *balanceRegionScheduler) // WithBalanceRegionName sets the name for the scheduler. func WithBalanceRegionName(name string) BalanceRegionCreateOption { return func(s *balanceRegionScheduler) { - s.conf.Name = name + s.name = name } } -// WithBalanceRegionFilterCounterName sets the filter counter name for the scheduler. 
-func WithBalanceRegionFilterCounterName(name string) BalanceRegionCreateOption { - return func(s *balanceRegionScheduler) { - s.filterCounter.SetScope(name) - } -} - -func (s *balanceRegionScheduler) GetName() string { - return s.conf.Name -} - -func (*balanceRegionScheduler) GetType() string { - return BalanceRegionType -} - func (s *balanceRegionScheduler) EncodeConfig() ([]byte, error) { return EncodeConfig(s.conf) } @@ -103,7 +88,7 @@ func (s *balanceRegionScheduler) EncodeConfig() ([]byte, error) { func (s *balanceRegionScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { allowed := s.OpController.OperatorCount(operator.OpRegion) < cluster.GetSchedulerConfig().GetRegionScheduleLimit() if !allowed { - operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpRegion.String()).Inc() + operator.IncOperatorLimitCounter(s.GetType(), operator.OpRegion) } return allowed } diff --git a/pkg/schedule/schedulers/balance_witness.go b/pkg/schedule/schedulers/balance_witness.go index 1c4daa62634..319a0f2493a 100644 --- a/pkg/schedule/schedulers/balance_witness.go +++ b/pkg/schedule/schedulers/balance_witness.go @@ -163,7 +163,6 @@ func (handler *balanceWitnessHandler) ListConfig(w http.ResponseWriter, _ *http. type balanceWitnessScheduler struct { *BaseScheduler *retryQuota - name string conf *balanceWitnessSchedulerConfig handler http.Handler filters []filter.Filter @@ -174,11 +173,9 @@ type balanceWitnessScheduler struct { // newBalanceWitnessScheduler creates a scheduler that tends to keep witnesses on // each store balanced. func newBalanceWitnessScheduler(opController *operator.Controller, conf *balanceWitnessSchedulerConfig, options ...BalanceWitnessCreateOption) Scheduler { - base := NewBaseScheduler(opController) s := &balanceWitnessScheduler{ - BaseScheduler: base, + BaseScheduler: NewBaseScheduler(opController, types.BalanceWitnessScheduler), retryQuota: newRetryQuota(), - name: BalanceWitnessName, conf: conf, handler: newBalanceWitnessHandler(conf), counter: balanceWitnessCounter, @@ -208,21 +205,6 @@ func WithBalanceWitnessCounter(counter *prometheus.CounterVec) BalanceWitnessCre } } -// WithBalanceWitnessName sets the name for the scheduler. 
-func WithBalanceWitnessName(name string) BalanceWitnessCreateOption { - return func(s *balanceWitnessScheduler) { - s.name = name - } -} - -func (b *balanceWitnessScheduler) GetName() string { - return b.name -} - -func (*balanceWitnessScheduler) GetType() string { - return BalanceWitnessType -} - func (b *balanceWitnessScheduler) EncodeConfig() ([]byte, error) { b.conf.RLock() defer b.conf.RUnlock() @@ -251,7 +233,7 @@ func (b *balanceWitnessScheduler) ReloadConfig() error { func (b *balanceWitnessScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { allowed := b.OpController.OperatorCount(operator.OpWitness) < cluster.GetSchedulerConfig().GetWitnessScheduleLimit() if !allowed { - operator.OperatorLimitCounter.WithLabelValues(b.GetType(), operator.OpWitness.String()).Inc() + operator.IncOperatorLimitCounter(b.GetType(), operator.OpWitness) } return allowed } diff --git a/pkg/schedule/schedulers/base_scheduler.go b/pkg/schedule/schedulers/base_scheduler.go index f3772757ad3..6cd02d2b555 100644 --- a/pkg/schedule/schedulers/base_scheduler.go +++ b/pkg/schedule/schedulers/base_scheduler.go @@ -23,6 +23,7 @@ import ( "github.com/tikv/pd/pkg/errs" sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/operator" + types "github.com/tikv/pd/pkg/schedule/type" "github.com/tikv/pd/pkg/utils/typeutil" ) @@ -61,11 +62,14 @@ func intervalGrow(x time.Duration, maxInterval time.Duration, typ intervalGrowth // BaseScheduler is a basic scheduler for all other complex scheduler type BaseScheduler struct { OpController *operator.Controller + + name string + tp types.CheckerSchedulerType } // NewBaseScheduler returns a basic scheduler -func NewBaseScheduler(opController *operator.Controller) *BaseScheduler { - return &BaseScheduler{OpController: opController} +func NewBaseScheduler(opController *operator.Controller, tp types.CheckerSchedulerType) *BaseScheduler { + return &BaseScheduler{OpController: opController, tp: tp} } func (*BaseScheduler) ServeHTTP(w http.ResponseWriter, _ *http.Request) { @@ -97,3 +101,14 @@ func (*BaseScheduler) PrepareConfig(sche.SchedulerCluster) error { return nil } // CleanConfig does some cleanup work about config. 
func (*BaseScheduler) CleanConfig(sche.SchedulerCluster) {} + +func (s *BaseScheduler) GetName() string { + if len(s.name) == 0 { + return s.tp.String() + } + return s.name +} + +func (s *BaseScheduler) GetType() types.CheckerSchedulerType { + return s.tp +} diff --git a/pkg/schedule/schedulers/evict_leader.go b/pkg/schedule/schedulers/evict_leader.go index 2adcfbe7e48..3aba9a5d184 100644 --- a/pkg/schedule/schedulers/evict_leader.go +++ b/pkg/schedule/schedulers/evict_leader.go @@ -29,6 +29,7 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + types "github.com/tikv/pd/pkg/schedule/type" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/apiutil" "github.com/tikv/pd/pkg/utils/syncutil" @@ -87,7 +88,6 @@ func (conf *evictLeaderSchedulerConfig) Clone() *evictLeaderSchedulerConfig { } func (conf *evictLeaderSchedulerConfig) persistLocked() error { - name := conf.getSchedulerName() data, err := EncodeConfig(conf) failpoint.Inject("persistFail", func() { err = errors.New("fail to persist") @@ -95,11 +95,7 @@ func (conf *evictLeaderSchedulerConfig) persistLocked() error { if err != nil { return err } - return conf.storage.SaveSchedulerConfig(name, data) -} - -func (*evictLeaderSchedulerConfig) getSchedulerName() string { - return EvictLeaderName + return conf.storage.SaveSchedulerConfig(types.EvictLeaderScheduler.String(), data) } func (conf *evictLeaderSchedulerConfig) getRanges(id uint64) []string { @@ -256,10 +252,9 @@ type evictLeaderScheduler struct { // newEvictLeaderScheduler creates an admin scheduler that transfers all leaders // out of a store. func newEvictLeaderScheduler(opController *operator.Controller, conf *evictLeaderSchedulerConfig) Scheduler { - base := NewBaseScheduler(opController) handler := newEvictLeaderHandler(conf) return &evictLeaderScheduler{ - BaseScheduler: base, + BaseScheduler: NewBaseScheduler(opController, types.EvictLeaderScheduler), conf: conf, handler: handler, } @@ -274,14 +269,6 @@ func (s *evictLeaderScheduler) ServeHTTP(w http.ResponseWriter, r *http.Request) s.handler.ServeHTTP(w, r) } -func (*evictLeaderScheduler) GetName() string { - return EvictLeaderName -} - -func (*evictLeaderScheduler) GetType() string { - return EvictLeaderType -} - func (s *evictLeaderScheduler) EncodeConfig() ([]byte, error) { return s.conf.encodeConfig() } @@ -301,14 +288,14 @@ func (s *evictLeaderScheduler) CleanConfig(cluster sche.SchedulerCluster) { func (s *evictLeaderScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetSchedulerConfig().GetLeaderScheduleLimit() if !allowed { - operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpLeader.String()).Inc() + operator.IncOperatorLimitCounter(s.GetType(), operator.OpLeader) } return allowed } func (s *evictLeaderScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { evictLeaderCounter.Inc() - return scheduleEvictLeaderBatch(s.GetName(), s.GetType(), cluster, s.conf), nil + return scheduleEvictLeaderBatch(s.GetName(), cluster, s.conf), nil } func uniqueAppendOperator(dst []*operator.Operator, src ...*operator.Operator) []*operator.Operator { @@ -332,11 +319,11 @@ type evictLeaderStoresConf interface { getBatch() int } -func scheduleEvictLeaderBatch(name, typ string, cluster sche.SchedulerCluster, conf evictLeaderStoresConf) []*operator.Operator { +func scheduleEvictLeaderBatch(name 
string, cluster sche.SchedulerCluster, conf evictLeaderStoresConf) []*operator.Operator { var ops []*operator.Operator batchSize := conf.getBatch() for i := 0; i < batchSize; i++ { - once := scheduleEvictLeaderOnce(name, typ, cluster, conf) + once := scheduleEvictLeaderOnce(name, cluster, conf) // no more regions if len(once) == 0 { break @@ -350,7 +337,7 @@ func scheduleEvictLeaderBatch(name, typ string, cluster sche.SchedulerCluster, c return ops } -func scheduleEvictLeaderOnce(name, typ string, cluster sche.SchedulerCluster, conf evictLeaderStoresConf) []*operator.Operator { +func scheduleEvictLeaderOnce(name string, cluster sche.SchedulerCluster, conf evictLeaderStoresConf) []*operator.Operator { stores := conf.getStores() ops := make([]*operator.Operator, 0, len(stores)) for _, storeID := range stores { @@ -395,7 +382,7 @@ func scheduleEvictLeaderOnce(name, typ string, cluster sche.SchedulerCluster, co for _, t := range targets { targetIDs = append(targetIDs, t.GetID()) } - op, err := operator.CreateTransferLeaderOperator(typ, cluster, region, target.GetID(), targetIDs, operator.OpLeader) + op, err := operator.CreateTransferLeaderOperator(name, cluster, region, target.GetID(), targetIDs, operator.OpLeader) if err != nil { log.Debug("fail to create evict leader operator", errs.ZapError(err)) continue diff --git a/pkg/schedule/schedulers/evict_slow_store.go b/pkg/schedule/schedulers/evict_slow_store.go index c9f10fa610f..721444d1da7 100644 --- a/pkg/schedule/schedulers/evict_slow_store.go +++ b/pkg/schedule/schedulers/evict_slow_store.go @@ -26,6 +26,7 @@ import ( sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + types "github.com/tikv/pd/pkg/schedule/type" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/apiutil" "github.com/tikv/pd/pkg/utils/syncutil" @@ -72,7 +73,6 @@ func (conf *evictSlowStoreSchedulerConfig) Clone() *evictSlowStoreSchedulerConfi } func (conf *evictSlowStoreSchedulerConfig) persistLocked() error { - name := EvictSlowStoreName data, err := EncodeConfig(conf) failpoint.Inject("persistFail", func() { err = errors.New("fail to persist") @@ -80,7 +80,7 @@ func (conf *evictSlowStoreSchedulerConfig) persistLocked() error { if err != nil { return err } - return conf.storage.SaveSchedulerConfig(name, data) + return conf.storage.SaveSchedulerConfig(types.EvictSlowStoreScheduler.String(), data) } func (conf *evictSlowStoreSchedulerConfig) getStores() []uint64 { @@ -193,14 +193,6 @@ func (s *evictSlowStoreScheduler) ServeHTTP(w http.ResponseWriter, r *http.Reque s.handler.ServeHTTP(w, r) } -func (*evictSlowStoreScheduler) GetName() string { - return EvictSlowStoreName -} - -func (*evictSlowStoreScheduler) GetType() string { - return EvictSlowStoreType -} - func (s *evictSlowStoreScheduler) EncodeConfig() ([]byte, error) { return EncodeConfig(s.conf) } @@ -267,14 +259,14 @@ func (s *evictSlowStoreScheduler) cleanupEvictLeader(cluster sche.SchedulerClust } func (s *evictSlowStoreScheduler) schedulerEvictLeader(cluster sche.SchedulerCluster) []*operator.Operator { - return scheduleEvictLeaderBatch(s.GetName(), s.GetType(), cluster, s.conf) + return scheduleEvictLeaderBatch(s.GetName(), cluster, s.conf) } func (s *evictSlowStoreScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { if s.conf.evictStore() != 0 { allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetSchedulerConfig().GetLeaderScheduleLimit() if !allowed { - 
operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpLeader.String()).Inc() + operator.IncOperatorLimitCounter(s.GetType(), operator.OpLeader) } return allowed } @@ -336,7 +328,7 @@ func (s *evictSlowStoreScheduler) Schedule(cluster sche.SchedulerCluster, _ bool func newEvictSlowStoreScheduler(opController *operator.Controller, conf *evictSlowStoreSchedulerConfig) Scheduler { handler := newEvictSlowStoreHandler(conf) return &evictSlowStoreScheduler{ - BaseScheduler: NewBaseScheduler(opController), + BaseScheduler: NewBaseScheduler(opController, types.EvictSlowStoreScheduler), conf: conf, handler: handler, } diff --git a/pkg/schedule/schedulers/evict_slow_store_test.go b/pkg/schedule/schedulers/evict_slow_store_test.go index 6ed9764ba7c..440ab85d08e 100644 --- a/pkg/schedule/schedulers/evict_slow_store_test.go +++ b/pkg/schedule/schedulers/evict_slow_store_test.go @@ -25,6 +25,7 @@ import ( "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/mock/mockcluster" "github.com/tikv/pd/pkg/schedule/operator" + types "github.com/tikv/pd/pkg/schedule/type" "github.com/tikv/pd/pkg/storage" "github.com/tikv/pd/pkg/utils/operatorutil" ) @@ -79,7 +80,7 @@ func (suite *evictSlowStoreTestSuite) TestEvictSlowStore() { // Add evict leader scheduler to store 1 ops, _ := suite.es.Schedule(suite.tc, false) operatorutil.CheckMultiTargetTransferLeader(re, ops[0], operator.OpLeader, 1, []uint64{2}) - re.Equal(EvictSlowStoreType, ops[0].Desc()) + re.Equal(types.EvictSlowStoreScheduler.String(), ops[0].Desc()) // Cannot balance leaders to store 1 ops, _ = suite.bs.Schedule(suite.tc, false) re.Empty(ops) diff --git a/pkg/schedule/schedulers/evict_slow_trend.go b/pkg/schedule/schedulers/evict_slow_trend.go index dc2266b5540..d14cec1e06a 100644 --- a/pkg/schedule/schedulers/evict_slow_trend.go +++ b/pkg/schedule/schedulers/evict_slow_trend.go @@ -27,6 +27,7 @@ import ( sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + types "github.com/tikv/pd/pkg/schedule/type" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/apiutil" "github.com/tikv/pd/pkg/utils/syncutil" @@ -86,7 +87,6 @@ func (conf *evictSlowTrendSchedulerConfig) Clone() *evictSlowTrendSchedulerConfi } func (conf *evictSlowTrendSchedulerConfig) persistLocked() error { - name := EvictSlowTrendName data, err := EncodeConfig(conf) failpoint.Inject("persistFail", func() { err = errors.New("fail to persist") @@ -94,7 +94,7 @@ func (conf *evictSlowTrendSchedulerConfig) persistLocked() error { if err != nil { return err } - return conf.storage.SaveSchedulerConfig(name, data) + return conf.storage.SaveSchedulerConfig(types.EvictSlowTrendScheduler.String(), data) } func (conf *evictSlowTrendSchedulerConfig) getStores() []uint64 { @@ -295,14 +295,6 @@ func (s *evictSlowTrendScheduler) ServeHTTP(w http.ResponseWriter, r *http.Reque s.handler.ServeHTTP(w, r) } -func (*evictSlowTrendScheduler) GetName() string { - return EvictSlowTrendName -} - -func (*evictSlowTrendScheduler) GetType() string { - return EvictSlowTrendType -} - func (s *evictSlowTrendScheduler) EncodeConfig() ([]byte, error) { return EncodeConfig(s.conf) } @@ -374,7 +366,7 @@ func (s *evictSlowTrendScheduler) scheduleEvictLeader(cluster sche.SchedulerClus return nil } storeSlowTrendEvictedStatusGauge.WithLabelValues(store.GetAddress(), strconv.FormatUint(store.GetID(), 10)).Set(1) - return scheduleEvictLeaderBatch(s.GetName(), s.GetType(), cluster, s.conf) + return 
scheduleEvictLeaderBatch(s.GetName(), cluster, s.conf) } func (s *evictSlowTrendScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { @@ -383,7 +375,7 @@ func (s *evictSlowTrendScheduler) IsScheduleAllowed(cluster sche.SchedulerCluste } allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetSchedulerConfig().GetLeaderScheduleLimit() if !allowed { - operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpLeader.String()).Inc() + operator.IncOperatorLimitCounter(s.GetType(), operator.OpLeader) } return allowed } @@ -459,7 +451,7 @@ func (s *evictSlowTrendScheduler) Schedule(cluster sche.SchedulerCluster, _ bool func newEvictSlowTrendScheduler(opController *operator.Controller, conf *evictSlowTrendSchedulerConfig) Scheduler { handler := newEvictSlowTrendHandler(conf) return &evictSlowTrendScheduler{ - BaseScheduler: NewBaseScheduler(opController), + BaseScheduler: NewBaseScheduler(opController, types.EvictSlowTrendScheduler), conf: conf, handler: handler, } diff --git a/pkg/schedule/schedulers/evict_slow_trend_test.go b/pkg/schedule/schedulers/evict_slow_trend_test.go index dd6807f4a85..c01ae4959ba 100644 --- a/pkg/schedule/schedulers/evict_slow_trend_test.go +++ b/pkg/schedule/schedulers/evict_slow_trend_test.go @@ -27,6 +27,7 @@ import ( "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/mock/mockcluster" "github.com/tikv/pd/pkg/schedule/operator" + types "github.com/tikv/pd/pkg/schedule/type" "github.com/tikv/pd/pkg/storage" "github.com/tikv/pd/pkg/utils/operatorutil" ) @@ -155,7 +156,7 @@ func (suite *evictSlowTrendTestSuite) TestEvictSlowTrend() { } ops, _ = suite.es.Schedule(suite.tc, false) operatorutil.CheckMultiTargetTransferLeader(re, ops[0], operator.OpLeader, 1, []uint64{2, 3}) - re.Equal(EvictSlowTrendType, ops[0].Desc()) + re.Equal(types.EvictSlowTrendScheduler.String(), ops[0].Desc()) re.Zero(es2.conf.candidate()) re.Equal(uint64(1), es2.conf.evictedStore()) // Cannot balance leaders to store 1 diff --git a/pkg/schedule/schedulers/grant_hot_region.go b/pkg/schedule/schedulers/grant_hot_region.go index a19a4e1bf4b..4289effd7bd 100644 --- a/pkg/schedule/schedulers/grant_hot_region.go +++ b/pkg/schedule/schedulers/grant_hot_region.go @@ -30,6 +30,7 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + types "github.com/tikv/pd/pkg/schedule/type" "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/statistics/utils" @@ -92,18 +93,13 @@ func (conf *grantHotRegionSchedulerConfig) Clone() *grantHotRegionSchedulerConfi } func (conf *grantHotRegionSchedulerConfig) Persist() error { - name := conf.getSchedulerName() conf.RLock() defer conf.RUnlock() data, err := EncodeConfig(conf) if err != nil { return err } - return conf.storage.SaveSchedulerConfig(name, data) -} - -func (*grantHotRegionSchedulerConfig) getSchedulerName() string { - return GrantHotRegionName + return conf.storage.SaveSchedulerConfig(types.GrantHotRegionScheduler.String(), data) } func (conf *grantHotRegionSchedulerConfig) has(storeID uint64) bool { @@ -133,6 +129,7 @@ type grantHotRegionScheduler struct { func newGrantHotRegionScheduler(opController *operator.Controller, conf *grantHotRegionSchedulerConfig) *grantHotRegionScheduler { base := newBaseHotScheduler(opController, statistics.DefaultHistorySampleDuration, statistics.DefaultHistorySampleInterval) + base.tp = types.GrantHotRegionScheduler handler := newGrantHotRegionHandler(conf) ret := 
&grantHotRegionScheduler{ baseHotScheduler: base, @@ -142,14 +139,6 @@ func newGrantHotRegionScheduler(opController *operator.Controller, conf *grantHo return ret } -func (*grantHotRegionScheduler) GetName() string { - return GrantHotRegionName -} - -func (*grantHotRegionScheduler) GetType() string { - return GrantHotRegionType -} - func (s *grantHotRegionScheduler) EncodeConfig() ([]byte, error) { return EncodeConfig(s.conf) } @@ -180,10 +169,10 @@ func (s *grantHotRegionScheduler) IsScheduleAllowed(cluster sche.SchedulerCluste regionAllowed := s.OpController.OperatorCount(operator.OpRegion) < conf.GetRegionScheduleLimit() leaderAllowed := s.OpController.OperatorCount(operator.OpLeader) < conf.GetLeaderScheduleLimit() if !regionAllowed { - operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpRegion.String()).Inc() + operator.IncOperatorLimitCounter(s.GetType(), operator.OpRegion) } if !leaderAllowed { - operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpLeader.String()).Inc() + operator.IncOperatorLimitCounter(s.GetType(), operator.OpLeader) } return regionAllowed && leaderAllowed } diff --git a/pkg/schedule/schedulers/grant_leader.go b/pkg/schedule/schedulers/grant_leader.go index 21900fac85d..41e6debaafa 100644 --- a/pkg/schedule/schedulers/grant_leader.go +++ b/pkg/schedule/schedulers/grant_leader.go @@ -28,6 +28,7 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + types "github.com/tikv/pd/pkg/schedule/type" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/apiutil" "github.com/tikv/pd/pkg/utils/syncutil" @@ -82,18 +83,13 @@ func (conf *grantLeaderSchedulerConfig) Clone() *grantLeaderSchedulerConfig { } func (conf *grantLeaderSchedulerConfig) Persist() error { - name := conf.getSchedulerName() conf.RLock() defer conf.RUnlock() data, err := EncodeConfig(conf) if err != nil { return err } - return conf.storage.SaveSchedulerConfig(name, data) -} - -func (*grantLeaderSchedulerConfig) getSchedulerName() string { - return GrantLeaderName + return conf.storage.SaveSchedulerConfig(types.GrantLeaderScheduler.String(), data) } func (conf *grantLeaderSchedulerConfig) getRanges(id uint64) []string { @@ -159,7 +155,7 @@ type grantLeaderScheduler struct { // newGrantLeaderScheduler creates an admin scheduler that transfers all leaders // to a store. 
func newGrantLeaderScheduler(opController *operator.Controller, conf *grantLeaderSchedulerConfig) Scheduler { - base := NewBaseScheduler(opController) + base := NewBaseScheduler(opController, types.GrantLeaderScheduler) handler := newGrantLeaderHandler(conf) return &grantLeaderScheduler{ BaseScheduler: base, @@ -172,14 +168,6 @@ func (s *grantLeaderScheduler) ServeHTTP(w http.ResponseWriter, r *http.Request) s.handler.ServeHTTP(w, r) } -func (*grantLeaderScheduler) GetName() string { - return GrantLeaderName -} - -func (*grantLeaderScheduler) GetType() string { - return GrantLeaderType -} - func (s *grantLeaderScheduler) EncodeConfig() ([]byte, error) { return EncodeConfig(s.conf) } @@ -226,7 +214,7 @@ func (s *grantLeaderScheduler) CleanConfig(cluster sche.SchedulerCluster) { func (s *grantLeaderScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetSchedulerConfig().GetLeaderScheduleLimit() if !allowed { - operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpLeader.String()).Inc() + operator.IncOperatorLimitCounter(s.GetType(), operator.OpLeader) } return allowed } diff --git a/pkg/schedule/schedulers/hot_region.go b/pkg/schedule/schedulers/hot_region.go index f79d8fac760..fe9b3964139 100644 --- a/pkg/schedule/schedulers/hot_region.go +++ b/pkg/schedule/schedulers/hot_region.go @@ -34,6 +34,7 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + types "github.com/tikv/pd/pkg/schedule/type" "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/statistics/buckets" @@ -92,7 +93,7 @@ type baseHotScheduler struct { } func newBaseHotScheduler(opController *operator.Controller, sampleDuration time.Duration, sampleInterval time.Duration) *baseHotScheduler { - base := NewBaseScheduler(opController) + base := NewBaseScheduler(opController, types.BalanceHotRegionScheduler) ret := &baseHotScheduler{ BaseScheduler: base, regionPendings: make(map[uint64]*pendingInfluence), @@ -214,14 +215,6 @@ func newHotScheduler(opController *operator.Controller, conf *hotRegionScheduler return ret } -func (h *hotScheduler) GetName() string { - return h.name -} - -func (*hotScheduler) GetType() string { - return HotRegionType -} - func (h *hotScheduler) EncodeConfig() ([]byte, error) { return h.conf.EncodeConfig() } @@ -281,7 +274,7 @@ func (h *hotScheduler) GetNextInterval(time.Duration) time.Duration { func (h *hotScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { allowed := h.OpController.OperatorCount(operator.OpHotRegion) < cluster.GetSchedulerConfig().GetHotRegionScheduleLimit() if !allowed { - operator.OperatorLimitCounter.WithLabelValues(h.GetType(), operator.OpHotRegion.String()).Inc() + operator.IncOperatorLimitCounter(h.GetType(), operator.OpHotRegion) } return allowed } diff --git a/pkg/schedule/schedulers/init.go b/pkg/schedule/schedulers/init.go index 777c8b3d625..988bbc30475 100644 --- a/pkg/schedule/schedulers/init.go +++ b/pkg/schedule/schedulers/init.go @@ -75,7 +75,6 @@ func schedulersRegister() { return err } conf.Ranges = ranges - conf.Name = BalanceRegionName return nil } }) @@ -282,7 +281,6 @@ func schedulersRegister() { return err } conf.Ranges = ranges - conf.Name = LabelName return nil } }) @@ -307,7 +305,6 @@ func schedulersRegister() { return err } conf.Ranges = ranges - conf.Name = RandomMergeName return nil } }) @@ -370,7 +367,6 @@ func 
schedulersRegister() { } conf.Limit = limit } - conf.Name = ShuffleHotRegionName return nil } }) diff --git a/pkg/schedule/schedulers/label.go b/pkg/schedule/schedulers/label.go index 6b7a98f8d02..814f525a76c 100644 --- a/pkg/schedule/schedulers/label.go +++ b/pkg/schedule/schedulers/label.go @@ -24,6 +24,7 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + types "github.com/tikv/pd/pkg/schedule/type" "go.uber.org/zap" ) @@ -35,7 +36,6 @@ const ( ) type labelSchedulerConfig struct { - Name string `json:"name"` Ranges []core.KeyRange `json:"ranges"` // TODO: When we prepare to use Ranges, we will need to implement the ReloadConfig function for this scheduler. } @@ -50,19 +50,11 @@ type labelScheduler struct { // the store with the specific label. func newLabelScheduler(opController *operator.Controller, conf *labelSchedulerConfig) Scheduler { return &labelScheduler{ - BaseScheduler: NewBaseScheduler(opController), + BaseScheduler: NewBaseScheduler(opController, types.LabelScheduler), conf: conf, } } -func (s *labelScheduler) GetName() string { - return s.conf.Name -} - -func (*labelScheduler) GetType() string { - return LabelType -} - func (s *labelScheduler) EncodeConfig() ([]byte, error) { return EncodeConfig(s.conf) } @@ -70,7 +62,7 @@ func (s *labelScheduler) EncodeConfig() ([]byte, error) { func (s *labelScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetSchedulerConfig().GetLeaderScheduleLimit() if !allowed { - operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpLeader.String()).Inc() + operator.IncOperatorLimitCounter(s.GetType(), operator.OpLeader) } return allowed } diff --git a/pkg/schedule/schedulers/metrics.go b/pkg/schedule/schedulers/metrics.go index f8bd2b4d686..42170e43818 100644 --- a/pkg/schedule/schedulers/metrics.go +++ b/pkg/schedule/schedulers/metrics.go @@ -186,7 +186,7 @@ func grantLeaderCounterWithEvent(event string) prometheus.Counter { } func hotRegionCounterWithEvent(event string) prometheus.Counter { - return schedulerCounter.WithLabelValues(types.HotRegionScheduler.String(), event) + return schedulerCounter.WithLabelValues(types.BalanceHotRegionScheduler.String(), event) } func labelCounterWithEvent(event string) prometheus.Counter { diff --git a/pkg/schedule/schedulers/random_merge.go b/pkg/schedule/schedulers/random_merge.go index ff96afe03eb..2d425746cea 100644 --- a/pkg/schedule/schedulers/random_merge.go +++ b/pkg/schedule/schedulers/random_merge.go @@ -26,6 +26,7 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + types "github.com/tikv/pd/pkg/schedule/type" ) const ( @@ -36,7 +37,6 @@ const ( ) type randomMergeSchedulerConfig struct { - Name string `json:"name"` Ranges []core.KeyRange `json:"ranges"` // TODO: When we prepare to use Ranges, we will need to implement the ReloadConfig function for this scheduler. } @@ -49,21 +49,13 @@ type randomMergeScheduler struct { // newRandomMergeScheduler creates an admin scheduler that randomly picks two adjacent regions // then merges them. 
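The hunks above repeatedly replace direct calls to operator.OperatorLimitCounter.WithLabelValues(...).Inc() with operator.IncOperatorLimitCounter(...). The helper itself is not part of the hunks shown here; a minimal sketch of what these call sites assume, reusing only identifiers that appear in the diff (the actual definition in pkg/schedule/operator may differ in detail):

// Sketch only: wraps the old metric call so that callers pass the typed
// scheduler kind instead of a pre-formatted string label.
func IncOperatorLimitCounter(typ types.CheckerSchedulerType, kind OpKind) {
	OperatorLimitCounter.WithLabelValues(typ.String(), kind.String()).Inc()
}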
func newRandomMergeScheduler(opController *operator.Controller, conf *randomMergeSchedulerConfig) Scheduler { - base := NewBaseScheduler(opController) + base := NewBaseScheduler(opController, types.RandomMergeScheduler) return &randomMergeScheduler{ BaseScheduler: base, conf: conf, } } -func (s *randomMergeScheduler) GetName() string { - return s.conf.Name -} - -func (*randomMergeScheduler) GetType() string { - return RandomMergeType -} - func (s *randomMergeScheduler) EncodeConfig() ([]byte, error) { return EncodeConfig(s.conf) } @@ -71,7 +63,7 @@ func (s *randomMergeScheduler) EncodeConfig() ([]byte, error) { func (s *randomMergeScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { allowed := s.OpController.OperatorCount(operator.OpMerge) < cluster.GetSchedulerConfig().GetMergeScheduleLimit() if !allowed { - operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpMerge.String()).Inc() + operator.IncOperatorLimitCounter(s.GetType(), operator.OpMerge) } return allowed } @@ -80,7 +72,7 @@ func (s *randomMergeScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ( randomMergeCounter.Inc() store := filter.NewCandidates(cluster.GetStores()). - FilterSource(cluster.GetSchedulerConfig(), nil, nil, &filter.StoreStateFilter{ActionScope: s.conf.Name, MoveRegion: true, OperatorLevel: constant.Low}). + FilterSource(cluster.GetSchedulerConfig(), nil, nil, &filter.StoreStateFilter{ActionScope: s.GetName(), MoveRegion: true, OperatorLevel: constant.Low}). RandomPick() if store == nil { randomMergeNoSourceStoreCounter.Inc() diff --git a/pkg/schedule/schedulers/scatter_range.go b/pkg/schedule/schedulers/scatter_range.go index 17c67a154ab..8874eb19cff 100644 --- a/pkg/schedule/schedulers/scatter_range.go +++ b/pkg/schedule/schedulers/scatter_range.go @@ -25,6 +25,7 @@ import ( sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + types "github.com/tikv/pd/pkg/schedule/type" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/apiutil" "github.com/tikv/pd/pkg/utils/syncutil" @@ -106,7 +107,6 @@ func (conf *scatterRangeSchedulerConfig) getSchedulerName() string { type scatterRangeScheduler struct { *BaseScheduler - name string config *scatterRangeSchedulerConfig balanceLeader Scheduler balanceRegion Scheduler @@ -115,28 +115,27 @@ type scatterRangeScheduler struct { // newScatterRangeScheduler creates a scheduler that balances the distribution of leaders and regions that in the specified key range. 
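With GetName and GetType now provided by BaseScheduler (see the top of this patch, where GetName falls back to tp.String() when no explicit name is set), the constructors above only pass their typed kind to NewBaseScheduler. Two cases still override a base field right after construction: the hot-region family overwrites base.tp (grant-hot-region above, shuffle-hot-region later in this patch), and scatter-range, whose constructor follows, assigns an instance-specific name. A condensed sketch of the two overrides, assuming code inside the schedulers package with an *operator.Controller named opController in scope:

// Hot-region family: the shared base defaults to balance-hot-region and the
// concrete scheduler overwrites the type before wrapping it.
base := newBaseHotScheduler(opController,
	statistics.DefaultHistorySampleDuration, statistics.DefaultHistorySampleInterval)
base.tp = types.GrantHotRegionScheduler // or types.ShuffleHotRegionScheduler

// Per-instance name: scatter-range keeps a user-supplied name, so it sets
// BaseScheduler.name after construction instead of relying on the fallback.
sr := NewBaseScheduler(opController, types.ScatterRangeScheduler)
sr.name = "scatter-range-foo" // hypothetical instance name
_ = sr.GetName()              // "scatter-range-foo"; without the assignment it would be tp.String()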
func newScatterRangeScheduler(opController *operator.Controller, config *scatterRangeSchedulerConfig) Scheduler { - base := NewBaseScheduler(opController) + base := NewBaseScheduler(opController, types.ScatterRangeScheduler) - name := config.getSchedulerName() handler := newScatterRangeHandler(config) scheduler := &scatterRangeScheduler{ BaseScheduler: base, config: config, handler: handler, - name: name, balanceLeader: newBalanceLeaderScheduler( opController, &balanceLeaderSchedulerConfig{Ranges: []core.KeyRange{core.NewKeyRange("", "")}}, + // the name will not be persisted WithBalanceLeaderName("scatter-range-leader"), - WithBalanceLeaderFilterCounterName("scatter-range-leader"), ), balanceRegion: newBalanceRegionScheduler( opController, &balanceRegionSchedulerConfig{Ranges: []core.KeyRange{core.NewKeyRange("", "")}}, + // the name will not be persisted WithBalanceRegionName("scatter-range-region"), - WithBalanceRegionFilterCounterName("scatter-range-region"), ), } + scheduler.name = config.getSchedulerName() return scheduler } @@ -144,14 +143,6 @@ func (l *scatterRangeScheduler) ServeHTTP(w http.ResponseWriter, r *http.Request l.handler.ServeHTTP(w, r) } -func (l *scatterRangeScheduler) GetName() string { - return l.name -} - -func (*scatterRangeScheduler) GetType() string { - return ScatterRangeType -} - func (l *scatterRangeScheduler) EncodeConfig() ([]byte, error) { l.config.RLock() defer l.config.RUnlock() @@ -185,7 +176,7 @@ func (l *scatterRangeScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) func (l *scatterRangeScheduler) allowBalanceLeader(cluster sche.SchedulerCluster) bool { allowed := l.OpController.OperatorCount(operator.OpRange) < cluster.GetSchedulerConfig().GetLeaderScheduleLimit() if !allowed { - operator.OperatorLimitCounter.WithLabelValues(l.GetType(), operator.OpLeader.String()).Inc() + operator.IncOperatorLimitCounter(l.GetType(), operator.OpLeader) } return allowed } @@ -193,7 +184,7 @@ func (l *scatterRangeScheduler) allowBalanceLeader(cluster sche.SchedulerCluster func (l *scatterRangeScheduler) allowBalanceRegion(cluster sche.SchedulerCluster) bool { allowed := l.OpController.OperatorCount(operator.OpRange) < cluster.GetSchedulerConfig().GetRegionScheduleLimit() if !allowed { - operator.OperatorLimitCounter.WithLabelValues(l.GetType(), operator.OpRegion.String()).Inc() + operator.IncOperatorLimitCounter(l.GetType(), operator.OpRegion) } return allowed } diff --git a/pkg/schedule/schedulers/scheduler.go b/pkg/schedule/schedulers/scheduler.go index abace59a266..894544d9617 100644 --- a/pkg/schedule/schedulers/scheduler.go +++ b/pkg/schedule/schedulers/scheduler.go @@ -27,6 +27,7 @@ import ( sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + types "github.com/tikv/pd/pkg/schedule/type" "github.com/tikv/pd/pkg/storage/endpoint" "go.uber.org/zap" ) @@ -36,7 +37,7 @@ type Scheduler interface { http.Handler GetName() string // GetType should in accordance with the name passing to RegisterScheduler() - GetType() string + GetType() types.CheckerSchedulerType EncodeConfig() ([]byte, error) // ReloadConfig reloads the config from the storage. 
ReloadConfig() error diff --git a/pkg/schedule/schedulers/shuffle_hot_region.go b/pkg/schedule/schedulers/shuffle_hot_region.go index f4b566c56a4..32384a19df1 100644 --- a/pkg/schedule/schedulers/shuffle_hot_region.go +++ b/pkg/schedule/schedulers/shuffle_hot_region.go @@ -26,6 +26,7 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + types "github.com/tikv/pd/pkg/schedule/type" "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/apiutil" @@ -44,30 +45,23 @@ const ( type shuffleHotRegionSchedulerConfig struct { syncutil.RWMutex storage endpoint.ConfigStorage - Name string `json:"name"` Limit uint64 `json:"limit"` } -func (conf *shuffleHotRegionSchedulerConfig) getSchedulerName() string { - return conf.Name -} - func (conf *shuffleHotRegionSchedulerConfig) Clone() *shuffleHotRegionSchedulerConfig { conf.RLock() defer conf.RUnlock() return &shuffleHotRegionSchedulerConfig{ - Name: conf.Name, Limit: conf.Limit, } } func (conf *shuffleHotRegionSchedulerConfig) persistLocked() error { - name := conf.getSchedulerName() data, err := EncodeConfig(conf) if err != nil { return err } - return conf.storage.SaveSchedulerConfig(name, data) + return conf.storage.SaveSchedulerConfig(types.ShuffleHotRegionScheduler.String(), data) } func (conf *shuffleHotRegionSchedulerConfig) getLimit() uint64 { @@ -90,6 +84,7 @@ type shuffleHotRegionScheduler struct { func newShuffleHotRegionScheduler(opController *operator.Controller, conf *shuffleHotRegionSchedulerConfig) Scheduler { base := newBaseHotScheduler(opController, statistics.DefaultHistorySampleDuration, statistics.DefaultHistorySampleInterval) + base.tp = types.ShuffleHotRegionScheduler handler := newShuffleHotRegionHandler(conf) ret := &shuffleHotRegionScheduler{ baseHotScheduler: base, @@ -103,14 +98,6 @@ func (s *shuffleHotRegionScheduler) ServeHTTP(w http.ResponseWriter, r *http.Req s.handler.ServeHTTP(w, r) } -func (s *shuffleHotRegionScheduler) GetName() string { - return s.conf.Name -} - -func (*shuffleHotRegionScheduler) GetType() string { - return ShuffleHotRegionType -} - func (s *shuffleHotRegionScheduler) EncodeConfig() ([]byte, error) { return EncodeConfig(s.conf) } @@ -139,13 +126,13 @@ func (s *shuffleHotRegionScheduler) IsScheduleAllowed(cluster sche.SchedulerClus regionAllowed := s.OpController.OperatorCount(operator.OpRegion) < conf.GetRegionScheduleLimit() leaderAllowed := s.OpController.OperatorCount(operator.OpLeader) < conf.GetLeaderScheduleLimit() if !hotRegionAllowed { - operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpHotRegion.String()).Inc() + operator.IncOperatorLimitCounter(s.GetType(), operator.OpHotRegion) } if !regionAllowed { - operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpRegion.String()).Inc() + operator.IncOperatorLimitCounter(s.GetType(), operator.OpRegion) } if !leaderAllowed { - operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpLeader.String()).Inc() + operator.IncOperatorLimitCounter(s.GetType(), operator.OpLeader) } return hotRegionAllowed && regionAllowed && leaderAllowed } diff --git a/pkg/schedule/schedulers/shuffle_leader.go b/pkg/schedule/schedulers/shuffle_leader.go index 17b5fae6448..ce2c8cd31d5 100644 --- a/pkg/schedule/schedulers/shuffle_leader.go +++ b/pkg/schedule/schedulers/shuffle_leader.go @@ -23,6 +23,7 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" 
"github.com/tikv/pd/pkg/schedule/plan" + types "github.com/tikv/pd/pkg/schedule/type" ) const ( @@ -51,7 +52,7 @@ func newShuffleLeaderScheduler(opController *operator.Controller, conf *shuffleL &filter.StoreStateFilter{ActionScope: conf.Name, TransferLeader: true, OperatorLevel: constant.Low}, filter.NewSpecialUseFilter(conf.Name), } - base := NewBaseScheduler(opController) + base := NewBaseScheduler(opController, types.ShuffleLeaderScheduler) return &shuffleLeaderScheduler{ BaseScheduler: base, conf: conf, @@ -59,14 +60,6 @@ func newShuffleLeaderScheduler(opController *operator.Controller, conf *shuffleL } } -func (s *shuffleLeaderScheduler) GetName() string { - return s.conf.Name -} - -func (*shuffleLeaderScheduler) GetType() string { - return ShuffleLeaderType -} - func (s *shuffleLeaderScheduler) EncodeConfig() ([]byte, error) { return EncodeConfig(s.conf) } @@ -74,7 +67,7 @@ func (s *shuffleLeaderScheduler) EncodeConfig() ([]byte, error) { func (s *shuffleLeaderScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetSchedulerConfig().GetLeaderScheduleLimit() if !allowed { - operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpLeader.String()).Inc() + operator.IncOperatorLimitCounter(s.GetType(), operator.OpLeader) } return allowed } diff --git a/pkg/schedule/schedulers/shuffle_region.go b/pkg/schedule/schedulers/shuffle_region.go index 57f6c618962..b59e97b2a11 100644 --- a/pkg/schedule/schedulers/shuffle_region.go +++ b/pkg/schedule/schedulers/shuffle_region.go @@ -24,6 +24,7 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + types "github.com/tikv/pd/pkg/schedule/type" ) const ( @@ -46,7 +47,7 @@ func newShuffleRegionScheduler(opController *operator.Controller, conf *shuffleR &filter.StoreStateFilter{ActionScope: ShuffleRegionName, MoveRegion: true, OperatorLevel: constant.Low}, filter.NewSpecialUseFilter(ShuffleRegionName), } - base := NewBaseScheduler(opController) + base := NewBaseScheduler(opController, types.ShuffleRegionScheduler) return &shuffleRegionScheduler{ BaseScheduler: base, conf: conf, @@ -58,14 +59,6 @@ func (s *shuffleRegionScheduler) ServeHTTP(w http.ResponseWriter, r *http.Reques s.conf.ServeHTTP(w, r) } -func (*shuffleRegionScheduler) GetName() string { - return ShuffleRegionName -} - -func (*shuffleRegionScheduler) GetType() string { - return ShuffleRegionType -} - func (s *shuffleRegionScheduler) EncodeConfig() ([]byte, error) { return s.conf.EncodeConfig() } @@ -92,7 +85,7 @@ func (s *shuffleRegionScheduler) ReloadConfig() error { func (s *shuffleRegionScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { allowed := s.OpController.OperatorCount(operator.OpRegion) < cluster.GetSchedulerConfig().GetRegionScheduleLimit() if !allowed { - operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpRegion.String()).Inc() + operator.IncOperatorLimitCounter(s.GetType(), operator.OpRegion) } return allowed } diff --git a/pkg/schedule/schedulers/split_bucket.go b/pkg/schedule/schedulers/split_bucket.go index 7df3ee8f552..2031e232aee 100644 --- a/pkg/schedule/schedulers/split_bucket.go +++ b/pkg/schedule/schedulers/split_bucket.go @@ -28,6 +28,7 @@ import ( sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + types "github.com/tikv/pd/pkg/schedule/type" 
"github.com/tikv/pd/pkg/statistics/buckets" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/reflectutil" @@ -154,7 +155,7 @@ func newSplitBucketHandler(conf *splitBucketSchedulerConfig) http.Handler { } func newSplitBucketScheduler(opController *operator.Controller, conf *splitBucketSchedulerConfig) *splitBucketScheduler { - base := NewBaseScheduler(opController) + base := NewBaseScheduler(opController, types.SplitBucketScheduler) handler := newSplitBucketHandler(conf) ret := &splitBucketScheduler{ BaseScheduler: base, @@ -164,16 +165,6 @@ func newSplitBucketScheduler(opController *operator.Controller, conf *splitBucke return ret } -// GetName returns the name of the split bucket scheduler. -func (*splitBucketScheduler) GetName() string { - return SplitBucketName -} - -// GetType returns the type of the split bucket scheduler. -func (*splitBucketScheduler) GetType() string { - return SplitBucketType -} - func (s *splitBucketScheduler) ReloadConfig() error { s.conf.Lock() defer s.conf.Unlock() @@ -207,7 +198,7 @@ func (s *splitBucketScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) allowed := s.BaseScheduler.OpController.OperatorCount(operator.OpSplit) < s.conf.getSplitLimit() if !allowed { splitBuckerSplitLimitCounter.Inc() - operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpSplit.String()).Inc() + operator.IncOperatorLimitCounter(s.GetType(), operator.OpSplit) } return allowed } diff --git a/pkg/schedule/schedulers/transfer_witness_leader.go b/pkg/schedule/schedulers/transfer_witness_leader.go index 2050194b9ae..8b6e9c39f1d 100644 --- a/pkg/schedule/schedulers/transfer_witness_leader.go +++ b/pkg/schedule/schedulers/transfer_witness_leader.go @@ -24,6 +24,7 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + types "github.com/tikv/pd/pkg/schedule/type" ) const ( @@ -48,35 +49,27 @@ type transferWitnessLeaderScheduler struct { // newTransferWitnessLeaderScheduler creates an admin scheduler that transfers witness leader of a region. 
func newTransferWitnessLeaderScheduler(opController *operator.Controller) Scheduler { return &transferWitnessLeaderScheduler{ - BaseScheduler: NewBaseScheduler(opController), + BaseScheduler: NewBaseScheduler(opController, types.TransferWitnessLeaderScheduler), regions: make(chan *core.RegionInfo, transferWitnessLeaderRecvMaxRegionSize), } } -func (*transferWitnessLeaderScheduler) GetName() string { - return TransferWitnessLeaderName -} - -func (*transferWitnessLeaderScheduler) GetType() string { - return TransferWitnessLeaderType -} - func (*transferWitnessLeaderScheduler) IsScheduleAllowed(sche.SchedulerCluster) bool { return true } func (s *transferWitnessLeaderScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { transferWitnessLeaderCounter.Inc() - return s.scheduleTransferWitnessLeaderBatch(s.GetName(), s.GetType(), cluster, transferWitnessLeaderBatchSize), nil + return s.scheduleTransferWitnessLeaderBatch(s.GetName(), cluster, transferWitnessLeaderBatchSize), nil } -func (s *transferWitnessLeaderScheduler) scheduleTransferWitnessLeaderBatch(name, typ string, cluster sche.SchedulerCluster, batchSize int) []*operator.Operator { +func (s *transferWitnessLeaderScheduler) scheduleTransferWitnessLeaderBatch(name string, cluster sche.SchedulerCluster, batchSize int) []*operator.Operator { var ops []*operator.Operator batchLoop: for i := 0; i < batchSize; i++ { select { case region := <-s.regions: - op, err := scheduleTransferWitnessLeader(name, typ, cluster, region) + op, err := scheduleTransferWitnessLeader(name, cluster, region) if err != nil { log.Debug("fail to create transfer leader operator", errs.ZapError(err)) continue @@ -93,7 +86,7 @@ batchLoop: return ops } -func scheduleTransferWitnessLeader(name, typ string, cluster sche.SchedulerCluster, region *core.RegionInfo) (*operator.Operator, error) { +func scheduleTransferWitnessLeader(name string, cluster sche.SchedulerCluster, region *core.RegionInfo) (*operator.Operator, error) { var filters []filter.Filter unhealthyPeerStores := make(map[uint64]struct{}) for _, peer := range region.GetDownPeers() { @@ -102,7 +95,8 @@ func scheduleTransferWitnessLeader(name, typ string, cluster sche.SchedulerClust for _, peer := range region.GetPendingPeers() { unhealthyPeerStores[peer.GetStoreId()] = struct{}{} } - filters = append(filters, filter.NewExcludedFilter(name, nil, unhealthyPeerStores), &filter.StoreStateFilter{ActionScope: name, TransferLeader: true, OperatorLevel: constant.Urgent}) + filters = append(filters, filter.NewExcludedFilter(name, nil, unhealthyPeerStores), + &filter.StoreStateFilter{ActionScope: name, TransferLeader: true, OperatorLevel: constant.Urgent}) candidates := filter.NewCandidates(cluster.GetFollowerStores(region)).FilterTarget(cluster.GetSchedulerConfig(), nil, nil, filters...) // Compatible with old TiKV transfer leader logic. 
target := candidates.RandomPick() @@ -116,7 +110,7 @@ func scheduleTransferWitnessLeader(name, typ string, cluster sche.SchedulerClust for _, t := range targets { targetIDs = append(targetIDs, t.GetID()) } - return operator.CreateTransferLeaderOperator(typ, cluster, region, target.GetID(), targetIDs, operator.OpWitnessLeader) + return operator.CreateTransferLeaderOperator(name, cluster, region, target.GetID(), targetIDs, operator.OpWitnessLeader) } // RecvRegionInfo receives a checked region from coordinator diff --git a/pkg/schedule/type/type.go b/pkg/schedule/type/type.go index 16910c631fd..1f6211a9783 100644 --- a/pkg/schedule/type/type.go +++ b/pkg/schedule/type/type.go @@ -52,8 +52,8 @@ const ( GrantLeaderScheduler CheckerSchedulerType = "grant-leader-scheduler" // GrantHotRegionScheduler is grant hot region scheduler name. GrantHotRegionScheduler CheckerSchedulerType = "grant-hot-region-scheduler" - // HotRegionScheduler is balance hot region scheduler name. - HotRegionScheduler CheckerSchedulerType = "balance-hot-region-scheduler" + // BalanceHotRegionScheduler is balance hot region scheduler name. + BalanceHotRegionScheduler CheckerSchedulerType = "balance-hot-region-scheduler" // RandomMergeScheduler is random merge scheduler name. RandomMergeScheduler CheckerSchedulerType = "random-merge-scheduler" // ScatterRangeScheduler is scatter range scheduler name. @@ -73,8 +73,10 @@ const ( LabelScheduler CheckerSchedulerType = "label-scheduler" ) -// SchedulerTypeCompatibleMap temporarily exists for compatibility. -// TODO: remove it after all components use CheckerSchedulerType. +// SchedulerTypeCompatibleMap exists for compatibility. +// +// It is used in the `PersistOptions` and `PersistConfig`. These two structs +// are persisted in the storage, so we need to keep the compatibility. var SchedulerTypeCompatibleMap = map[CheckerSchedulerType]string{ BalanceLeaderScheduler: "balance-leader", BalanceRegionScheduler: "balance-region", @@ -84,7 +86,7 @@ var SchedulerTypeCompatibleMap = map[CheckerSchedulerType]string{ EvictSlowTrendScheduler: "evict-slow-trend", GrantLeaderScheduler: "grant-leader", GrantHotRegionScheduler: "grant-hot-region", - HotRegionScheduler: "hot-region", + BalanceHotRegionScheduler: "hot-region", RandomMergeScheduler: "random-merge", ScatterRangeScheduler: "scatter-range", ShuffleHotRegionScheduler: "shuffle-hot-region", @@ -105,7 +107,7 @@ var SchedulerStr2Type = map[string]CheckerSchedulerType{ "evict-slow-trend-scheduler": EvictSlowTrendScheduler, "grant-leader-scheduler": GrantLeaderScheduler, "grant-hot-region-scheduler": GrantHotRegionScheduler, - "balance-hot-region-scheduler": HotRegionScheduler, + "balance-hot-region-scheduler": BalanceHotRegionScheduler, "random-merge-scheduler": RandomMergeScheduler, // TODO: update to `scatter-range-scheduler` "scatter-range": ScatterRangeScheduler, diff --git a/plugin/scheduler_example/evict_leader.go b/plugin/scheduler_example/evict_leader.go index 9ad797e0ae4..83d4771adc4 100644 --- a/plugin/scheduler_example/evict_leader.go +++ b/plugin/scheduler_example/evict_leader.go @@ -30,6 +30,7 @@ import ( "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" "github.com/tikv/pd/pkg/schedule/schedulers" + types "github.com/tikv/pd/pkg/schedule/type" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/apiutil" "github.com/tikv/pd/pkg/utils/syncutil" @@ -42,6 +43,8 @@ const ( // EvictLeaderType is evict leader scheduler type. 
EvictLeaderType = "user-evict-leader" noStoreInSchedulerInfo = "No store in user-evict-leader-scheduler-config" + + UserEvictLeaderScheduler types.CheckerSchedulerType = "user-evict-leader-scheduler" ) func init() { @@ -127,18 +130,13 @@ func (conf *evictLeaderSchedulerConfig) Clone() *evictLeaderSchedulerConfig { // Persist saves the config. func (conf *evictLeaderSchedulerConfig) Persist() error { - name := conf.getScheduleName() conf.mu.RLock() defer conf.mu.RUnlock() data, err := schedulers.EncodeConfig(conf) if err != nil { return err } - return conf.storage.SaveSchedulerConfig(name, data) -} - -func (*evictLeaderSchedulerConfig) getScheduleName() string { - return EvictLeaderName + return conf.storage.SaveSchedulerConfig(EvictLeaderName, data) } func (conf *evictLeaderSchedulerConfig) getRanges(id uint64) []string { @@ -160,7 +158,7 @@ type evictLeaderScheduler struct { // newEvictLeaderScheduler creates an admin scheduler that transfers all leaders // out of a store. func newEvictLeaderScheduler(opController *operator.Controller, conf *evictLeaderSchedulerConfig) schedulers.Scheduler { - base := schedulers.NewBaseScheduler(opController) + base := schedulers.NewBaseScheduler(opController, UserEvictLeaderScheduler) handler := newEvictLeaderHandler(conf) return &evictLeaderScheduler{ BaseScheduler: base, @@ -174,17 +172,6 @@ func (s *evictLeaderScheduler) ServeHTTP(w http.ResponseWriter, r *http.Request) s.handler.ServeHTTP(w, r) } -// GetName returns the scheduler name. -func (*evictLeaderScheduler) GetName() string { - return EvictLeaderName -} - -// GetType returns the scheduler type. -func (*evictLeaderScheduler) GetType() string { - return EvictLeaderType -} - -// EncodeConfig serializes the config. func (s *evictLeaderScheduler) EncodeConfig() ([]byte, error) { s.conf.mu.RLock() defer s.conf.mu.RUnlock() @@ -217,7 +204,7 @@ func (s *evictLeaderScheduler) CleanConfig(cluster sche.SchedulerCluster) { func (s *evictLeaderScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetSchedulerConfig().GetLeaderScheduleLimit() if !allowed { - operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpLeader.String()).Inc() + operator.IncOperatorLimitCounter(s.GetType(), operator.OpLeader) } return allowed } diff --git a/server/config/persist_options.go b/server/config/persist_options.go index d8a7d69f783..b6963a6645a 100644 --- a/server/config/persist_options.go +++ b/server/config/persist_options.go @@ -33,6 +33,7 @@ import ( "github.com/tikv/pd/pkg/core/constant" "github.com/tikv/pd/pkg/core/storelimit" sc "github.com/tikv/pd/pkg/schedule/config" + types "github.com/tikv/pd/pkg/schedule/type" "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/etcdutil" @@ -669,10 +670,11 @@ func (o *PersistOptions) GetSchedulers() sc.SchedulerConfigs { } // IsSchedulerDisabled returns if the scheduler is disabled. -func (o *PersistOptions) IsSchedulerDisabled(t string) bool { +func (o *PersistOptions) IsSchedulerDisabled(tp types.CheckerSchedulerType) bool { + oldType := types.SchedulerTypeCompatibleMap[tp] schedulers := o.GetScheduleConfig().Schedulers for _, s := range schedulers { - if t == s.Type { + if oldType == s.Type { return s.Disable } } @@ -690,33 +692,35 @@ func (o *PersistOptions) GetHotRegionsReservedDays() uint64 { } // AddSchedulerCfg adds the scheduler configurations. 
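The AddSchedulerCfg and RemoveSchedulerCfg hunks that follow accept the new typed scheduler name but still persist the legacy short string, translating through SchedulerTypeCompatibleMap from the type.go hunk above. A minimal usage fragment, using only identifiers that appear in the surrounding hunks:

tp := types.EvictLeaderScheduler
// "evict-leader": the string form that remains in the persisted schedule config.
oldType := types.SchedulerTypeCompatibleMap[tp]
// Example entry as it would be appended to ScheduleConfig.Schedulers
// (the args here are illustrative).
cfg := sc.SchedulerConfig{Type: oldType, Args: []string{"1"}, Disable: false}
_ = cfg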
-func (o *PersistOptions) AddSchedulerCfg(tp string, args []string) { +func (o *PersistOptions) AddSchedulerCfg(tp types.CheckerSchedulerType, args []string) { + oldType := types.SchedulerTypeCompatibleMap[tp] v := o.GetScheduleConfig().Clone() for i, schedulerCfg := range v.Schedulers { // comparing args is to cover the case that there are schedulers in same type but not with same name // such as two schedulers of type "evict-leader", // one name is "evict-leader-scheduler-1" and the other is "evict-leader-scheduler-2" - if reflect.DeepEqual(schedulerCfg, sc.SchedulerConfig{Type: tp, Args: args, Disable: false}) { + if reflect.DeepEqual(schedulerCfg, sc.SchedulerConfig{Type: oldType, Args: args, Disable: false}) { return } - if reflect.DeepEqual(schedulerCfg, sc.SchedulerConfig{Type: tp, Args: args, Disable: true}) { + if reflect.DeepEqual(schedulerCfg, sc.SchedulerConfig{Type: oldType, Args: args, Disable: true}) { schedulerCfg.Disable = false v.Schedulers[i] = schedulerCfg o.SetScheduleConfig(v) return } } - v.Schedulers = append(v.Schedulers, sc.SchedulerConfig{Type: tp, Args: args, Disable: false}) + v.Schedulers = append(v.Schedulers, sc.SchedulerConfig{Type: oldType, Args: args, Disable: false}) o.SetScheduleConfig(v) } // RemoveSchedulerCfg removes the scheduler configurations. -func (o *PersistOptions) RemoveSchedulerCfg(tp string) { +func (o *PersistOptions) RemoveSchedulerCfg(tp types.CheckerSchedulerType) { + oldType := types.SchedulerTypeCompatibleMap[tp] v := o.GetScheduleConfig().Clone() for i, schedulerCfg := range v.Schedulers { - if tp == schedulerCfg.Type { - if sc.IsDefaultScheduler(tp) { + if oldType == schedulerCfg.Type { + if sc.IsDefaultScheduler(oldType) { schedulerCfg.Disable = true v.Schedulers[i] = schedulerCfg } else { diff --git a/server/handler.go b/server/handler.go index cc924cf9a0b..d36dd6656ae 100644 --- a/server/handler.go +++ b/server/handler.go @@ -186,6 +186,7 @@ func (h *Handler) GetAllRequestHistoryHotRegion(request *HistoryHotRegionsReques // AddScheduler adds a scheduler. func (h *Handler) AddScheduler(tp types.CheckerSchedulerType, args ...string) error { + // TODO: remove this map in subsequent PRs, because we need use new type in the `CreateScheduler`. 
name := types.SchedulerTypeCompatibleMap[tp] c, err := h.GetRaftCluster() if err != nil { @@ -208,19 +209,19 @@ func (h *Handler) AddScheduler(tp types.CheckerSchedulerType, args ...string) er log.Error("can not add scheduler handler", zap.String("scheduler-name", s.GetName()), zap.Strings("scheduler-args", args), errs.ZapError(err)) return err } - log.Info("add scheduler handler successfully", zap.String("scheduler-name", name), zap.Strings("scheduler-args", args)) + log.Info("add scheduler handler successfully", zap.String("scheduler-name", s.GetName()), zap.Strings("scheduler-args", args)) } else { if err = c.AddScheduler(s, args...); err != nil { log.Error("can not add scheduler", zap.String("scheduler-name", s.GetName()), zap.Strings("scheduler-args", args), errs.ZapError(err)) return err } - log.Info("add scheduler successfully", zap.String("scheduler-name", name), zap.Strings("scheduler-args", args)) + log.Info("add scheduler successfully", zap.String("scheduler-name", s.GetName()), zap.Strings("scheduler-args", args)) } if err = h.opt.Persist(c.GetStorage()); err != nil { log.Error("can not persist scheduler config", errs.ZapError(err)) return err } - log.Info("persist scheduler config successfully", zap.String("scheduler-name", name), zap.Strings("scheduler-args", args)) + log.Info("persist scheduler config successfully", zap.String("scheduler-name", s.GetName()), zap.Strings("scheduler-args", args)) return nil } From 95645aabe25655af7c23f405841dec117fe080e5 Mon Sep 17 00:00:00 2001 From: Hu# Date: Fri, 2 Aug 2024 16:40:23 +0800 Subject: [PATCH 7/9] pdms: Add the name field to the startup parameters (#8461) ref tikv/pd#7995 Signed-off-by: husharp Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com> --- cmd/pd-server/main.go | 3 +++ pkg/mcs/discovery/discover.go | 8 ++++---- pkg/mcs/discovery/registry_entry.go | 3 +++ pkg/mcs/resourcemanager/server/config.go | 1 + pkg/mcs/resourcemanager/server/server.go | 2 +- pkg/mcs/resourcemanager/server/testutil.go | 4 +++- pkg/mcs/scheduling/server/config/config.go | 1 + pkg/mcs/scheduling/server/server.go | 1 + pkg/mcs/tso/server/config.go | 1 + pkg/mcs/tso/server/server.go | 1 + pkg/mcs/tso/server/testutil.go | 2 ++ tests/testutil.go | 3 +++ 12 files changed, 24 insertions(+), 6 deletions(-) diff --git a/cmd/pd-server/main.go b/cmd/pd-server/main.go index 553b93ed0ef..459b18605e9 100644 --- a/cmd/pd-server/main.go +++ b/cmd/pd-server/main.go @@ -94,6 +94,7 @@ func NewTSOServiceCommand() *cobra.Command { Short: "Run the TSO service", Run: tso.CreateServerWrapper, } + cmd.Flags().StringP("name", "", "", "human-readable name for this tso member") cmd.Flags().BoolP("version", "V", false, "print version information and exit") cmd.Flags().StringP("config", "", "", "config file") cmd.Flags().StringP("backend-endpoints", "", "", "url for etcd client") @@ -114,6 +115,7 @@ func NewSchedulingServiceCommand() *cobra.Command { Short: "Run the scheduling service", Run: scheduling.CreateServerWrapper, } + cmd.Flags().StringP("name", "", "", "human-readable name for this scheduling member") cmd.Flags().BoolP("version", "V", false, "print version information and exit") cmd.Flags().StringP("config", "", "", "config file") cmd.Flags().StringP("backend-endpoints", "", "", "url for etcd client") @@ -134,6 +136,7 @@ func NewResourceManagerServiceCommand() *cobra.Command { Short: "Run the resource manager service", Run: resource_manager.CreateServerWrapper, } + cmd.Flags().StringP("name", "", "", "human-readable name for this 
resource manager member") cmd.Flags().BoolP("version", "V", false, "print version information and exit") cmd.Flags().StringP("config", "", "", "config file") cmd.Flags().StringP("backend-endpoints", "", "", "url for etcd client") diff --git a/pkg/mcs/discovery/discover.go b/pkg/mcs/discovery/discover.go index 1ce5ecda51d..3e1d678cffb 100644 --- a/pkg/mcs/discovery/discover.go +++ b/pkg/mcs/discovery/discover.go @@ -45,14 +45,14 @@ func Discover(cli *clientv3.Client, clusterID, serviceName string) ([]string, er } // GetMSMembers returns all the members of the specified service name. -func GetMSMembers(name string, client *clientv3.Client) ([]ServiceRegistryEntry, error) { - switch name { +func GetMSMembers(serviceName string, client *clientv3.Client) ([]ServiceRegistryEntry, error) { + switch serviceName { case utils.TSOServiceName, utils.SchedulingServiceName, utils.ResourceManagerServiceName: clusterID, err := etcdutil.GetClusterID(client, utils.ClusterIDPath) if err != nil { return nil, err } - servicePath := ServicePath(strconv.FormatUint(clusterID, 10), name) + servicePath := ServicePath(strconv.FormatUint(clusterID, 10), serviceName) resps, err := kv.NewSlowLogTxn(client).Then(clientv3.OpGet(servicePath, clientv3.WithPrefix())).Commit() if err != nil { return nil, errs.ErrEtcdKVGet.Wrap(err).GenWithStackByCause() @@ -75,5 +75,5 @@ func GetMSMembers(name string, client *clientv3.Client) ([]ServiceRegistryEntry, return entries, nil } - return nil, errors.Errorf("unknown service name %s", name) + return nil, errors.Errorf("unknown service name %s", serviceName) } diff --git a/pkg/mcs/discovery/registry_entry.go b/pkg/mcs/discovery/registry_entry.go index bf11ae5c8a4..db4ac44a2cc 100644 --- a/pkg/mcs/discovery/registry_entry.go +++ b/pkg/mcs/discovery/registry_entry.go @@ -23,6 +23,9 @@ import ( // ServiceRegistryEntry is the registry entry of a service type ServiceRegistryEntry struct { + // The specific value will be assigned only if the startup parameter is added. + // If not assigned, the default value(service-hostname) will be used. + Name string `json:"name"` ServiceAddr string `json:"service-addr"` Version string `json:"version"` GitHash string `json:"git-hash"` diff --git a/pkg/mcs/resourcemanager/server/config.go b/pkg/mcs/resourcemanager/server/config.go index 2ccdfb05cc4..03fc6718926 100644 --- a/pkg/mcs/resourcemanager/server/config.go +++ b/pkg/mcs/resourcemanager/server/config.go @@ -202,6 +202,7 @@ func (c *Config) Parse(flagSet *pflag.FlagSet) error { } // Ignore the error check here + configutil.AdjustCommandLineString(flagSet, &c.Name, "name") configutil.AdjustCommandLineString(flagSet, &c.Log.Level, "log-level") configutil.AdjustCommandLineString(flagSet, &c.Log.File.Filename, "log-file") configutil.AdjustCommandLineString(flagSet, &c.Metric.PushAddress, "metrics-addr") diff --git a/pkg/mcs/resourcemanager/server/server.go b/pkg/mcs/resourcemanager/server/server.go index 708a11344d4..19317d8202a 100644 --- a/pkg/mcs/resourcemanager/server/server.go +++ b/pkg/mcs/resourcemanager/server/server.go @@ -339,7 +339,7 @@ func (s *Server) startServer() (err error) { s.startServerLoop() // Server has started. 
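Taken together, the hunks in this patch thread the new --name startup parameter through three layers: the pd-server sub-commands declare the flag, each microservice's Config.Parse copies it into Config.Name via configutil, and the servers publish it in their ServiceRegistryEntry (the resource manager's assignment appears in the server.go hunk that follows). A condensed sketch of the flow, stitched together from those hunks rather than taken from any single file; the <service> path is a placeholder:

// cmd/pd-server/main.go: declare the flag on the service sub-command.
cmd.Flags().StringP("name", "", "", "human-readable name for this tso member")

// pkg/mcs/<service>/config.go: copy the flag into the config during Parse.
configutil.AdjustCommandLineString(flagSet, &c.Name, "name")

// pkg/mcs/<service>/server.go: expose the name in the service registry entry.
entry := &discovery.ServiceRegistryEntry{ServiceAddr: s.cfg.AdvertiseListenAddr, Name: s.Name()}
_ = entry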
- entry := &discovery.ServiceRegistryEntry{ServiceAddr: s.cfg.AdvertiseListenAddr} + entry := &discovery.ServiceRegistryEntry{ServiceAddr: s.cfg.AdvertiseListenAddr, Name: s.Name()} serializedEntry, err := entry.Serialize() if err != nil { return err diff --git a/pkg/mcs/resourcemanager/server/testutil.go b/pkg/mcs/resourcemanager/server/testutil.go index 0277e5e8a8f..3577301258c 100644 --- a/pkg/mcs/resourcemanager/server/testutil.go +++ b/pkg/mcs/resourcemanager/server/testutil.go @@ -49,16 +49,18 @@ func NewTestServer(ctx context.Context, re *require.Assertions, cfg *Config) (*S // GenerateConfig generates a new config with the given options. func GenerateConfig(c *Config) (*Config, error) { arguments := []string{ + "--name=" + c.Name, "--listen-addr=" + c.ListenAddr, "--advertise-listen-addr=" + c.AdvertiseListenAddr, "--backend-endpoints=" + c.BackendEndpoints, } flagSet := pflag.NewFlagSet("test", pflag.ContinueOnError) + flagSet.StringP("name", "", "", "human-readable name for this resource manager member") flagSet.BoolP("version", "V", false, "print version information and exit") flagSet.StringP("config", "", "", "config file") flagSet.StringP("backend-endpoints", "", "", "url for etcd client") - flagSet.StringP("listen-addr", "", "", "listen address for tso service") + flagSet.StringP("listen-addr", "", "", "listen address for resource manager service") flagSet.StringP("advertise-listen-addr", "", "", "advertise urls for listen address (default '${listen-addr}')") flagSet.StringP("cacert", "", "", "path of file that contains list of trusted TLS CAs") flagSet.StringP("cert", "", "", "path of file that contains X509 certificate in PEM format") diff --git a/pkg/mcs/scheduling/server/config/config.go b/pkg/mcs/scheduling/server/config/config.go index ac59de5b97a..c1fcad33ace 100644 --- a/pkg/mcs/scheduling/server/config/config.go +++ b/pkg/mcs/scheduling/server/config/config.go @@ -105,6 +105,7 @@ func (c *Config) Parse(flagSet *pflag.FlagSet) error { } // Ignore the error check here + configutil.AdjustCommandLineString(flagSet, &c.Name, "name") configutil.AdjustCommandLineString(flagSet, &c.Log.Level, "log-level") configutil.AdjustCommandLineString(flagSet, &c.Log.File.Filename, "log-file") configutil.AdjustCommandLineString(flagSet, &c.Metric.PushAddress, "metrics-addr") diff --git a/pkg/mcs/scheduling/server/server.go b/pkg/mcs/scheduling/server/server.go index 8eb9e49d964..50936325f45 100644 --- a/pkg/mcs/scheduling/server/server.go +++ b/pkg/mcs/scheduling/server/server.go @@ -427,6 +427,7 @@ func (s *Server) startServer() (err error) { GitHash: versioninfo.PDGitHash, DeployPath: deployPath, StartTimestamp: s.StartTimestamp(), + Name: s.Name(), } uniqueName := s.cfg.GetAdvertiseListenAddr() uniqueID := memberutil.GenerateUniqueID(uniqueName) diff --git a/pkg/mcs/tso/server/config.go b/pkg/mcs/tso/server/config.go index 8cfef98ebaf..82ac777ad06 100644 --- a/pkg/mcs/tso/server/config.go +++ b/pkg/mcs/tso/server/config.go @@ -167,6 +167,7 @@ func (c *Config) Parse(flagSet *pflag.FlagSet) error { } // Ignore the error check here + configutil.AdjustCommandLineString(flagSet, &c.Name, "name") configutil.AdjustCommandLineString(flagSet, &c.Log.Level, "log-level") configutil.AdjustCommandLineString(flagSet, &c.Log.File.Filename, "log-file") configutil.AdjustCommandLineString(flagSet, &c.Metric.PushAddress, "metrics-addr") diff --git a/pkg/mcs/tso/server/server.go b/pkg/mcs/tso/server/server.go index 60ce2917ed5..a120cbc9868 100644 --- a/pkg/mcs/tso/server/server.go +++ 
b/pkg/mcs/tso/server/server.go @@ -382,6 +382,7 @@ func (s *Server) startServer() (err error) { GitHash: versioninfo.PDGitHash, DeployPath: deployPath, StartTimestamp: s.StartTimestamp(), + Name: s.Name(), } s.keyspaceGroupManager = tso.NewKeyspaceGroupManager( s.serverLoopCtx, s.serviceID, s.GetClient(), s.GetHTTPClient(), s.cfg.AdvertiseListenAddr, diff --git a/pkg/mcs/tso/server/testutil.go b/pkg/mcs/tso/server/testutil.go index cf5d45e7754..5dcfd4759b9 100644 --- a/pkg/mcs/tso/server/testutil.go +++ b/pkg/mcs/tso/server/testutil.go @@ -34,12 +34,14 @@ func MustNewGrpcClient(re *require.Assertions, addr string) (*grpc.ClientConn, t // GenerateConfig generates a new config with the given options. func GenerateConfig(c *Config) (*Config, error) { arguments := []string{ + "--name=" + c.Name, "--listen-addr=" + c.ListenAddr, "--advertise-listen-addr=" + c.AdvertiseListenAddr, "--backend-endpoints=" + c.BackendEndpoints, } flagSet := pflag.NewFlagSet("test", pflag.ContinueOnError) + flagSet.StringP("name", "", "", "human-readable name for this tso member") flagSet.BoolP("version", "V", false, "print version information and exit") flagSet.StringP("config", "", "", "config file") flagSet.StringP("backend-endpoints", "", "", "url for etcd client") diff --git a/tests/testutil.go b/tests/testutil.go index 2fc87298d07..c895d206c05 100644 --- a/tests/testutil.go +++ b/tests/testutil.go @@ -110,6 +110,7 @@ func StartSingleResourceManagerTestServer(ctx context.Context, re *require.Asser cfg := rm.NewConfig() cfg.BackendEndpoints = backendEndpoints cfg.ListenAddr = listenAddrs + cfg.Name = cfg.ListenAddr cfg, err := rm.GenerateConfig(cfg) re.NoError(err) @@ -127,6 +128,7 @@ func StartSingleTSOTestServerWithoutCheck(ctx context.Context, re *require.Asser cfg := tso.NewConfig() cfg.BackendEndpoints = backendEndpoints cfg.ListenAddr = listenAddrs + cfg.Name = cfg.ListenAddr cfg, err := tso.GenerateConfig(cfg) re.NoError(err) // Setup the logger. @@ -164,6 +166,7 @@ func StartSingleSchedulingTestServer(ctx context.Context, re *require.Assertions cfg := sc.NewConfig() cfg.BackendEndpoints = backendEndpoints cfg.ListenAddr = listenAddrs + cfg.Name = cfg.ListenAddr cfg, err := scheduling.GenerateConfig(cfg) re.NoError(err) From aa85b6c0047b0f2af37c1bd271ee194af1c0dd56 Mon Sep 17 00:00:00 2001 From: lucasliang Date: Fri, 2 Aug 2024 16:47:52 +0800 Subject: [PATCH 8/9] *: enlarge the default value of `max-merge-region-size`. (#8445) close tikv/pd#8484, ref tikv/tikv#17309 This PR enlarges the default value of `max-merge-region-size` from `20` to `54`, keeping it compatible with TiKV's default region size, which has been enlarged from `96MB` to `256MB` for the stability of large clusters. Signed-off-by: lucasliang --- conf/config.toml | 4 ++-- pkg/schedule/config/config.go | 11 +++++++---- tools/pd-ctl/tests/config/config_test.go | 6 ++++-- tools/pd-ctl/tests/region/region_test.go | 2 +- 4 files changed, 14 insertions(+), 9 deletions(-) diff --git a/conf/config.toml b/conf/config.toml index 438d2c857a5..0c4acf5fd8c 100644 --- a/conf/config.toml +++ b/conf/config.toml @@ -111,9 +111,9 @@ [schedule] ## Controls the size limit of Region Merge. -# max-merge-region-size = 20 +# max-merge-region-size = 54 ## Specifies the upper limit of the Region Merge key. -# max-merge-region-keys = 200000 +# max-merge-region-keys = 540000 ## Controls the time interval between the split and merge operations on the same Region. 
# split-merge-interval = "1h" ## When PD fails to receive the heartbeat from a store after the specified period of time, diff --git a/pkg/schedule/config/config.go b/pkg/schedule/config/config.go index 5a67a547483..d35f7ac6383 100644 --- a/pkg/schedule/config/config.go +++ b/pkg/schedule/config/config.go @@ -27,10 +27,13 @@ import ( const ( // DefaultMaxReplicas is the default number of replicas for each region. - DefaultMaxReplicas = 3 - defaultMaxSnapshotCount = 64 - defaultMaxPendingPeerCount = 64 - defaultMaxMergeRegionSize = 20 + DefaultMaxReplicas = 3 + defaultMaxSnapshotCount = 64 + defaultMaxPendingPeerCount = 64 + // defaultMaxMergeRegionSize is the default maximum size of region when regions can be merged. + // After https://github.com/tikv/tikv/issues/17309, the default value is enlarged from 20 to 54, + // to make it compatible with the default value of region size of tikv. + defaultMaxMergeRegionSize = 54 defaultLeaderScheduleLimit = 4 defaultRegionScheduleLimit = 2048 defaultWitnessScheduleLimit = 4 diff --git a/tools/pd-ctl/tests/config/config_test.go b/tools/pd-ctl/tests/config/config_test.go index 2a9f7bb2353..f3c261e1f49 100644 --- a/tools/pd-ctl/tests/config/config_test.go +++ b/tools/pd-ctl/tests/config/config_test.go @@ -181,9 +181,11 @@ func (suite *configTestSuite) checkConfig(cluster *pdTests.TestCluster) { scheduleConfig.MaxMergeRegionKeys = scheduleConfig.GetMaxMergeRegionKeys() re.Equal(scheduleConfig, &scheduleCfg) - re.Equal(20, int(svr.GetScheduleConfig().MaxMergeRegionSize)) + // After https://github.com/tikv/tikv/issues/17309, the default value is enlarged from 20 to 54, + // to make it compatible with the default value of region size of tikv. + re.Equal(54, int(svr.GetScheduleConfig().MaxMergeRegionSize)) re.Equal(0, int(svr.GetScheduleConfig().MaxMergeRegionKeys)) - re.Equal(20*10000, int(svr.GetScheduleConfig().GetMaxMergeRegionKeys())) + re.Equal(54*10000, int(svr.GetScheduleConfig().GetMaxMergeRegionKeys())) // set max-merge-region-size to 40MB args = []string{"-u", pdAddr, "config", "set", "max-merge-region-size", "40"} diff --git a/tools/pd-ctl/tests/region/region_test.go b/tools/pd-ctl/tests/region/region_test.go index afffba411bc..49f1eaa0a58 100644 --- a/tools/pd-ctl/tests/region/region_test.go +++ b/tools/pd-ctl/tests/region/region_test.go @@ -142,7 +142,7 @@ func TestRegion(t *testing.T) { // region check empty-region command {[]string{"region", "check", "empty-region"}, []*core.RegionInfo{r1}}, // region check undersized-region command - {[]string{"region", "check", "undersized-region"}, []*core.RegionInfo{r1, r4}}, + {[]string{"region", "check", "undersized-region"}, []*core.RegionInfo{r1, r3, r4}}, // region check oversized-region command {[]string{"region", "check", "oversized-region"}, []*core.RegionInfo{r2}}, // region keys --format=raw command From 1335ff93a407012eaf37f32d6657ed5b63bc308a Mon Sep 17 00:00:00 2001 From: okJiang <819421878@qq.com> Date: Mon, 5 Aug 2024 12:42:39 +0800 Subject: [PATCH 9/9] *: add some comments to exported function, part 2 of enable revive.exported (#8481) ref tikv/pd#8458 Signed-off-by: okJiang <819421878@qq.com> Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com> --- pkg/core/region.go | 5 +- pkg/core/region_tree.go | 4 +- pkg/core/region_tree_test.go | 14 +- pkg/mcs/scheduling/server/grpc_service.go | 6 +- pkg/ratelimit/runner.go | 1 + pkg/schedule/filter/filters.go | 20 +-- pkg/schedule/filter/region_filters.go | 2 + pkg/schedule/operator/builder.go | 65 ++++----- 
pkg/schedule/operator/create_operator.go | 6 +- pkg/schedule/operator/operator_controller.go | 14 +- .../operator/operator_controller_test.go | 16 +-- pkg/schedule/operator/operator_queue.go | 6 +- pkg/schedule/schedulers/balance_leader.go | 38 ++--- pkg/schedule/schedulers/balance_region.go | 31 ++-- pkg/schedule/schedulers/balance_test.go | 4 +- pkg/schedule/schedulers/balance_witness.go | 39 ++--- pkg/schedule/schedulers/base_scheduler.go | 2 + pkg/schedule/schedulers/evict_leader.go | 19 ++- pkg/schedule/schedulers/evict_slow_store.go | 19 ++- pkg/schedule/schedulers/evict_slow_trend.go | 15 +- pkg/schedule/schedulers/grant_hot_region.go | 24 ++-- pkg/schedule/schedulers/grant_leader.go | 29 ++-- pkg/schedule/schedulers/hot_region.go | 67 +++++---- pkg/schedule/schedulers/hot_region_config.go | 65 +++++---- pkg/schedule/schedulers/hot_region_rank_v1.go | 4 +- .../schedulers/hot_region_rank_v2_test.go | 54 +++---- pkg/schedule/schedulers/hot_region_test.go | 136 +++++++++--------- pkg/schedule/schedulers/init.go | 2 +- pkg/schedule/schedulers/label.go | 3 + pkg/schedule/schedulers/random_merge.go | 3 + pkg/schedule/schedulers/scatter_range.go | 45 +++--- pkg/schedule/schedulers/shuffle_hot_region.go | 13 +- pkg/schedule/schedulers/shuffle_leader.go | 3 + pkg/schedule/schedulers/shuffle_region.go | 15 +- .../schedulers/shuffle_region_config.go | 11 +- pkg/schedule/schedulers/split_bucket.go | 6 +- .../schedulers/transfer_witness_leader.go | 2 + pkg/schedule/schedulers/utils.go | 28 ++-- pkg/schedule/schedulers/utils_test.go | 30 ++-- plugin/scheduler_example/evict_leader.go | 21 ++- server/forward.go | 4 +- server/grpc_service.go | 6 +- server/handler.go | 2 + 43 files changed, 493 insertions(+), 406 deletions(-) diff --git a/pkg/core/region.go b/pkg/core/region.go index 244fef836f8..9768a258889 100644 --- a/pkg/core/region.go +++ b/pkg/core/region.go @@ -45,7 +45,8 @@ import ( const ( randomRegionMaxRetry = 10 scanRegionLimit = 1000 - CollectFactor = 0.9 + // CollectFactor is the factor to collect the count of region. + CollectFactor = 0.9 ) // errRegionIsStale is error info for region is stale. @@ -721,7 +722,7 @@ func (r *RegionInfo) isRegionRecreated() bool { return r.GetRegionEpoch().GetVersion() == 1 && r.GetRegionEpoch().GetConfVer() == 1 && (len(r.GetStartKey()) != 0 || len(r.GetEndKey()) != 0) } -func (r *RegionInfo) Contains(key []byte) bool { +func (r *RegionInfo) contain(key []byte) bool { start, end := r.GetStartKey(), r.GetEndKey() return bytes.Compare(key, start) >= 0 && (len(end) == 0 || bytes.Compare(key, end) < 0) } diff --git a/pkg/core/region_tree.go b/pkg/core/region_tree.go index 0be207d515d..12e2c5c8878 100644 --- a/pkg/core/region_tree.go +++ b/pkg/core/region_tree.go @@ -261,7 +261,7 @@ func (t *regionTree) find(item *regionItem) *regionItem { return false }) - if result == nil || !result.Contains(item.GetStartKey()) { + if result == nil || !result.contain(item.GetStartKey()) { return nil } @@ -370,7 +370,7 @@ func (t *regionTree) RandomRegions(n int, ranges []KeyRange) []*RegionInfo { // we need to check if the previous item contains the key. 
if startIndex != 0 && startItem == nil { region = t.tree.GetAt(startIndex - 1).RegionInfo - if region.Contains(startKey) { + if region.contain(startKey) { startIndex-- } } diff --git a/pkg/core/region_tree_test.go b/pkg/core/region_tree_test.go index 2726b4fdab5..a2b1bfab7a7 100644 --- a/pkg/core/region_tree_test.go +++ b/pkg/core/region_tree_test.go @@ -102,15 +102,15 @@ func TestRegionItem(t *testing.T) { re.False(item.Less(newRegionItem([]byte("b"), []byte{}))) re.True(item.Less(newRegionItem([]byte("c"), []byte{}))) - re.False(item.Contains([]byte("a"))) - re.True(item.Contains([]byte("b"))) - re.True(item.Contains([]byte("c"))) + re.False(item.contain([]byte("a"))) + re.True(item.contain([]byte("b"))) + re.True(item.contain([]byte("c"))) item = newRegionItem([]byte("b"), []byte("d")) - re.False(item.Contains([]byte("a"))) - re.True(item.Contains([]byte("b"))) - re.True(item.Contains([]byte("c"))) - re.False(item.Contains([]byte("d"))) + re.False(item.contain([]byte("a"))) + re.True(item.contain([]byte("b"))) + re.True(item.contain([]byte("c"))) + re.False(item.contain([]byte("d"))) } func newRegionWithStat(start, end string, size, keys int64) *RegionInfo { diff --git a/pkg/mcs/scheduling/server/grpc_service.go b/pkg/mcs/scheduling/server/grpc_service.go index 1459ccd3bac..d068aa5c058 100644 --- a/pkg/mcs/scheduling/server/grpc_service.go +++ b/pkg/mcs/scheduling/server/grpc_service.go @@ -51,6 +51,7 @@ var SetUpRestHandler = func(*Service) (http.Handler, apiutil.APIServiceGroup) { type dummyRestService struct{} +// ServeHTTP implements the http.Handler interface. func (dummyRestService) ServeHTTP(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusNotImplemented) w.Write([]byte("not implemented")) @@ -83,6 +84,7 @@ type heartbeatServer struct { closed int32 } +// Send implements the HeartbeatStream interface. func (s *heartbeatServer) Send(m core.RegionHeartbeatResponse) error { if atomic.LoadInt32(&s.closed) == 1 { return io.EOF @@ -106,7 +108,7 @@ func (s *heartbeatServer) Send(m core.RegionHeartbeatResponse) error { } } -func (s *heartbeatServer) Recv() (*schedulingpb.RegionHeartbeatRequest, error) { +func (s *heartbeatServer) recv() (*schedulingpb.RegionHeartbeatRequest, error) { if atomic.LoadInt32(&s.closed) == 1 { return nil, io.EOF } @@ -133,7 +135,7 @@ func (s *Service) RegionHeartbeat(stream schedulingpb.Scheduling_RegionHeartbeat }() for { - request, err := server.Recv() + request, err := server.recv() if err == io.EOF { return nil } diff --git a/pkg/ratelimit/runner.go b/pkg/ratelimit/runner.go index 4b1b51f1768..1d65ff6a568 100644 --- a/pkg/ratelimit/runner.go +++ b/pkg/ratelimit/runner.go @@ -65,6 +65,7 @@ type taskID struct { name string } +// ConcurrentRunner is a task runner that limits the number of concurrent tasks. type ConcurrentRunner struct { ctx context.Context cancel context.CancelFunc diff --git a/pkg/schedule/filter/filters.go b/pkg/schedule/filter/filters.go index 6c5dd748d17..e2846e6c9a6 100644 --- a/pkg/schedule/filter/filters.go +++ b/pkg/schedule/filter/filters.go @@ -65,7 +65,7 @@ func SelectUnavailableTargetStores(stores []*core.StoreInfo, filters []Filter, c cfilter, ok := filters[i].(comparingFilter) sourceID := uint64(0) if ok { - sourceID = cfilter.GetSourceStoreID() + sourceID = cfilter.getSourceStoreID() } if counter != nil { counter.inc(target, filters[i].Type(), sourceID, s.GetID()) @@ -99,7 +99,7 @@ func SelectTargetStores(stores []*core.StoreInfo, filters []Filter, conf config. 
cfilter, ok := filter.(comparingFilter) sourceID := uint64(0) if ok { - sourceID = cfilter.GetSourceStoreID() + sourceID = cfilter.getSourceStoreID() } if counter != nil { counter.inc(target, filter.Type(), sourceID, s.GetID()) @@ -141,8 +141,8 @@ type Filter interface { // comparingFilter is an interface to filter target store by comparing source and target stores type comparingFilter interface { Filter - // GetSourceStoreID returns the source store when comparing. - GetSourceStoreID() uint64 + // getSourceStoreID returns the source store when comparing. + getSourceStoreID() uint64 } // Target checks if store can pass all Filters as target store. @@ -156,7 +156,7 @@ func Target(conf config.SharedConfigProvider, store *core.StoreInfo, filters []F targetID := storeID sourceID := "" if ok { - sourceID = strconv.FormatUint(cfilter.GetSourceStoreID(), 10) + sourceID = strconv.FormatUint(cfilter.getSourceStoreID(), 10) } filterCounter.WithLabelValues(target.String(), filter.Scope(), filter.Type().String(), sourceID, targetID).Inc() } @@ -319,8 +319,8 @@ func (f *distinctScoreFilter) Target(_ config.SharedConfigProvider, store *core. return statusStoreNotMatchIsolation } -// GetSourceStoreID implements the ComparingFilter -func (f *distinctScoreFilter) GetSourceStoreID() uint64 { +// getSourceStoreID implements the ComparingFilter +func (f *distinctScoreFilter) getSourceStoreID() uint64 { return f.srcStore } @@ -669,8 +669,8 @@ func (f *ruleFitFilter) Target(_ config.SharedConfigProvider, store *core.StoreI return statusStoreNotMatchRule } -// GetSourceStoreID implements the ComparingFilter -func (f *ruleFitFilter) GetSourceStoreID() uint64 { +// getSourceStoreID implements the ComparingFilter +func (f *ruleFitFilter) getSourceStoreID() uint64 { return f.srcStore } @@ -730,7 +730,7 @@ func (f *ruleLeaderFitFilter) Target(_ config.SharedConfigProvider, store *core. return statusStoreNotMatchRule } -func (f *ruleLeaderFitFilter) GetSourceStoreID() uint64 { +func (f *ruleLeaderFitFilter) getSourceStoreID() uint64 { return f.srcLeaderStoreID } diff --git a/pkg/schedule/filter/region_filters.go b/pkg/schedule/filter/region_filters.go index e233ec75973..dca15dbf8ed 100644 --- a/pkg/schedule/filter/region_filters.go +++ b/pkg/schedule/filter/region_filters.go @@ -142,6 +142,7 @@ func NewRegionEmptyFilter(cluster sche.SharedCluster) RegionFilter { return ®ionEmptyFilter{cluster: cluster} } +// Select implements the RegionFilter interface. func (f *regionEmptyFilter) Select(region *core.RegionInfo) *plan.Status { if !isEmptyRegionAllowBalance(f.cluster, region) { return statusRegionEmpty @@ -163,6 +164,7 @@ func NewRegionWitnessFilter(storeID uint64) RegionFilter { return ®ionWitnessFilter{storeID: storeID} } +// Select implements the RegionFilter interface. 
func (f *regionWitnessFilter) Select(region *core.RegionInfo) *plan.Status { if region.GetStoreWitness(f.storeID) != nil { return statusRegionWitnessPeer diff --git a/pkg/schedule/operator/builder.go b/pkg/schedule/operator/builder.go index e28e7de973a..29b8aedf978 100644 --- a/pkg/schedule/operator/builder.go +++ b/pkg/schedule/operator/builder.go @@ -117,15 +117,15 @@ func NewBuilder(desc string, ci sche.SharedCluster, region *core.RegionInfo, opt err = errors.Errorf("cannot build operator for region with nil peer") break } - originPeers.Set(p) + originPeers.set(p) } for _, p := range region.GetPendingPeers() { - unhealthyPeers.Set(p) + unhealthyPeers.set(p) } for _, p := range region.GetDownPeers() { - unhealthyPeers.Set(p.Peer) + unhealthyPeers.set(p.Peer) } // origin leader @@ -158,7 +158,7 @@ func NewBuilder(desc string, ci sche.SharedCluster, region *core.RegionInfo, opt b.originPeers = originPeers b.unhealthyPeers = unhealthyPeers b.originLeaderStoreID = originLeaderStoreID - b.targetPeers = originPeers.Copy() + b.targetPeers = originPeers.copy() b.useJointConsensus = supportConfChangeV2 && b.GetSharedConfig().IsUseJointConsensus() b.err = err return b @@ -177,7 +177,7 @@ func (b *Builder) AddPeer(peer *metapb.Peer) *Builder { } else if old, ok := b.targetPeers[peer.GetStoreId()]; ok { b.err = errors.Errorf("cannot add peer %s: already have peer %s", peer, old) } else { - b.targetPeers.Set(peer) + b.targetPeers.set(peer) } return b } @@ -209,7 +209,7 @@ func (b *Builder) PromoteLearner(storeID uint64) *Builder { } else if _, ok := b.unhealthyPeers[storeID]; ok { b.err = errors.Errorf("cannot promote peer %d: unhealthy", storeID) } else { - b.targetPeers.Set(&metapb.Peer{ + b.targetPeers.set(&metapb.Peer{ Id: peer.GetId(), StoreId: peer.GetStoreId(), Role: metapb.PeerRole_Voter, @@ -229,7 +229,7 @@ func (b *Builder) DemoteVoter(storeID uint64) *Builder { } else if core.IsLearner(peer) { b.err = errors.Errorf("cannot demote voter %d: is already learner", storeID) } else { - b.targetPeers.Set(&metapb.Peer{ + b.targetPeers.set(&metapb.Peer{ Id: peer.GetId(), StoreId: peer.GetStoreId(), Role: metapb.PeerRole_Learner, @@ -249,7 +249,7 @@ func (b *Builder) BecomeWitness(storeID uint64) *Builder { } else if core.IsWitness(peer) { b.err = errors.Errorf("cannot switch peer to witness %d: is already witness", storeID) } else { - b.targetPeers.Set(&metapb.Peer{ + b.targetPeers.set(&metapb.Peer{ Id: peer.GetId(), StoreId: peer.GetStoreId(), Role: peer.GetRole(), @@ -269,7 +269,7 @@ func (b *Builder) BecomeNonWitness(storeID uint64) *Builder { } else if !core.IsWitness(peer) { b.err = errors.Errorf("cannot switch peer to non-witness %d: is already non-witness", storeID) } else { - b.targetPeers.Set(&metapb.Peer{ + b.targetPeers.set(&metapb.Peer{ Id: peer.GetId(), StoreId: peer.GetStoreId(), Role: peer.GetRole(), @@ -335,7 +335,7 @@ func (b *Builder) SetPeers(peers map[uint64]*metapb.Peer) *Builder { b.targetLeaderStoreID = 0 } - b.targetPeers = peersMap(peers).Copy() + b.targetPeers = peersMap(peers).copy() return b } @@ -439,7 +439,7 @@ func (b *Builder) prepareBuild() (string, error) { for _, o := range b.originPeers { n := b.targetPeers[o.GetStoreId()] if n == nil { - b.toRemove.Set(o) + b.toRemove.set(o) continue } @@ -461,25 +461,25 @@ func (b *Builder) prepareBuild() (string, error) { if !core.IsLearner(n) { n.Role = metapb.PeerRole_Learner n.IsWitness = true - b.toPromoteNonWitness.Set(n) + b.toPromoteNonWitness.set(n) } - b.toNonWitness.Set(n) + b.toNonWitness.set(n) } else if 
!isOriginPeerWitness && isTargetPeerWitness { - b.toWitness.Set(n) + b.toWitness.set(n) } isOriginPeerLearner := core.IsLearner(o) isTargetPeerLearner := core.IsLearner(n) if isOriginPeerLearner && !isTargetPeerLearner { // learner -> voter - b.toPromote.Set(n) + b.toPromote.set(n) } else if !isOriginPeerLearner && isTargetPeerLearner { // voter -> learner if b.useJointConsensus { - b.toDemote.Set(n) + b.toDemote.set(n) } else { - b.toRemove.Set(o) - // the targetPeers loop below will add `b.toAdd.Set(n)` + b.toRemove.set(o) + // the targetPeers loop below will add `b.toAdd.set(n)` } } } @@ -500,8 +500,8 @@ func (b *Builder) prepareBuild() (string, error) { IsWitness: n.GetIsWitness(), } } - // It is a pair with `b.toRemove.Set(o)` when `o != nil`. - b.toAdd.Set(n) + // It is a pair with `b.toRemove.set(o)` when `o != nil`. + b.toAdd.set(n) } } @@ -510,7 +510,7 @@ func (b *Builder) prepareBuild() (string, error) { b.targetLeaderStoreID = 0 } - b.currentPeers, b.currentLeaderStoreID = b.originPeers.Copy(), b.originLeaderStoreID + b.currentPeers, b.currentLeaderStoreID = b.originPeers.copy(), b.originLeaderStoreID if b.targetLeaderStoreID != 0 { targetLeader := b.targetPeers[b.targetLeaderStoreID] @@ -580,7 +580,7 @@ func (b *Builder) buildStepsWithJointConsensus(kind OpKind) (OpKind, error) { Role: metapb.PeerRole_Learner, IsWitness: peer.GetIsWitness(), }) - b.toPromote.Set(peer) + b.toPromote.set(peer) } else { b.execAddPeer(peer) } @@ -596,7 +596,7 @@ func (b *Builder) buildStepsWithJointConsensus(kind OpKind) (OpKind, error) { for _, remove := range b.toRemove.IDs() { peer := b.toRemove[remove] if !core.IsLearner(peer) { - b.toDemote.Set(&metapb.Peer{ + b.toDemote.set(&metapb.Peer{ Id: peer.GetId(), StoreId: peer.GetStoreId(), Role: metapb.PeerRole_Learner, @@ -637,7 +637,7 @@ func (b *Builder) buildStepsWithJointConsensus(kind OpKind) (OpKind, error) { for _, promote := range b.toPromoteNonWitness.IDs() { peer := b.toPromoteNonWitness[promote] peer.IsWitness = false - b.toPromote.Set(peer) + b.toPromote.set(peer) kind |= OpRegion } b.toPromoteNonWitness = newPeersMap() @@ -771,13 +771,13 @@ func (b *Builder) execTransferLeader(targetStoreID uint64, targetStoreIDs []uint func (b *Builder) execPromoteLearner(peer *metapb.Peer) { b.steps = append(b.steps, PromoteLearner{ToStore: peer.GetStoreId(), PeerID: peer.GetId(), IsWitness: peer.GetIsWitness()}) - b.currentPeers.Set(peer) + b.currentPeers.set(peer) delete(b.toPromote, peer.GetStoreId()) } func (b *Builder) execPromoteNonWitness(peer *metapb.Peer) { b.steps = append(b.steps, PromoteLearner{ToStore: peer.GetStoreId(), PeerID: peer.GetId(), IsWitness: false}) - b.currentPeers.Set(peer) + b.currentPeers.set(peer) delete(b.toPromoteNonWitness, peer.GetStoreId()) } @@ -786,7 +786,7 @@ func (b *Builder) execAddPeer(peer *metapb.Peer) { if !core.IsLearner(peer) { b.steps = append(b.steps, PromoteLearner{ToStore: peer.GetStoreId(), PeerID: peer.GetId(), IsWitness: peer.GetIsWitness()}) } - b.currentPeers.Set(peer) + b.currentPeers.set(peer) b.peerAddStep[peer.GetStoreId()] = len(b.steps) delete(b.toAdd, peer.GetStoreId()) } @@ -824,14 +824,14 @@ func (b *Builder) execChangePeerV2(needEnter bool, needTransferLeader bool) { for _, p := range b.toPromote.IDs() { peer := b.toPromote[p] step.PromoteLearners = append(step.PromoteLearners, PromoteLearner{ToStore: peer.GetStoreId(), PeerID: peer.GetId(), IsWitness: peer.GetIsWitness()}) - b.currentPeers.Set(peer) + b.currentPeers.set(peer) } b.toPromote = newPeersMap() for _, d := range 
b.toDemote.IDs() { peer := b.toDemote[d] step.DemoteVoters = append(step.DemoteVoters, DemoteVoter{ToStore: peer.GetStoreId(), PeerID: peer.GetId(), IsWitness: peer.GetIsWitness()}) - b.currentPeers.Set(peer) + b.currentPeers.set(peer) } b.toDemote = newPeersMap() @@ -1279,10 +1279,11 @@ func (pm peersMap) IDs() []uint64 { return ids } -func (pm peersMap) Set(peer *metapb.Peer) { +func (pm peersMap) set(peer *metapb.Peer) { pm[peer.GetStoreId()] = peer } +// String returns a brief description of the peersMap. func (pm peersMap) String() string { ids := make([]uint64, 0, len(pm)) for _, p := range pm { @@ -1291,10 +1292,10 @@ func (pm peersMap) String() string { return fmt.Sprintf("%v", ids) } -func (pm peersMap) Copy() peersMap { +func (pm peersMap) copy() peersMap { var pm2 peersMap = make(map[uint64]*metapb.Peer, len(pm)) for _, p := range pm { - pm2.Set(p) + pm2.set(p) } return pm2 } diff --git a/pkg/schedule/operator/create_operator.go b/pkg/schedule/operator/create_operator.go index 64680520933..4fae7f9e3f2 100644 --- a/pkg/schedule/operator/create_operator.go +++ b/pkg/schedule/operator/create_operator.go @@ -285,9 +285,9 @@ func CreateLeaveJointStateOperator(desc string, ci sche.SharedCluster, origin *c for _, o := range b.originPeers { switch o.GetRole() { case metapb.PeerRole_IncomingVoter: - b.toPromote.Set(o) + b.toPromote.set(o) case metapb.PeerRole_DemotingVoter: - b.toDemote.Set(o) + b.toDemote.set(o) } } @@ -298,7 +298,7 @@ func CreateLeaveJointStateOperator(desc string, ci sche.SharedCluster, origin *c b.targetLeaderStoreID = b.originLeaderStoreID } - b.currentPeers, b.currentLeaderStoreID = b.originPeers.Copy(), b.originLeaderStoreID + b.currentPeers, b.currentLeaderStoreID = b.originPeers.copy(), b.originLeaderStoreID b.peerAddStep = make(map[uint64]int) brief := b.brief() diff --git a/pkg/schedule/operator/operator_controller.go b/pkg/schedule/operator/operator_controller.go index fe93bd98756..e4da6ead0ef 100644 --- a/pkg/schedule/operator/operator_controller.go +++ b/pkg/schedule/operator/operator_controller.go @@ -235,10 +235,10 @@ func getNextPushOperatorTime(step OpStep, now time.Time) time.Time { // "next" is true to indicate that it may exist in next attempt, // and false is the end for the poll. func (oc *Controller) pollNeedDispatchRegion() (r *core.RegionInfo, next bool) { - if oc.opNotifierQueue.Len() == 0 { + if oc.opNotifierQueue.len() == 0 { return nil, false } - item, _ := oc.opNotifierQueue.Pop() + item, _ := oc.opNotifierQueue.pop() regionID := item.op.RegionID() opi, ok := oc.operators.Load(regionID) if !ok || opi.(*Operator) == nil { @@ -265,13 +265,13 @@ func (oc *Controller) pollNeedDispatchRegion() (r *core.RegionInfo, next bool) { } now := time.Now() if now.Before(item.time) { - oc.opNotifierQueue.Push(item) + oc.opNotifierQueue.push(item) return nil, false } // pushes with new notify time. item.time = getNextPushOperatorTime(step, now) - oc.opNotifierQueue.Push(item) + oc.opNotifierQueue.push(item) return r, true } @@ -561,7 +561,7 @@ func (oc *Controller) addOperatorInner(op *Operator) bool { } } - oc.opNotifierQueue.Push(&operatorWithTime{op: op, time: getNextPushOperatorTime(step, time.Now())}) + oc.opNotifierQueue.push(&operatorWithTime{op: op, time: getNextPushOperatorTime(step, time.Now())}) operatorCounter.WithLabelValues(op.Desc(), "create").Inc() for _, counter := range op.Counters { counter.Inc() @@ -753,7 +753,7 @@ func (oc *Controller) GetOperator(regionID uint64) *Operator { // GetOperators gets operators from the running operators. 
func (oc *Controller) GetOperators() []*Operator { - operators := make([]*Operator, 0, oc.opNotifierQueue.Len()) + operators := make([]*Operator, 0, oc.opNotifierQueue.len()) oc.operators.Range( func(_, value any) bool { operators = append(operators, value.(*Operator)) @@ -769,7 +769,7 @@ func (oc *Controller) GetWaitingOperators() []*Operator { // GetOperatorsOfKind returns the running operators of the kind. func (oc *Controller) GetOperatorsOfKind(mask OpKind) []*Operator { - operators := make([]*Operator, 0, oc.opNotifierQueue.Len()) + operators := make([]*Operator, 0, oc.opNotifierQueue.len()) oc.operators.Range( func(_, value any) bool { op := value.(*Operator) diff --git a/pkg/schedule/operator/operator_controller_test.go b/pkg/schedule/operator/operator_controller_test.go index 2b16516c4c7..3894df7e5e7 100644 --- a/pkg/schedule/operator/operator_controller_test.go +++ b/pkg/schedule/operator/operator_controller_test.go @@ -364,10 +364,10 @@ func (suite *operatorControllerTestSuite) TestPollDispatchRegion() { oc.SetOperator(op4) re.True(op2.Start()) oc.SetOperator(op2) - oc.opNotifierQueue.Push(&operatorWithTime{op: op1, time: time.Now().Add(100 * time.Millisecond)}) - oc.opNotifierQueue.Push(&operatorWithTime{op: op3, time: time.Now().Add(300 * time.Millisecond)}) - oc.opNotifierQueue.Push(&operatorWithTime{op: op4, time: time.Now().Add(499 * time.Millisecond)}) - oc.opNotifierQueue.Push(&operatorWithTime{op: op2, time: time.Now().Add(500 * time.Millisecond)}) + oc.opNotifierQueue.push(&operatorWithTime{op: op1, time: time.Now().Add(100 * time.Millisecond)}) + oc.opNotifierQueue.push(&operatorWithTime{op: op3, time: time.Now().Add(300 * time.Millisecond)}) + oc.opNotifierQueue.push(&operatorWithTime{op: op4, time: time.Now().Add(499 * time.Millisecond)}) + oc.opNotifierQueue.push(&operatorWithTime{op: op2, time: time.Now().Add(500 * time.Millisecond)}) } // first poll got nil r, next := oc.pollNeedDispatchRegion() @@ -447,7 +447,7 @@ func (suite *operatorControllerTestSuite) TestPollDispatchRegionForMergeRegion() r, next = controller.pollNeedDispatchRegion() re.True(next) re.Nil(r) - re.Equal(1, controller.opNotifierQueue.Len()) + re.Equal(1, controller.opNotifierQueue.len()) re.Empty(controller.GetOperators()) re.Empty(controller.wop.ListOperator()) re.NotNil(controller.records.Get(101)) @@ -458,7 +458,7 @@ func (suite *operatorControllerTestSuite) TestPollDispatchRegionForMergeRegion() r, next = controller.pollNeedDispatchRegion() re.True(next) re.Nil(r) - re.Equal(0, controller.opNotifierQueue.Len()) + re.Equal(0, controller.opNotifierQueue.len()) // Add the two ops to waiting operators again. 
source.GetMeta().RegionEpoch = &metapb.RegionEpoch{ConfVer: 0, Version: 0} @@ -478,7 +478,7 @@ func (suite *operatorControllerTestSuite) TestPollDispatchRegionForMergeRegion() r, next = controller.pollNeedDispatchRegion() re.True(next) re.Nil(r) - re.Equal(1, controller.opNotifierQueue.Len()) + re.Equal(1, controller.opNotifierQueue.len()) re.Empty(controller.GetOperators()) re.Empty(controller.wop.ListOperator()) re.NotNil(controller.records.Get(101)) @@ -488,7 +488,7 @@ func (suite *operatorControllerTestSuite) TestPollDispatchRegionForMergeRegion() r, next = controller.pollNeedDispatchRegion() re.True(next) re.Nil(r) - re.Equal(0, controller.opNotifierQueue.Len()) + re.Equal(0, controller.opNotifierQueue.len()) } func (suite *operatorControllerTestSuite) TestCheckOperatorLightly() { diff --git a/pkg/schedule/operator/operator_queue.go b/pkg/schedule/operator/operator_queue.go index 8643717d5ad..51991ff7ab4 100644 --- a/pkg/schedule/operator/operator_queue.go +++ b/pkg/schedule/operator/operator_queue.go @@ -67,19 +67,19 @@ func newConcurrentHeapOpQueue() *concurrentHeapOpQueue { return &concurrentHeapOpQueue{heap: make(operatorQueue, 0)} } -func (ch *concurrentHeapOpQueue) Len() int { +func (ch *concurrentHeapOpQueue) len() int { ch.Lock() defer ch.Unlock() return len(ch.heap) } -func (ch *concurrentHeapOpQueue) Push(x *operatorWithTime) { +func (ch *concurrentHeapOpQueue) push(x *operatorWithTime) { ch.Lock() defer ch.Unlock() heap.Push(&ch.heap, x) } -func (ch *concurrentHeapOpQueue) Pop() (*operatorWithTime, bool) { +func (ch *concurrentHeapOpQueue) pop() (*operatorWithTime, bool) { ch.Lock() defer ch.Unlock() if len(ch.heap) == 0 { diff --git a/pkg/schedule/schedulers/balance_leader.go b/pkg/schedule/schedulers/balance_leader.go index 6762c8751e4..f6c8dd5d1b6 100644 --- a/pkg/schedule/schedulers/balance_leader.go +++ b/pkg/schedule/schedulers/balance_leader.go @@ -64,7 +64,7 @@ type balanceLeaderSchedulerConfig struct { Batch int `json:"batch"` } -func (conf *balanceLeaderSchedulerConfig) Update(data []byte) (int, any) { +func (conf *balanceLeaderSchedulerConfig) update(data []byte) (int, any) { conf.Lock() defer conf.Unlock() @@ -146,19 +146,19 @@ func newBalanceLeaderHandler(conf *balanceLeaderSchedulerConfig) http.Handler { rd: render.New(render.Options{IndentJSON: true}), } router := mux.NewRouter() - router.HandleFunc("/config", handler.UpdateConfig).Methods(http.MethodPost) - router.HandleFunc("/list", handler.ListConfig).Methods(http.MethodGet) + router.HandleFunc("/config", handler.updateConfig).Methods(http.MethodPost) + router.HandleFunc("/list", handler.listConfig).Methods(http.MethodGet) return router } -func (handler *balanceLeaderHandler) UpdateConfig(w http.ResponseWriter, r *http.Request) { +func (handler *balanceLeaderHandler) updateConfig(w http.ResponseWriter, r *http.Request) { data, _ := io.ReadAll(r.Body) r.Body.Close() - httpCode, v := handler.config.Update(data) + httpCode, v := handler.config.update(data) handler.rd.JSON(w, httpCode, v) } -func (handler *balanceLeaderHandler) ListConfig(w http.ResponseWriter, _ *http.Request) { +func (handler *balanceLeaderHandler) listConfig(w http.ResponseWriter, _ *http.Request) { conf := handler.config.Clone() handler.rd.JSON(w, http.StatusOK, conf) } @@ -348,7 +348,7 @@ func (l *balanceLeaderScheduler) Schedule(cluster sche.SchedulerCluster, dryRun stores := cluster.GetStores() scoreFunc := func(store *core.StoreInfo) float64 { - return store.LeaderScore(solver.kind.Policy, solver.GetOpInfluence(store.GetID())) + return 
store.LeaderScore(solver.kind.Policy, solver.getOpInfluence(store.GetID())) } sourceCandidate := newCandidateStores(filter.SelectSourceStores(stores, l.filters, cluster.GetSchedulerConfig(), collector, l.filterCounter), false, scoreFunc) targetCandidate := newCandidateStores(filter.SelectTargetStores(stores, l.filters, cluster.GetSchedulerConfig(), nil, l.filterCounter), true, scoreFunc) @@ -379,7 +379,7 @@ func (l *balanceLeaderScheduler) Schedule(cluster sche.SchedulerCluster, dryRun } } } - l.retryQuota.GC(append(sourceCandidate.stores, targetCandidate.stores...)) + l.retryQuota.gc(append(sourceCandidate.stores, targetCandidate.stores...)) return result, collector.GetPlans() } @@ -388,7 +388,7 @@ func createTransferLeaderOperator(cs *candidateStores, dir string, l *balanceLea store := cs.getStore() ssolver.Step++ defer func() { ssolver.Step-- }() - retryLimit := l.retryQuota.GetLimit(store) + retryLimit := l.retryQuota.getLimit(store) var creator func(*solver, *plan.Collector) *operator.Operator switch dir { case transferOut: @@ -408,9 +408,9 @@ func createTransferLeaderOperator(cs *candidateStores, dir string, l *balanceLea } } if op != nil { - l.retryQuota.ResetLimit(store) + l.retryQuota.resetLimit(store) } else { - l.Attenuate(store) + l.attenuate(store) log.Debug("no operator created for selected stores", zap.String("scheduler", l.GetName()), zap.Uint64(dir, store.GetID())) cs.next() } @@ -436,10 +436,10 @@ func makeInfluence(op *operator.Operator, plan *solver, usedRegions map[uint64]s // It randomly selects a health region from the source store, then picks // the best follower peer and transfers the leader. func (l *balanceLeaderScheduler) transferLeaderOut(solver *solver, collector *plan.Collector) *operator.Operator { - solver.Region = filter.SelectOneRegion(solver.RandLeaderRegions(solver.SourceStoreID(), l.conf.getRanges()), + solver.Region = filter.SelectOneRegion(solver.RandLeaderRegions(solver.sourceStoreID(), l.conf.getRanges()), collector, filter.NewRegionPendingFilter(), filter.NewRegionDownFilter()) if solver.Region == nil { - log.Debug("store has no leader", zap.String("scheduler", l.GetName()), zap.Uint64("store-id", solver.SourceStoreID())) + log.Debug("store has no leader", zap.String("scheduler", l.GetName()), zap.Uint64("store-id", solver.sourceStoreID())) balanceLeaderNoLeaderRegionCounter.Inc() return nil } @@ -462,8 +462,8 @@ func (l *balanceLeaderScheduler) transferLeaderOut(solver *solver, collector *pl targets = filter.SelectTargetStores(targets, finalFilters, conf, collector, l.filterCounter) leaderSchedulePolicy := conf.GetLeaderSchedulePolicy() sort.Slice(targets, func(i, j int) bool { - iOp := solver.GetOpInfluence(targets[i].GetID()) - jOp := solver.GetOpInfluence(targets[j].GetID()) + iOp := solver.getOpInfluence(targets[i].GetID()) + jOp := solver.getOpInfluence(targets[j].GetID()) return targets[i].LeaderScore(leaderSchedulePolicy, iOp) < targets[j].LeaderScore(leaderSchedulePolicy, jOp) }) for _, solver.Target = range targets { @@ -480,10 +480,10 @@ func (l *balanceLeaderScheduler) transferLeaderOut(solver *solver, collector *pl // It randomly selects a health region from the target store, then picks // the worst follower peer and transfers the leader. 
func (l *balanceLeaderScheduler) transferLeaderIn(solver *solver, collector *plan.Collector) *operator.Operator { - solver.Region = filter.SelectOneRegion(solver.RandFollowerRegions(solver.TargetStoreID(), l.conf.getRanges()), + solver.Region = filter.SelectOneRegion(solver.RandFollowerRegions(solver.targetStoreID(), l.conf.getRanges()), nil, filter.NewRegionPendingFilter(), filter.NewRegionDownFilter()) if solver.Region == nil { - log.Debug("store has no follower", zap.String("scheduler", l.GetName()), zap.Uint64("store-id", solver.TargetStoreID())) + log.Debug("store has no follower", zap.String("scheduler", l.GetName()), zap.Uint64("store-id", solver.targetStoreID())) balanceLeaderNoFollowerRegionCounter.Inc() return nil } @@ -536,7 +536,7 @@ func (l *balanceLeaderScheduler) createOperator(solver *solver, collector *plan. } solver.Step++ defer func() { solver.Step-- }() - op, err := operator.CreateTransferLeaderOperator(BalanceLeaderType, solver, solver.Region, solver.TargetStoreID(), []uint64{}, operator.OpLeader) + op, err := operator.CreateTransferLeaderOperator(BalanceLeaderType, solver, solver.Region, solver.targetStoreID(), []uint64{}, operator.OpLeader) if err != nil { log.Debug("fail to create balance leader operator", errs.ZapError(err)) if collector != nil { @@ -548,7 +548,7 @@ func (l *balanceLeaderScheduler) createOperator(solver *solver, collector *plan. balanceLeaderNewOpCounter, ) op.FinishedCounters = append(op.FinishedCounters, - balanceDirectionCounter.WithLabelValues(l.GetName(), solver.SourceMetricLabel(), solver.TargetMetricLabel()), + balanceDirectionCounter.WithLabelValues(l.GetName(), solver.sourceMetricLabel(), solver.targetMetricLabel()), ) op.SetAdditionalInfo("sourceScore", strconv.FormatFloat(solver.sourceScore, 'f', 2, 64)) op.SetAdditionalInfo("targetScore", strconv.FormatFloat(solver.targetScore, 'f', 2, 64)) diff --git a/pkg/schedule/schedulers/balance_region.go b/pkg/schedule/schedulers/balance_region.go index 3ef01345aea..7c19187dd74 100644 --- a/pkg/schedule/schedulers/balance_region.go +++ b/pkg/schedule/schedulers/balance_region.go @@ -81,10 +81,12 @@ func WithBalanceRegionName(name string) BalanceRegionCreateOption { } } +// EncodeConfig implements the Scheduler interface. func (s *balanceRegionScheduler) EncodeConfig() ([]byte, error) { return EncodeConfig(s.conf) } +// IsScheduleAllowed implements the Scheduler interface. func (s *balanceRegionScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { allowed := s.OpController.OperatorCount(operator.OpRegion) < cluster.GetSchedulerConfig().GetRegionScheduleLimit() if !allowed { @@ -93,6 +95,7 @@ func (s *balanceRegionScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster return allowed } +// Schedule implements the Scheduler interface. 
func (s *balanceRegionScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { basePlan := plan.NewBalanceSchedulerPlan() defer s.filterCounter.Flush() @@ -112,8 +115,8 @@ func (s *balanceRegionScheduler) Schedule(cluster sche.SchedulerCluster, dryRun solver := newSolver(basePlan, kind, cluster, opInfluence) sort.Slice(sourceStores, func(i, j int) bool { - iOp := solver.GetOpInfluence(sourceStores[i].GetID()) - jOp := solver.GetOpInfluence(sourceStores[j].GetID()) + iOp := solver.getOpInfluence(sourceStores[i].GetID()) + jOp := solver.getOpInfluence(sourceStores[j].GetID()) return sourceStores[i].RegionScore(conf.GetRegionScoreFormulaVersion(), conf.GetHighSpaceRatio(), conf.GetLowSpaceRatio(), iOp) > sourceStores[j].RegionScore(conf.GetRegionScoreFormulaVersion(), conf.GetHighSpaceRatio(), conf.GetLowSpaceRatio(), jOp) }) @@ -138,7 +141,7 @@ func (s *balanceRegionScheduler) Schedule(cluster sche.SchedulerCluster, dryRun // sourcesStore is sorted by region score desc, so we pick the first store as source store. for sourceIndex, solver.Source = range sourceStores { - retryLimit := s.retryQuota.GetLimit(solver.Source) + retryLimit := s.retryQuota.getLimit(solver.Source) solver.sourceScore = solver.sourceStoreScore(s.GetName()) if sourceIndex == len(sourceStores)-1 { break @@ -146,22 +149,22 @@ func (s *balanceRegionScheduler) Schedule(cluster sche.SchedulerCluster, dryRun for i := 0; i < retryLimit; i++ { // Priority pick the region that has a pending peer. // Pending region may mean the disk is overload, remove the pending region firstly. - solver.Region = filter.SelectOneRegion(cluster.RandPendingRegions(solver.SourceStoreID(), s.conf.Ranges), collector, - append(baseRegionFilters, filter.NewRegionWitnessFilter(solver.SourceStoreID()))...) + solver.Region = filter.SelectOneRegion(cluster.RandPendingRegions(solver.sourceStoreID(), s.conf.Ranges), collector, + append(baseRegionFilters, filter.NewRegionWitnessFilter(solver.sourceStoreID()))...) if solver.Region == nil { // Then pick the region that has a follower in the source store. - solver.Region = filter.SelectOneRegion(cluster.RandFollowerRegions(solver.SourceStoreID(), s.conf.Ranges), collector, - append(baseRegionFilters, filter.NewRegionWitnessFilter(solver.SourceStoreID()), pendingFilter)...) + solver.Region = filter.SelectOneRegion(cluster.RandFollowerRegions(solver.sourceStoreID(), s.conf.Ranges), collector, + append(baseRegionFilters, filter.NewRegionWitnessFilter(solver.sourceStoreID()), pendingFilter)...) } if solver.Region == nil { // Then pick the region has the leader in the source store. - solver.Region = filter.SelectOneRegion(cluster.RandLeaderRegions(solver.SourceStoreID(), s.conf.Ranges), collector, - append(baseRegionFilters, filter.NewRegionWitnessFilter(solver.SourceStoreID()), pendingFilter)...) + solver.Region = filter.SelectOneRegion(cluster.RandLeaderRegions(solver.sourceStoreID(), s.conf.Ranges), collector, + append(baseRegionFilters, filter.NewRegionWitnessFilter(solver.sourceStoreID()), pendingFilter)...) } if solver.Region == nil { // Finally, pick learner. - solver.Region = filter.SelectOneRegion(cluster.RandLearnerRegions(solver.SourceStoreID(), s.conf.Ranges), collector, - append(baseRegionFilters, filter.NewRegionWitnessFilter(solver.SourceStoreID()), pendingFilter)...) 
+ solver.Region = filter.SelectOneRegion(cluster.RandLearnerRegions(solver.sourceStoreID(), s.conf.Ranges), collector, + append(baseRegionFilters, filter.NewRegionWitnessFilter(solver.sourceStoreID()), pendingFilter)...) } if solver.Region == nil { balanceRegionNoRegionCounter.Inc() @@ -191,15 +194,15 @@ func (s *balanceRegionScheduler) Schedule(cluster sche.SchedulerCluster, dryRun // satisfy all the filters, so the region fit must belong the scheduled region. solver.fit = replicaFilter.(*filter.RegionReplicatedFilter).GetFit() if op := s.transferPeer(solver, collector, sourceStores[sourceIndex+1:], faultTargets); op != nil { - s.retryQuota.ResetLimit(solver.Source) + s.retryQuota.resetLimit(solver.Source) op.Counters = append(op.Counters, balanceRegionNewOpCounter) return []*operator.Operator{op}, collector.GetPlans() } solver.Step-- } - s.retryQuota.Attenuate(solver.Source) + s.retryQuota.attenuate(solver.Source) } - s.retryQuota.GC(stores) + s.retryQuota.gc(stores) return nil, collector.GetPlans() } diff --git a/pkg/schedule/schedulers/balance_test.go b/pkg/schedule/schedulers/balance_test.go index 26214ed5456..0cfaf510f1b 100644 --- a/pkg/schedule/schedulers/balance_test.go +++ b/pkg/schedule/schedulers/balance_test.go @@ -1399,8 +1399,8 @@ func TestConcurrencyUpdateConfig(t *testing.T) { return default: } - sche.config.BuildWithArgs(args) - re.NoError(sche.config.Persist()) + sche.config.buildWithArgs(args) + re.NoError(sche.config.persist()) } }() for i := 0; i < 1000; i++ { diff --git a/pkg/schedule/schedulers/balance_witness.go b/pkg/schedule/schedulers/balance_witness.go index 319a0f2493a..dbb0d012c72 100644 --- a/pkg/schedule/schedulers/balance_witness.go +++ b/pkg/schedule/schedulers/balance_witness.go @@ -143,19 +143,19 @@ func newBalanceWitnessHandler(conf *balanceWitnessSchedulerConfig) http.Handler rd: render.New(render.Options{IndentJSON: true}), } router := mux.NewRouter() - router.HandleFunc("/config", handler.UpdateConfig).Methods(http.MethodPost) - router.HandleFunc("/list", handler.ListConfig).Methods(http.MethodGet) + router.HandleFunc("/config", handler.updateConfig).Methods(http.MethodPost) + router.HandleFunc("/list", handler.listConfig).Methods(http.MethodGet) return router } -func (handler *balanceWitnessHandler) UpdateConfig(w http.ResponseWriter, r *http.Request) { +func (handler *balanceWitnessHandler) updateConfig(w http.ResponseWriter, r *http.Request) { data, _ := io.ReadAll(r.Body) r.Body.Close() httpCode, v := handler.config.Update(data) handler.rd.JSON(w, httpCode, v) } -func (handler *balanceWitnessHandler) ListConfig(w http.ResponseWriter, _ *http.Request) { +func (handler *balanceWitnessHandler) listConfig(w http.ResponseWriter, _ *http.Request) { conf := handler.config.Clone() handler.rd.JSON(w, http.StatusOK, conf) } @@ -191,6 +191,7 @@ func newBalanceWitnessScheduler(opController *operator.Controller, conf *balance return s } +// ServeHTTP implements the http.Handler interface. func (b *balanceWitnessScheduler) ServeHTTP(w http.ResponseWriter, r *http.Request) { b.handler.ServeHTTP(w, r) } @@ -205,12 +206,14 @@ func WithBalanceWitnessCounter(counter *prometheus.CounterVec) BalanceWitnessCre } } +// EncodeConfig implements the Scheduler interface. func (b *balanceWitnessScheduler) EncodeConfig() ([]byte, error) { b.conf.RLock() defer b.conf.RUnlock() return EncodeConfig(b.conf) } +// ReloadConfig implements the Scheduler interface. 
func (b *balanceWitnessScheduler) ReloadConfig() error { b.conf.Lock() defer b.conf.Unlock() @@ -230,6 +233,7 @@ func (b *balanceWitnessScheduler) ReloadConfig() error { return nil } +// IsScheduleAllowed implements the Scheduler interface. func (b *balanceWitnessScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { allowed := b.OpController.OperatorCount(operator.OpWitness) < cluster.GetSchedulerConfig().GetWitnessScheduleLimit() if !allowed { @@ -238,6 +242,7 @@ func (b *balanceWitnessScheduler) IsScheduleAllowed(cluster sche.SchedulerCluste return allowed } +// Schedule implements the Scheduler interface. func (b *balanceWitnessScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { basePlan := plan.NewBalanceSchedulerPlan() var collector *plan.Collector @@ -253,7 +258,7 @@ func (b *balanceWitnessScheduler) Schedule(cluster sche.SchedulerCluster, dryRun stores := cluster.GetStores() scoreFunc := func(store *core.StoreInfo) float64 { - return store.WitnessScore(solver.GetOpInfluence(store.GetID())) + return store.WitnessScore(solver.getOpInfluence(store.GetID())) } sourceCandidate := newCandidateStores(filter.SelectSourceStores(stores, b.filters, cluster.GetSchedulerConfig(), collector, b.filterCounter), false, scoreFunc) usedRegions := make(map[uint64]struct{}) @@ -269,7 +274,7 @@ func (b *balanceWitnessScheduler) Schedule(cluster sche.SchedulerCluster, dryRun makeInfluence(op, solver, usedRegions, sourceCandidate) } } - b.retryQuota.GC(sourceCandidate.stores) + b.retryQuota.gc(sourceCandidate.stores) return result, collector.GetPlans() } @@ -278,7 +283,7 @@ func createTransferWitnessOperator(cs *candidateStores, b *balanceWitnessSchedul store := cs.getStore() ssolver.Step++ defer func() { ssolver.Step-- }() - retryLimit := b.retryQuota.GetLimit(store) + retryLimit := b.retryQuota.getLimit(store) ssolver.Source, ssolver.Target = store, nil var op *operator.Operator for i := 0; i < retryLimit; i++ { @@ -291,9 +296,9 @@ func createTransferWitnessOperator(cs *candidateStores, b *balanceWitnessSchedul } } if op != nil { - b.retryQuota.ResetLimit(store) + b.retryQuota.resetLimit(store) } else { - b.Attenuate(store) + b.attenuate(store) log.Debug("no operator created for selected stores", zap.String("scheduler", b.GetName()), zap.Uint64("transfer-out", store.GetID())) cs.next() } @@ -304,10 +309,10 @@ func createTransferWitnessOperator(cs *candidateStores, b *balanceWitnessSchedul // It randomly selects a health region from the source store, then picks // the best follower peer and transfers the witness. 
func (b *balanceWitnessScheduler) transferWitnessOut(solver *solver, collector *plan.Collector) *operator.Operator { - solver.Region = filter.SelectOneRegion(solver.RandWitnessRegions(solver.SourceStoreID(), b.conf.getRanges()), + solver.Region = filter.SelectOneRegion(solver.RandWitnessRegions(solver.sourceStoreID(), b.conf.getRanges()), collector, filter.NewRegionPendingFilter(), filter.NewRegionDownFilter()) if solver.Region == nil { - log.Debug("store has no witness", zap.String("scheduler", b.GetName()), zap.Uint64("store-id", solver.SourceStoreID())) + log.Debug("store has no witness", zap.String("scheduler", b.GetName()), zap.Uint64("store-id", solver.sourceStoreID())) schedulerCounter.WithLabelValues(b.GetName(), "no-witness-region").Inc() return nil } @@ -321,8 +326,8 @@ func (b *balanceWitnessScheduler) transferWitnessOut(solver *solver, collector * } targets = filter.SelectTargetStores(targets, finalFilters, conf, collector, b.filterCounter) sort.Slice(targets, func(i, j int) bool { - iOp := solver.GetOpInfluence(targets[i].GetID()) - jOp := solver.GetOpInfluence(targets[j].GetID()) + iOp := solver.getOpInfluence(targets[i].GetID()) + jOp := solver.getOpInfluence(targets[j].GetID()) return targets[i].WitnessScore(iOp) < targets[j].WitnessScore(jOp) }) for _, solver.Target = range targets { @@ -352,7 +357,7 @@ func (b *balanceWitnessScheduler) createOperator(solver *solver, collector *plan } solver.Step++ defer func() { solver.Step-- }() - op, err := operator.CreateMoveWitnessOperator(BalanceWitnessType, solver, solver.Region, solver.SourceStoreID(), solver.TargetStoreID()) + op, err := operator.CreateMoveWitnessOperator(BalanceWitnessType, solver, solver.Region, solver.sourceStoreID(), solver.targetStoreID()) if err != nil { log.Debug("fail to create balance witness operator", errs.ZapError(err)) return nil @@ -361,9 +366,9 @@ func (b *balanceWitnessScheduler) createOperator(solver *solver, collector *plan schedulerCounter.WithLabelValues(b.GetName(), "new-operator"), ) op.FinishedCounters = append(op.FinishedCounters, - balanceDirectionCounter.WithLabelValues(b.GetName(), solver.SourceMetricLabel(), solver.TargetMetricLabel()), - b.counter.WithLabelValues("move-witness", solver.SourceMetricLabel()+"-out"), - b.counter.WithLabelValues("move-witness", solver.TargetMetricLabel()+"-in"), + balanceDirectionCounter.WithLabelValues(b.GetName(), solver.sourceMetricLabel(), solver.targetMetricLabel()), + b.counter.WithLabelValues("move-witness", solver.sourceMetricLabel()+"-out"), + b.counter.WithLabelValues("move-witness", solver.targetMetricLabel()+"-in"), ) op.SetAdditionalInfo("sourceScore", strconv.FormatFloat(solver.sourceScore, 'f', 2, 64)) op.SetAdditionalInfo("targetScore", strconv.FormatFloat(solver.targetScore, 'f', 2, 64)) diff --git a/pkg/schedule/schedulers/base_scheduler.go b/pkg/schedule/schedulers/base_scheduler.go index 6cd02d2b555..b3dae9856e6 100644 --- a/pkg/schedule/schedulers/base_scheduler.go +++ b/pkg/schedule/schedulers/base_scheduler.go @@ -102,6 +102,7 @@ func (*BaseScheduler) PrepareConfig(sche.SchedulerCluster) error { return nil } // CleanConfig does some cleanup work about config. 
func (*BaseScheduler) CleanConfig(sche.SchedulerCluster) {} +// GetName returns the name of the scheduler func (s *BaseScheduler) GetName() string { if len(s.name) == 0 { return s.tp.String() @@ -109,6 +110,7 @@ func (s *BaseScheduler) GetName() string { return s.name } +// GetType returns the type of the scheduler func (s *BaseScheduler) GetType() types.CheckerSchedulerType { return s.tp } diff --git a/pkg/schedule/schedulers/evict_leader.go b/pkg/schedule/schedulers/evict_leader.go index 3aba9a5d184..7e5c4706043 100644 --- a/pkg/schedule/schedulers/evict_leader.go +++ b/pkg/schedule/schedulers/evict_leader.go @@ -265,26 +265,32 @@ func (s *evictLeaderScheduler) EvictStoreIDs() []uint64 { return s.conf.getStores() } +// ServeHTTP implements the http.Handler interface. func (s *evictLeaderScheduler) ServeHTTP(w http.ResponseWriter, r *http.Request) { s.handler.ServeHTTP(w, r) } +// EncodeConfig implements the Scheduler interface. func (s *evictLeaderScheduler) EncodeConfig() ([]byte, error) { return s.conf.encodeConfig() } +// ReloadConfig reloads the config from the storage. func (s *evictLeaderScheduler) ReloadConfig() error { return s.conf.reloadConfig(s.GetName()) } +// PrepareConfig implements the Scheduler interface. func (s *evictLeaderScheduler) PrepareConfig(cluster sche.SchedulerCluster) error { return s.conf.pauseLeaderTransfer(cluster) } +// CleanConfig implements the Scheduler interface. func (s *evictLeaderScheduler) CleanConfig(cluster sche.SchedulerCluster) { s.conf.resumeLeaderTransfer(cluster) } +// IsScheduleAllowed implements the Scheduler interface. func (s *evictLeaderScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetSchedulerConfig().GetLeaderScheduleLimit() if !allowed { @@ -293,6 +299,7 @@ func (s *evictLeaderScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) return allowed } +// Schedule implements the Scheduler interface. 
func (s *evictLeaderScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { evictLeaderCounter.Inc() return scheduleEvictLeaderBatch(s.GetName(), cluster, s.conf), nil @@ -399,7 +406,7 @@ type evictLeaderHandler struct { config *evictLeaderSchedulerConfig } -func (handler *evictLeaderHandler) UpdateConfig(w http.ResponseWriter, r *http.Request) { +func (handler *evictLeaderHandler) updateConfig(w http.ResponseWriter, r *http.Request) { var input map[string]any if err := apiutil.ReadJSONRespondError(handler.rd, w, r.Body, &input); err != nil { return @@ -454,12 +461,12 @@ func (handler *evictLeaderHandler) UpdateConfig(w http.ResponseWriter, r *http.R handler.rd.JSON(w, http.StatusOK, "The scheduler has been applied to the store.") } -func (handler *evictLeaderHandler) ListConfig(w http.ResponseWriter, _ *http.Request) { +func (handler *evictLeaderHandler) listConfig(w http.ResponseWriter, _ *http.Request) { conf := handler.config.Clone() handler.rd.JSON(w, http.StatusOK, conf) } -func (handler *evictLeaderHandler) DeleteConfig(w http.ResponseWriter, r *http.Request) { +func (handler *evictLeaderHandler) deleteConfig(w http.ResponseWriter, r *http.Request) { idStr := mux.Vars(r)["store_id"] id, err := strconv.ParseUint(idStr, 10, 64) if err != nil { @@ -486,8 +493,8 @@ func newEvictLeaderHandler(config *evictLeaderSchedulerConfig) http.Handler { rd: render.New(render.Options{IndentJSON: true}), } router := mux.NewRouter() - router.HandleFunc("/config", h.UpdateConfig).Methods(http.MethodPost) - router.HandleFunc("/list", h.ListConfig).Methods(http.MethodGet) - router.HandleFunc("/delete/{store_id}", h.DeleteConfig).Methods(http.MethodDelete) + router.HandleFunc("/config", h.updateConfig).Methods(http.MethodPost) + router.HandleFunc("/list", h.listConfig).Methods(http.MethodGet) + router.HandleFunc("/delete/{store_id}", h.deleteConfig).Methods(http.MethodDelete) return router } diff --git a/pkg/schedule/schedulers/evict_slow_store.go b/pkg/schedule/schedulers/evict_slow_store.go index 721444d1da7..bc0590531af 100644 --- a/pkg/schedule/schedulers/evict_slow_store.go +++ b/pkg/schedule/schedulers/evict_slow_store.go @@ -64,7 +64,7 @@ func initEvictSlowStoreSchedulerConfig(storage endpoint.ConfigStorage) *evictSlo } } -func (conf *evictSlowStoreSchedulerConfig) Clone() *evictSlowStoreSchedulerConfig { +func (conf *evictSlowStoreSchedulerConfig) clone() *evictSlowStoreSchedulerConfig { conf.RLock() defer conf.RUnlock() return &evictSlowStoreSchedulerConfig{ @@ -149,12 +149,12 @@ func newEvictSlowStoreHandler(config *evictSlowStoreSchedulerConfig) http.Handle rd: render.New(render.Options{IndentJSON: true}), } router := mux.NewRouter() - router.HandleFunc("/config", h.UpdateConfig).Methods(http.MethodPost) - router.HandleFunc("/list", h.ListConfig).Methods(http.MethodGet) + router.HandleFunc("/config", h.updateConfig).Methods(http.MethodPost) + router.HandleFunc("/list", h.listConfig).Methods(http.MethodGet) return router } -func (handler *evictSlowStoreHandler) UpdateConfig(w http.ResponseWriter, r *http.Request) { +func (handler *evictSlowStoreHandler) updateConfig(w http.ResponseWriter, r *http.Request) { var input map[string]any if err := apiutil.ReadJSONRespondError(handler.rd, w, r.Body, &input); err != nil { return @@ -178,8 +178,8 @@ func (handler *evictSlowStoreHandler) UpdateConfig(w http.ResponseWriter, r *htt handler.rd.JSON(w, http.StatusOK, "Config updated.") } -func (handler *evictSlowStoreHandler) ListConfig(w http.ResponseWriter, _ 
*http.Request) { - conf := handler.config.Clone() +func (handler *evictSlowStoreHandler) listConfig(w http.ResponseWriter, _ *http.Request) { + conf := handler.config.clone() handler.rd.JSON(w, http.StatusOK, conf) } @@ -189,14 +189,17 @@ type evictSlowStoreScheduler struct { handler http.Handler } +// ServeHTTP implements the http.Handler interface. func (s *evictSlowStoreScheduler) ServeHTTP(w http.ResponseWriter, r *http.Request) { s.handler.ServeHTTP(w, r) } +// EncodeConfig implements the Scheduler interface. func (s *evictSlowStoreScheduler) EncodeConfig() ([]byte, error) { return EncodeConfig(s.conf) } +// ReloadConfig implements the Scheduler interface. func (s *evictSlowStoreScheduler) ReloadConfig() error { s.conf.Lock() defer s.conf.Unlock() @@ -225,6 +228,7 @@ func (s *evictSlowStoreScheduler) ReloadConfig() error { return nil } +// PrepareConfig implements the Scheduler interface. func (s *evictSlowStoreScheduler) PrepareConfig(cluster sche.SchedulerCluster) error { evictStore := s.conf.evictStore() if evictStore != 0 { @@ -233,6 +237,7 @@ func (s *evictSlowStoreScheduler) PrepareConfig(cluster sche.SchedulerCluster) e return nil } +// CleanConfig implements the Scheduler interface. func (s *evictSlowStoreScheduler) CleanConfig(cluster sche.SchedulerCluster) { s.cleanupEvictLeader(cluster) } @@ -262,6 +267,7 @@ func (s *evictSlowStoreScheduler) schedulerEvictLeader(cluster sche.SchedulerClu return scheduleEvictLeaderBatch(s.GetName(), cluster, s.conf) } +// IsScheduleAllowed implements the Scheduler interface. func (s *evictSlowStoreScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { if s.conf.evictStore() != 0 { allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetSchedulerConfig().GetLeaderScheduleLimit() @@ -273,6 +279,7 @@ func (s *evictSlowStoreScheduler) IsScheduleAllowed(cluster sche.SchedulerCluste return true } +// Schedule implements the Scheduler interface. 
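The ReloadConfig methods above all follow the same shape: take the config's write lock, load the persisted JSON from storage, and copy the decoded fields onto the live config (the exact bodies are elided by the hunk context). A rough sketch of that pattern with hypothetical storage and field names:

package sketch

import (
	"encoding/json"
	"sync"
)

// kvStorage stands in for the real persistence backend.
type kvStorage interface {
	LoadSchedulerConfig(name string) (string, error)
}

type slowStoreConf struct {
	sync.RWMutex
	storage       kvStorage
	EvictedStores []uint64 `json:"evict-stores"`
}

// reloadConfig re-reads the persisted JSON and copies the fields in place,
// holding the write lock so readers never observe a partially updated config.
func (c *slowStoreConf) reloadConfig(name string) error {
	c.Lock()
	defer c.Unlock()
	data, err := c.storage.LoadSchedulerConfig(name)
	if err != nil || data == "" {
		return err
	}
	newConf := &slowStoreConf{}
	if err := json.Unmarshal([]byte(data), newConf); err != nil {
		return err
	}
	c.EvictedStores = newConf.EvictedStores
	return nil
}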
func (s *evictSlowStoreScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { evictSlowStoreCounter.Inc() diff --git a/pkg/schedule/schedulers/evict_slow_trend.go b/pkg/schedule/schedulers/evict_slow_trend.go index d14cec1e06a..5fa799c45b5 100644 --- a/pkg/schedule/schedulers/evict_slow_trend.go +++ b/pkg/schedule/schedulers/evict_slow_trend.go @@ -238,12 +238,12 @@ func newEvictSlowTrendHandler(config *evictSlowTrendSchedulerConfig) http.Handle rd: render.New(render.Options{IndentJSON: true}), } router := mux.NewRouter() - router.HandleFunc("/config", h.UpdateConfig).Methods(http.MethodPost) - router.HandleFunc("/list", h.ListConfig).Methods(http.MethodGet) + router.HandleFunc("/config", h.updateConfig).Methods(http.MethodPost) + router.HandleFunc("/list", h.listConfig).Methods(http.MethodGet) return router } -func (handler *evictSlowTrendHandler) UpdateConfig(w http.ResponseWriter, r *http.Request) { +func (handler *evictSlowTrendHandler) updateConfig(w http.ResponseWriter, r *http.Request) { var input map[string]any if err := apiutil.ReadJSONRespondError(handler.rd, w, r.Body, &input); err != nil { return @@ -267,7 +267,7 @@ func (handler *evictSlowTrendHandler) UpdateConfig(w http.ResponseWriter, r *htt handler.rd.JSON(w, http.StatusOK, "Config updated.") } -func (handler *evictSlowTrendHandler) ListConfig(w http.ResponseWriter, _ *http.Request) { +func (handler *evictSlowTrendHandler) listConfig(w http.ResponseWriter, _ *http.Request) { conf := handler.config.Clone() handler.rd.JSON(w, http.StatusOK, conf) } @@ -291,14 +291,17 @@ func (s *evictSlowTrendScheduler) GetNextInterval(time.Duration) time.Duration { return intervalGrow(s.GetMinInterval(), MaxScheduleInterval, growthType) } +// ServeHTTP implements the http.Handler interface. func (s *evictSlowTrendScheduler) ServeHTTP(w http.ResponseWriter, r *http.Request) { s.handler.ServeHTTP(w, r) } +// EncodeConfig implements the Scheduler interface. func (s *evictSlowTrendScheduler) EncodeConfig() ([]byte, error) { return EncodeConfig(s.conf) } +// ReloadConfig implements the Scheduler interface. func (s *evictSlowTrendScheduler) ReloadConfig() error { s.conf.Lock() defer s.conf.Unlock() @@ -327,6 +330,7 @@ func (s *evictSlowTrendScheduler) ReloadConfig() error { return nil } +// PrepareConfig implements the Scheduler interface. func (s *evictSlowTrendScheduler) PrepareConfig(cluster sche.SchedulerCluster) error { evictedStoreID := s.conf.evictedStore() if evictedStoreID == 0 { @@ -335,6 +339,7 @@ func (s *evictSlowTrendScheduler) PrepareConfig(cluster sche.SchedulerCluster) e return cluster.SlowTrendEvicted(evictedStoreID) } +// CleanConfig implements the Scheduler interface. func (s *evictSlowTrendScheduler) CleanConfig(cluster sche.SchedulerCluster) { s.cleanupEvictLeader(cluster) } @@ -369,6 +374,7 @@ func (s *evictSlowTrendScheduler) scheduleEvictLeader(cluster sche.SchedulerClus return scheduleEvictLeaderBatch(s.GetName(), cluster, s.conf) } +// IsScheduleAllowed implements the Scheduler interface. func (s *evictSlowTrendScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { if s.conf.evictedStore() == 0 { return true @@ -380,6 +386,7 @@ func (s *evictSlowTrendScheduler) IsScheduleAllowed(cluster sche.SchedulerCluste return allowed } +// Schedule implements the Scheduler interface. 
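GetNextInterval above stretches the scheduling interval through intervalGrow up to a cap. A hedged sketch of such exponential back-off; the 1.5x factor is an assumption, not the real constant:

package sketch

import "time"

// growInterval enlarges the current scheduling interval by 50% and clamps it
// to max, so an idle scheduler gradually backs off instead of busy-polling.
func growInterval(cur, max time.Duration) time.Duration {
	next := time.Duration(float64(cur) * 1.5)
	if next > max {
		return max
	}
	return next
}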
func (s *evictSlowTrendScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { schedulerCounter.WithLabelValues(s.GetName(), "schedule").Inc() diff --git a/pkg/schedule/schedulers/grant_hot_region.go b/pkg/schedule/schedulers/grant_hot_region.go index 4289effd7bd..1e45096a881 100644 --- a/pkg/schedule/schedulers/grant_hot_region.go +++ b/pkg/schedule/schedulers/grant_hot_region.go @@ -69,19 +69,19 @@ func (conf *grantHotRegionSchedulerConfig) setStore(leaderID uint64, peers []uin return ret } -func (conf *grantHotRegionSchedulerConfig) GetStoreLeaderID() uint64 { +func (conf *grantHotRegionSchedulerConfig) getStoreLeaderID() uint64 { conf.RLock() defer conf.RUnlock() return conf.StoreLeaderID } -func (conf *grantHotRegionSchedulerConfig) SetStoreLeaderID(id uint64) { +func (conf *grantHotRegionSchedulerConfig) setStoreLeaderID(id uint64) { conf.Lock() defer conf.Unlock() conf.StoreLeaderID = id } -func (conf *grantHotRegionSchedulerConfig) Clone() *grantHotRegionSchedulerConfig { +func (conf *grantHotRegionSchedulerConfig) clone() *grantHotRegionSchedulerConfig { conf.RLock() defer conf.RUnlock() newStoreIDs := make([]uint64, len(conf.StoreIDs)) @@ -139,10 +139,12 @@ func newGrantHotRegionScheduler(opController *operator.Controller, conf *grantHo return ret } +// EncodeConfig implements the Scheduler interface. func (s *grantHotRegionScheduler) EncodeConfig() ([]byte, error) { return EncodeConfig(s.conf) } +// ReloadConfig implements the Scheduler interface. func (s *grantHotRegionScheduler) ReloadConfig() error { s.conf.Lock() defer s.conf.Unlock() @@ -186,7 +188,7 @@ type grantHotRegionHandler struct { config *grantHotRegionSchedulerConfig } -func (handler *grantHotRegionHandler) UpdateConfig(w http.ResponseWriter, r *http.Request) { +func (handler *grantHotRegionHandler) updateConfig(w http.ResponseWriter, r *http.Request) { var input map[string]any if err := apiutil.ReadJSONRespondError(handler.rd, w, r.Body, &input); err != nil { return @@ -216,15 +218,15 @@ func (handler *grantHotRegionHandler) UpdateConfig(w http.ResponseWriter, r *htt } if err = handler.config.Persist(); err != nil { - handler.config.SetStoreLeaderID(0) + handler.config.setStoreLeaderID(0) handler.rd.JSON(w, http.StatusInternalServerError, err.Error()) return } handler.rd.JSON(w, http.StatusOK, nil) } -func (handler *grantHotRegionHandler) ListConfig(w http.ResponseWriter, _ *http.Request) { - conf := handler.config.Clone() +func (handler *grantHotRegionHandler) listConfig(w http.ResponseWriter, _ *http.Request) { + conf := handler.config.clone() handler.rd.JSON(w, http.StatusOK, conf) } @@ -234,8 +236,8 @@ func newGrantHotRegionHandler(config *grantHotRegionSchedulerConfig) http.Handle rd: render.New(render.Options{IndentJSON: true}), } router := mux.NewRouter() - router.HandleFunc("/config", h.UpdateConfig).Methods(http.MethodPost) - router.HandleFunc("/list", h.ListConfig).Methods(http.MethodGet) + router.HandleFunc("/config", h.updateConfig).Methods(http.MethodPost) + router.HandleFunc("/list", h.listConfig).Methods(http.MethodGet) return router } @@ -269,7 +271,7 @@ func (s *grantHotRegionScheduler) randomSchedule(cluster sche.SchedulerCluster, continue } } else { - if !s.conf.has(srcStoreID) || srcStoreID == s.conf.GetStoreLeaderID() { + if !s.conf.has(srcStoreID) || srcStoreID == s.conf.getStoreLeaderID() { continue } } @@ -310,7 +312,7 @@ func (s *grantHotRegionScheduler) transfer(cluster sche.SchedulerCluster, region var candidate []uint64 if isLeader { filters = 
append(filters, &filter.StoreStateFilter{ActionScope: s.GetName(), TransferLeader: true, OperatorLevel: constant.High}) - candidate = []uint64{s.conf.GetStoreLeaderID()} + candidate = []uint64{s.conf.getStoreLeaderID()} } else { filters = append(filters, &filter.StoreStateFilter{ActionScope: s.GetName(), MoveRegion: true, OperatorLevel: constant.High}, filter.NewExcludedFilter(s.GetName(), srcRegion.GetStoreIDs(), srcRegion.GetStoreIDs())) diff --git a/pkg/schedule/schedulers/grant_leader.go b/pkg/schedule/schedulers/grant_leader.go index 41e6debaafa..1cf194c5f49 100644 --- a/pkg/schedule/schedulers/grant_leader.go +++ b/pkg/schedule/schedulers/grant_leader.go @@ -70,7 +70,7 @@ func (conf *grantLeaderSchedulerConfig) BuildWithArgs(args []string) error { return nil } -func (conf *grantLeaderSchedulerConfig) Clone() *grantLeaderSchedulerConfig { +func (conf *grantLeaderSchedulerConfig) clone() *grantLeaderSchedulerConfig { conf.RLock() defer conf.RUnlock() newStoreIDWithRanges := make(map[uint64][]core.KeyRange) @@ -82,7 +82,7 @@ func (conf *grantLeaderSchedulerConfig) Clone() *grantLeaderSchedulerConfig { } } -func (conf *grantLeaderSchedulerConfig) Persist() error { +func (conf *grantLeaderSchedulerConfig) persist() error { conf.RLock() defer conf.RUnlock() data, err := EncodeConfig(conf) @@ -164,14 +164,17 @@ func newGrantLeaderScheduler(opController *operator.Controller, conf *grantLeade } } +// ServeHTTP implements the http.Handler interface. func (s *grantLeaderScheduler) ServeHTTP(w http.ResponseWriter, r *http.Request) { s.handler.ServeHTTP(w, r) } +// EncodeConfig implements the Scheduler interface. func (s *grantLeaderScheduler) EncodeConfig() ([]byte, error) { return EncodeConfig(s.conf) } +// ReloadConfig implements the Scheduler interface. func (s *grantLeaderScheduler) ReloadConfig() error { s.conf.Lock() defer s.conf.Unlock() @@ -191,6 +194,7 @@ func (s *grantLeaderScheduler) ReloadConfig() error { return nil } +// PrepareConfig implements the Scheduler interface. func (s *grantLeaderScheduler) PrepareConfig(cluster sche.SchedulerCluster) error { s.conf.RLock() defer s.conf.RUnlock() @@ -203,6 +207,7 @@ func (s *grantLeaderScheduler) PrepareConfig(cluster sche.SchedulerCluster) erro return res } +// CleanConfig implements the Scheduler interface. func (s *grantLeaderScheduler) CleanConfig(cluster sche.SchedulerCluster) { s.conf.RLock() defer s.conf.RUnlock() @@ -211,6 +216,7 @@ func (s *grantLeaderScheduler) CleanConfig(cluster sche.SchedulerCluster) { } } +// IsScheduleAllowed implements the Scheduler interface. func (s *grantLeaderScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetSchedulerConfig().GetLeaderScheduleLimit() if !allowed { @@ -219,6 +225,7 @@ func (s *grantLeaderScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) return allowed } +// Schedule implements the Scheduler interface. 
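IsScheduleAllowed above compares the number of in-flight leader operators against the configured leader-schedule-limit and records when scheduling is throttled. A reduced sketch with illustrative interfaces (none of the names below are the real PD types):

package sketch

// opCounter and limitMetric are stand-ins for the operator controller and the
// counter metric used by the real scheduler.
type opCounter interface {
	OperatorCount(kind string) uint64
}

type limitMetric interface {
	Inc(labels ...string)
}

// isScheduleAllowed admits a scheduling round only while the number of
// in-flight leader operators stays below the configured limit; throttling is
// surfaced through a counter so it shows up in metrics.
func isScheduleAllowed(ops opCounter, leaderLimit uint64, throttled limitMetric) bool {
	allowed := ops.OperatorCount("leader") < leaderLimit
	if !allowed {
		throttled.Inc("grant-leader", "leader")
	}
	return allowed
}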
func (s *grantLeaderScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { grantLeaderCounter.Inc() storeIDWithRanges := s.conf.getStoreIDWithRanges() @@ -250,7 +257,7 @@ type grantLeaderHandler struct { config *grantLeaderSchedulerConfig } -func (handler *grantLeaderHandler) UpdateConfig(w http.ResponseWriter, r *http.Request) { +func (handler *grantLeaderHandler) updateConfig(w http.ResponseWriter, r *http.Request) { var input map[string]any if err := apiutil.ReadJSONRespondError(handler.rd, w, r.Body, &input); err != nil { return @@ -285,7 +292,7 @@ func (handler *grantLeaderHandler) UpdateConfig(w http.ResponseWriter, r *http.R handler.rd.JSON(w, http.StatusBadRequest, err.Error()) return } - err = handler.config.Persist() + err = handler.config.persist() if err != nil { handler.config.removeStore(id) handler.rd.JSON(w, http.StatusInternalServerError, err.Error()) @@ -294,12 +301,12 @@ func (handler *grantLeaderHandler) UpdateConfig(w http.ResponseWriter, r *http.R handler.rd.JSON(w, http.StatusOK, "The scheduler has been applied to the store.") } -func (handler *grantLeaderHandler) ListConfig(w http.ResponseWriter, _ *http.Request) { - conf := handler.config.Clone() +func (handler *grantLeaderHandler) listConfig(w http.ResponseWriter, _ *http.Request) { + conf := handler.config.clone() handler.rd.JSON(w, http.StatusOK, conf) } -func (handler *grantLeaderHandler) DeleteConfig(w http.ResponseWriter, r *http.Request) { +func (handler *grantLeaderHandler) deleteConfig(w http.ResponseWriter, r *http.Request) { idStr := mux.Vars(r)["store_id"] id, err := strconv.ParseUint(idStr, 10, 64) if err != nil { @@ -311,7 +318,7 @@ func (handler *grantLeaderHandler) DeleteConfig(w http.ResponseWriter, r *http.R keyRanges := handler.config.getKeyRangesByID(id) succ, last := handler.config.removeStore(id) if succ { - err = handler.config.Persist() + err = handler.config.persist() if err != nil { handler.config.resetStore(id, keyRanges) handler.rd.JSON(w, http.StatusInternalServerError, err.Error()) @@ -342,8 +349,8 @@ func newGrantLeaderHandler(config *grantLeaderSchedulerConfig) http.Handler { rd: render.New(render.Options{IndentJSON: true}), } router := mux.NewRouter() - router.HandleFunc("/config", h.UpdateConfig).Methods(http.MethodPost) - router.HandleFunc("/list", h.ListConfig).Methods(http.MethodGet) - router.HandleFunc("/delete/{store_id}", h.DeleteConfig).Methods(http.MethodDelete) + router.HandleFunc("/config", h.updateConfig).Methods(http.MethodPost) + router.HandleFunc("/list", h.listConfig).Methods(http.MethodGet) + router.HandleFunc("/delete/{store_id}", h.deleteConfig).Methods(http.MethodDelete) return router } diff --git a/pkg/schedule/schedulers/hot_region.go b/pkg/schedule/schedulers/hot_region.go index fe9b3964139..ff837e67ad2 100644 --- a/pkg/schedule/schedulers/hot_region.go +++ b/pkg/schedule/schedulers/hot_region.go @@ -203,7 +203,7 @@ type hotScheduler struct { func newHotScheduler(opController *operator.Controller, conf *hotRegionSchedulerConfig) *hotScheduler { base := newBaseHotScheduler(opController, - conf.GetHistorySampleDuration(), conf.GetHistorySampleInterval()) + conf.getHistorySampleDuration(), conf.getHistorySampleInterval()) ret := &hotScheduler{ name: HotRegionName, baseHotScheduler: base, @@ -215,10 +215,12 @@ func newHotScheduler(opController *operator.Controller, conf *hotRegionScheduler return ret } +// EncodeConfig implements the Scheduler interface. 
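newGrantLeaderHandler above registers the now-unexported handler methods on a gorilla/mux router, so only the assembled http.Handler leaves the package. A cut-down sketch of that shape with a hypothetical config handler:

package sketch

import (
	"encoding/json"
	"net/http"

	"github.com/gorilla/mux"
)

// confHandler is a hypothetical config handler; only the constructor below is
// meant to be used from outside.
type confHandler struct {
	storeIDs []uint64
}

func (h *confHandler) updateConfig(w http.ResponseWriter, r *http.Request) {
	var input map[string]any
	if err := json.NewDecoder(r.Body).Decode(&input); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	w.WriteHeader(http.StatusOK)
}

func (h *confHandler) listConfig(w http.ResponseWriter, _ *http.Request) {
	_ = json.NewEncoder(w).Encode(h.storeIDs)
}

// newConfHandler exposes only the assembled router, keeping the individual
// endpoints unexported.
func newConfHandler() http.Handler {
	h := &confHandler{}
	router := mux.NewRouter()
	router.HandleFunc("/config", h.updateConfig).Methods(http.MethodPost)
	router.HandleFunc("/list", h.listConfig).Methods(http.MethodGet)
	return router
}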
func (h *hotScheduler) EncodeConfig() ([]byte, error) { - return h.conf.EncodeConfig() + return h.conf.encodeConfig() } +// ReloadConfig implements the Scheduler interface. func (h *hotScheduler) ReloadConfig() error { h.conf.Lock() defer h.conf.Unlock() @@ -259,18 +261,22 @@ func (h *hotScheduler) ReloadConfig() error { return nil } +// ServeHTTP implements the http.Handler interface. func (h *hotScheduler) ServeHTTP(w http.ResponseWriter, r *http.Request) { h.conf.ServeHTTP(w, r) } +// GetMinInterval implements the Scheduler interface. func (*hotScheduler) GetMinInterval() time.Duration { return minHotScheduleInterval } +// GetNextInterval implements the Scheduler interface. func (h *hotScheduler) GetNextInterval(time.Duration) time.Duration { return intervalGrow(h.GetMinInterval(), maxHotScheduleInterval, exponentialGrowth) } +// IsScheduleAllowed implements the Scheduler interface. func (h *hotScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { allowed := h.OpController.OperatorCount(operator.OpHotRegion) < cluster.GetSchedulerConfig().GetHotRegionScheduleLimit() if !allowed { @@ -279,6 +285,7 @@ func (h *hotScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { return allowed } +// Schedule implements the Scheduler interface. func (h *hotScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { hotSchedulerCounter.Inc() typ := h.randomType() @@ -288,22 +295,22 @@ func (h *hotScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*opera func (h *hotScheduler) dispatch(typ resourceType, cluster sche.SchedulerCluster) []*operator.Operator { h.Lock() defer h.Unlock() - h.updateHistoryLoadConfig(h.conf.GetHistorySampleDuration(), h.conf.GetHistorySampleInterval()) + h.updateHistoryLoadConfig(h.conf.getHistorySampleDuration(), h.conf.getHistorySampleInterval()) h.prepareForBalance(typ, cluster) - // IsForbidRWType can not be move earlier to support to use api and metrics. + // isForbidRWType can not be move earlier to support to use api and metrics. switch typ { case readLeader, readPeer: - if h.conf.IsForbidRWType(utils.Read) { + if h.conf.isForbidRWType(utils.Read) { return nil } return h.balanceHotReadRegions(cluster) case writePeer: - if h.conf.IsForbidRWType(utils.Write) { + if h.conf.isForbidRWType(utils.Write) { return nil } return h.balanceHotWritePeers(cluster) case writeLeader: - if h.conf.IsForbidRWType(utils.Write) { + if h.conf.isForbidRWType(utils.Write) { return nil } return h.balanceHotWriteLeaders(cluster) @@ -499,11 +506,11 @@ type balanceSolver struct { func (bs *balanceSolver) init() { // Load the configuration items of the scheduler.
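dispatch above prepares load statistics first and only then checks isForbidRWType, so the HTTP API and metrics keep reporting even when a read or write flow is forbidden. A compact sketch of that gating with stand-in types:

package sketch

type rwType int

const (
	read rwType = iota
	write
)

type hotConf struct {
	forbidden map[rwType]bool
}

func (c *hotConf) isForbidden(rw rwType) bool { return c.forbidden[rw] }

// dispatch prepares statistics unconditionally, then bails out before
// generating operators if the flow is forbidden, so observability keeps
// working while scheduling is switched off.
func dispatch(c *hotConf, rw rwType, prepare func(), balance func() []string) []string {
	prepare()
	if c.isForbidden(rw) {
		return nil
	}
	return balance()
}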
bs.resourceTy = toResourceType(bs.rwTy, bs.opTy) - bs.maxPeerNum = bs.sche.conf.GetMaxPeerNumber() + bs.maxPeerNum = bs.sche.conf.getMaxPeerNumber() bs.minHotDegree = bs.GetSchedulerConfig().GetHotRegionCacheHitsThreshold() bs.firstPriority, bs.secondPriority = prioritiesToDim(bs.getPriorities()) - bs.greatDecRatio, bs.minorDecRatio = bs.sche.conf.GetGreatDecRatio(), bs.sche.conf.GetMinorDecRatio() - switch bs.sche.conf.GetRankFormulaVersion() { + bs.greatDecRatio, bs.minorDecRatio = bs.sche.conf.getGreatDecRatio(), bs.sche.conf.getMinorDecRatio() + switch bs.sche.conf.getRankFormulaVersion() { case "v1": bs.rank = initRankV1(bs) default: @@ -534,16 +541,16 @@ func (bs *balanceSolver) init() { } rankStepRatios := []float64{ - utils.ByteDim: bs.sche.conf.GetByteRankStepRatio(), - utils.KeyDim: bs.sche.conf.GetKeyRankStepRatio(), - utils.QueryDim: bs.sche.conf.GetQueryRateRankStepRatio()} + utils.ByteDim: bs.sche.conf.getByteRankStepRatio(), + utils.KeyDim: bs.sche.conf.getKeyRankStepRatio(), + utils.QueryDim: bs.sche.conf.getQueryRateRankStepRatio()} stepLoads := make([]float64, utils.DimLen) for i := range stepLoads { stepLoads[i] = maxCur.Loads[i] * rankStepRatios[i] } bs.rankStep = &statistics.StoreLoad{ Loads: stepLoads, - Count: maxCur.Count * bs.sche.conf.GetCountRankStepRatio(), + Count: maxCur.Count * bs.sche.conf.getCountRankStepRatio(), } } @@ -557,11 +564,11 @@ func (bs *balanceSolver) getPriorities() []string { // For write, they are different switch bs.resourceTy { case readLeader, readPeer: - return adjustPrioritiesConfig(querySupport, bs.sche.conf.GetReadPriorities(), getReadPriorities) + return adjustPrioritiesConfig(querySupport, bs.sche.conf.getReadPriorities(), getReadPriorities) case writeLeader: - return adjustPrioritiesConfig(querySupport, bs.sche.conf.GetWriteLeaderPriorities(), getWriteLeaderPriorities) + return adjustPrioritiesConfig(querySupport, bs.sche.conf.getWriteLeaderPriorities(), getWriteLeaderPriorities) case writePeer: - return adjustPrioritiesConfig(querySupport, bs.sche.conf.GetWritePeerPriorities(), getWritePeerPriorities) + return adjustPrioritiesConfig(querySupport, bs.sche.conf.getWritePeerPriorities(), getWritePeerPriorities) } log.Error("illegal type or illegal operator while getting the priority", zap.String("type", bs.rwTy.String()), zap.String("operator", bs.opTy.String())) return []string{} @@ -763,16 +770,16 @@ func (bs *balanceSolver) calcMaxZombieDur() time.Duration { // We use store query info rather than total of hot write leader to guide hot write leader scheduler // when its first priority is `QueryDim`, because `Write-peer` does not have `QueryDim`. // The reason is the same with `tikvCollector.GetLoads`. 
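The rank-step setup above multiplies the current maximum load in each dimension by a per-dimension step ratio; with made-up numbers, a 100 MiB/s byte rate and a 0.05 byte-rank-step-ratio give a 5 MiB/s step. A small sketch of that computation:

package sketch

// rankSteps converts per-dimension step ratios into absolute step sizes based
// on the current maximum load in each dimension.
func rankSteps(maxLoads, ratios []float64) []float64 {
	steps := make([]float64, len(maxLoads))
	for i := range steps {
		steps[i] = maxLoads[i] * ratios[i]
	}
	return steps
}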
- return bs.sche.conf.GetStoreStatZombieDuration() + return bs.sche.conf.getStoreStatZombieDuration() } - return bs.sche.conf.GetRegionsStatZombieDuration() + return bs.sche.conf.getRegionsStatZombieDuration() case writePeer: if bs.best.srcStore.IsTiFlash() { - return bs.sche.conf.GetRegionsStatZombieDuration() + return bs.sche.conf.getRegionsStatZombieDuration() } - return bs.sche.conf.GetStoreStatZombieDuration() + return bs.sche.conf.getStoreStatZombieDuration() default: - return bs.sche.conf.GetStoreStatZombieDuration() + return bs.sche.conf.getStoreStatZombieDuration() } } @@ -780,8 +787,8 @@ func (bs *balanceSolver) calcMaxZombieDur() time.Duration { // its expectation * ratio, the store would be selected as hot source store func (bs *balanceSolver) filterSrcStores() map[uint64]*statistics.StoreLoadDetail { ret := make(map[uint64]*statistics.StoreLoadDetail) - confSrcToleranceRatio := bs.sche.conf.GetSrcToleranceRatio() - confEnableForTiFlash := bs.sche.conf.GetEnableForTiFlash() + confSrcToleranceRatio := bs.sche.conf.getSrcToleranceRatio() + confEnableForTiFlash := bs.sche.conf.getEnableForTiFlash() for id, detail := range bs.stLoadDetail { srcToleranceRatio := confSrcToleranceRatio if detail.IsTiFlash() { @@ -1019,8 +1026,8 @@ func (bs *balanceSolver) filterDstStores() map[uint64]*statistics.StoreLoadDetai func (bs *balanceSolver) pickDstStores(filters []filter.Filter, candidates []*statistics.StoreLoadDetail) map[uint64]*statistics.StoreLoadDetail { ret := make(map[uint64]*statistics.StoreLoadDetail, len(candidates)) - confDstToleranceRatio := bs.sche.conf.GetDstToleranceRatio() - confEnableForTiFlash := bs.sche.conf.GetEnableForTiFlash() + confDstToleranceRatio := bs.sche.conf.getDstToleranceRatio() + confEnableForTiFlash := bs.sche.conf.getEnableForTiFlash() for _, detail := range candidates { store := detail.StoreInfo dstToleranceRatio := confDstToleranceRatio @@ -1113,7 +1120,7 @@ func (bs *balanceSolver) checkHistoryLoadsByPriorityAndToleranceFirstOnly(_ [][] } func (bs *balanceSolver) enableExpectation() bool { - return bs.sche.conf.GetDstToleranceRatio() > 0 && bs.sche.conf.GetSrcToleranceRatio() > 0 + return bs.sche.conf.getDstToleranceRatio() > 0 && bs.sche.conf.getSrcToleranceRatio() > 0 } func (bs *balanceSolver) isUniformFirstPriority(store *statistics.StoreLoadDetail) bool { @@ -1149,11 +1156,11 @@ func (bs *balanceSolver) isTolerance(dim int, reverse bool) bool { func (bs *balanceSolver) getMinRate(dim int) float64 { switch dim { case utils.KeyDim: - return bs.sche.conf.GetMinHotKeyRate() + return bs.sche.conf.getMinHotKeyRate() case utils.ByteDim: - return bs.sche.conf.GetMinHotByteRate() + return bs.sche.conf.getMinHotByteRate() case utils.QueryDim: - return bs.sche.conf.GetMinHotQueryRate() + return bs.sche.conf.getMinHotQueryRate() } return -1 } diff --git a/pkg/schedule/schedulers/hot_region_config.go b/pkg/schedule/schedulers/hot_region_config.go index 5f08d755f76..83121254cc0 100644 --- a/pkg/schedule/schedulers/hot_region_config.go +++ b/pkg/schedule/schedulers/hot_region_config.go @@ -157,181 +157,183 @@ type hotRegionSchedulerConfig struct { HistorySampleInterval typeutil.Duration `json:"history-sample-interval"` } -func (conf *hotRegionSchedulerConfig) EncodeConfig() ([]byte, error) { +func (conf *hotRegionSchedulerConfig) encodeConfig() ([]byte, error) { conf.RLock() defer conf.RUnlock() return EncodeConfig(conf) } -func (conf *hotRegionSchedulerConfig) GetStoreStatZombieDuration() time.Duration { +func (conf *hotRegionSchedulerConfig) 
getStoreStatZombieDuration() time.Duration { conf.RLock() defer conf.RUnlock() return time.Duration(conf.MaxZombieRounds*utils.StoreHeartBeatReportInterval) * time.Second } -func (conf *hotRegionSchedulerConfig) GetRegionsStatZombieDuration() time.Duration { +func (conf *hotRegionSchedulerConfig) getRegionsStatZombieDuration() time.Duration { conf.RLock() defer conf.RUnlock() return time.Duration(conf.MaxZombieRounds*utils.RegionHeartBeatReportInterval) * time.Second } -func (conf *hotRegionSchedulerConfig) GetMaxPeerNumber() int { +func (conf *hotRegionSchedulerConfig) getMaxPeerNumber() int { conf.RLock() defer conf.RUnlock() return conf.MaxPeerNum } -func (conf *hotRegionSchedulerConfig) GetSrcToleranceRatio() float64 { +func (conf *hotRegionSchedulerConfig) getSrcToleranceRatio() float64 { conf.RLock() defer conf.RUnlock() return conf.SrcToleranceRatio } -func (conf *hotRegionSchedulerConfig) SetSrcToleranceRatio(tol float64) { +func (conf *hotRegionSchedulerConfig) setSrcToleranceRatio(tol float64) { conf.Lock() defer conf.Unlock() conf.SrcToleranceRatio = tol } -func (conf *hotRegionSchedulerConfig) GetDstToleranceRatio() float64 { +func (conf *hotRegionSchedulerConfig) getDstToleranceRatio() float64 { conf.RLock() defer conf.RUnlock() return conf.DstToleranceRatio } -func (conf *hotRegionSchedulerConfig) SetDstToleranceRatio(tol float64) { +func (conf *hotRegionSchedulerConfig) setDstToleranceRatio(tol float64) { conf.Lock() defer conf.Unlock() conf.DstToleranceRatio = tol } -func (conf *hotRegionSchedulerConfig) GetByteRankStepRatio() float64 { +func (conf *hotRegionSchedulerConfig) getByteRankStepRatio() float64 { conf.RLock() defer conf.RUnlock() return conf.ByteRateRankStepRatio } -func (conf *hotRegionSchedulerConfig) GetKeyRankStepRatio() float64 { +func (conf *hotRegionSchedulerConfig) getKeyRankStepRatio() float64 { conf.RLock() defer conf.RUnlock() return conf.KeyRateRankStepRatio } -func (conf *hotRegionSchedulerConfig) GetQueryRateRankStepRatio() float64 { +func (conf *hotRegionSchedulerConfig) getQueryRateRankStepRatio() float64 { conf.RLock() defer conf.RUnlock() return conf.QueryRateRankStepRatio } -func (conf *hotRegionSchedulerConfig) GetCountRankStepRatio() float64 { +func (conf *hotRegionSchedulerConfig) getCountRankStepRatio() float64 { conf.RLock() defer conf.RUnlock() return conf.CountRankStepRatio } -func (conf *hotRegionSchedulerConfig) GetGreatDecRatio() float64 { +func (conf *hotRegionSchedulerConfig) getGreatDecRatio() float64 { conf.RLock() defer conf.RUnlock() return conf.GreatDecRatio } -func (conf *hotRegionSchedulerConfig) SetStrictPickingStore(v bool) { +func (conf *hotRegionSchedulerConfig) setStrictPickingStore(v bool) { conf.RLock() defer conf.RUnlock() conf.StrictPickingStore = v } -func (conf *hotRegionSchedulerConfig) GetMinorDecRatio() float64 { +func (conf *hotRegionSchedulerConfig) getMinorDecRatio() float64 { conf.RLock() defer conf.RUnlock() return conf.MinorDecRatio } -func (conf *hotRegionSchedulerConfig) GetMinHotKeyRate() float64 { +func (conf *hotRegionSchedulerConfig) getMinHotKeyRate() float64 { conf.RLock() defer conf.RUnlock() return conf.MinHotKeyRate } -func (conf *hotRegionSchedulerConfig) GetMinHotByteRate() float64 { +func (conf *hotRegionSchedulerConfig) getMinHotByteRate() float64 { conf.RLock() defer conf.RUnlock() return conf.MinHotByteRate } -func (conf *hotRegionSchedulerConfig) GetEnableForTiFlash() bool { +func (conf *hotRegionSchedulerConfig) getEnableForTiFlash() bool { conf.RLock() defer conf.RUnlock() return 
conf.EnableForTiFlash } -func (conf *hotRegionSchedulerConfig) SetEnableForTiFlash(enable bool) { +func (conf *hotRegionSchedulerConfig) setEnableForTiFlash(enable bool) { conf.Lock() defer conf.Unlock() conf.EnableForTiFlash = enable } -func (conf *hotRegionSchedulerConfig) GetMinHotQueryRate() float64 { +func (conf *hotRegionSchedulerConfig) getMinHotQueryRate() float64 { conf.RLock() defer conf.RUnlock() return conf.MinHotQueryRate } -func (conf *hotRegionSchedulerConfig) GetReadPriorities() []string { +func (conf *hotRegionSchedulerConfig) getReadPriorities() []string { conf.RLock() defer conf.RUnlock() return conf.ReadPriorities } -func (conf *hotRegionSchedulerConfig) GetWriteLeaderPriorities() []string { +func (conf *hotRegionSchedulerConfig) getWriteLeaderPriorities() []string { conf.RLock() defer conf.RUnlock() return conf.WriteLeaderPriorities } -func (conf *hotRegionSchedulerConfig) GetWritePeerPriorities() []string { +func (conf *hotRegionSchedulerConfig) getWritePeerPriorities() []string { conf.RLock() defer conf.RUnlock() return conf.WritePeerPriorities } -func (conf *hotRegionSchedulerConfig) IsStrictPickingStoreEnabled() bool { +func (conf *hotRegionSchedulerConfig) isStrictPickingStoreEnabled() bool { conf.RLock() defer conf.RUnlock() return conf.StrictPickingStore } -func (conf *hotRegionSchedulerConfig) SetRankFormulaVersion(v string) { +func (conf *hotRegionSchedulerConfig) setRankFormulaVersion(v string) { conf.Lock() defer conf.Unlock() conf.RankFormulaVersion = v } -func (conf *hotRegionSchedulerConfig) GetRankFormulaVersion() string { +func (conf *hotRegionSchedulerConfig) getRankFormulaVersion() string { conf.RLock() defer conf.RUnlock() return conf.getRankFormulaVersionLocked() } -func (conf *hotRegionSchedulerConfig) GetHistorySampleDuration() time.Duration { +func (conf *hotRegionSchedulerConfig) getHistorySampleDuration() time.Duration { conf.RLock() defer conf.RUnlock() return conf.HistorySampleDuration.Duration } -func (conf *hotRegionSchedulerConfig) GetHistorySampleInterval() time.Duration { +func (conf *hotRegionSchedulerConfig) getHistorySampleInterval() time.Duration { conf.RLock() defer conf.RUnlock() return conf.HistorySampleInterval.Duration } -func (conf *hotRegionSchedulerConfig) SetHistorySampleDuration(d time.Duration) { +// nolint: unused, unparam +func (conf *hotRegionSchedulerConfig) setHistorySampleDuration(d time.Duration) { conf.Lock() defer conf.Unlock() conf.HistorySampleDuration = typeutil.NewDuration(d) } -func (conf *hotRegionSchedulerConfig) SetHistorySampleInterval(d time.Duration) { +// nolint: unused +func (conf *hotRegionSchedulerConfig) setHistorySampleInterval(d time.Duration) { conf.Lock() defer conf.Unlock() conf.HistorySampleInterval = typeutil.NewDuration(d) @@ -346,7 +348,7 @@ func (conf *hotRegionSchedulerConfig) getRankFormulaVersionLocked() string { } } -func (conf *hotRegionSchedulerConfig) IsForbidRWType(rw utils.RWType) bool { +func (conf *hotRegionSchedulerConfig) isForbidRWType(rw utils.RWType) bool { conf.RLock() defer conf.RUnlock() return rw.String() == conf.ForbidRWType @@ -367,6 +369,7 @@ func (conf *hotRegionSchedulerConfig) getForbidRWTypeLocked() string { } } +// ServeHTTP implements the http.Handler interface. 
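The accessors above follow one locking pattern: getters take the read lock, setters take the write lock (setStrictPickingStore above still wraps its write in RLock, which is worth a second look). A generic sketch with a hypothetical config type:

package sketch

import "sync"

// hotConfig is a hypothetical config type; the real one carries many more
// fields.
type hotConfig struct {
	sync.RWMutex
	SrcToleranceRatio float64 `json:"src-tolerance-ratio"`
}

// getSrcToleranceRatio takes the read lock so concurrent readers do not block
// each other.
func (c *hotConfig) getSrcToleranceRatio() float64 {
	c.RLock()
	defer c.RUnlock()
	return c.SrcToleranceRatio
}

// setSrcToleranceRatio takes the write lock because it mutates the field.
func (c *hotConfig) setSrcToleranceRatio(v float64) {
	c.Lock()
	defer c.Unlock()
	c.SrcToleranceRatio = v
}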
func (conf *hotRegionSchedulerConfig) ServeHTTP(w http.ResponseWriter, r *http.Request) { router := mux.NewRouter() router.HandleFunc("/list", conf.handleGetConfig).Methods(http.MethodGet) diff --git a/pkg/schedule/schedulers/hot_region_rank_v1.go b/pkg/schedule/schedulers/hot_region_rank_v1.go index ebf6e9bf744..9005dff8861 100644 --- a/pkg/schedule/schedulers/hot_region_rank_v1.go +++ b/pkg/schedule/schedulers/hot_region_rank_v1.go @@ -39,7 +39,7 @@ func (r *rankV1) checkByPriorityAndTolerance(loads []float64, f func(int) bool) switch { case r.resourceTy == writeLeader: return r.checkByPriorityAndToleranceFirstOnly(loads, f) - case r.sche.conf.IsStrictPickingStoreEnabled(): + case r.sche.conf.isStrictPickingStoreEnabled(): return r.checkByPriorityAndToleranceAllOf(loads, f) default: return r.checkByPriorityAndToleranceFirstOnly(loads, f) @@ -50,7 +50,7 @@ func (r *rankV1) checkHistoryLoadsByPriority(loads [][]float64, f func(int) bool switch { case r.resourceTy == writeLeader: return r.checkHistoryLoadsByPriorityAndToleranceFirstOnly(loads, f) - case r.sche.conf.IsStrictPickingStoreEnabled(): + case r.sche.conf.isStrictPickingStoreEnabled(): return r.checkHistoryLoadsByPriorityAndToleranceAllOf(loads, f) default: return r.checkHistoryLoadsByPriorityAndToleranceFirstOnly(loads, f) diff --git a/pkg/schedule/schedulers/hot_region_rank_v2_test.go b/pkg/schedule/schedulers/hot_region_rank_v2_test.go index 0237c2156ec..029d47c3c51 100644 --- a/pkg/schedule/schedulers/hot_region_rank_v2_test.go +++ b/pkg/schedule/schedulers/hot_region_rank_v2_test.go @@ -36,10 +36,10 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimSecond(t *testing.T) { re.NoError(err) hb := sche.(*hotScheduler) hb.types = []resourceType{writePeer} - hb.conf.SetDstToleranceRatio(0.0) - hb.conf.SetSrcToleranceRatio(0.0) - hb.conf.SetRankFormulaVersion("v1") - hb.conf.SetHistorySampleDuration(0) + hb.conf.setDstToleranceRatio(0.0) + hb.conf.setSrcToleranceRatio(0.0) + hb.conf.setRankFormulaVersion("v1") + hb.conf.setHistorySampleDuration(0) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.AddRegionStore(1, 20) tc.AddRegionStore(2, 20) @@ -62,7 +62,7 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimSecond(t *testing.T) { re.Empty(ops) re.False(hb.searchRevertRegions[writePeer]) - hb.conf.SetRankFormulaVersion("v2") + hb.conf.setRankFormulaVersion("v2") // searchRevertRegions becomes true after the first `Schedule`. ops, _ = hb.Schedule(tc, false) re.Empty(ops) @@ -97,10 +97,10 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimFirst(t *testing.T) { re.NoError(err) hb := sche.(*hotScheduler) hb.types = []resourceType{writePeer} - hb.conf.SetDstToleranceRatio(0.0) - hb.conf.SetSrcToleranceRatio(0.0) - hb.conf.SetRankFormulaVersion("v1") - hb.conf.SetHistorySampleDuration(0) + hb.conf.setDstToleranceRatio(0.0) + hb.conf.setSrcToleranceRatio(0.0) + hb.conf.setRankFormulaVersion("v1") + hb.conf.setHistorySampleDuration(0) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.AddRegionStore(1, 20) tc.AddRegionStore(2, 20) @@ -125,7 +125,7 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimFirst(t *testing.T) { re.False(hb.searchRevertRegions[writePeer]) clearPendingInfluence(hb) - hb.conf.SetRankFormulaVersion("v2") + hb.conf.setRankFormulaVersion("v2") // searchRevertRegions becomes true after the first `Schedule`. 
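With the setters unexported, the tests above reach them by type-asserting the created scheduler back to *hotScheduler from inside the same package. A reduced sketch of that testing pattern (all types below are stand-ins):

package sketch

import "testing"

type scheduler interface {
	Schedule() []string
}

type hotScheduler struct {
	srcTolerance float64
}

func (h *hotScheduler) Schedule() []string { return nil }

func (h *hotScheduler) setSrcToleranceRatio(v float64) { h.srcTolerance = v }

// TestTuneUnexportedKnob shows how a same-package test can still reach the
// unexported setters by asserting the interface value back to the concrete
// type; external packages only ever see the interface.
func TestTuneUnexportedKnob(t *testing.T) {
	var s scheduler = &hotScheduler{}
	s.(*hotScheduler).setSrcToleranceRatio(1.0)
	if got := s.(*hotScheduler).srcTolerance; got != 1.0 {
		t.Fatalf("unexpected tolerance: %v", got)
	}
}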
ops, _ = hb.Schedule(tc, false) re.Len(ops, 1) @@ -149,10 +149,10 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimFirstOnly(t *testing.T) { re.NoError(err) hb := sche.(*hotScheduler) hb.types = []resourceType{writePeer} - hb.conf.SetDstToleranceRatio(0.0) - hb.conf.SetSrcToleranceRatio(0.0) - hb.conf.SetRankFormulaVersion("v1") - hb.conf.SetHistorySampleDuration(0) + hb.conf.setDstToleranceRatio(0.0) + hb.conf.setSrcToleranceRatio(0.0) + hb.conf.setRankFormulaVersion("v1") + hb.conf.setHistorySampleDuration(0) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.AddRegionStore(1, 20) tc.AddRegionStore(2, 20) @@ -177,7 +177,7 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimFirstOnly(t *testing.T) { re.False(hb.searchRevertRegions[writePeer]) clearPendingInfluence(hb) - hb.conf.SetRankFormulaVersion("v2") + hb.conf.setRankFormulaVersion("v2") // searchRevertRegions becomes true after the first `Schedule`. ops, _ = hb.Schedule(tc, false) re.Len(ops, 1) @@ -209,10 +209,10 @@ func TestHotReadRegionScheduleWithRevertRegionsDimSecond(t *testing.T) { sche, err := CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil, nil) re.NoError(err) hb := sche.(*hotScheduler) - hb.conf.SetDstToleranceRatio(0.0) - hb.conf.SetSrcToleranceRatio(0.0) - hb.conf.SetRankFormulaVersion("v1") - hb.conf.SetHistorySampleDuration(0) + hb.conf.setDstToleranceRatio(0.0) + hb.conf.setSrcToleranceRatio(0.0) + hb.conf.setRankFormulaVersion("v1") + hb.conf.setHistorySampleDuration(0) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.AddRegionStore(1, 20) tc.AddRegionStore(2, 20) @@ -235,7 +235,7 @@ func TestHotReadRegionScheduleWithRevertRegionsDimSecond(t *testing.T) { re.Empty(ops) re.False(hb.searchRevertRegions[readLeader]) - hb.conf.SetRankFormulaVersion("v2") + hb.conf.setRankFormulaVersion("v2") // searchRevertRegions becomes true after the first `Schedule`. 
ops, _ = hb.Schedule(tc, false) re.Empty(ops) @@ -267,11 +267,11 @@ func TestSkipUniformStore(t *testing.T) { defer cancel() hb, err := CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil, nil) re.NoError(err) - hb.(*hotScheduler).conf.SetSrcToleranceRatio(1) - hb.(*hotScheduler).conf.SetDstToleranceRatio(1) - hb.(*hotScheduler).conf.SetRankFormulaVersion("v2") + hb.(*hotScheduler).conf.setSrcToleranceRatio(1) + hb.(*hotScheduler).conf.setDstToleranceRatio(1) + hb.(*hotScheduler).conf.setRankFormulaVersion("v2") hb.(*hotScheduler).conf.ReadPriorities = []string{utils.BytePriority, utils.KeyPriority} - hb.(*hotScheduler).conf.SetHistorySampleDuration(0) + hb.(*hotScheduler).conf.setHistorySampleDuration(0) tc.AddRegionStore(1, 20) tc.AddRegionStore(2, 20) tc.AddRegionStore(3, 20) @@ -422,9 +422,9 @@ func checkHotReadRegionScheduleWithSmallHotRegion(re *require.Assertions, highLo sche, err := CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil, nil) re.NoError(err) hb := sche.(*hotScheduler) - hb.conf.SetSrcToleranceRatio(1) - hb.conf.SetDstToleranceRatio(1) - hb.conf.SetRankFormulaVersion("v2") + hb.conf.setSrcToleranceRatio(1) + hb.conf.setDstToleranceRatio(1) + hb.conf.setRankFormulaVersion("v2") hb.conf.ReadPriorities = []string{utils.QueryPriority, utils.BytePriority} tc.AddRegionStore(1, 40) tc.AddRegionStore(2, 10) diff --git a/pkg/schedule/schedulers/hot_region_test.go b/pkg/schedule/schedulers/hot_region_test.go index 3b563106dc0..fc7a6ae4417 100644 --- a/pkg/schedule/schedulers/hot_region_test.go +++ b/pkg/schedule/schedulers/hot_region_test.go @@ -84,36 +84,36 @@ func TestUpgrade(t *testing.T) { sche, err := CreateScheduler(HotRegionType, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(HotRegionType, nil)) re.NoError(err) hb := sche.(*hotScheduler) - re.Equal([]string{utils.QueryPriority, utils.BytePriority}, hb.conf.GetReadPriorities()) - re.Equal([]string{utils.QueryPriority, utils.BytePriority}, hb.conf.GetWriteLeaderPriorities()) - re.Equal([]string{utils.BytePriority, utils.KeyPriority}, hb.conf.GetWritePeerPriorities()) - re.Equal("v2", hb.conf.GetRankFormulaVersion()) + re.Equal([]string{utils.QueryPriority, utils.BytePriority}, hb.conf.getReadPriorities()) + re.Equal([]string{utils.QueryPriority, utils.BytePriority}, hb.conf.getWriteLeaderPriorities()) + re.Equal([]string{utils.BytePriority, utils.KeyPriority}, hb.conf.getWritePeerPriorities()) + re.Equal("v2", hb.conf.getRankFormulaVersion()) // upgrade from json(null) sche, err = CreateScheduler(HotRegionType, oc, storage.NewStorageWithMemoryBackend(), ConfigJSONDecoder([]byte("null"))) re.NoError(err) hb = sche.(*hotScheduler) - re.Equal([]string{utils.QueryPriority, utils.BytePriority}, hb.conf.GetReadPriorities()) - re.Equal([]string{utils.QueryPriority, utils.BytePriority}, hb.conf.GetWriteLeaderPriorities()) - re.Equal([]string{utils.BytePriority, utils.KeyPriority}, hb.conf.GetWritePeerPriorities()) - re.Equal("v2", hb.conf.GetRankFormulaVersion()) + re.Equal([]string{utils.QueryPriority, utils.BytePriority}, hb.conf.getReadPriorities()) + re.Equal([]string{utils.QueryPriority, utils.BytePriority}, hb.conf.getWriteLeaderPriorities()) + re.Equal([]string{utils.BytePriority, utils.KeyPriority}, hb.conf.getWritePeerPriorities()) + re.Equal("v2", hb.conf.getRankFormulaVersion()) // upgrade from < 5.2 config51 := 
`{"min-hot-byte-rate":100,"min-hot-key-rate":10,"min-hot-query-rate":10,"max-zombie-rounds":5,"max-peer-number":1000,"byte-rate-rank-step-ratio":0.05,"key-rate-rank-step-ratio":0.05,"query-rate-rank-step-ratio":0.05,"count-rank-step-ratio":0.01,"great-dec-ratio":0.95,"minor-dec-ratio":0.99,"src-tolerance-ratio":1.05,"dst-tolerance-ratio":1.05,"strict-picking-store":"true","enable-for-tiflash":"true"}` sche, err = CreateScheduler(HotRegionType, oc, storage.NewStorageWithMemoryBackend(), ConfigJSONDecoder([]byte(config51))) re.NoError(err) hb = sche.(*hotScheduler) - re.Equal([]string{utils.BytePriority, utils.KeyPriority}, hb.conf.GetReadPriorities()) - re.Equal([]string{utils.KeyPriority, utils.BytePriority}, hb.conf.GetWriteLeaderPriorities()) - re.Equal([]string{utils.BytePriority, utils.KeyPriority}, hb.conf.GetWritePeerPriorities()) - re.Equal("v1", hb.conf.GetRankFormulaVersion()) + re.Equal([]string{utils.BytePriority, utils.KeyPriority}, hb.conf.getReadPriorities()) + re.Equal([]string{utils.KeyPriority, utils.BytePriority}, hb.conf.getWriteLeaderPriorities()) + re.Equal([]string{utils.BytePriority, utils.KeyPriority}, hb.conf.getWritePeerPriorities()) + re.Equal("v1", hb.conf.getRankFormulaVersion()) // upgrade from < 6.4 config54 := `{"min-hot-byte-rate":100,"min-hot-key-rate":10,"min-hot-query-rate":10,"max-zombie-rounds":5,"max-peer-number":1000,"byte-rate-rank-step-ratio":0.05,"key-rate-rank-step-ratio":0.05,"query-rate-rank-step-ratio":0.05,"count-rank-step-ratio":0.01,"great-dec-ratio":0.95,"minor-dec-ratio":0.99,"src-tolerance-ratio":1.05,"dst-tolerance-ratio":1.05,"read-priorities":["query","byte"],"write-leader-priorities":["query","byte"],"write-peer-priorities":["byte","key"],"strict-picking-store":"true","enable-for-tiflash":"true","forbid-rw-type":"none"}` sche, err = CreateScheduler(HotRegionType, oc, storage.NewStorageWithMemoryBackend(), ConfigJSONDecoder([]byte(config54))) re.NoError(err) hb = sche.(*hotScheduler) - re.Equal([]string{utils.QueryPriority, utils.BytePriority}, hb.conf.GetReadPriorities()) - re.Equal([]string{utils.QueryPriority, utils.BytePriority}, hb.conf.GetWriteLeaderPriorities()) - re.Equal([]string{utils.BytePriority, utils.KeyPriority}, hb.conf.GetWritePeerPriorities()) - re.Equal("v1", hb.conf.GetRankFormulaVersion()) + re.Equal([]string{utils.QueryPriority, utils.BytePriority}, hb.conf.getReadPriorities()) + re.Equal([]string{utils.QueryPriority, utils.BytePriority}, hb.conf.getWriteLeaderPriorities()) + re.Equal([]string{utils.BytePriority, utils.KeyPriority}, hb.conf.getWritePeerPriorities()) + re.Equal("v1", hb.conf.getRankFormulaVersion()) } func TestGCPendingOpInfos(t *testing.T) { @@ -151,7 +151,7 @@ func checkGCPendingOpInfos(re *require.Assertions, enablePlacementRules bool) { op.Start() op.SetStatusReachTime(operator.CREATED, time.Now().Add(-5*utils.StoreHeartBeatReportInterval*time.Second)) op.SetStatusReachTime(operator.STARTED, time.Now().Add((-5*utils.StoreHeartBeatReportInterval+1)*time.Second)) - return newPendingInfluence(op, []uint64{2}, 4, statistics.Influence{}, hb.conf.GetStoreStatZombieDuration()) + return newPendingInfluence(op, []uint64{2}, 4, statistics.Influence{}, hb.conf.getStoreStatZombieDuration()) } justDoneOpInfluence := func(region *core.RegionInfo, ty opType) *pendingInfluence { infl := notDoneOpInfluence(region, ty) @@ -400,7 +400,7 @@ func checkHotWriteRegionPlacement(re *require.Assertions, enablePlacementRules b hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), 
nil) re.NoError(err) hb.(*hotScheduler).types = []resourceType{writePeer} - hb.(*hotScheduler).conf.SetHistorySampleDuration(0) + hb.(*hotScheduler).conf.setHistorySampleDuration(0) tc.AddLabelsStore(1, 2, map[string]string{"zone": "z1", "host": "h1"}) tc.AddLabelsStore(2, 2, map[string]string{"zone": "z1", "host": "h2"}) @@ -456,7 +456,7 @@ func checkHotWriteRegionScheduleByteRateOnly(re *require.Assertions, enablePlace hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) - hb.(*hotScheduler).conf.SetHistorySampleDuration(0) + hb.(*hotScheduler).conf.setHistorySampleDuration(0) hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{utils.BytePriority, utils.KeyPriority} // Add stores 1, 2, 3, 4, 5, 6 with region counts 3, 2, 2, 2, 0, 0. @@ -652,7 +652,7 @@ func TestHotWriteRegionScheduleByteRateOnlyWithTiFlash(t *testing.T) { sche, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb := sche.(*hotScheduler) - hb.conf.SetHistorySampleDuration(0) + hb.conf.setHistorySampleDuration(0) // Add TiKV stores 1, 2, 3, 4, 5, 6, 7 (Down) with region counts 3, 3, 2, 2, 0, 0, 0. // Add TiFlash stores 8, 9, 10 with region counts 2, 1, 1. @@ -734,7 +734,7 @@ func TestHotWriteRegionScheduleByteRateOnlyWithTiFlash(t *testing.T) { } pdServerCfg.FlowRoundByDigit = 3 // Disable for TiFlash - hb.conf.SetEnableForTiFlash(false) + hb.conf.setEnableForTiFlash(false) for i := 0; i < 20; i++ { clearPendingInfluence(hb) ops, _ := hb.Schedule(tc, false) @@ -848,10 +848,10 @@ func TestHotWriteRegionScheduleWithQuery(t *testing.T) { hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) - hb.(*hotScheduler).conf.SetSrcToleranceRatio(1) - hb.(*hotScheduler).conf.SetDstToleranceRatio(1) + hb.(*hotScheduler).conf.setSrcToleranceRatio(1) + hb.(*hotScheduler).conf.setDstToleranceRatio(1) hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{utils.QueryPriority, utils.BytePriority} - hb.(*hotScheduler).conf.SetHistorySampleDuration(0) + hb.(*hotScheduler).conf.setHistorySampleDuration(0) tc.AddRegionStore(1, 20) tc.AddRegionStore(2, 20) @@ -884,11 +884,11 @@ func TestHotWriteRegionScheduleWithKeyRate(t *testing.T) { hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb.(*hotScheduler).types = []resourceType{writePeer} - hb.(*hotScheduler).conf.SetDstToleranceRatio(1) - hb.(*hotScheduler).conf.SetSrcToleranceRatio(1) + hb.(*hotScheduler).conf.setDstToleranceRatio(1) + hb.(*hotScheduler).conf.setSrcToleranceRatio(1) hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{utils.KeyPriority, utils.BytePriority} hb.(*hotScheduler).conf.RankFormulaVersion = "v1" - hb.(*hotScheduler).conf.SetHistorySampleDuration(0) + hb.(*hotScheduler).conf.setHistorySampleDuration(0) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.AddRegionStore(1, 20) @@ -941,8 +941,8 @@ func TestHotWriteRegionScheduleUnhealthyStore(t *testing.T) { defer cancel() hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) - hb.(*hotScheduler).conf.SetDstToleranceRatio(1) - hb.(*hotScheduler).conf.SetSrcToleranceRatio(1) + hb.(*hotScheduler).conf.setDstToleranceRatio(1) + hb.(*hotScheduler).conf.setSrcToleranceRatio(1) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.AddRegionStore(1, 20) @@ -986,8 +986,8 
@@ func TestHotWriteRegionScheduleCheckHot(t *testing.T) { defer cancel() hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) - hb.(*hotScheduler).conf.SetDstToleranceRatio(1) - hb.(*hotScheduler).conf.SetSrcToleranceRatio(1) + hb.(*hotScheduler).conf.setDstToleranceRatio(1) + hb.(*hotScheduler).conf.setSrcToleranceRatio(1) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.AddRegionStore(1, 20) @@ -1019,7 +1019,7 @@ func TestHotWriteRegionScheduleWithLeader(t *testing.T) { hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) hb.(*hotScheduler).types = []resourceType{writeLeader} hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{utils.KeyPriority, utils.BytePriority} - hb.(*hotScheduler).conf.SetHistorySampleDuration(0) + hb.(*hotScheduler).conf.setHistorySampleDuration(0) re.NoError(err) tc.AddRegionStore(1, 20) @@ -1085,7 +1085,7 @@ func checkHotWriteRegionScheduleWithPendingInfluence(re *require.Assertions, dim hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb.(*hotScheduler).conf.RankFormulaVersion = "v1" - hb.(*hotScheduler).conf.SetHistorySampleDuration(0) + hb.(*hotScheduler).conf.setHistorySampleDuration(0) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.AddRegionStore(1, 20) @@ -1169,7 +1169,7 @@ func TestHotWriteRegionScheduleWithRuleEnabled(t *testing.T) { hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{utils.KeyPriority, utils.BytePriority} - hb.(*hotScheduler).conf.SetHistorySampleDuration(0) + hb.(*hotScheduler).conf.setHistorySampleDuration(0) key, err := hex.DecodeString("") re.NoError(err) @@ -1250,7 +1250,7 @@ func TestHotReadRegionScheduleByteRateOnly(t *testing.T) { re.NoError(err) hb := scheduler.(*hotScheduler) hb.conf.ReadPriorities = []string{utils.BytePriority, utils.KeyPriority} - hb.conf.SetHistorySampleDuration(0) + hb.conf.setHistorySampleDuration(0) // Add stores 1, 2, 3, 4, 5 with region counts 3, 2, 2, 2, 0. 
tc.AddRegionStore(1, 3) @@ -1370,10 +1370,10 @@ func TestHotReadRegionScheduleWithQuery(t *testing.T) { defer cancel() hb, err := CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) - hb.(*hotScheduler).conf.SetSrcToleranceRatio(1) - hb.(*hotScheduler).conf.SetDstToleranceRatio(1) + hb.(*hotScheduler).conf.setSrcToleranceRatio(1) + hb.(*hotScheduler).conf.setDstToleranceRatio(1) hb.(*hotScheduler).conf.RankFormulaVersion = "v1" - hb.(*hotScheduler).conf.SetHistorySampleDuration(0) + hb.(*hotScheduler).conf.setHistorySampleDuration(0) tc.AddRegionStore(1, 20) tc.AddRegionStore(2, 20) @@ -1404,10 +1404,10 @@ func TestHotReadRegionScheduleWithKeyRate(t *testing.T) { hb, err := CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb.(*hotScheduler).conf.RankFormulaVersion = "v1" - hb.(*hotScheduler).conf.SetSrcToleranceRatio(1) - hb.(*hotScheduler).conf.SetDstToleranceRatio(1) + hb.(*hotScheduler).conf.setSrcToleranceRatio(1) + hb.(*hotScheduler).conf.setDstToleranceRatio(1) hb.(*hotScheduler).conf.ReadPriorities = []string{utils.BytePriority, utils.KeyPriority} - hb.(*hotScheduler).conf.SetHistorySampleDuration(0) + hb.(*hotScheduler).conf.setHistorySampleDuration(0) tc.AddRegionStore(1, 20) tc.AddRegionStore(2, 20) @@ -1469,7 +1469,7 @@ func checkHotReadRegionScheduleWithPendingInfluence(re *require.Assertions, dim hb.(*hotScheduler).conf.MinorDecRatio = 1 hb.(*hotScheduler).conf.DstToleranceRatio = 1 hb.(*hotScheduler).conf.ReadPriorities = []string{utils.BytePriority, utils.KeyPriority} - hb.(*hotScheduler).conf.SetHistorySampleDuration(0) + hb.(*hotScheduler).conf.setHistorySampleDuration(0) pendingAmpFactor = 0.0 tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) @@ -1575,9 +1575,9 @@ func TestHotReadWithEvictLeaderScheduler(t *testing.T) { defer cancel() hb, err := CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) - hb.(*hotScheduler).conf.SetSrcToleranceRatio(1) - hb.(*hotScheduler).conf.SetDstToleranceRatio(1) - hb.(*hotScheduler).conf.SetStrictPickingStore(false) + hb.(*hotScheduler).conf.setSrcToleranceRatio(1) + hb.(*hotScheduler).conf.setDstToleranceRatio(1) + hb.(*hotScheduler).conf.setStrictPickingStore(false) hb.(*hotScheduler).conf.ReadPriorities = []string{utils.BytePriority, utils.KeyPriority} tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.AddRegionStore(1, 20) @@ -2042,9 +2042,9 @@ func TestInfluenceByRWType(t *testing.T) { hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb.(*hotScheduler).types = []resourceType{writePeer} - hb.(*hotScheduler).conf.SetDstToleranceRatio(1) - hb.(*hotScheduler).conf.SetSrcToleranceRatio(1) - hb.(*hotScheduler).conf.SetHistorySampleDuration(0) + hb.(*hotScheduler).conf.setDstToleranceRatio(1) + hb.(*hotScheduler).conf.setSrcToleranceRatio(1) + hb.(*hotScheduler).conf.setHistorySampleDuration(0) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.AddRegionStore(1, 20) tc.AddRegionStore(2, 20) @@ -2162,9 +2162,9 @@ func TestHotScheduleWithPriority(t *testing.T) { hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb.(*hotScheduler).types = []resourceType{writePeer} - hb.(*hotScheduler).conf.SetDstToleranceRatio(1.05) - hb.(*hotScheduler).conf.SetSrcToleranceRatio(1.05) - 
hb.(*hotScheduler).conf.SetHistorySampleDuration(0) + hb.(*hotScheduler).conf.setDstToleranceRatio(1.05) + hb.(*hotScheduler).conf.setSrcToleranceRatio(1.05) + hb.(*hotScheduler).conf.setHistorySampleDuration(0) // skip stddev check stddevThreshold = -1.0 @@ -2207,7 +2207,7 @@ func TestHotScheduleWithPriority(t *testing.T) { addRegionInfo(tc, utils.Read, []testRegionInfo{ {1, []uint64{1, 2, 3}, 2 * units.MiB, 2 * units.MiB, 0}, }) - hb.(*hotScheduler).conf.SetHistorySampleDuration(0) + hb.(*hotScheduler).conf.setHistorySampleDuration(0) hb.(*hotScheduler).conf.ReadPriorities = []string{utils.BytePriority, utils.KeyPriority} ops, _ = hb.Schedule(tc, false) re.Len(ops, 1) @@ -2222,7 +2222,7 @@ func TestHotScheduleWithPriority(t *testing.T) { hb.(*hotScheduler).types = []resourceType{writePeer} hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{utils.KeyPriority, utils.BytePriority} hb.(*hotScheduler).conf.RankFormulaVersion = "v1" - hb.(*hotScheduler).conf.SetHistorySampleDuration(0) + hb.(*hotScheduler).conf.setHistorySampleDuration(0) re.NoError(err) // assert loose store picking @@ -2264,8 +2264,8 @@ func TestHotScheduleWithStddev(t *testing.T) { hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb.(*hotScheduler).types = []resourceType{writePeer} - hb.(*hotScheduler).conf.SetDstToleranceRatio(1.0) - hb.(*hotScheduler).conf.SetSrcToleranceRatio(1.0) + hb.(*hotScheduler).conf.setDstToleranceRatio(1.0) + hb.(*hotScheduler).conf.setSrcToleranceRatio(1.0) hb.(*hotScheduler).conf.RankFormulaVersion = "v1" tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.AddRegionStore(1, 20) @@ -2274,7 +2274,7 @@ func TestHotScheduleWithStddev(t *testing.T) { tc.AddRegionStore(4, 20) tc.AddRegionStore(5, 20) hb.(*hotScheduler).conf.StrictPickingStore = false - hb.(*hotScheduler).conf.SetHistorySampleDuration(0) + hb.(*hotScheduler).conf.setHistorySampleDuration(0) // skip uniform cluster tc.UpdateStorageWrittenStats(1, 5*units.MiB*utils.StoreHeartBeatReportInterval, 5*units.MiB*utils.StoreHeartBeatReportInterval) @@ -2323,9 +2323,9 @@ func TestHotWriteLeaderScheduleWithPriority(t *testing.T) { hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb.(*hotScheduler).types = []resourceType{writeLeader} - hb.(*hotScheduler).conf.SetDstToleranceRatio(1) - hb.(*hotScheduler).conf.SetSrcToleranceRatio(1) - hb.(*hotScheduler).conf.SetHistorySampleDuration(0) + hb.(*hotScheduler).conf.setDstToleranceRatio(1) + hb.(*hotScheduler).conf.setSrcToleranceRatio(1) + hb.(*hotScheduler).conf.setHistorySampleDuration(0) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.AddRegionStore(1, 20) @@ -2533,17 +2533,17 @@ func TestConfigValidation(t *testing.T) { // rank-formula-version // default hc = initHotRegionScheduleConfig() - re.Equal("v2", hc.GetRankFormulaVersion()) + re.Equal("v2", hc.getRankFormulaVersion()) // v1 hc.RankFormulaVersion = "v1" err = hc.validateLocked() re.NoError(err) - re.Equal("v1", hc.GetRankFormulaVersion()) + re.Equal("v1", hc.getRankFormulaVersion()) // v2 hc.RankFormulaVersion = "v2" err = hc.validateLocked() re.NoError(err) - re.Equal("v2", hc.GetRankFormulaVersion()) + re.Equal("v2", hc.getRankFormulaVersion()) // illegal hc.RankFormulaVersion = "v0" err = hc.validateLocked() @@ -2552,20 +2552,20 @@ func TestConfigValidation(t *testing.T) { // forbid-rw-type // default hc = initHotRegionScheduleConfig() - 
re.False(hc.IsForbidRWType(utils.Read)) - re.False(hc.IsForbidRWType(utils.Write)) + re.False(hc.isForbidRWType(utils.Read)) + re.False(hc.isForbidRWType(utils.Write)) // read hc.ForbidRWType = "read" err = hc.validateLocked() re.NoError(err) - re.True(hc.IsForbidRWType(utils.Read)) - re.False(hc.IsForbidRWType(utils.Write)) + re.True(hc.isForbidRWType(utils.Read)) + re.False(hc.isForbidRWType(utils.Write)) // write hc.ForbidRWType = "write" err = hc.validateLocked() re.NoError(err) - re.False(hc.IsForbidRWType(utils.Read)) - re.True(hc.IsForbidRWType(utils.Write)) + re.False(hc.isForbidRWType(utils.Read)) + re.True(hc.isForbidRWType(utils.Write)) // illegal hc.ForbidRWType = "test" err = hc.validateLocked() diff --git a/pkg/schedule/schedulers/init.go b/pkg/schedule/schedulers/init.go index 988bbc30475..5990aa2cda3 100644 --- a/pkg/schedule/schedulers/init.go +++ b/pkg/schedule/schedulers/init.go @@ -225,7 +225,7 @@ func schedulersRegister() { // For clusters with the initial version >= v5.2, it will be overwritten by the default config. conf.applyPrioritiesConfig(compatiblePrioritiesConfig) // For clusters with the initial version >= v6.4, it will be overwritten by the default config. - conf.SetRankFormulaVersion("") + conf.setRankFormulaVersion("") if err := decoder(conf); err != nil { return nil, err } diff --git a/pkg/schedule/schedulers/label.go b/pkg/schedule/schedulers/label.go index 814f525a76c..f57d82b3149 100644 --- a/pkg/schedule/schedulers/label.go +++ b/pkg/schedule/schedulers/label.go @@ -55,10 +55,12 @@ func newLabelScheduler(opController *operator.Controller, conf *labelSchedulerCo } } +// EncodeConfig implements the Scheduler interface. func (s *labelScheduler) EncodeConfig() ([]byte, error) { return EncodeConfig(s.conf) } +// IsScheduleAllowed implements the Scheduler interface. func (s *labelScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetSchedulerConfig().GetLeaderScheduleLimit() if !allowed { @@ -67,6 +69,7 @@ func (s *labelScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { return allowed } +// Schedule implements the Scheduler interface. func (s *labelScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { labelCounter.Inc() stores := cluster.GetStores() diff --git a/pkg/schedule/schedulers/random_merge.go b/pkg/schedule/schedulers/random_merge.go index 2d425746cea..751ab1eaa9d 100644 --- a/pkg/schedule/schedulers/random_merge.go +++ b/pkg/schedule/schedulers/random_merge.go @@ -56,10 +56,12 @@ func newRandomMergeScheduler(opController *operator.Controller, conf *randomMerg } } +// EncodeConfig implements the Scheduler interface. func (s *randomMergeScheduler) EncodeConfig() ([]byte, error) { return EncodeConfig(s.conf) } +// IsScheduleAllowed implements the Scheduler interface. func (s *randomMergeScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { allowed := s.OpController.OperatorCount(operator.OpMerge) < cluster.GetSchedulerConfig().GetMergeScheduleLimit() if !allowed { @@ -68,6 +70,7 @@ func (s *randomMergeScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) return allowed } +// Schedule implements the Scheduler interface. 
diff --git a/pkg/schedule/schedulers/random_merge.go b/pkg/schedule/schedulers/random_merge.go
index 2d425746cea..751ab1eaa9d 100644
--- a/pkg/schedule/schedulers/random_merge.go
+++ b/pkg/schedule/schedulers/random_merge.go
@@ -56,10 +56,12 @@ func newRandomMergeScheduler(opController *operator.Controller, conf *randomMerg
 	}
 }
 
+// EncodeConfig implements the Scheduler interface.
 func (s *randomMergeScheduler) EncodeConfig() ([]byte, error) {
 	return EncodeConfig(s.conf)
 }
 
+// IsScheduleAllowed implements the Scheduler interface.
 func (s *randomMergeScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool {
 	allowed := s.OpController.OperatorCount(operator.OpMerge) < cluster.GetSchedulerConfig().GetMergeScheduleLimit()
 	if !allowed {
@@ -68,6 +70,7 @@ func (s *randomMergeScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster)
 	return allowed
 }
 
+// Schedule implements the Scheduler interface.
 func (s *randomMergeScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) {
 	randomMergeCounter.Inc()
diff --git a/pkg/schedule/schedulers/scatter_range.go b/pkg/schedule/schedulers/scatter_range.go
index 8874eb19cff..76a47dd973b 100644
--- a/pkg/schedule/schedulers/scatter_range.go
+++ b/pkg/schedule/schedulers/scatter_range.go
@@ -47,7 +47,7 @@ type scatterRangeSchedulerConfig struct {
 	EndKey   string `json:"end-key"`
 }
 
-func (conf *scatterRangeSchedulerConfig) BuildWithArgs(args []string) error {
+func (conf *scatterRangeSchedulerConfig) buildWithArgs(args []string) error {
 	if len(args) != 3 {
 		return errs.ErrSchedulerConfig.FastGenByArgs("ranges and name")
 	}
@@ -60,7 +60,7 @@ func (conf *scatterRangeSchedulerConfig) BuildWithArgs(args []string) error {
 	return nil
 }
 
-func (conf *scatterRangeSchedulerConfig) Clone() *scatterRangeSchedulerConfig {
+func (conf *scatterRangeSchedulerConfig) clone() *scatterRangeSchedulerConfig {
 	conf.RLock()
 	defer conf.RUnlock()
 	return &scatterRangeSchedulerConfig{
@@ -70,7 +70,7 @@ func (conf *scatterRangeSchedulerConfig) Clone() *scatterRangeSchedulerConfig {
 	}
 }
 
-func (conf *scatterRangeSchedulerConfig) Persist() error {
+func (conf *scatterRangeSchedulerConfig) persist() error {
 	name := conf.getSchedulerName()
 	conf.RLock()
 	defer conf.RUnlock()
@@ -81,19 +81,19 @@ func (conf *scatterRangeSchedulerConfig) Persist() error {
 	return conf.storage.SaveSchedulerConfig(name, data)
 }
 
-func (conf *scatterRangeSchedulerConfig) GetRangeName() string {
+func (conf *scatterRangeSchedulerConfig) getRangeName() string {
 	conf.RLock()
 	defer conf.RUnlock()
 	return conf.RangeName
 }
 
-func (conf *scatterRangeSchedulerConfig) GetStartKey() []byte {
+func (conf *scatterRangeSchedulerConfig) getStartKey() []byte {
 	conf.RLock()
 	defer conf.RUnlock()
 	return []byte(conf.StartKey)
 }
 
-func (conf *scatterRangeSchedulerConfig) GetEndKey() []byte {
+func (conf *scatterRangeSchedulerConfig) getEndKey() []byte {
 	conf.RLock()
 	defer conf.RUnlock()
 	return []byte(conf.EndKey)
@@ -139,16 +139,19 @@ func newScatterRangeScheduler(opController *operator.Controller, config *scatter
 	return scheduler
 }
 
+// ServeHTTP implements the http.Handler interface.
 func (l *scatterRangeScheduler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	l.handler.ServeHTTP(w, r)
 }
 
+// EncodeConfig implements the Scheduler interface.
 func (l *scatterRangeScheduler) EncodeConfig() ([]byte, error) {
 	l.config.RLock()
 	defer l.config.RUnlock()
 	return EncodeConfig(l.config)
 }
 
+// ReloadConfig implements the Scheduler interface.
 func (l *scatterRangeScheduler) ReloadConfig() error {
 	l.config.Lock()
 	defer l.config.Unlock()
@@ -169,6 +172,7 @@ func (l *scatterRangeScheduler) ReloadConfig() error {
 	return nil
 }
 
+// IsScheduleAllowed implements the Scheduler interface.
 func (l *scatterRangeScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool {
 	return l.allowBalanceLeader(cluster) || l.allowBalanceRegion(cluster)
 }
@@ -189,15 +193,16 @@ func (l *scatterRangeScheduler) allowBalanceRegion(cluster sche.SchedulerCluster
 	return allowed
 }
 
+// Schedule implements the Scheduler interface.
 func (l *scatterRangeScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) {
 	scatterRangeCounter.Inc()
 	// isolate a new cluster according to the key range
-	c := genRangeCluster(cluster, l.config.GetStartKey(), l.config.GetEndKey())
+	c := genRangeCluster(cluster, l.config.getStartKey(), l.config.getEndKey())
 	c.SetTolerantSizeRatio(2)
 	if l.allowBalanceLeader(cluster) {
 		ops, _ := l.balanceLeader.Schedule(c, false)
 		if len(ops) > 0 {
-			ops[0].SetDesc(fmt.Sprintf("scatter-range-leader-%s", l.config.GetRangeName()))
+			ops[0].SetDesc(fmt.Sprintf("scatter-range-leader-%s", l.config.getRangeName()))
 			ops[0].AttachKind(operator.OpRange)
 			ops[0].Counters = append(ops[0].Counters,
 				scatterRangeNewOperatorCounter,
@@ -209,7 +214,7 @@ func (l *scatterRangeScheduler) Schedule(cluster sche.SchedulerCluster, _ bool)
 	if l.allowBalanceRegion(cluster) {
 		ops, _ := l.balanceRegion.Schedule(c, false)
 		if len(ops) > 0 {
-			ops[0].SetDesc(fmt.Sprintf("scatter-range-region-%s", l.config.GetRangeName()))
+			ops[0].SetDesc(fmt.Sprintf("scatter-range-region-%s", l.config.getRangeName()))
 			ops[0].AttachKind(operator.OpRange)
 			ops[0].Counters = append(ops[0].Counters,
 				scatterRangeNewOperatorCounter,
@@ -227,7 +232,7 @@ type scatterRangeHandler struct {
 	config *scatterRangeSchedulerConfig
 }
 
-func (handler *scatterRangeHandler) UpdateConfig(w http.ResponseWriter, r *http.Request) {
+func (handler *scatterRangeHandler) updateConfig(w http.ResponseWriter, r *http.Request) {
 	var input map[string]any
 	if err := apiutil.ReadJSONRespondError(handler.rd, w, r.Body, &input); err != nil {
 		return
@@ -235,42 +240,42 @@ func (handler *scatterRangeHandler) UpdateConfig(w http.ResponseWriter, r *http.
 	var args []string
 	name, ok := input["range-name"].(string)
 	if ok {
-		if name != handler.config.GetRangeName() {
+		if name != handler.config.getRangeName() {
 			handler.rd.JSON(w, http.StatusInternalServerError, errors.New("Cannot change the range name, please delete this schedule").Error())
 			return
 		}
 		args = append(args, name)
 	} else {
-		args = append(args, handler.config.GetRangeName())
+		args = append(args, handler.config.getRangeName())
 	}
 
 	startKey, ok := input["start-key"].(string)
 	if ok {
 		args = append(args, startKey)
 	} else {
-		args = append(args, string(handler.config.GetStartKey()))
+		args = append(args, string(handler.config.getStartKey()))
 	}
 
 	endKey, ok := input["end-key"].(string)
 	if ok {
 		args = append(args, endKey)
 	} else {
-		args = append(args, string(handler.config.GetEndKey()))
+		args = append(args, string(handler.config.getEndKey()))
 	}
-	err := handler.config.BuildWithArgs(args)
+	err := handler.config.buildWithArgs(args)
 	if err != nil {
 		handler.rd.JSON(w, http.StatusBadRequest, err.Error())
 		return
 	}
-	err = handler.config.Persist()
+	err = handler.config.persist()
 	if err != nil {
 		handler.rd.JSON(w, http.StatusInternalServerError, err.Error())
 	}
 	handler.rd.JSON(w, http.StatusOK, nil)
 }
 
-func (handler *scatterRangeHandler) ListConfig(w http.ResponseWriter, _ *http.Request) {
-	conf := handler.config.Clone()
+func (handler *scatterRangeHandler) listConfig(w http.ResponseWriter, _ *http.Request) {
+	conf := handler.config.clone()
 	handler.rd.JSON(w, http.StatusOK, conf)
 }
 
@@ -280,7 +285,7 @@ func newScatterRangeHandler(config *scatterRangeSchedulerConfig) http.Handler {
 		rd: render.New(render.Options{IndentJSON: true}),
 	}
 	router := mux.NewRouter()
-	router.HandleFunc("/config", h.UpdateConfig).Methods(http.MethodPost)
-	router.HandleFunc("/list", h.ListConfig).Methods(http.MethodGet)
+	router.HandleFunc("/config", h.updateConfig).Methods(http.MethodPost)
+	router.HandleFunc("/list", h.listConfig).Methods(http.MethodGet)
 	return router
 }
diff --git a/pkg/schedule/schedulers/shuffle_hot_region.go b/pkg/schedule/schedulers/shuffle_hot_region.go
index 32384a19df1..5bb5d269b63 100644
--- a/pkg/schedule/schedulers/shuffle_hot_region.go
+++ b/pkg/schedule/schedulers/shuffle_hot_region.go
@@ -94,14 +94,17 @@ func newShuffleHotRegionScheduler(opController *operator.Controller, conf *shuff
 	return ret
 }
 
+// ServeHTTP implements the http.Handler interface.
 func (s *shuffleHotRegionScheduler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	s.handler.ServeHTTP(w, r)
 }
 
+// EncodeConfig implements the Scheduler interface.
 func (s *shuffleHotRegionScheduler) EncodeConfig() ([]byte, error) {
 	return EncodeConfig(s.conf)
 }
 
+// ReloadConfig implements the Scheduler interface.
 func (s *shuffleHotRegionScheduler) ReloadConfig() error {
 	s.conf.Lock()
 	defer s.conf.Unlock()
@@ -120,6 +123,7 @@ func (s *shuffleHotRegionScheduler) ReloadConfig() error {
 	return nil
 }
 
+// IsScheduleAllowed implements the Scheduler interface.
 func (s *shuffleHotRegionScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool {
 	hotRegionAllowed := s.OpController.OperatorCount(operator.OpHotRegion) < s.conf.getLimit()
 	conf := cluster.GetSchedulerConfig()
@@ -137,6 +141,7 @@ func (s *shuffleHotRegionScheduler) IsScheduleAllowed(cluster sche.SchedulerClus
 	return hotRegionAllowed && regionAllowed && leaderAllowed
 }
 
+// Schedule implements the Scheduler interface.
 func (s *shuffleHotRegionScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) {
 	shuffleHotRegionCounter.Inc()
 	typ := s.randomType()
@@ -211,7 +216,7 @@ type shuffleHotRegionHandler struct {
 	config *shuffleHotRegionSchedulerConfig
 }
 
-func (handler *shuffleHotRegionHandler) UpdateConfig(w http.ResponseWriter, r *http.Request) {
+func (handler *shuffleHotRegionHandler) updateConfig(w http.ResponseWriter, r *http.Request) {
 	var input map[string]any
 	if err := apiutil.ReadJSONRespondError(handler.rd, w, r.Body, &input); err != nil {
 		return
@@ -234,7 +239,7 @@ func (handler *shuffleHotRegionHandler) UpdateConfig(w http.ResponseWriter, r *h
 	handler.rd.JSON(w, http.StatusOK, nil)
 }
 
-func (handler *shuffleHotRegionHandler) ListConfig(w http.ResponseWriter, _ *http.Request) {
+func (handler *shuffleHotRegionHandler) listConfig(w http.ResponseWriter, _ *http.Request) {
 	conf := handler.config.Clone()
 	handler.rd.JSON(w, http.StatusOK, conf)
 }
@@ -245,7 +250,7 @@ func newShuffleHotRegionHandler(config *shuffleHotRegionSchedulerConfig) http.Ha
 		rd: render.New(render.Options{IndentJSON: true}),
 	}
 	router := mux.NewRouter()
-	router.HandleFunc("/config", h.UpdateConfig).Methods(http.MethodPost)
-	router.HandleFunc("/list", h.ListConfig).Methods(http.MethodGet)
+	router.HandleFunc("/config", h.updateConfig).Methods(http.MethodPost)
+	router.HandleFunc("/list", h.listConfig).Methods(http.MethodGet)
 	return router
 }
diff --git a/pkg/schedule/schedulers/shuffle_leader.go b/pkg/schedule/schedulers/shuffle_leader.go
index ce2c8cd31d5..46e04efb23d 100644
--- a/pkg/schedule/schedulers/shuffle_leader.go
+++ b/pkg/schedule/schedulers/shuffle_leader.go
@@ -60,10 +60,12 @@ func newShuffleLeaderScheduler(opController *operator.Controller, conf *shuffleL
 	}
 }
 
+// EncodeConfig implements the Scheduler interface.
 func (s *shuffleLeaderScheduler) EncodeConfig() ([]byte, error) {
 	return EncodeConfig(s.conf)
 }
 
+// IsScheduleAllowed implements the Scheduler interface.
 func (s *shuffleLeaderScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool {
 	allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetSchedulerConfig().GetLeaderScheduleLimit()
 	if !allowed {
@@ -72,6 +74,7 @@ func (s *shuffleLeaderScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster
 	return allowed
 }
 
+// Schedule implements the Scheduler interface.
 func (s *shuffleLeaderScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) {
 	// We shuffle leaders between stores by:
 	// 1. random select a valid store.
diff --git a/pkg/schedule/schedulers/shuffle_region.go b/pkg/schedule/schedulers/shuffle_region.go
index b59e97b2a11..ca759042e8f 100644
--- a/pkg/schedule/schedulers/shuffle_region.go
+++ b/pkg/schedule/schedulers/shuffle_region.go
@@ -55,14 +55,17 @@ func newShuffleRegionScheduler(opController *operator.Controller, conf *shuffleR
 	}
 }
 
+// ServeHTTP implements the http.Handler interface.
 func (s *shuffleRegionScheduler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	s.conf.ServeHTTP(w, r)
 }
 
+// EncodeConfig implements the Scheduler interface.
 func (s *shuffleRegionScheduler) EncodeConfig() ([]byte, error) {
-	return s.conf.EncodeConfig()
+	return s.conf.encodeConfig()
 }
 
+// ReloadConfig implements the Scheduler interface.
 func (s *shuffleRegionScheduler) ReloadConfig() error {
 	s.conf.Lock()
 	defer s.conf.Unlock()
@@ -82,6 +85,7 @@ func (s *shuffleRegionScheduler) ReloadConfig() error {
 	return nil
 }
 
+// IsScheduleAllowed implements the Scheduler interface.
 func (s *shuffleRegionScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool {
 	allowed := s.OpController.OperatorCount(operator.OpRegion) < cluster.GetSchedulerConfig().GetRegionScheduleLimit()
 	if !allowed {
@@ -90,6 +94,7 @@ func (s *shuffleRegionScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster
 	return allowed
 }
 
+// Schedule implements the Scheduler interface.
 func (s *shuffleRegionScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) {
 	shuffleRegionCounter.Inc()
 	region, oldPeer := s.scheduleRemovePeer(cluster)
@@ -122,18 +127,18 @@ func (s *shuffleRegionScheduler) scheduleRemovePeer(cluster sche.SchedulerCluste
 	pendingFilter := filter.NewRegionPendingFilter()
 	downFilter := filter.NewRegionDownFilter()
 	replicaFilter := filter.NewRegionReplicatedFilter(cluster)
-	ranges := s.conf.GetRanges()
+	ranges := s.conf.getRanges()
 	for _, source := range candidates.Stores {
 		var region *core.RegionInfo
-		if s.conf.IsRoleAllow(roleFollower) {
+		if s.conf.isRoleAllow(roleFollower) {
 			region = filter.SelectOneRegion(cluster.RandFollowerRegions(source.GetID(), ranges), nil, pendingFilter, downFilter, replicaFilter)
 		}
-		if region == nil && s.conf.IsRoleAllow(roleLeader) {
+		if region == nil && s.conf.isRoleAllow(roleLeader) {
 			region = filter.SelectOneRegion(cluster.RandLeaderRegions(source.GetID(), ranges), nil, pendingFilter, downFilter, replicaFilter)
 		}
-		if region == nil && s.conf.IsRoleAllow(roleLearner) {
+		if region == nil && s.conf.isRoleAllow(roleLearner) {
 			region = filter.SelectOneRegion(cluster.RandLearnerRegions(source.GetID(), ranges), nil, pendingFilter, downFilter, replicaFilter)
 		}
diff --git a/pkg/schedule/schedulers/shuffle_region_config.go b/pkg/schedule/schedulers/shuffle_region_config.go
index bce64f743b8..fbf53cfeb4d 100644
--- a/pkg/schedule/schedulers/shuffle_region_config.go
+++ b/pkg/schedule/schedulers/shuffle_region_config.go
@@ -43,19 +43,19 @@ type shuffleRegionSchedulerConfig struct {
 	Roles []string `json:"roles"` // can include `leader`, `follower`, `learner`.
 }
 
-func (conf *shuffleRegionSchedulerConfig) EncodeConfig() ([]byte, error) {
+func (conf *shuffleRegionSchedulerConfig) encodeConfig() ([]byte, error) {
 	conf.RLock()
 	defer conf.RUnlock()
 	return EncodeConfig(conf)
 }
 
-func (conf *shuffleRegionSchedulerConfig) GetRoles() []string {
+func (conf *shuffleRegionSchedulerConfig) getRoles() []string {
 	conf.RLock()
 	defer conf.RUnlock()
 	return conf.Roles
 }
 
-func (conf *shuffleRegionSchedulerConfig) GetRanges() []core.KeyRange {
+func (conf *shuffleRegionSchedulerConfig) getRanges() []core.KeyRange {
 	conf.RLock()
 	defer conf.RUnlock()
 	ranges := make([]core.KeyRange, len(conf.Ranges))
@@ -63,12 +63,13 @@ func (conf *shuffleRegionSchedulerConfig) GetRanges() []core.KeyRange {
 	return ranges
 }
 
-func (conf *shuffleRegionSchedulerConfig) IsRoleAllow(role string) bool {
+func (conf *shuffleRegionSchedulerConfig) isRoleAllow(role string) bool {
 	conf.RLock()
 	defer conf.RUnlock()
 	return slice.AnyOf(conf.Roles, func(i int) bool { return conf.Roles[i] == role })
 }
 
+// ServeHTTP implements the http.Handler interface.
 func (conf *shuffleRegionSchedulerConfig) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	router := mux.NewRouter()
 	router.HandleFunc("/list", conf.handleGetRoles).Methods(http.MethodGet)
@@ -79,7 +80,7 @@ func (conf *shuffleRegionSchedulerConfig) ServeHTTP(w http.ResponseWriter, r *ht
 
 func (conf *shuffleRegionSchedulerConfig) handleGetRoles(w http.ResponseWriter, _ *http.Request) {
 	rd := render.New(render.Options{IndentJSON: true})
-	rd.JSON(w, http.StatusOK, conf.GetRoles())
+	rd.JSON(w, http.StatusOK, conf.getRoles())
 }
 
 func (conf *shuffleRegionSchedulerConfig) handleSetRoles(w http.ResponseWriter, r *http.Request) {
diff --git a/pkg/schedule/schedulers/split_bucket.go b/pkg/schedule/schedulers/split_bucket.go
index 2031e232aee..7f33b996f1c 100644
--- a/pkg/schedule/schedulers/split_bucket.go
+++ b/pkg/schedule/schedulers/split_bucket.go
@@ -60,7 +60,7 @@ type splitBucketSchedulerConfig struct {
 	SplitLimit uint64 `json:"split-limit"`
 }
 
-func (conf *splitBucketSchedulerConfig) Clone() *splitBucketSchedulerConfig {
+func (conf *splitBucketSchedulerConfig) clone() *splitBucketSchedulerConfig {
 	conf.RLock()
 	defer conf.RUnlock()
 	return &splitBucketSchedulerConfig{
@@ -100,7 +100,7 @@ type splitBucketHandler struct {
 }
 
 func (h *splitBucketHandler) listConfig(w http.ResponseWriter, _ *http.Request) {
-	conf := h.conf.Clone()
+	conf := h.conf.clone()
 	h.rd.JSON(w, http.StatusOK, conf)
 }
 
@@ -213,7 +213,7 @@ type splitBucketPlan struct {
 // Schedule return operators if some bucket is too hot.
 func (s *splitBucketScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) {
 	splitBucketScheduleCounter.Inc()
-	conf := s.conf.Clone()
+	conf := s.conf.clone()
 	plan := &splitBucketPlan{
 		conf:    conf,
 		cluster: cluster,
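The renamed clone()/getRanges()-style helpers above all follow the same copy-under-RLock pattern: read the shared config under a read lock and return a copy, so callers never hold the lock while serializing or scheduling. A minimal sketch of the idea, with an invented config type rather than the PD one:

// sketch: snapshot getters guarded by a read lock
package main

import (
	"fmt"
	"sync"
)

type rangeConfig struct {
	sync.RWMutex
	Ranges []string
}

// getRanges returns a snapshot copy of the configured ranges.
func (c *rangeConfig) getRanges() []string {
	c.RLock()
	defer c.RUnlock()
	out := make([]string, len(c.Ranges))
	copy(out, c.Ranges)
	return out
}

func main() {
	c := &rangeConfig{Ranges: []string{"a", "b"}}
	snapshot := c.getRanges()
	snapshot[0] = "mutated"         // does not touch the shared config
	fmt.Println(c.Ranges, snapshot) // [a b] [mutated b]
}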
diff --git a/pkg/schedule/schedulers/transfer_witness_leader.go b/pkg/schedule/schedulers/transfer_witness_leader.go
index 8b6e9c39f1d..c1c59620735 100644
--- a/pkg/schedule/schedulers/transfer_witness_leader.go
+++ b/pkg/schedule/schedulers/transfer_witness_leader.go
@@ -54,10 +54,12 @@ func newTransferWitnessLeaderScheduler(opController *operator.Controller) Schedu
 	}
 }
 
+// IsScheduleAllowed implements the Scheduler interface.
 func (*transferWitnessLeaderScheduler) IsScheduleAllowed(sche.SchedulerCluster) bool {
 	return true
 }
 
+// Schedule implements the Scheduler interface.
 func (s *transferWitnessLeaderScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) {
 	transferWitnessLeaderCounter.Inc()
 	return s.scheduleTransferWitnessLeaderBatch(s.GetName(), cluster, transferWitnessLeaderBatchSize), nil
diff --git a/pkg/schedule/schedulers/utils.go b/pkg/schedule/schedulers/utils.go
index c708541e02e..1e911cf7b06 100644
--- a/pkg/schedule/schedulers/utils.go
+++ b/pkg/schedule/schedulers/utils.go
@@ -65,24 +65,24 @@ func newSolver(basePlan *plan.BalanceSchedulerPlan, kind constant.ScheduleKind,
 	}
 }
 
-func (p *solver) GetOpInfluence(storeID uint64) int64 {
+func (p *solver) getOpInfluence(storeID uint64) int64 {
 	return p.opInfluence.GetStoreInfluence(storeID).ResourceProperty(p.kind)
 }
 
-func (p *solver) SourceStoreID() uint64 {
+func (p *solver) sourceStoreID() uint64 {
 	return p.Source.GetID()
 }
 
-func (p *solver) SourceMetricLabel() string {
-	return strconv.FormatUint(p.SourceStoreID(), 10)
+func (p *solver) sourceMetricLabel() string {
+	return strconv.FormatUint(p.sourceStoreID(), 10)
 }
 
-func (p *solver) TargetStoreID() uint64 {
+func (p *solver) targetStoreID() uint64 {
 	return p.Target.GetID()
 }
 
-func (p *solver) TargetMetricLabel() string {
-	return strconv.FormatUint(p.TargetStoreID(), 10)
+func (p *solver) targetMetricLabel() string {
+	return strconv.FormatUint(p.targetStoreID(), 10)
 }
 
 func (p *solver) sourceStoreScore(scheduleName string) float64 {
@@ -90,7 +90,7 @@ func (p *solver) sourceStoreScore(scheduleName string) float64 {
 	tolerantResource := p.getTolerantResource()
 	// to avoid schedule too much, if A's core greater than B and C a little
 	// we want that A should be moved out one region not two
-	influence := p.GetOpInfluence(sourceID)
+	influence := p.getOpInfluence(sourceID)
 	if influence > 0 {
 		influence = -influence
 	}
@@ -121,7 +121,7 @@ func (p *solver) targetStoreScore(scheduleName string) float64 {
 	tolerantResource := p.getTolerantResource()
 	// to avoid schedule call back
 	// A->B, A's influence is negative, so A will be target, C may move region to A
-	influence := p.GetOpInfluence(targetID)
+	influence := p.getOpInfluence(targetID)
 	if influence < 0 {
 		influence = -influence
 	}
@@ -358,7 +358,7 @@ func newRetryQuota() *retryQuota {
 	}
 }
 
-func (q *retryQuota) GetLimit(store *core.StoreInfo) int {
+func (q *retryQuota) getLimit(store *core.StoreInfo) int {
 	id := store.GetID()
 	if limit, ok := q.limits[id]; ok {
 		return limit
@@ -367,19 +367,19 @@ func (q *retryQuota) GetLimit(store *core.StoreInfo) int {
 	return q.initialLimit
 }
 
-func (q *retryQuota) ResetLimit(store *core.StoreInfo) {
+func (q *retryQuota) resetLimit(store *core.StoreInfo) {
 	q.limits[store.GetID()] = q.initialLimit
 }
 
-func (q *retryQuota) Attenuate(store *core.StoreInfo) {
-	newLimit := q.GetLimit(store) / q.attenuation
+func (q *retryQuota) attenuate(store *core.StoreInfo) {
+	newLimit := q.getLimit(store) / q.attenuation
 	if newLimit < q.minLimit {
 		newLimit = q.minLimit
 	}
 	q.limits[store.GetID()] = newLimit
 }
 
-func (q *retryQuota) GC(keepStores []*core.StoreInfo) {
+func (q *retryQuota) gc(keepStores []*core.StoreInfo) {
 	set := make(map[uint64]struct{}, len(keepStores))
 	for _, store := range keepStores {
 		set[store.GetID()] = struct{}{}
diff --git a/pkg/schedule/schedulers/utils_test.go b/pkg/schedule/schedulers/utils_test.go
index a2f5aa4dad0..deb7c6e1038 100644
--- a/pkg/schedule/schedulers/utils_test.go
+++ b/pkg/schedule/schedulers/utils_test.go
@@ -30,24 +30,24 @@ func TestRetryQuota(t *testing.T) {
 	store2 := core.NewStoreInfo(&metapb.Store{Id: 2})
 	keepStores := []*core.StoreInfo{store1}
 
-	// test GetLimit
-	re.Equal(10, q.GetLimit(store1))
+	// test getLimit
+	re.Equal(10, q.getLimit(store1))
 
-	// test Attenuate
+	// test attenuate
 	for _, expected := range []int{5, 2, 1, 1, 1} {
-		q.Attenuate(store1)
-		re.Equal(expected, q.GetLimit(store1))
+		q.attenuate(store1)
+		re.Equal(expected, q.getLimit(store1))
 	}
 
 	// test GC
-	re.Equal(10, q.GetLimit(store2))
-	q.Attenuate(store2)
-	re.Equal(5, q.GetLimit(store2))
-	q.GC(keepStores)
-	re.Equal(1, q.GetLimit(store1))
-	re.Equal(10, q.GetLimit(store2))
-
-	// test ResetLimit
-	q.ResetLimit(store1)
-	re.Equal(10, q.GetLimit(store1))
+	re.Equal(10, q.getLimit(store2))
+	q.attenuate(store2)
+	re.Equal(5, q.getLimit(store2))
+	q.gc(keepStores)
+	re.Equal(1, q.getLimit(store1))
+	re.Equal(10, q.getLimit(store2))
+
+	// test resetLimit
+	q.resetLimit(store1)
+	re.Equal(10, q.getLimit(store1))
 }
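The renamed retry-quota test above exercises the same behavior as before: each attenuate() divides a store's limit by the attenuation factor down to a floor, and gc() drops limits for stores that are no longer kept. A simplified sketch of that behavior follows; the concrete numbers (10, 2, 1) mirror the test, but the type here is illustrative rather than the real retryQuota.

// sketch of the attenuation behavior the test expects
package main

import "fmt"

type retryQuota struct {
	initialLimit, minLimit, attenuation int
	limits                              map[uint64]int
}

func (q *retryQuota) getLimit(id uint64) int {
	if l, ok := q.limits[id]; ok {
		return l
	}
	q.limits[id] = q.initialLimit
	return q.initialLimit
}

func (q *retryQuota) attenuate(id uint64) {
	l := q.getLimit(id) / q.attenuation
	if l < q.minLimit {
		l = q.minLimit
	}
	q.limits[id] = l
}

func main() {
	q := &retryQuota{initialLimit: 10, minLimit: 1, attenuation: 2, limits: map[uint64]int{}}
	for i := 0; i < 5; i++ {
		q.attenuate(1)
		fmt.Print(q.getLimit(1), " ") // 5 2 1 1 1
	}
	fmt.Println()
}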
diff --git a/plugin/scheduler_example/evict_leader.go b/plugin/scheduler_example/evict_leader.go
index 83d4771adc4..49156abc40c 100644
--- a/plugin/scheduler_example/evict_leader.go
+++ b/plugin/scheduler_example/evict_leader.go
@@ -44,7 +44,7 @@ const (
 	EvictLeaderType = "user-evict-leader"
 	noStoreInSchedulerInfo = "No store in user-evict-leader-scheduler-config"
 
-	UserEvictLeaderScheduler types.CheckerSchedulerType = "user-evict-leader-scheduler"
+	userEvictLeaderScheduler types.CheckerSchedulerType = "user-evict-leader-scheduler"
 )
 
 func init() {
@@ -158,7 +158,7 @@ type evictLeaderScheduler struct {
 // newEvictLeaderScheduler creates an admin scheduler that transfers all leaders
 // out of a store.
 func newEvictLeaderScheduler(opController *operator.Controller, conf *evictLeaderSchedulerConfig) schedulers.Scheduler {
-	base := schedulers.NewBaseScheduler(opController, UserEvictLeaderScheduler)
+	base := schedulers.NewBaseScheduler(opController, userEvictLeaderScheduler)
 	handler := newEvictLeaderHandler(conf)
 	return &evictLeaderScheduler{
 		BaseScheduler: base,
@@ -172,6 +172,7 @@ func (s *evictLeaderScheduler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 	s.handler.ServeHTTP(w, r)
 }
 
+// EncodeConfig implements the Scheduler interface.
 func (s *evictLeaderScheduler) EncodeConfig() ([]byte, error) {
 	s.conf.mu.RLock()
 	defer s.conf.mu.RUnlock()
@@ -244,8 +245,8 @@ type evictLeaderHandler struct {
 	config *evictLeaderSchedulerConfig
 }
 
-// UpdateConfig updates the config.
-func (handler *evictLeaderHandler) UpdateConfig(w http.ResponseWriter, r *http.Request) {
+// updateConfig updates the config.
+func (handler *evictLeaderHandler) updateConfig(w http.ResponseWriter, r *http.Request) {
 	var input map[string]any
 	if err := apiutil.ReadJSONRespondError(handler.rd, w, r.Body, &input); err != nil {
 		return
@@ -285,14 +286,12 @@ func (handler *evictLeaderHandler) UpdateConfig(w http.ResponseWriter, r *http.R
 	handler.rd.JSON(w, http.StatusOK, nil)
 }
 
-// ListConfig lists the config.
-func (handler *evictLeaderHandler) ListConfig(w http.ResponseWriter, _ *http.Request) {
+func (handler *evictLeaderHandler) listConfig(w http.ResponseWriter, _ *http.Request) {
 	conf := handler.config.Clone()
 	handler.rd.JSON(w, http.StatusOK, conf)
 }
 
-// DeleteConfig deletes the config.
-func (handler *evictLeaderHandler) DeleteConfig(w http.ResponseWriter, r *http.Request) {
+func (handler *evictLeaderHandler) deleteConfig(w http.ResponseWriter, r *http.Request) {
 	idStr := mux.Vars(r)["store_id"]
 	id, err := strconv.ParseUint(idStr, 10, 64)
 	if err != nil {
@@ -331,9 +330,9 @@ func newEvictLeaderHandler(config *evictLeaderSchedulerConfig) http.Handler {
 		rd: render.New(render.Options{IndentJSON: true}),
 	}
 	router := mux.NewRouter()
-	router.HandleFunc("/config", h.UpdateConfig).Methods(http.MethodPost)
-	router.HandleFunc("/list", h.ListConfig).Methods(http.MethodGet)
-	router.HandleFunc("/delete/{store_id}", h.DeleteConfig).Methods(http.MethodDelete)
+	router.HandleFunc("/config", h.updateConfig).Methods(http.MethodPost)
+	router.HandleFunc("/list", h.listConfig).Methods(http.MethodGet)
+	router.HandleFunc("/delete/{store_id}", h.deleteConfig).Methods(http.MethodDelete)
 	return router
 }
diff --git a/server/forward.go b/server/forward.go
index 650833e1fc1..5c49b871020 100644
--- a/server/forward.go
+++ b/server/forward.go
@@ -122,7 +122,7 @@ func (s *GrpcServer) forwardTSO(stream pdpb.PD_TsoServer) error {
 		default:
 		}
 
-		request, err := server.Recv(s.GetTSOProxyRecvFromClientTimeout())
+		request, err := server.recv(s.GetTSOProxyRecvFromClientTimeout())
 		if err == io.EOF {
 			return nil
 		}
@@ -189,7 +189,7 @@ func (s *GrpcServer) forwardTSO(stream pdpb.PD_TsoServer) error {
 			Count:     tsopbResp.GetCount(),
 			Timestamp: tsopbResp.GetTimestamp(),
 		}
-		if err := server.Send(response); err != nil {
+		if err := server.send(response); err != nil {
 			return errors.WithStack(err)
 		}
 	}
diff --git a/server/grpc_service.go b/server/grpc_service.go
index 7b18be47fde..fa9156e884e 100644
--- a/server/grpc_service.go
+++ b/server/grpc_service.go
@@ -112,7 +112,7 @@ type pdpbTSORequest struct {
 	err error
 }
 
-func (s *tsoServer) Send(m *pdpb.TsoResponse) error {
+func (s *tsoServer) send(m *pdpb.TsoResponse) error {
 	if atomic.LoadInt32(&s.closed) == 1 {
 		return io.EOF
 	}
@@ -139,7 +139,7 @@ func (s *tsoServer) Send(m *pdpb.TsoResponse) error {
 	}
 }
 
-func (s *tsoServer) Recv(timeout time.Duration) (*pdpb.TsoRequest, error) {
+func (s *tsoServer) recv(timeout time.Duration) (*pdpb.TsoRequest, error) {
 	if atomic.LoadInt32(&s.closed) == 1 {
 		return nil, io.EOF
 	}
@@ -176,6 +176,7 @@ type heartbeatServer struct {
 	closed int32
 }
 
+// Send wraps Send() of PD_RegionHeartbeatServer.
 func (s *heartbeatServer) Send(m core.RegionHeartbeatResponse) error {
 	if atomic.LoadInt32(&s.closed) == 1 {
 		return io.EOF
@@ -199,6 +200,7 @@ func (s *heartbeatServer) Send(m core.RegionHeartbeatResponse) error {
 	}
 }
 
+// Recv wraps Recv() of PD_RegionHeartbeatServer.
 func (s *heartbeatServer) Recv() (*pdpb.RegionHeartbeatRequest, error) {
 	if atomic.LoadInt32(&s.closed) == 1 {
 		return nil, io.EOF
diff --git a/server/handler.go b/server/handler.go
index d36dd6656ae..34a78a93c3c 100644
--- a/server/handler.go
+++ b/server/handler.go
@@ -53,6 +53,7 @@ type server struct {
 	*Server
}
 
+// GetCoordinator returns the coordinator.
 func (s *server) GetCoordinator() *schedule.Coordinator {
 	c := s.GetRaftCluster()
 	if c == nil {
@@ -61,6 +62,7 @@ func (s *server) GetCoordinator() *schedule.Coordinator {
 	return c.GetCoordinator()
 }
 
+// GetCluster returns RaftCluster.
 func (s *server) GetCluster() sche.SchedulerCluster {
 	return s.GetRaftCluster()
 }
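The tsoServer and heartbeatServer wrappers touched above share one small pattern: a struct guards the underlying gRPC stream with an atomic closed flag and returns io.EOF once it has been marked closed. A minimal sketch of that pattern, with invented field and method names rather than the exact PD types:

// sketch: a closed-flag guarded stream wrapper
package main

import (
	"fmt"
	"io"
	"sync/atomic"
)

type streamServer struct {
	closed int32
}

// send refuses to write once the wrapper has been marked closed.
func (s *streamServer) send(msg string) error {
	if atomic.LoadInt32(&s.closed) == 1 {
		return io.EOF
	}
	fmt.Println("sent:", msg)
	return nil
}

func (s *streamServer) close() { atomic.StoreInt32(&s.closed, 1) }

func main() {
	s := &streamServer{}
	fmt.Println(s.send("tso response")) // writes, then prints <nil>
	s.close()
	fmt.Println(s.send("dropped")) // EOF
}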