diff --git a/server/api/store.go b/server/api/store.go index 1d0da0e9825..ba8c71c1020 100644 --- a/server/api/store.go +++ b/server/api/store.go @@ -305,6 +305,10 @@ func (h *storeHandler) SetStoreWeight(w http.ResponseWriter, r *http.Request) { // @Router /store/{id}/limit [post] func (h *storeHandler) SetStoreLimit(w http.ResponseWriter, r *http.Request) { rc := getCluster(r) + if version := rc.GetScheduleConfig().StoreLimitVersion; version != storelimit.VersionV1 { + h.rd.JSON(w, http.StatusBadRequest, fmt.Sprintf("current store limit version:%s not support set limit", version)) + return + } vars := mux.Vars(r) storeID, errParse := apiutil.ParseUint64VarsField(vars, "id") if errParse != nil { @@ -405,6 +409,11 @@ func (h *storesHandler) RemoveTombStone(w http.ResponseWriter, r *http.Request) // @Failure 500 {string} string "PD server failed to proceed the request." // @Router /stores/limit [post] func (h *storesHandler) SetAllStoresLimit(w http.ResponseWriter, r *http.Request) { + cfg := h.GetScheduleConfig() + if version := cfg.StoreLimitVersion; version != storelimit.VersionV1 { + h.rd.JSON(w, http.StatusBadRequest, fmt.Sprintf("current store limit version:%s not support set limit", version)) + return + } var input map[string]any if err := apiutil.ReadJSONRespondError(h.rd, w, r.Body, &input); err != nil { return } @@ -485,7 +494,12 @@ func (h *storesHandler) SetAllStoresLimit(w http.ResponseWriter, r *http.Request // @Failure 500 {string} string "PD server failed to proceed the request." 
// @Router /stores/limit [get] func (h *storesHandler) GetAllStoresLimit(w http.ResponseWriter, r *http.Request) { - limits := h.GetScheduleConfig().StoreLimit + cfg := h.GetScheduleConfig() + if version := cfg.StoreLimitVersion; version != storelimit.VersionV1 { + h.rd.JSON(w, http.StatusBadRequest, fmt.Sprintf("current store limit version:%s not support get limit", version)) + return + } + limits := cfg.StoreLimit includeTombstone := false var err error if includeStr := r.URL.Query().Get("include_tombstone"); includeStr != "" { diff --git a/tools/pd-ctl/tests/store/store_test.go b/tools/pd-ctl/tests/store/store_test.go index afb97401168..b5cff2e8e5c 100644 --- a/tools/pd-ctl/tests/store/store_test.go +++ b/tools/pd-ctl/tests/store/store_test.go @@ -38,6 +38,38 @@ import ( "go.etcd.io/etcd/pkg/transport" ) +func TestStoreLimitV2(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cluster, err := pdTests.NewTestCluster(ctx, 1) + re.NoError(err) + err = cluster.RunInitialServers() + re.NoError(err) + cluster.WaitLeader() + pdAddr := cluster.GetConfig().GetClientURL() + cmd := ctl.GetRootCmd() + + leaderServer := cluster.GetLeaderServer() + re.NoError(leaderServer.BootstrapCluster()) + defer cluster.Destroy() + + // store command + args := []string{"-u", pdAddr, "config", "set", "store-limit-version", "v2"} + _, err = tests.ExecuteCommand(cmd, args...) + re.NoError(err) + + args = []string{"-u", pdAddr, "store", "limit"} + output, err := tests.ExecuteCommand(cmd, args...) + re.NoError(err) + re.Contains(string(output), "not support get limit") + + args = []string{"-u", pdAddr, "store", "limit", "1", "10"} + output, err = tests.ExecuteCommand(cmd, args...) 
+ re.NoError(err) + re.Contains(string(output), "not support set limit") +} + func TestStore(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) diff --git a/tools/pd-simulator/README.md b/tools/pd-simulator/README.md index c47024fc24b..107f6c40f64 100644 --- a/tools/pd-simulator/README.md +++ b/tools/pd-simulator/README.md @@ -43,3 +43,14 @@ Run a specific case with an external PD: ```shell ./pd-simulator -pd="http://127.0.0.1:2379" -case="casename" ``` + +Run with tiup playground: +```shell +tiup playground nightly --host 127.0.0.1 --kv.binpath ./pd-simulator --kv=1 --db=0 --kv.config=./tikv.conf +``` +Example `tikv.conf`: +``` +case-name="redundant-balance-region" +sim-tick-interval="1s" +store-io-per-second=100 +``` \ No newline at end of file diff --git a/tools/pd-simulator/simulator/cases/cases.go b/tools/pd-simulator/simulator/cases/cases.go index 238b54c935a..0ddd66608b1 100644 --- a/tools/pd-simulator/simulator/cases/cases.go +++ b/tools/pd-simulator/simulator/cases/cases.go @@ -89,6 +89,7 @@ var IDAllocator idAllocator var CaseMap = map[string]func(*config.SimConfig) *Case{ "balance-leader": newBalanceLeader, "redundant-balance-region": newRedundantBalanceRegion, + "scale-in-out": newScaleInOut, "region-split": newRegionSplit, "region-merge": newRegionMerge, "hot-read": newHotRead, diff --git a/tools/pd-simulator/simulator/cases/scale_tikv.go b/tools/pd-simulator/simulator/cases/scale_tikv.go new file mode 100644 index 00000000000..96d44513ae7 --- /dev/null +++ b/tools/pd-simulator/simulator/cases/scale_tikv.go @@ -0,0 +1,83 @@ +// Copyright 2024 TiKV Project Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cases + +import ( + "github.com/pingcap/kvproto/pkg/metapb" + "github.com/tikv/pd/pkg/core" + sc "github.com/tikv/pd/tools/pd-simulator/simulator/config" + "github.com/tikv/pd/tools/pd-simulator/simulator/info" + "github.com/tikv/pd/tools/pd-simulator/simulator/simutil" +) + +func newScaleInOut(config *sc.SimConfig) *Case { + var simCase Case + + totalStore := config.TotalStore + totalRegion := config.TotalRegion + replica := int(config.ServerConfig.Replication.MaxReplicas) + if totalStore == 0 || totalRegion == 0 { + totalStore, totalRegion = 6, 4000 + } + + for i := 0; i < totalStore; i++ { + s := &Store{ + ID: IDAllocator.nextID(), + Status: metapb.StoreState_Up, + } + if i%2 == 1 { + s.HasExtraUsedSpace = true + } + simCase.Stores = append(simCase.Stores, s) + } + + for i := 0; i < totalRegion; i++ { + peers := make([]*metapb.Peer, 0, replica) + for j := 0; j < replica; j++ { + peers = append(peers, &metapb.Peer{ + Id: simutil.IDAllocator.NextID(), + StoreId: uint64((i+j)%totalStore + 1), + }) + } + simCase.Regions = append(simCase.Regions, Region{ + ID: IDAllocator.nextID(), + Peers: peers, + Leader: peers[0], + }) + } + + scaleInTick := int64(totalRegion * 3 / totalStore) + addEvent := &AddNodesDescriptor{} + addEvent.Step = func(tick int64) uint64 { + if tick == scaleInTick { + return uint64(totalStore + 1) + } + return 0 + } + + removeEvent := &DeleteNodesDescriptor{} + removeEvent.Step = func(tick int64) uint64 { + if tick == scaleInTick*2 { + return uint64(totalStore + 1) + } + return 0 + } + simCase.Events = 
[]EventDescriptor{addEvent, removeEvent} + + simCase.Checker = func([]*metapb.Store, *core.RegionsInfo, []info.StoreStats) bool { + return false + } + return &simCase +}