Skip to content

Commit

Permalink
merge master to 2.0 to release rc5 (#1023)
Browse files Browse the repository at this point in the history
  • Loading branch information
nolouch authored and siddontang committed Apr 16, 2018
1 parent 48b7903 commit 9b824d2
Show file tree
Hide file tree
Showing 4 changed files with 30 additions and 40 deletions.
24 changes: 24 additions & 0 deletions server/config_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ package server

import (
. "github.com/pingcap/check"
"github.com/pingcap/pd/server/core"
)

var _ = Suite(&testConfigSuite{})
Expand All @@ -33,3 +34,26 @@ func (s *testConfigSuite) TestBadFormatJoinAddr(c *C) {
cfg.Join = "127.0.0.1:2379" // Wrong join addr without scheme.
c.Assert(cfg.adjust(), NotNil)
}

// TestReloadConfig verifies that schedule/replication options persisted to
// the KV store are restored by reload, and that a scheduler newly added to
// the compiled-in defaults is appended to the restored scheduler list.
func (s *testConfigSuite) TestReloadConfig(c *C) {
	_, opt := newTestScheduleConfig()
	kv := core.NewKV(core.NewMemoryKV())
	scheduleCfg := opt.load()
	scheduleCfg.MaxSnapshotCount = 10
	opt.SetMaxReplicas(5)
	// Check the error so a persistence failure is reported here rather than
	// surfacing as a confusing assertion failure below.
	c.Assert(opt.persist(kv), IsNil)

	// Suppose we add a new default enabled scheduler "adjacent-region".
	defaultSchedulers := []string{"balance-region", "balance-leader", "hot-region", "label", "adjacent-region"}
	_, newOpt := newTestScheduleConfig()
	newOpt.AddSchedulerCfg("adjacent-region", []string{})
	c.Assert(newOpt.reload(kv), IsNil)
	schedulers := newOpt.GetSchedulers()
	c.Assert(schedulers, HasLen, 5)
	for i, s := range schedulers {
		c.Assert(s.Type, Equals, defaultSchedulers[i])
		c.Assert(s.Disable, IsFalse)
	}
	// The values persisted before the reload must survive the round trip.
	c.Assert(newOpt.GetMaxReplicas("default"), Equals, 5)
	c.Assert(newOpt.GetMaxSnapshotCount(), Equals, uint64(10))
}
19 changes: 0 additions & 19 deletions server/coordinator_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -505,25 +505,6 @@ func (s *testCoordinatorSuite) TestPersistScheduler(c *C) {
c.Assert(co.schedulers, HasLen, 3)
}

// TestAddDefaultScheduler checks that a scheduler newly added to the
// compiled-in defaults is appended to the scheduler list restored from
// the KV store, and that all restored schedulers remain enabled.
func (s *testCoordinatorSuite) TestAddDefaultScheduler(c *C) {
	_, opt := newTestScheduleConfig()
	kv := core.NewKV(core.NewMemoryKV())
	// Check the error so a persistence failure is reported here rather than
	// surfacing as a confusing assertion failure below.
	c.Assert(opt.persist(kv), IsNil)

	// Suppose we add a new default enabled scheduler "adjacent-region".
	defaultSchedulers := []string{"balance-region", "balance-leader", "hot-region", "label", "adjacent-region"}
	_, newOpt := newTestScheduleConfig()
	newOpt.AddSchedulerCfg("adjacent-region", []string{})
	c.Assert(newOpt.reload(kv), IsNil)
	schedulers := newOpt.GetSchedulers()
	c.Assert(schedulers, HasLen, 5)
	for i, s := range schedulers {
		c.Assert(s.Type, Equals, defaultSchedulers[i])
		c.Assert(s.Disable, IsFalse)
	}
}

func (s *testCoordinatorSuite) TestRestart(c *C) {
// Turn off balance, we test add replica only.
cfg, opt := newTestScheduleConfig()
Expand Down
7 changes: 4 additions & 3 deletions server/option.go
Original file line number Diff line number Diff line change
Expand Up @@ -231,8 +231,9 @@ func (o *scheduleOption) reload(kv *core.KV) error {
if err != nil {
return errors.Trace(err)
}
o.adjustScheduleCfg(cfg)
if isExist {
o.store(o.checkoutScheduleCfg(cfg))
o.store(&cfg.Schedule)
o.rep.store(&cfg.Replication)
for name, nsCfg := range cfg.Namespace {
nsCfg := nsCfg
Expand All @@ -243,7 +244,7 @@ func (o *scheduleOption) reload(kv *core.KV) error {
return nil
}

func (o *scheduleOption) checkoutScheduleCfg(persistentCfg *Config) *ScheduleConfig {
func (o *scheduleOption) adjustScheduleCfg(persistentCfg *Config) {
scheduleCfg := *o.load()
for i, s := range scheduleCfg.Schedulers {
for _, ps := range persistentCfg.Schedule.Schedulers {
Expand All @@ -267,7 +268,7 @@ func (o *scheduleOption) checkoutScheduleCfg(persistentCfg *Config) *ScheduleCon
}
}
scheduleCfg.Schedulers = append(scheduleCfg.Schedulers, restoredSchedulers...)
return &scheduleCfg
persistentCfg.Schedule.Schedulers = scheduleCfg.Schedulers
}

func (o *scheduleOption) GetHotRegionLowThreshold() int {
Expand Down
20 changes: 2 additions & 18 deletions server/server.go
Original file line number Diff line number Diff line change
Expand Up @@ -219,7 +219,7 @@ func (s *Server) startServer() error {

func (s *Server) initClusterID() error {
// Get any cluster key to parse the cluster ID.
resp, err := kvGet(s.client, pdRootPath, clientv3.WithFirstCreate()...)
resp, err := kvGet(s.client, pdClusterIDPath)
if err != nil {
return errors.Trace(err)
}
Expand All @@ -229,23 +229,7 @@ func (s *Server) initClusterID() error {
s.clusterID, err = initOrGetClusterID(s.client, pdClusterIDPath)
return errors.Trace(err)
}

key := string(resp.Kvs[0].Key)

// If the key is "pdClusterIDPath", parse the cluster ID from it.
if key == pdClusterIDPath {
s.clusterID, err = bytesToUint64(resp.Kvs[0].Value)
return errors.Trace(err)
}

// Parse the cluster ID from any other keys for compatibility.
elems := strings.Split(key, "/")
if len(elems) < 3 {
return errors.Errorf("invalid cluster key %v", key)
}
s.clusterID, err = strconv.ParseUint(elems[2], 10, 64)

log.Infof("init and load cluster id: %d", s.clusterID)
s.clusterID, err = bytesToUint64(resp.Kvs[0].Value)
return errors.Trace(err)
}

Expand Down

0 comments on commit 9b824d2

Please sign in to comment.