Merge pull request #102 from ThomasObenaus/f/98_disable_watcher_in_dry_run

Disable watcher in dry run
ThomasObenaus authored Oct 10, 2019
2 parents 33b674d + 90ca036 commit 5842316
Showing 21 changed files with 505 additions and 152 deletions.
8 changes: 5 additions & 3 deletions capacityPlanner/capacityPlannerImpl.go
@@ -23,17 +23,19 @@ func (cp *CapacityPlanner) Plan(scaleFactor float32, currentScale uint) uint {

// IsCoolingDown returns true if the CapacityPlanner thinks that currently a new scaling
// would not be a good idea.
-func (cp *CapacityPlanner) IsCoolingDown(timeOfLastScale time.Time, scaleDown bool) bool {
+func (cp *CapacityPlanner) IsCoolingDown(timeOfLastScale time.Time, scaleDown bool) (cooldownActive bool, cooldownTimeLeft time.Duration) {
now := time.Now()

dur := cp.upScaleCooldownPeriod
if scaleDown {
dur = cp.downScaleCooldownPeriod
}

+// still cooling down
if timeOfLastScale.Add(dur).After(now) {
-return true
+return true, timeOfLastScale.Add(dur).Sub(now)
}

-return false
+// not cooling down any more
+return false, time.Second * 0
}
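The signature change above makes the remaining cooldown time available to callers. A minimal, self-contained sketch of how a caller might consume the new two-value return (a hypothetical standalone example mirroring the diff, not code from this commit):

```go
package main

import (
	"fmt"
	"time"
)

// isCoolingDown mirrors the updated IsCoolingDown contract: besides the
// cooldown flag it also reports how much cooldown time is left.
func isCoolingDown(timeOfLastScale time.Time, cooldown time.Duration) (cooldownActive bool, cooldownTimeLeft time.Duration) {
	now := time.Now()
	end := timeOfLastScale.Add(cooldown)
	// still cooling down
	if end.After(now) {
		return true, end.Sub(now)
	}
	// not cooling down any more
	return false, 0
}

func main() {
	lastScale := time.Now().Add(-7 * time.Second)
	active, left := isCoolingDown(lastScale, 10*time.Second)
	fmt.Printf("cooling down: %v, time left: %s\n", active, left) // ~3s left
}
```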
19 changes: 13 additions & 6 deletions capacityPlanner/capacityPlanner_test.go
@@ -1,6 +1,7 @@
package capacityPlanner

import (
"fmt"
"testing"
"time"

@@ -101,27 +102,33 @@ func Test_IsCoolingDown(t *testing.T) {
require.NotNil(t, capa)

lastScale := time.Now()
-result := capa.IsCoolingDown(lastScale, false)
+result, timeLeft := capa.IsCoolingDown(lastScale, false)
assert.True(t, result)
+assert.InEpsilon(t, time.Second*10, timeLeft, 0.1, fmt.Sprintf("left %s", timeLeft.String()))

-result = capa.IsCoolingDown(lastScale, true)
+result, timeLeft = capa.IsCoolingDown(lastScale, true)
assert.True(t, result)
+assert.InEpsilon(t, time.Second*20, timeLeft, 0.1, fmt.Sprintf("left %s", timeLeft.String()))

// Upscaling
lastScale = time.Now().Add(time.Second * -11)
-result = capa.IsCoolingDown(lastScale, false)
+result, timeLeft = capa.IsCoolingDown(lastScale, false)
assert.False(t, result)
+assert.Equal(t, time.Second*0, timeLeft, fmt.Sprintf("left %s", timeLeft.String()))

lastScale = time.Now().Add(time.Second * -9)
-result = capa.IsCoolingDown(lastScale, false)
+result, timeLeft = capa.IsCoolingDown(lastScale, false)
assert.True(t, result)
+assert.InEpsilon(t, time.Second*1, timeLeft, 0.1, fmt.Sprintf("left %s", timeLeft.String()))

// Downscaling
lastScale = time.Now().Add(time.Second * -21)
-result = capa.IsCoolingDown(lastScale, true)
+result, timeLeft = capa.IsCoolingDown(lastScale, true)
assert.False(t, result)
+assert.Equal(t, time.Second*0, timeLeft, fmt.Sprintf("left %s", timeLeft.String()))

lastScale = time.Now().Add(time.Second * -19)
-result = capa.IsCoolingDown(lastScale, true)
+result, timeLeft = capa.IsCoolingDown(lastScale, true)
assert.True(t, result)
+assert.InEpsilon(t, time.Second*1, timeLeft, 0.1, fmt.Sprintf("left %s", timeLeft.String()))
}
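The tests above use `assert.InEpsilon` rather than `assert.Equal` for the remaining cooldown because a little wall-clock time elapses between `lastScale` and the check inside `IsCoolingDown`. A standalone sketch of the relative-error comparison that `InEpsilon` performs (assuming testify's documented semantics, not code from this commit):

```go
package main

import (
	"fmt"
	"math"
	"time"
)

// inEpsilon mimics testify's relative-error check:
// |expected-actual| / |expected| must not exceed epsilon.
func inEpsilon(expected, actual time.Duration, epsilon float64) bool {
	relErr := math.Abs(float64(expected-actual)) / math.Abs(float64(expected))
	return relErr <= epsilon
}

func main() {
	// 9.95s measured against an expected 10s cooldown is 0.5% off,
	// so it passes comfortably with epsilon 0.1 (10%).
	fmt.Println(inEpsilon(10*time.Second, 9950*time.Millisecond, 0.1)) // true
}
```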
10 changes: 10 additions & 0 deletions doc/DryRunMode.md
@@ -0,0 +1,10 @@
# Dry Run Mode

The following table shows how sokar behaves when dry-run mode is activated.

| Feature | Dry Run Mode Active | Dry Run Mode Deactivated |
| :--- | :--- | :--- |
| Automatic Scaling | Deactivated | Active |
| Manual Scaling | Possible | Not Possible |
| ScaleObjectWatcher | Deactivated | Active |
| PlannedButSkippedScalingOpen<br>_(the metric `sokar_sca_planned_but_skipped_scaling_open`; for more information see [Metrics.md](../Metrics.md))_ | Set to 1 if a scaling was skipped<br>Set to 0 after manual scaling | Stays 0 |
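The last row describes a gauge that flags a pending, skipped scaling while dry-run mode is active. A minimal sketch of how such a gauge behaves with the Go Prometheus client (the label name `direction` and the call sites are assumptions for illustration; the real gauge lives in sokar's scaler package):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Hypothetical stand-in for sokar's plannedButSkippedScalingOpen gauge.
	gauge := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "sokar_sca_planned_but_skipped_scaling_open",
		Help: "1 while a planned scaling was skipped (dry-run) and not yet resolved.",
	}, []string{"direction"}) // label name is an assumption for this sketch
	prometheus.MustRegister(gauge)

	gauge.WithLabelValues("up").Set(1) // dry-run active: a planned scaling was skipped
	gauge.WithLabelValues("up").Set(0) // cleared again after a manual scaling
	fmt.Println("gauge updated")
}
```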
5 changes: 3 additions & 2 deletions main.go
@@ -63,7 +63,7 @@ func main() {
logger.Info().Msgf("Scaling Target: %s", scalingTarget.String())

logger.Info().Msg("5. Setup: Scaler")
-scaler := helper.Must(setupScaler(cfg.ScaleObject.Name, cfg.ScaleObject.MinCount, cfg.ScaleObject.MaxCount, cfg.Scaler.WatcherInterval, scalingTarget, loggingFactory)).(*scaler.Scaler)
+scaler := helper.Must(setupScaler(cfg.ScaleObject.Name, cfg.ScaleObject.MinCount, cfg.ScaleObject.MaxCount, cfg.Scaler.WatcherInterval, scalingTarget, loggingFactory, cfg.DryRunMode)).(*scaler.Scaler)

logger.Info().Msg("6. Setup: CapacityPlanner")

@@ -259,7 +259,7 @@ func setupScalingTarget(cfg config.Scaler, logF logging.LoggerFactory) (scaler.S
}

// setupScaler creates and configures the Scaler. Internally nomad is used as scaling target.
-func setupScaler(scalingObjName string, min uint, max uint, watcherInterval time.Duration, scalingTarget scaler.ScalingTarget, logF logging.LoggerFactory) (*scaler.Scaler, error) {
+func setupScaler(scalingObjName string, min uint, max uint, watcherInterval time.Duration, scalingTarget scaler.ScalingTarget, logF logging.LoggerFactory, dryRunMode bool) (*scaler.Scaler, error) {

if logF == nil {
return nil, fmt.Errorf("Logging factory is nil")
@@ -276,6 +276,7 @@ func setupScaler(scalingObjName string, min uint, max uint, watcherInterval time
scaler.NewMetrics(),
scaler.WithLogger(logF.NewNamedLogger("sokar.scaler")),
scaler.WatcherInterval(watcherInterval),
+scaler.DryRunMode(dryRunMode),
)
if err != nil {
return nil, fmt.Errorf("Failed setting up scaler: %s", err)
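The new `scaler.DryRunMode(dryRunMode)` argument follows the same functional-options pattern as `scaler.WithLogger` and `scaler.WatcherInterval` above. A self-contained sketch of that pattern (type names and struct fields are assumptions; the actual option type is defined in the scaler package, outside this diff):

```go
package main

import "fmt"

// Scaler is a minimal stand-in for sokar's scaler type.
type Scaler struct{ dryRunMode bool }

// Option mutates a Scaler during construction.
type Option func(s *Scaler)

// DryRunMode toggles dry-run behavior, as wired up from cfg.DryRunMode above.
func DryRunMode(enabled bool) Option {
	return func(s *Scaler) { s.dryRunMode = enabled }
}

// New applies all given options to a fresh Scaler.
func New(opts ...Option) *Scaler {
	s := &Scaler{}
	for _, opt := range opts {
		opt(s)
	}
	return s
}

func main() {
	s := New(DryRunMode(true))
	fmt.Println(s.dryRunMode) // true
}
```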
8 changes: 4 additions & 4 deletions main_test.go
@@ -53,16 +53,16 @@ func Test_SetupScaler_Failures(t *testing.T) {
logF := mock_logging.NewMockLoggerFactory(mockCtrl)

// no logging factory
-scaler, err := setupScaler("any", 0, 1, time.Second*1, nil, nil)
+scaler, err := setupScaler("any", 0, 1, time.Second*1, nil, nil, false)
assert.Error(t, err)
assert.Nil(t, scaler)

-scaler, err = setupScaler("any", 0, 1, time.Second*1, nil, logF)
+scaler, err = setupScaler("any", 0, 1, time.Second*1, nil, logF, false)
assert.Error(t, err)
assert.Nil(t, scaler)

// invalid watcher-interval
-scaler, err = setupScaler("any", 0, 1, time.Second*0, nil, nil)
+scaler, err = setupScaler("any", 0, 1, time.Second*0, nil, nil, false)
assert.Error(t, err)
assert.Nil(t, scaler)
}
@@ -75,7 +75,7 @@ func Test_SetupScaler(t *testing.T) {
scalingTarget := mock_scaler.NewMockScalingTarget(mockCtrl)
logF.EXPECT().NewNamedLogger(gomock.Any()).Times(1)

-scaler, err := setupScaler("any", 0, 1, time.Second*1, scalingTarget, logF)
+scaler, err := setupScaler("any", 0, 1, time.Second*1, scalingTarget, logF, false)
assert.NoError(t, err)
assert.NotNil(t, scaler)
}
59 changes: 34 additions & 25 deletions scaler/scale.go
@@ -2,6 +2,7 @@ package scaler

import (
"fmt"
"time"

"github.com/thomasobenaus/sokar/helper"
)
@@ -69,27 +70,14 @@ func checkScalingPolicy(desiredCount uint, min uint, max uint) policyCheckResult
return result
}

-// trueIfNil returns a scaleResult filled in with an appropriate error message in case the given scaler is nil
-func trueIfNil(s *Scaler) (result scaleResult, ok bool) {
-ok = false
-result = scaleResult{state: scaleUnknown}
-
-if s == nil {
-ok = true
-result = scaleResult{
-state: scaleFailed,
-stateDescription: "Scaler is nil",
-newCount: 0,
-}
-}
-return result, ok
-}
-
// scale scales the scalingObject from currentCount to desiredCount.
// Internally it is checked if a scaling is needed and if the scaling policy is valid.
-func (s *Scaler) scale(desiredCount uint, currentCount uint, dryRun bool) scaleResult {
-if r, ok := trueIfNil(s); ok {
-return r
+// If the force flag is true then even in dry-run mode the scaling will be applied.
+func (s *Scaler) scale(desiredCount uint, currentCount uint, force bool) scaleResult {
+
+// memorize the time the scaling started
+if isScalePermitted(s.dryRunMode, force) {
+s.lastScaleAction = time.Now()
}

sObjName := s.scalingObject.Name
@@ -103,13 +91,15 @@ func (s *Scaler) scale(desiredCount uint, currentCount uint, dryRun bool) scaleR
return scaleResult{
state: scaleFailed,
stateDescription: fmt.Sprintf("Error obtaining if scalingObject is dead: %s.", err.Error()),
+newCount: currentCount,
}
}

if dead {
return scaleResult{
state: scaleIgnored,
stateDescription: fmt.Sprintf("ScalingObject '%s' is dead. Can't scale", sObjName),
+newCount: currentCount,
}
}

@@ -136,10 +126,26 @@ func (s *Scaler) scale(desiredCount uint, currentCount uint, dryRun bool) scaleR
}
}

+return s.executeScale(currentCount, newCount, force)
+}
+
+// executeScale executes the wanted scale even if it is not needed (currentCount == newCount).
+// The only thing that is checked is whether the scaler is in dry-run mode or not.
+// In dry-run mode the scaling is not applied by actually scaling the scalingObject; only a metric is
+// updated that reflects the fact that a scaling was skipped/ignored.
+// Only the force flag can be used to override this behavior. If the force flag is true then even in
+// dry-run mode the scaling will be applied.
+func (s *Scaler) executeScale(currentCount, newCount uint, force bool) scaleResult {
+sObjName := s.scalingObject.Name
+min := s.scalingObject.MinCount
+max := s.scalingObject.MaxCount
+
diff := helper.SubUint(newCount, currentCount)
scaleTypeStr := amountToScaleType(diff)

-if dryRun {
-s.logger.Info().Str("scalingObject", sObjName).Msgf("Skip scale %s by %d to %d (DryRun).", scaleTypeStr, diff, newCount)
+// the force flag can overrule the dry run mode
+if !isScalePermitted(s.dryRunMode, force) {
+s.logger.Info().Str("scalingObject", sObjName).Msgf("Skip scale %s by %d to %d (DryRun, force=%v).", scaleTypeStr, diff, newCount, force)
s.metrics.plannedButSkippedScalingOpen.WithLabelValues(scaleTypeStr).Set(1)

return scaleResult{
@@ -149,16 +155,15 @@ func (s *Scaler) scale(desiredCount uint, currentCount uint, dryRun bool) scaleR
}
}

-s.logger.Info().Str("scalingObject", sObjName).Msgf("Scale %s by %d to %d.", scaleTypeStr, diff, newCount)
+s.logger.Info().Str("scalingObject", sObjName).Msgf("Scale %s by %d to %d (force=%v).", scaleTypeStr, diff, newCount, force)
s.metrics.plannedButSkippedScalingOpen.WithLabelValues(scaleTypeStr).Set(0)

-// Set the new scalingObject count
-s.desiredScale.setValue(newCount)
-err = s.scalingTarget.AdjustScalingObjectCount(s.scalingObject.Name, s.scalingObject.MinCount, s.scalingObject.MaxCount, currentCount, newCount)
+err := s.scalingTarget.AdjustScalingObjectCount(sObjName, min, max, currentCount, newCount)
if err != nil {
return scaleResult{
state: scaleFailed,
stateDescription: fmt.Sprintf("Error adjusting scalingObject count to %d: %s.", newCount, err.Error()),
+newCount: currentCount,
}
}

@@ -168,3 +173,7 @@ func (s *Scaler) scale(desiredCount uint, currentCount uint, dryRun bool) scaleR
newCount: newCount,
}
}

+func isScalePermitted(dryRun, force bool) bool {
+return !dryRun || force
+}
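`isScalePermitted` encodes the whole dry-run/force decision in one place. A table-driven test sketch covering its truth table (a hypothetical test, not part of this commit):

```go
package scaler

import "testing"

func Test_IsScalePermitted(t *testing.T) {
	cases := []struct {
		dryRun, force, want bool
	}{
		{dryRun: false, force: false, want: true}, // normal mode: scaling is always permitted
		{dryRun: false, force: true, want: true},  // force is a no-op outside dry-run mode
		{dryRun: true, force: false, want: false}, // dry-run mode blocks the scaling
		{dryRun: true, force: true, want: true},   // force overrules dry-run mode
	}
	for _, c := range cases {
		if got := isScalePermitted(c.dryRun, c.force); got != c.want {
			t.Errorf("isScalePermitted(%v, %v) = %v, want %v", c.dryRun, c.force, got, c.want)
		}
	}
}
```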