diff --git a/qa/workunits/mon/pg_autoscaler.sh b/qa/workunits/mon/pg_autoscaler.sh
index bc5003f4c4e0c..4cf71a31cf4c9 100755
--- a/qa/workunits/mon/pg_autoscaler.sh
+++ b/qa/workunits/mon/pg_autoscaler.sh
@@ -137,10 +137,20 @@ ceph osd pool set bulk0 target_size_bytes 1000
 ceph osd pool set meta0 target_size_ratio 1
 wait_for 60 "ceph health detail | grep POOL_HAS_TARGET_SIZE_BYTES_AND_RATIO"
 
+# test autoscale warn
+
+ceph osd pool create warn0 1 --autoscale-mode=warn
+wait_for 120 "ceph health detail | grep POOL_TOO_FEW_PGS"
+
+ceph osd pool create warn1 256 --autoscale-mode=warn
+wait_for 120 "ceph health detail | grep POOL_TOO_MANY_PGS"
+
 ceph osd pool rm meta0 meta0 --yes-i-really-really-mean-it
 ceph osd pool rm bulk0 bulk0 --yes-i-really-really-mean-it
 ceph osd pool rm bulk1 bulk1 --yes-i-really-really-mean-it
 ceph osd pool rm bulk2 bulk2 --yes-i-really-really-mean-it
+ceph osd pool rm warn0 warn0 --yes-i-really-really-mean-it
+ceph osd pool rm warn1 warn1 --yes-i-really-really-mean-it
 
 echo OK
diff --git a/src/pybind/mgr/pg_autoscaler/module.py b/src/pybind/mgr/pg_autoscaler/module.py
index b7a95fc7b97b6..13e1cff9e8838 100644
--- a/src/pybind/mgr/pg_autoscaler/module.py
+++ b/src/pybind/mgr/pg_autoscaler/module.py
@@ -719,8 +719,6 @@ def _maybe_adjust(self) -> None:
             if p['target_bytes'] > 0:
                 total_target_bytes[p['crush_root_id']] += p['target_bytes'] * p['raw_used_rate']
                 target_bytes_pools[p['crush_root_id']].append(p['pool_name'])
-            if not p['would_adjust']:
-                continue
             if p['pg_autoscale_mode'] == 'warn':
                 msg = 'Pool %s has %d placement groups, should have %d' % (
                     p['pool_name'],
@@ -730,7 +728,8 @@ def _maybe_adjust(self) -> None:
                     too_few.append(msg)
                 else:
                     too_many.append(msg)
-
+            if not p['would_adjust']:
+                continue
             if p['pg_autoscale_mode'] == 'on':
                 # Note that setting pg_num actually sets pg_num_target (see
                 # OSDMonitor.cc)
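
The module.py hunks move the `would_adjust` early-continue so that it runs after the warn-mode message generation. Below is a minimal, self-contained sketch (not the mgr module itself) that illustrates the effect of that reordering; the pool dict values are hypothetical, the `collect_warnings` helper and its `check_would_adjust_first` flag exist only for this illustration, and the `pg_num_final > pg_num_target` comparison is assumed from the surrounding (unshown) context of `_maybe_adjust`.

# Sketch: why the would_adjust check must come after the 'warn' branch.
# With the old ordering, a pool in --autoscale-mode=warn whose adjustment is
# suppressed (would_adjust == False) never reaches the message generation, so
# the POOL_TOO_FEW_PGS / POOL_TOO_MANY_PGS health warnings are silently skipped.

from typing import Dict, List, Tuple


def collect_warnings(pools: List[Dict],
                     check_would_adjust_first: bool) -> Tuple[List[str], List[str]]:
    too_few: List[str] = []
    too_many: List[str] = []
    for p in pools:
        if check_would_adjust_first and not p['would_adjust']:
            continue  # old ordering: warn-mode pools may be skipped entirely
        if p['pg_autoscale_mode'] == 'warn':
            msg = 'Pool %s has %d placement groups, should have %d' % (
                p['pool_name'], p['pg_num_target'], p['pg_num_final'])
            if p['pg_num_final'] > p['pg_num_target']:
                too_few.append(msg)
            else:
                too_many.append(msg)
        if not check_would_adjust_first and not p['would_adjust']:
            continue  # new ordering: only the actual pg_num adjustment is skipped
        # ... the mode == 'on' adjustment would happen here ...
    return too_few, too_many


if __name__ == '__main__':
    pools = [
        # Hypothetical warn-mode pool whose adjustment is suppressed; the numbers
        # are chosen only to show the ordering effect, not taken from a real run.
        {'pool_name': 'warn0', 'pg_autoscale_mode': 'warn',
         'pg_num_target': 1, 'pg_num_final': 32, 'would_adjust': False},
    ]
    print(collect_warnings(pools, check_would_adjust_first=True))   # ([], []) - no warning
    print(collect_warnings(pools, check_would_adjust_first=False))  # warning is reported

The qa hunk exercises exactly this path: warn0 and warn1 are created with --autoscale-mode=warn and sized to trigger POOL_TOO_FEW_PGS and POOL_TOO_MANY_PGS respectively, then removed at the end of the test.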