
backend: provide per-arch & per-owner worker limit #2909

Merged (2 commits) on Sep 26, 2023
4 changes: 4 additions & 0 deletions backend/conf/copr-be.conf.example
@@ -42,6 +42,10 @@ sleeptime=30
# Maximum number of concurrently running tasks per architecture.
#builds_max_workers_arch=x86_64=10,ppc64le=12

# Maximum number of concurrent build workers per architecture and owner. For
# example, give at most 15 ppc64le and 10 s390x machines to one copr owner:
#builds_max_workers_arch_per_owner=ppc64le=15,s390x=10

# Maximum number of concurrently running tasks per project owner.
#builds_max_workers_owner=20

2 changes: 1 addition & 1 deletion backend/copr-backend.spec
@@ -6,7 +6,7 @@
%global tests_version 4
%global tests_tar test-data-copr-backend

%global copr_common_version 0.16.4.dev
%global copr_common_version 0.20.1.dev1
Member commented:

why do we need to jump to version 20 instead of 17?

Member Author replied:

We are jumping from 0.20.1 => 0.20.1.dev1; this is just a requirement that we haven't had to bump for quite some time.


Name: copr-backend
Version: 1.172
16 changes: 11 additions & 5 deletions backend/copr_backend/daemons/build_dispatcher.py
@@ -2,10 +2,11 @@
BuildDispatcher related classes.
"""

from copr_common.worker_manager import GroupWorkerLimit
from copr_common.worker_manager import HashWorkerLimit
from copr_backend.dispatcher import BackendDispatcher
from copr_backend.rpm_builds import (
ArchitectureWorkerLimit,
ArchitectureUserWorkerLimit,
BuildTagLimit,
RPMBuildWorkerManager,
BuildQueueTask,
@@ -83,17 +84,22 @@ def __init__(self, backend_opts):
super().__init__(backend_opts)
self.max_workers = backend_opts.builds_max_workers

for tag_type in ["arch", "tag"]:
lclass = ArchitectureWorkerLimit if tag_type == "arch" else \
BuildTagLimit
for tag_type in ["arch", "tag", "arch_per_owner"]:
match tag_type:
Member commented:

Whaaat, we have pattern matching in python?!
TIL ... :-)

Member Author (@praiskup) replied on Sep 15, 2023:

Yeah, but when someone says "pattern" matching, I expect it to work with fnmatch patterns (my shell deformation..) :-) TIL too, I actually thought this was just a dummy C switch-case alternative.

Member commented:

I am not a huge fan of switch-cases in Python; I'd personally use a tuple in a for loop to get rid of match, but it's just a syntax preference (a sketch of that alternative follows this hunk).

case "arch":
lclass = ArchitectureWorkerLimit
case "tag":
lclass = BuildTagLimit
case "arch_per_owner":
lclass = ArchitectureUserWorkerLimit
for tag, limit in backend_opts.builds_limits[tag_type].items():
self.log.info("setting %s(%s) limit to %s", tag_type, tag, limit)
self.limits.append(lclass(tag, limit))

for limit_type in ['sandbox', 'owner']:
max_builders = backend_opts.builds_limits[limit_type]
self.log.info("setting %s limit to %s", limit_type, max_builders)
self.limits.append(GroupWorkerLimit(
self.limits.append(HashWorkerLimit(
lambda x, limit=limit_type: getattr(x, limit),
max_builders,
name=limit_type,
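
For reference, a minimal sketch of the mapping-based alternative the reviewer mentions above. It is not part of the PR; it only reuses the names visible in the diff (ArchitectureWorkerLimit, BuildTagLimit, ArchitectureUserWorkerLimit, backend_opts.builds_limits) and would replace the match block inside BuildDispatcher.__init__:

# Sketch of the reviewer's suggestion: drive the loop with a dict instead
# of match/case. Intended to behave the same as the PR's code, assuming
# the same surrounding names (self.log, self.limits, backend_opts).
LIMIT_CLASSES = {
    "arch": ArchitectureWorkerLimit,
    "tag": BuildTagLimit,
    "arch_per_owner": ArchitectureUserWorkerLimit,
}
for tag_type, lclass in LIMIT_CLASSES.items():
    for tag, limit in backend_opts.builds_limits[tag_type].items():
        self.log.info("setting %s(%s) limit to %s", tag_type, tag, limit)
        self.limits.append(lclass(tag, limit))
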
4 changes: 2 additions & 2 deletions backend/copr_backend/helpers.py
@@ -215,9 +215,9 @@ def _get_limits_conf(parser):
"option. Please use format: "
"builds_max_workers_{0} = {1}1=COUNT,{1}2=COUNT")
err2 = ("Duplicate left value '{}' in 'builds_max_workers_{}' configuration")
limits = {"arch": {}, "tag": {}}
limits = {"arch": {}, "tag": {}, "arch_per_owner": {}}

for config_type in ["arch", "tag"]:
for config_type in ["arch", "tag", "arch_per_owner"]:
option = "builds_max_workers_{}".format(config_type)
raw = _get_conf(parser, "backend", option, None)
if raw:
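
As a side note on the option format handled here, a standalone sketch of the "left=COUNT,left=COUNT" parsing; the parse_limits() name is hypothetical, and the real parsing lives in _get_limits_conf, which is only partially shown in this diff:

def parse_limits(raw):
    """Turn e.g. 'ppc64le=15,s390x=10' into {'ppc64le': 15, 's390x': 10}."""
    limits = {}
    for item in raw.split(","):
        left, count = item.strip().split("=")
        if left in limits:
            # mirrors the "Duplicate left value" error message above
            raise ValueError(f"Duplicate left value '{left}'")
        limits[left] = int(count)
    return limits

assert parse_limits("ppc64le=15, s390x=10") == {"ppc64le": 15, "s390x": 10}
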
19 changes: 18 additions & 1 deletion backend/copr_backend/rpm_builds.py
@@ -2,7 +2,11 @@
Abstraction for RPM and SRPM builds on backend.
"""

from copr_common.worker_manager import WorkerManager, PredicateWorkerLimit
from copr_common.worker_manager import (
HashWorkerLimit,
WorkerManager,
PredicateWorkerLimit,
)
from copr_backend.worker_manager import BackendQueueTask
from copr_backend.helpers import get_chroot_arch

@@ -133,6 +137,19 @@ def predicate(x):
super().__init__(predicate, limit, name="arch_{}".format(architecture))


class ArchitectureUserWorkerLimit(HashWorkerLimit):
"""
Limit number of machines of specific architecture we give to a single
Copr owner (user or group).
"""
def __init__(self, architecture, limit):
super().__init__(
lambda x: f"{x.requested_arch}_{x.owner}",
limit,
name=f"arch_{architecture}_owner",
)


class BuildTagLimit(PredicateWorkerLimit):
"""
Limit the amount of concurrently running builds per given build tag.
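
To illustrate what the new hasher groups on, a small example; DummyTask and the owner names are invented for this illustration, while real tasks are BuildQueueTask instances exposing requested_arch and owner:

# Each task is keyed by f"{requested_arch}_{owner}", so the limit counts
# workers separately for every (architecture, owner) pair.
class DummyTask:
    def __init__(self, requested_arch, owner):
        self.requested_arch = requested_arch
        self.owner = owner

def hasher(task):
    return f"{task.requested_arch}_{task.owner}"

tasks = [
    DummyTask("ppc64le", "alice"),
    DummyTask("ppc64le", "alice"),
    DummyTask("s390x", "alice"),
    DummyTask("ppc64le", "bob"),
]
print(sorted({hasher(t) for t in tasks}))
# ['ppc64le_alice', 'ppc64le_bob', 's390x_alice']
# A limit of 15 would then cap how many concurrent ppc64le workers
# "alice" alone can occupy, independently of "bob".
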
10 changes: 8 additions & 2 deletions backend/tests/test_config_reader.py
@@ -37,7 +37,8 @@ def get_minimal_config_file(self):
def test_minimal_file_and_defaults(self):
opts = BackendConfigReader(self.get_minimal_config_file()).read()
assert opts.destdir == "/tmp"
assert opts.builds_limits == {'arch': {}, 'tag': {}, 'owner': 20, 'sandbox': 10}
assert opts.builds_limits == {'arch': {}, 'tag': {}, 'owner': 20,
'sandbox': 10, 'arch_per_owner': {}}

def test_correct_build_limits(self):
opts = BackendConfigReader(
@@ -48,6 +49,7 @@ def test_correct_build_limits(self):
"builds_max_workers_tag = Power9=9\n"
"builds_max_workers_owner = 5\n"
"builds_max_workers_sandbox = 3\n"
"builds_max_workers_arch_per_owner = ppc64le=11, s390x=5\n"
))).read()
assert opts.builds_limits == {
'arch': {
@@ -58,7 +60,11 @@ def test_correct_build_limits(self):
'Power9': 9,
},
'owner': 5,
'sandbox': 3
'sandbox': 3,
'arch_per_owner': {
'ppc64le': 11,
's390x': 5,
},
}

@pytest.mark.parametrize("broken_config", [
18 changes: 12 additions & 6 deletions backend/tests/test_worker_limits.py
@@ -5,13 +5,14 @@
# pylint: disable=protected-access

from copr_common.worker_manager import (
GroupWorkerLimit,
HashWorkerLimit,
PredicateWorkerLimit,
StringCounter,
)
from copr_backend.worker_manager import BackendQueueTask
from copr_backend.rpm_builds import (
ArchitectureWorkerLimit,
ArchitectureUserWorkerLimit,
BuildTagLimit,
BuildQueueTask,
)
@@ -23,16 +24,19 @@
}, {
"build_id": 7,
"task_id": "7-fedora-rawhide-x86_64",
"chroot": "fedora-rawhide-x86_64",
"project_owner": "cecil",
"sandbox": "sb1",
}, {
"build_id": 4,
"task_id": "7-fedora-32-x86_64",
"chroot": "fedora-32-x86_64",
"project_owner": "bedrich",
"sandbox": "sb2",
}, {
"build_id": 4,
"task_id": "7-fedora-31-x86_64",
"chroot": "fedora-31-x86_64",
"project_owner": "bedrich",
"sandbox": "sb2",
"tags": ["special_requirement"],
@@ -89,7 +93,7 @@ def test_predicate_worker_limit_sometimes():
assert wl.check(_QT(5)) is False

def test_group_worker_limit():
wl = GroupWorkerLimit(lambda x: x.group, 2)
wl = HashWorkerLimit(lambda x: x.group, 2)
for _ in ["first", "cleared", "cleared"]:
for task in [0, 1, 2]:
wl.worker_added(str(task), _QT(str(task)))
@@ -111,10 +115,11 @@ def test_worker_limit_info():
limits = [
PredicateWorkerLimit(lambda _: True, 8),
PredicateWorkerLimit(lambda _: True, 8, name='allmatch'),
GroupWorkerLimit(lambda x: x.owner, 4),
GroupWorkerLimit(lambda x: x.sandbox, 2, name='sandbox'),
HashWorkerLimit(lambda x: x.owner, 4),
HashWorkerLimit(lambda x: x.sandbox, 2, name='sandbox'),
ArchitectureWorkerLimit("x86_64", 3),
ArchitectureWorkerLimit("aarch64", 2),
ArchitectureUserWorkerLimit("aarch64", 2),
BuildTagLimit("special_requirement", 1),
]
tasks = [BuildQueueTask(t) for t in TASKS]
@@ -126,10 +131,11 @@ def test_worker_limit_info():
'w:7-fedora-rawhide-x86_64, w:7-fedora-32-x86_64, w:7-fedora-31-x86_64',
"limit info: 'allmatch', matching: w:7, w:7-fedora-rawhide-x86_64, "
'w:7-fedora-32-x86_64, w:7-fedora-31-x86_64',
"limit info: Unnamed 'GroupWorkerLimit' limit, counter: cecil=2, bedrich=2",
"limit info: Unnamed 'HashWorkerLimit' limit, counter: cecil=2, bedrich=2",
"limit info: 'sandbox', counter: sb1=1, sb2=2",
"limit info: 'arch_x86_64'",
"limit info: 'arch_x86_64', matching: w:7-fedora-rawhide-x86_64, w:7-fedora-32-x86_64, w:7-fedora-31-x86_64",
"limit info: 'arch_aarch64'",
"limit info: 'arch_aarch64_owner', counter: None_cecil=1, x86_64_cecil=1, x86_64_bedrich=2",
"limit info: 'tag_special_requirement', matching: w:7-fedora-31-x86_64",
]

5 changes: 3 additions & 2 deletions common/copr_common/worker_manager.py
@@ -127,9 +127,10 @@ def __str__(self):
return ", ".join(items)


class GroupWorkerLimit(WorkerLimit):
class HashWorkerLimit(WorkerLimit):
"""
Assign task to groups, and set maximum number of workers per each group.
Assign tasks to groups per the return value of the HASHER(TASK) method. Set
maximum number of workers **per each such group**.
"""
def __init__(self, hasher, limit, name=None):
"""
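
A usage sketch of the renamed class, based on the worker_added()/check() calls visible in the tests above; the Task class and the expected return values are assumptions for illustration, not taken from this diff:

from copr_common.worker_manager import HashWorkerLimit

class Task:
    """Minimal stand-in for a queue task with an 'owner' attribute."""
    def __init__(self, owner):
        self.owner = owner

# At most two concurrent workers per owner group.
limit = HashWorkerLimit(lambda task: task.owner, 2, name="owner")
limit.worker_added("w1", Task("cecil"))
limit.worker_added("w2", Task("cecil"))
print(limit.check(Task("cecil")))    # expected: False, the group is full
print(limit.check(Task("bedrich")))  # expected: True, a different group
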
2 changes: 1 addition & 1 deletion common/python-copr-common.spec
@@ -16,7 +16,7 @@
%endif

Name: python-copr-common
Version: 0.20
Version: 0.20.1.dev1
Release: 1%{?dist}
Summary: Python code used by Copr

2 changes: 1 addition & 1 deletion common/setup.py
@@ -20,7 +20,7 @@

setup(
name='copr-common',
version="0.20",
version="0.20.1.dev1",
description=__description__,
long_description=long_description,
author=__author__,
2 changes: 1 addition & 1 deletion dist-git/copr-dist-git.spec
@@ -1,4 +1,4 @@
%global copr_common_version 0.16.4.dev
%global copr_common_version 0.20.1.dev1

Name: copr-dist-git
Version: 0.67
4 changes: 2 additions & 2 deletions dist-git/copr_dist_git/import_dispatcher.py
@@ -6,7 +6,7 @@
import sys
import logging
from copr_common.dispatcher import Dispatcher
from copr_common.worker_manager import GroupWorkerLimit
from copr_common.worker_manager import HashWorkerLimit
from copr_dist_git.importer import Importer, ImportWorkerManager


@@ -48,7 +48,7 @@ def __init__(self, opts):
for limit_type in ['sandbox', 'owner']:
limit = LIMITS[limit_type]
self.log.info("setting %s limit to %s", limit_type, limit)
self.limits.append(GroupWorkerLimit(
self.limits.append(HashWorkerLimit(
lambda x, limit=limit_type: getattr(x, limit),
limit,
name=limit_type,