getNodeCount(String kubevela) throws JsonProcessingException {
+ return getNodeCount(parseKubevela(kubevela));
+ }
+
+ /**
+ * Extract node requirements from a KubeVela file in a form we can send to
+ * the SAL `findNodeCandidates` endpoint.
+ *
+ * We read the following attributes for each component:
+ *
+ * - `properties.cpu`, `properties.requests.cpu`: round up to next integer
+ * and generate requirement `hardware.cores`
+ *
+ * - `properties.memory`, `properties.requests.memory`: Handle "200Mi",
+ * "0.2Gi" and bare number, convert to MB and generate requirement
+ * `hardware.memory`
+ *
+ * Notes:
+ *
+ * - We add the requirement that OS family == Ubuntu.
+ *
+ * - For the first version, we specify all requirements as "greater or
+ * equal", i.e., we might not find precisely the node candidates that
+ * are asked for.
+ *
+ * - Related, KubeVela specifies "cpu" as a fractional value, while SAL
+ * wants the number of cores as a whole number. We round up to the
+ * nearest integer and ask for "this or more" cores, since we might end
+ * up with needing, e.g., 3 cores, which is not a configuration commonly
+ * provided by cloud providers.
+ *
+ * @param kubevela the parsed KubeVela file.
+ * @return a map of component name to (potentially empty, except for OS
+ * family) list of requirements for that component. No requirements mean
+ * any node will suffice.
+ */
+ public static Map> getRequirements(JsonNode kubevela) {
+ Map> result = new HashMap<>();
+ ArrayNode components = kubevela.withArray("/spec/components");
+ for (final JsonNode c : components) {
+ String componentName = c.get("name").asText();
+ ArrayList reqs = new ArrayList<>();
+ reqs.add(new AttributeRequirement("image", "operatingSystem.family",
+ RequirementOperator.IN, OperatingSystemFamily.UBUNTU.toString()));
+ JsonNode cpu = c.at("/properties/cpu");
+ if (cpu.isMissingNode()) cpu = c.at("/properties/resources/requests/cpu");
+ if (!cpu.isMissingNode()) {
+ // KubeVela has fractional core /cpu requirements, and the
+ // value might be given as a string instead of a number, so
+ // parse string in all cases.
+ double kubevela_cpu = -1;
+ try {
+ kubevela_cpu = Double.parseDouble(cpu.asText());
+ } catch (NumberFormatException e) {
+ log.warn("CPU spec in {} is not a number, value seen is {}",
+ componentName, cpu.asText());
+ }
+ long sal_cores = Math.round(Math.ceil(kubevela_cpu));
+ if (sal_cores > 0) {
+ reqs.add(new AttributeRequirement("hardware", "cores",
+ RequirementOperator.GEQ, Long.toString(sal_cores)));
+ } else {
+ // floatValue returns 0.0 if node is not numeric
+ log.warn("CPU of component {} is 0 or not a number, value seen is {}",
+ componentName, cpu.asText());
+ }
+ }
+ JsonNode memory = c.at("/properties/memory");
+ if (memory.isMissingNode()) cpu = c.at("/properties/resources/requests/memory");
+ if (!memory.isMissingNode()) {;
+ String sal_memory = memory.asText();
+ if (sal_memory.endsWith("Mi")) {
+ sal_memory = sal_memory.substring(0, sal_memory.length() - 2);
+ } else if (sal_memory.endsWith("Gi")) {
+ sal_memory = String.valueOf(Integer.parseInt(sal_memory.substring(0, sal_memory.length() - 2)) * 1024);
+ } else if (!memory.isNumber()) {
+ log.warn("Unsupported memory specification in component {} :{} (wanted 'Mi' or 'Gi') ",
+ componentName,
+ memory.asText());
+ sal_memory = null;
+ }
+ // Fall-through: we rewrote the KubeVela file and didn't add
+ // the "Mi" suffix, but it's a number
+ if (sal_memory != null) {
+ reqs.add(new AttributeRequirement("hardware", "memory",
+ RequirementOperator.GEQ, sal_memory));
+ }
+ }
+ for (final JsonNode t : c.withArray("/traits")) {
+ // TODO: Check for node affinity / geoLocation / country /
+ // node type (edge or cloud)
+ }
+ // Finally, add requirements for this job to the map
+ result.put(componentName, reqs);
+ }
+ return result;
+ }
+
+ /**
+ * Extract node requirements from a KubeVela file.
+ *
+ * @see #getRequirements(JsonNode)
+ * @param kubevela The KubeVela file, as a YAML string.
+ * @return a map of component name to (potentially empty, except for OS
+ * family) list of requirements for that component. No requirements mean
+ * any node will suffice.
+ * @throws JsonProcessingException if kubevela does not contain valid YAML.
+ */
+ public static Map> getRequirements(String kubevela) throws JsonProcessingException {
+ return getRequirements(parseKubevela(kubevela));
+ }
+
+ /**
+ * Convert YAML KubeVela into a parsed representation.
+ *
+ * Note that this method never returns null: invalid YAML raises
+ * {@code JsonProcessingException} instead (per the {@code readTree}
+ * contract), so callers can rely on a non-null result.
+ *
+ * @param kubevela The KubeVela YAML.
+ * @return A parsed (Jackson tree) representation of the KubeVela file; never null.
+ * @throws JsonProcessingException if kubevela does not contain valid YAML.
+ */
+ public static JsonNode parseKubevela(String kubevela) throws JsonProcessingException {
+ return yamlMapper.readTree(kubevela);
+ }
+
+ /**
+ * Serialize a parsed KubeVela representation back into YAML text.
+ *
+ * Inverse of {@link #parseKubevela(String)}.
+ *
+ * @param kubevela The parsed KubeVela file.
+ * @return the YAML serialization of kubevela.
+ * @throws JsonProcessingException if YAML cannot be generated from kubevela.
+ */
+ public static String generateKubevela(JsonNode kubevela) throws JsonProcessingException {
+ final String yaml = yamlMapper.writeValueAsString(kubevela);
+ return yaml;
+ }
+}
diff --git a/nebulous-requirements-extractor/src/main/java/eu/nebulouscloud/optimiser/kubevela/package-info.java b/nebulous-requirements-extractor/src/main/java/eu/nebulouscloud/optimiser/kubevela/package-info.java
new file mode 100644
index 0000000..6945cd0
--- /dev/null
+++ b/nebulous-requirements-extractor/src/main/java/eu/nebulouscloud/optimiser/kubevela/package-info.java
@@ -0,0 +1,7 @@
+/**
+ * This library provides the class {@link KubevelaAnalyzer}, which factors out
+ * common code to extract node requirements from KubeVela files.
+ *
+ * @author Rudolf Schlatte
+ */
+package eu.nebulouscloud.optimiser.kubevela;
diff --git a/optimiser-controller/build.gradle b/optimiser-controller/build.gradle
index f2420ce..2b1e612 100644
--- a/optimiser-controller/build.gradle
+++ b/optimiser-controller/build.gradle
@@ -57,6 +57,9 @@ dependencies {
// https://openproject.nebulouscloud.eu/projects/nebulous-collaboration-hub/wiki/asynchronous-messaging-specification
implementation 'eu.nebulouscloud:exn-connector-java:1.0-SNAPSHOT'
+ // Analysing KubeVela
+ implementation project(':nebulous-requirements-extractor')
+
// Use JUnit Jupiter for testing.
testImplementation 'org.junit.jupiter:junit-jupiter:5.10.1'
testRuntimeOnly 'org.junit.platform:junit-platform-launcher:1.10.1'
diff --git a/optimiser-controller/src/main/java/eu/nebulouscloud/optimiser/controller/NebulousAppDeployer.java b/optimiser-controller/src/main/java/eu/nebulouscloud/optimiser/controller/NebulousAppDeployer.java
index ef98b74..5b9ce68 100644
--- a/optimiser-controller/src/main/java/eu/nebulouscloud/optimiser/controller/NebulousAppDeployer.java
+++ b/optimiser-controller/src/main/java/eu/nebulouscloud/optimiser/controller/NebulousAppDeployer.java
@@ -1,29 +1,19 @@
package eu.nebulouscloud.optimiser.controller;
-import java.util.ArrayList;
-import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
-import java.util.stream.Collectors;
-import java.util.stream.IntStream;
-
+import eu.nebulouscloud.optimiser.kubevela.KubevelaAnalyzer;
import org.ow2.proactive.sal.model.AttributeRequirement;
import org.ow2.proactive.sal.model.CommandsInstallation;
-import org.ow2.proactive.sal.model.Communication;
-import org.ow2.proactive.sal.model.IaasDefinition;
-import org.ow2.proactive.sal.model.JobDefinition;
-import org.ow2.proactive.sal.model.JobInformation;
import org.ow2.proactive.sal.model.NodeCandidate;
import org.ow2.proactive.sal.model.NodeType;
import org.ow2.proactive.sal.model.NodeTypeRequirement;
import org.ow2.proactive.sal.model.OperatingSystemFamily;
import org.ow2.proactive.sal.model.Requirement;
import org.ow2.proactive.sal.model.RequirementOperator;
-import org.ow2.proactive.sal.model.TaskDefinition;
-
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
@@ -80,134 +70,6 @@ public static List getControllerRequirements(String jobID) {
new AttributeRequirement("hardware", "cpu", RequirementOperator.GEQ, "4"));
}
- /**
- * Given a KubeVela file, extract how many nodes to deploy for
- * each component. Note that this can be zero, when the component
- * should not be deployed at all, e.g., when there is a cloud and
- * an edge version of the component.
- *
- * We currently look for the following component trait:
- *
- * {@code
- * traits:
- * - type: scaler
- * properties:
- * replicas: 2
- * }
- *
- * @param kubevela the parsed KubeVela file.
- * @return A map from component name to number of instances to generate.
- */
- public static Map getNodeCountFromKubevela (JsonNode kubevela) {
- Map result = new HashMap<>();
- ArrayNode components = kubevela.withArray("/spec/components");
- for (final JsonNode c : components) {
- result.put(c.get("name").asText(), 1); // default value
- for (final JsonNode t : c.withArray("/traits")) {
- if (t.at("/type").asText().equals("scaler")
- && t.at("/properties/replicas").canConvertToExactIntegral())
- {
- result.put(c.get("name").asText(),
- t.at("/properties/replicas").asInt());
- }
- }
- }
- return result;
- }
-
- /**
- * Given a KubeVela file, extract its VM requirements in a form we can
- * send to the SAL `findNodeCandidates` endpoint.
- *
- * We add the requirement that OS family == Ubuntu.
- *
- * We read the following attributes for each component:
- *
- * - `properties.cpu`, `properties.requests.cpu`: round up to next integer
- * and generate requirement `hardware.cores`
- *
- * - `properties.memory`, `properties.requests.memory`: Handle "200Mi",
- * "0.2Gi" and bare number, convert to MB and generate requirement
- * `hardware.memory`
- *
- * Notes:
- *
- * - For the first version, we specify all requirements as "greater or
- * equal", i.e., we might not find precisely the node candidates that
- * are asked for.
- *
- * - Related, KubeVela specifies "cpu" as a fractional value, while SAL
- * wants the number of cores as a whole number. We round up to the
- * nearest integer and ask for "this or more" cores, since we might end
- * up with “strange” numbers of cores.
- *
- * @param kubevela the parsed KubeVela file.
- * @return a map of component name to (potentially empty) list of
- * requirements for that component. No requirements mean any node will
- * suffice.
- */
- public static Map> getWorkerRequirementsFromKubevela(JsonNode kubevela) {
- Map> result = new HashMap<>();
- ArrayNode components = kubevela.withArray("/spec/components");
- for (final JsonNode c : components) {
- String componentName = c.get("name").asText();
- ArrayList reqs = new ArrayList<>();
- reqs.add(new AttributeRequirement("image", "operatingSystem.family",
- RequirementOperator.IN, OperatingSystemFamily.UBUNTU.toString()));
- JsonNode cpu = c.at("/properties/cpu");
- if (cpu.isMissingNode()) cpu = c.at("/properties/resources/requests/cpu");
- if (!cpu.isMissingNode()) {
- // KubeVela has fractional core /cpu requirements, and the
- // value might be given as a string instead of a number, so
- // parse string in all cases.
- double kubevela_cpu = -1;
- try {
- kubevela_cpu = Double.parseDouble(cpu.asText());
- } catch (NumberFormatException e) {
- log.warn("CPU spec in {} is not a number, value seen is {}",
- componentName, cpu.asText());
- }
- long sal_cores = Math.round(Math.ceil(kubevela_cpu));
- if (sal_cores > 0) {
- reqs.add(new AttributeRequirement("hardware", "cores",
- RequirementOperator.GEQ, Long.toString(sal_cores)));
- } else {
- // floatValue returns 0.0 if node is not numeric
- log.warn("CPU of component {} is 0 or not a number, value seen is {}",
- componentName, cpu.asText());
- }
- }
- JsonNode memory = c.at("/properties/memory");
- if (memory.isMissingNode()) cpu = c.at("/properties/resources/requests/memory");
- if (!memory.isMissingNode()) {;
- String sal_memory = memory.asText();
- if (sal_memory.endsWith("Mi")) {
- sal_memory = sal_memory.substring(0, sal_memory.length() - 2);
- } else if (sal_memory.endsWith("Gi")) {
- sal_memory = String.valueOf(Integer.parseInt(sal_memory.substring(0, sal_memory.length() - 2)) * 1024);
- } else if (!memory.isNumber()) {
- log.warn("Unsupported memory specification in component {} :{} (wanted 'Mi' or 'Gi') ",
- componentName,
- memory.asText());
- sal_memory = null;
- }
- // Fall-through: we rewrote the KubeVela file and didn't add
- // the "Mi" suffix, but it's a number
- if (sal_memory != null) {
- reqs.add(new AttributeRequirement("hardware", "memory",
- RequirementOperator.GEQ, sal_memory));
- }
- }
- for (final JsonNode t : c.withArray("/traits")) {
- // TODO: Check for node affinity / geoLocation / country /
- // node type (edge or cloud)
- }
- // Finally, add requirements for this job to the map
- result.put(componentName, reqs);
- }
- return result;
- }
-
/**
* Produce a fresh KubeVela specification with added node affinity traits.
*
@@ -280,8 +142,8 @@ public static void deployApplication(NebulousApp app, JsonNode kubevela) {
// ------------------------------------------------------------
// 1. Extract node requirements
- Map> workerRequirements = getWorkerRequirementsFromKubevela(kubevela);
- Map nodeCounts = getNodeCountFromKubevela(kubevela);
+ Map> workerRequirements = KubevelaAnalyzer.getRequirements(kubevela);
+ Map nodeCounts = KubevelaAnalyzer.getNodeCount(kubevela);
List controllerRequirements = getControllerRequirements(appUUID);
Main.logFile("worker-requirements-" + appUUID + ".txt", workerRequirements);
@@ -290,25 +152,25 @@ public static void deployApplication(NebulousApp app, JsonNode kubevela) {
// ----------------------------------------
// 2. Find node candidates
- ArrayNode controllerCandidates = SalConnector.findNodeCandidates(controllerRequirements, appUUID);
- if (controllerCandidates.isEmpty()) {
- log.error("Could not find node candidates for requirements: {}",
- controllerRequirements, keyValue("appId", appUUID));
- // Continue here while we don't really deploy
- // return;
- }
- Map workerCandidates = new HashMap<>();
- for (Map.Entry> e : workerRequirements.entrySet()) {
- String nodeName = e.getKey();
- List requirements = e.getValue();
- ArrayNode candidates = SalConnector.findNodeCandidates(requirements, appUUID);
- if (candidates.isEmpty()) {
- log.error("Could not find node candidates for requirements: {}", requirements);
- // Continue here while we don't really deploy
- // return;
- }
- workerCandidates.put(nodeName, candidates);
- }
+ // ArrayNode controllerCandidates = SalConnector.findNodeCandidates(controllerRequirements, appUUID);
+ // if (controllerCandidates.isEmpty()) {
+ // log.error("Could not find node candidates for requirements: {}",
+ // controllerRequirements, keyValue("appId", appUUID));
+ // // Continue here while we don't really deploy
+ // // return;
+ // }
+ // Map workerCandidates = new HashMap<>();
+ // for (Map.Entry> e : workerRequirements.entrySet()) {
+ // String nodeName = e.getKey();
+ // List requirements = e.getValue();
+ // ArrayNode candidates = SalConnector.findNodeCandidates(requirements, appUUID);
+ // if (candidates.isEmpty()) {
+ // log.error("Could not find node candidates for requirements: {}", requirements);
+ // // Continue here while we don't really deploy
+ // // return;
+ // }
+ // workerCandidates.put(nodeName, candidates);
+ // }
// ------------------------------------------------------------
// 3. Select node candidates
@@ -334,17 +196,17 @@ public static void deployApplication(NebulousApp app, JsonNode kubevela) {
// candidate is an edge node, we should select it and fill the
// rest of the nodes with second-best cloud nodes.
- // TODO: make sure we only choose the same edge node once; it
- // might be in all node candidate lists :)
- if (!workerCandidates.get(componentName).isEmpty()) {
- // should always be true, except currently we don't abort
- // in Step 2 if we don't find candidates.
- JsonNode candidate = workerCandidates.get(componentName).get(0);
- NodeCandidate c = mapper.convertValue(((ObjectNode)candidate).deepCopy()
- .remove(List.of("score", "ranking")),
- NodeCandidate.class);
- nodeNameToCandidate.put(nodeName, c);
- }
+ // // TODO: make sure we only choose the same edge node once; it
+ // // might be in all node candidate lists :)
+ // if (!workerCandidates.get(componentName).isEmpty()) {
+ // // should always be true, except currently we don't abort
+ // // in Step 2 if we don't find candidates.
+ // JsonNode candidate = workerCandidates.get(componentName).get(0);
+ // NodeCandidate c = mapper.convertValue(((ObjectNode)candidate).deepCopy()
+ // .remove(List.of("score", "ranking")),
+ // NodeCandidate.class);
+ // nodeNameToCandidate.put(nodeName, c);
+ // }
}
app.getComponentMachineNames().put(componentName, nodeNames);
}
@@ -405,8 +267,8 @@ public static void redeployApplication(NebulousApp app, ObjectNode kubevela) {
// ------------------------------------------------------------
// 1. Extract node requirements
- Map> workerRequirements = getWorkerRequirementsFromKubevela(kubevela);
- Map nodeCounts = getNodeCountFromKubevela(kubevela);
+ Map> workerRequirements = KubevelaAnalyzer.getRequirements(kubevela);
+ Map nodeCounts = KubevelaAnalyzer.getNodeCount(kubevela);
List controllerRequirements = getControllerRequirements(appUUID);
Main.logFile("worker-requirements-" + appUUID + ".txt", workerRequirements);
diff --git a/optimiser-controller/src/test/java/eu/nebulouscloud/optimiser/controller/NebulousAppTests.java b/optimiser-controller/src/test/java/eu/nebulouscloud/optimiser/controller/NebulousAppTests.java
index f4b0d2d..a31b87c 100644
--- a/optimiser-controller/src/test/java/eu/nebulouscloud/optimiser/controller/NebulousAppTests.java
+++ b/optimiser-controller/src/test/java/eu/nebulouscloud/optimiser/controller/NebulousAppTests.java
@@ -8,6 +8,8 @@
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
+import eu.nebulouscloud.optimiser.kubevela.KubevelaAnalyzer;
+
import java.io.IOException;
import java.net.URISyntaxException;
import java.net.URL;
@@ -89,7 +91,7 @@ void calculateNodeRequirements() throws IOException, URISyntaxException {
String kubevela_str = Files.readString(getResourcePath("vela-deployment-v2.yml"),
StandardCharsets.UTF_8);
JsonNode kubevela = yaml_mapper.readTree(kubevela_str);
- Map> requirements = NebulousAppDeployer.getWorkerRequirementsFromKubevela(kubevela);
+ Map> requirements = KubevelaAnalyzer.getRequirements(kubevela);
// We could compare the requirements with what is contained in
// KubeVela, or compare keys with component names, but this would
// essentially duplicate the method code--so we just make sure the
@@ -109,7 +111,7 @@ void calculateRewrittenNodeRequirements() throws IOException, URISyntaxException
ObjectNode replacements = solutions.withObject("VariableValues");
ObjectNode kubevela1 = app.rewriteKubevelaWithSolution(replacements);
- Map> requirements = NebulousAppDeployer.getWorkerRequirementsFromKubevela(kubevela1);
+ Map> requirements = KubevelaAnalyzer.getRequirements(kubevela1);
// We could compare the requirements with what is contained in
// KubeVela, or compare keys with component names, but this would
// essentially duplicate the method code--so we just make sure the
diff --git a/settings.gradle b/settings.gradle
index e438ea9..c8e1b75 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -12,4 +12,4 @@ plugins {
rootProject.name = 'optimiser-controller'
-include('optimiser-controller')
+include('optimiser-controller', 'nebulous-requirements-extractor')
diff --git a/zuul.d/jobs.yaml b/zuul.d/jobs.yaml
index 248d607..eb69286 100644
--- a/zuul.d/jobs.yaml
+++ b/zuul.d/jobs.yaml
@@ -8,11 +8,11 @@
- nebulous-optimiser-controller-container-images
description: Build the container images.
files: &image_files
- - ^optimiser-controller/
+ - ^/
vars: &image_vars
promote_container_image_job: nebulous-optimiser-controller-upload-container-images
container_images:
- - context: optimiser-controller
+ - context: .
registry: quay.io
repository: quay.io/nebulous/optimiser-controller
namespace: nebulous
@@ -44,7 +44,7 @@
description: Run Hadolint on Dockerfile(s).
vars:
dockerfiles:
- - optimiser-controller/Dockerfile
+ - Dockerfile
- job:
name: nebulous-optimiser-controller-helm-lint
@@ -70,3 +70,24 @@
vars:
helm_charts:
nebulous-optimiser-controller: ./charts/nebulous-optimiser-controller
+
+- job:
+ name: nebulous-optimiser-controller-java-build-java-libraries
+ parent: nebulous-build-java-libraries
+ provides:
+ - nebulous-optimiser-controller-java-java-libraries
+ description: Build the java libraries.
+ files: &library_files
+ - ^nebulous-requirements-extractor/
+ vars: &library_vars
+ java_libraries:
+ - context: nebulous-requirements-extractor
+
+- job:
+ name: nebulous-optimiser-controller-java-upload-java-libraries
+ parent: nebulous-upload-java-libraries
+ provides:
+ - nebulous-optimiser-controller-java-java-libraries
+ description: Build and upload the java libraries.
+ files: *library_files
+ vars: *library_vars
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index 7210db0..4c4bda9 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -6,6 +6,7 @@
- nebulous-optimiser-controller-build-container-images
- nebulous-optimiser-controller-hadolint
- nebulous-platform-apply-helm-charts
+ - nebulous-optimiser-controller-java-build-java-libraries
- nox-linters
gate:
jobs:
@@ -14,7 +15,9 @@
- nebulous-optimiser-controller-upload-container-images
- nebulous-optimiser-controller-hadolint
- nebulous-platform-apply-helm-charts
+ - nebulous-optimiser-controller-java-build-java-libraries
- nox-linters
promote:
jobs:
+ - nebulous-optimiser-controller-java-upload-java-libraries
- nebulous-optimiser-controller-promote-container-images