diff --git a/nebulous-requirements-extractor/src/main/java/eu/nebulouscloud/optimiser/kubevela/KubevelaAnalyzer.java b/nebulous-requirements-extractor/src/main/java/eu/nebulouscloud/optimiser/kubevela/KubevelaAnalyzer.java
index 884d65e..0865902 100644
--- a/nebulous-requirements-extractor/src/main/java/eu/nebulouscloud/optimiser/kubevela/KubevelaAnalyzer.java
+++ b/nebulous-requirements-extractor/src/main/java/eu/nebulouscloud/optimiser/kubevela/KubevelaAnalyzer.java
@@ -197,22 +197,14 @@ public static Map<String, Integer> getNodeCount(String kubevela) throws JsonProc
     /**
      * Add the following requirements:
      *
      *
      *
      * @param reqs The list of requirements to add to.
-     * @param cloudIDs the Cloud IDs to filter for.
      */
-    public static void addNebulousRequirements(List<Requirement> reqs, Set<String> cloudIDs) {
+    public static void addNebulousRequirements(List<Requirement> reqs) {
         reqs.add(new AttributeRequirement("hardware", "ram", RequirementOperator.GEQ, "2048"));
-        if (cloudIDs != null && !cloudIDs.isEmpty()) {
-            reqs.add(new AttributeRequirement("cloud", "id",
-                RequirementOperator.IN, String.join(" ", cloudIDs)));
-        }
-
     }
 
     /**
@@ -346,14 +338,11 @@ private static long getMemoryRequirement(JsonNode c, String componentName) {
      * @param includeNebulousRequirements if true, include requirements for
      * minimum memory size, Ubuntu OS. These requirements ensure that the
      * node candidate can run the Nebulous software.
-     * @param cloudIDs The IDs of the clouds that the node candidates should
-     * come from. Will only be handled if non-null and
-     * includeNebulousRequirements is true.
      * @return a map of component name to (potentially empty) list of
      * requirements for that component. No requirements mean any node will
      * suffice. No requirements are generated for volume storage components.
      */
-    public static Map<String, List<Requirement>> getBoundedRequirements(JsonNode kubevela, boolean includeNebulousRequirements, Set<String> cloudIDs) {
+    public static Map<String, List<Requirement>> getBoundedRequirements(JsonNode kubevela, boolean includeNebulousRequirements) {
         Map<String, List<Requirement>> result = new HashMap<>();
         ArrayNode components = kubevela.withArray("/spec/components");
         for (final JsonNode c : components) {
@@ -361,7 +350,7 @@ public static Map<String, List<Requirement>> getBoundedRequirements(JsonNode kub
             String componentName = c.get("name").asText();
             ArrayList<Requirement> reqs = new ArrayList<>();
             if (includeNebulousRequirements) {
-                addNebulousRequirements(reqs, cloudIDs);
+                addNebulousRequirements(reqs);
             }
             long cores = getCpuRequirement(c, componentName);
             if (cores > 0) {
@@ -390,8 +379,8 @@ public static Map<String, List<Requirement>> getBoundedRequirements(JsonNode kub
      *
      * @see #getBoundedRequirements(JsonNode, boolean)
      */
-    public static Map<String, List<Requirement>> getBoundedRequirements(JsonNode kubevela, Set<String> cloudIDs) {
-        return getBoundedRequirements(kubevela, true, cloudIDs);
+    public static Map<String, List<Requirement>> getBoundedRequirements(JsonNode kubevela) {
+        return getBoundedRequirements(kubevela, true);
     }
 
     /**
@@ -401,13 +390,13 @@ public static Map<String, List<Requirement>> getBoundedRequirements(JsonNode kub
      * cpu >= 2, cpu <= 4. Take care to not ask for less than 2048Mb of
      * memory since that's the minimum Nebulous requirement for now.
      */
-    public static Map<String, List<Requirement>> getClampedRequirements(JsonNode kubevela, Set<String> cloudIDs) {
+    public static Map<String, List<Requirement>> getClampedRequirements(JsonNode kubevela) {
         Map<String, List<Requirement>> result = new HashMap<>();
         ArrayNode components = kubevela.withArray("/spec/components");
         for (final JsonNode c : components) {
             String componentName = c.get("name").asText();
             ArrayList<Requirement> reqs = new ArrayList<>();
-            addNebulousRequirements(reqs, cloudIDs);
+            addNebulousRequirements(reqs);
             long cores = getCpuRequirement(c, componentName);
             if (cores > 0) {
                 reqs.add(new AttributeRequirement("hardware", "cores",
@@ -441,13 +430,13 @@ public static Map<String, List<Requirement>> getClampedRequirements(JsonNode kub
      * asking for >= or <=. Note that we still ask for >= 2048 Mb since
      * that's the nebulous lower bound for now.
      */
-    public static Map<String, List<Requirement>> getPreciseRequirements(JsonNode kubevela, Set<String> cloudIDs) {
+    public static Map<String, List<Requirement>> getPreciseRequirements(JsonNode kubevela) {
         Map<String, List<Requirement>> result = new HashMap<>();
         ArrayNode components = kubevela.withArray("/spec/components");
         for (final JsonNode c : components) {
             String componentName = c.get("name").asText();
             ArrayList<Requirement> reqs = new ArrayList<>();
-            addNebulousRequirements(reqs, cloudIDs);
+            addNebulousRequirements(reqs);
             long cores = getCpuRequirement(c, componentName);
             if (cores > 0) {
                 reqs.add(new AttributeRequirement("hardware", "cores",
@@ -475,16 +464,13 @@ public static Map<String, List<Requirement>> getPreciseRequirements(JsonNode kub
      *
      * @see #getBoundedRequirements(JsonNode)
      * @param kubevela The KubeVela file, as a YAML string.
-     * @param cloudIDs The IDs of the clouds that the node candidates should
-     * come from. Will only be handled if non-null and
-     * includeNebulousRequirements is true.
      * @return a map of component name to (potentially empty, except for OS
      * family) list of requirements for that component. No requirements mean
      * any node will suffice.
      * @throws JsonProcessingException if kubevela does not contain valid YAML.
      */
-    public static Map<String, List<Requirement>> getBoundedRequirements(String kubevela, Set<String> cloudIDs) throws JsonProcessingException {
-        return getBoundedRequirements(parseKubevela(kubevela), cloudIDs);
+    public static Map<String, List<Requirement>> getBoundedRequirements(String kubevela) throws JsonProcessingException {
+        return getBoundedRequirements(parseKubevela(kubevela));
     }
 
     /**
diff --git a/optimiser-controller/src/main/java/eu/nebulouscloud/optimiser/controller/NebulousAppDeployer.java b/optimiser-controller/src/main/java/eu/nebulouscloud/optimiser/controller/NebulousAppDeployer.java
index bf015c6..5825858 100644
--- a/optimiser-controller/src/main/java/eu/nebulouscloud/optimiser/controller/NebulousAppDeployer.java
+++ b/optimiser-controller/src/main/java/eu/nebulouscloud/optimiser/controller/NebulousAppDeployer.java
@@ -13,6 +13,8 @@
 import org.ow2.proactive.sal.model.AttributeRequirement;
 import org.ow2.proactive.sal.model.NodeCandidate;
 import org.ow2.proactive.sal.model.NodeCandidate.NodeCandidateTypeEnum;
+import org.ow2.proactive.sal.model.NodeType;
+import org.ow2.proactive.sal.model.NodeTypeRequirement;
 import org.ow2.proactive.sal.model.Requirement;
 import org.ow2.proactive.sal.model.RequirementOperator;
 import com.fasterxml.jackson.core.JsonProcessingException;
@@ -40,15 +42,35 @@ public class NebulousAppDeployer {
      * This machine runs the Kubernetes cluster and KubeVela. For
      * now, we ask for 8GB memory and 4 cores.
      */
-    public static List<Requirement> getControllerRequirements(String jobID, Set<String> cloudIDs) {
+    public static List<Requirement> getControllerRequirements(String jobID) {
         List<Requirement> reqs = new ArrayList<>(
             Arrays.asList(
                 new AttributeRequirement("hardware", "ram", RequirementOperator.GEQ, "8192"),
                 new AttributeRequirement("hardware", "cores", RequirementOperator.GEQ, "4")));
-        KubevelaAnalyzer.addNebulousRequirements(reqs, cloudIDs);
         return reqs;
     }
 
+    /**
+     * Given a list of requirements, create one list each for each of the
+     * cloud providers the app wants to be deployed on. This transforms a
+     * list of requirements suitable for {@link
+     * ExnConnector#findNodeCandidates} into a value suitable for {@link
+     * ExnConnector#findNodeCandidatesMultiple}.
+     */
+    private static List<List<Requirement>> perCloudRequirements(List<Requirement> requirements, Map<String, Set<String>> clouds) {
+        List<List<Requirement>> result = new ArrayList<>();
+        clouds.forEach((id, regions) -> {
+            List<Requirement> cloud_reqs = new ArrayList<>(requirements);
+            cloud_reqs.add(new NodeTypeRequirement(List.of(NodeType.IAAS), "", ""));
+            cloud_reqs.add(new AttributeRequirement("cloud", "id", RequirementOperator.EQ, id));
+            if (!regions.isEmpty()) {
+                cloud_reqs.add(new AttributeRequirement("location", "name", RequirementOperator.IN, String.join(" ", regions)));
+            }
+            result.add(cloud_reqs);
+        });
+        return result;
+    }
+
     /**
      * Produce a fresh KubeVela specification with added node affinity traits
      * and without resource specifications.
@@ -284,9 +306,9 @@ public static void deployApplication(NebulousApp app, JsonNode kubevela) {
 
         // ------------------------------------------------------------
         // Extract node requirements
-        Map<String, List<Requirement>> componentRequirements = KubevelaAnalyzer.getBoundedRequirements(kubevela, app.getClouds().keySet());
+        Map<String, List<Requirement>> componentRequirements = KubevelaAnalyzer.getBoundedRequirements(kubevela);
         Map<String, Integer> nodeCounts = KubevelaAnalyzer.getNodeCount(kubevela);
-        List<Requirement> controllerRequirements = getControllerRequirements(appUUID, app.getClouds().keySet());
+        List<Requirement> controllerRequirements = getControllerRequirements(appUUID);
         // // HACK: do this only when cloud id = nrec
         // componentRequirements.forEach(
         //     (k, reqs) -> reqs.add(new AttributeRequirement("location", "name", RequirementOperator.EQ, "bgo")));
@@ -317,9 +339,7 @@ public static void deployApplication(NebulousApp app, JsonNode kubevela) {
 
         // ----------------------------------------
         // Find node candidates
-
-        // TODO: filter by app resources / cloud? (check enabled: true in resources array)
-        List<NodeCandidate> controllerCandidates = conn.findNodeCandidates(controllerRequirements, appUUID);
+        List<NodeCandidate> controllerCandidates = conn.findNodeCandidatesMultiple(perCloudRequirements(controllerRequirements, app.getClouds()), appUUID);
         if (controllerCandidates.isEmpty()) {
             log.error("Could not find node candidates for requirements: {}, aborting deployment",
                 controllerRequirements);
@@ -330,8 +350,7 @@ public static void deployApplication(NebulousApp app, JsonNode kubevela) {
         for (Map.Entry<String, List<Requirement>> e : componentRequirements.entrySet()) {
             String nodeName = e.getKey();
             List<Requirement> requirements = e.getValue();
-            // TODO: filter by app resources / cloud? (check enabled: true in resources array)
-            List<NodeCandidate> candidates = conn.findNodeCandidates(requirements, appUUID);
+            List<NodeCandidate> candidates = conn.findNodeCandidatesMultiple(perCloudRequirements(requirements, app.getClouds()), appUUID);
             if (candidates.isEmpty()) {
                 log.error("Could not find node candidates for for node {}, requirements: {}, aborting deployment", nodeName, requirements);
                 app.setStateFailed();
@@ -601,7 +620,7 @@ public static void redeployApplication(NebulousApp app, ObjectNode updatedKubeve
 
         // ------------------------------------------------------------
         // 1. Extract node requirements
-        Map<String, List<Requirement>> componentRequirements = KubevelaAnalyzer.getBoundedRequirements(updatedKubevela, app.getClouds().keySet());
+        Map<String, List<Requirement>> componentRequirements = KubevelaAnalyzer.getBoundedRequirements(updatedKubevela);
         Map<String, Integer> componentReplicaCounts = KubevelaAnalyzer.getNodeCount(updatedKubevela);
 
         Map<String, List<Requirement>> oldComponentRequirements = app.getComponentRequirements();
@@ -630,8 +649,7 @@ public static void redeployApplication(NebulousApp app, ObjectNode updatedKubeve
                 int nAdd = newCount - oldCount;
                 allMachineNames = componentNodeNames.get(componentName);
                 log.info("Node requirements unchanged but need to add {} nodes to component {}", nAdd, componentName);
-                // TODO: filter by app resources (check enabled: true in resources array)
-                List<NodeCandidate> candidates = conn.findNodeCandidates(newR, appUUID);
+                List<NodeCandidate> candidates = conn.findNodeCandidatesMultiple(perCloudRequirements(newR, app.getClouds()), appUUID);
                 if (candidates.isEmpty()) {
                     log.error("Could not find node candidates for requirements: {}", newR);
                     continue;
@@ -684,8 +702,7 @@ public static void redeployApplication(NebulousApp app, ObjectNode updatedKubeve
                 nodesToRemove.addAll(componentNodeNames.get(componentName));
                 allMachineNames = new HashSet<>();
                 log.info("Node requirements changed, need to redeploy all nodes of component {}", componentName);
-                // TODO: filter by app resources (check enabled: true in resources array)
-                List<NodeCandidate> candidates = conn.findNodeCandidates(newR, appUUID);
+                List<NodeCandidate> candidates = conn.findNodeCandidatesMultiple(perCloudRequirements(newR, app.getClouds()), appUUID);
                 if (candidates.size() == 0) {
                     log.error("Empty node candidate list for component {}, continuing without creating node", componentName);
                     continue;
diff --git a/optimiser-controller/src/test/java/eu/nebulouscloud/optimiser/controller/NebulousAppTests.java b/optimiser-controller/src/test/java/eu/nebulouscloud/optimiser/controller/NebulousAppTests.java
index b653059..b2c79f5 100644
--- a/optimiser-controller/src/test/java/eu/nebulouscloud/optimiser/controller/NebulousAppTests.java
+++ b/optimiser-controller/src/test/java/eu/nebulouscloud/optimiser/controller/NebulousAppTests.java
@@ -92,7 +92,7 @@ void calculateNodeRequirementsSize() throws IOException, URISyntaxException {
 
         String kubevela_str = Files.readString(getResourcePath("vela-deployment-v2.yml"), StandardCharsets.UTF_8);
         JsonNode kubevela = yaml_mapper.readTree(kubevela_str);
-        Map<String, List<Requirement>> requirements = KubevelaAnalyzer.getBoundedRequirements(kubevela, null);
+        Map<String, List<Requirement>> requirements = KubevelaAnalyzer.getBoundedRequirements(kubevela);
         // We could compare the requirements with what is contained in
         // KubeVela, or compare keys with component names, but this would
         // essentially duplicate the method code--so we just make sure the
@@ -104,7 +104,7 @@ void calculateNodeRequirementsSize() throws IOException, URISyntaxException {
     @Test
     void calculateServerlessRequirementsSize() throws IOException, URISyntaxException {
         JsonNode kubevela = KubevelaAnalyzer.parseKubevela(Files.readString(getResourcePath("serverless-deployment.yaml"), StandardCharsets.UTF_8));
-        Map<String, List<Requirement>> requirements = KubevelaAnalyzer.getBoundedRequirements(kubevela, null);
+        Map<String, List<Requirement>> requirements = KubevelaAnalyzer.getBoundedRequirements(kubevela);
         // We have one serverless component, so we need n-1 VMs
         assertTrue(requirements.size() == kubevela.withArray("/spec/components").size() - 1);
         // Check that we detect serverless components
@@ -130,7 +130,7 @@ void calculateRewrittenNodeRequirements() throws IOException, URISyntaxException
 
         ObjectNode replacements = solutions.withObject("VariableValues");
         ObjectNode kubevela1 = app.rewriteKubevelaWithSolution(replacements);
-        Map<String, List<Requirement>> requirements = KubevelaAnalyzer.getBoundedRequirements(kubevela1, null);
+        Map<String, List<Requirement>> requirements = KubevelaAnalyzer.getBoundedRequirements(kubevela1);
         // We could compare the requirements with what is contained in
         // KubeVela, or compare keys with component names, but this would
         // essentially duplicate the method code--so we just make sure the
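
For illustration (not part of the patch): a minimal, self-contained sketch of the per-cloud fan-out that the new perCloudRequirements helper performs, so the effect of replacing the single "cloud id IN ..." requirement can be seen in isolation. It assumes the clouds map has the shape implied by app.getClouds() in this patch (cloud id mapped to a possibly empty set of region names); the class name, the "aws-1" cloud id, and the region names are invented for the example ("nrec" is taken from the HACK comment above).

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.ow2.proactive.sal.model.AttributeRequirement;
import org.ow2.proactive.sal.model.NodeType;
import org.ow2.proactive.sal.model.NodeTypeRequirement;
import org.ow2.proactive.sal.model.Requirement;
import org.ow2.proactive.sal.model.RequirementOperator;

// Standalone illustration of the per-cloud fan-out added in this patch.
// Class name, cloud ids and region names are invented for the example.
public class PerCloudRequirementsDemo {
    public static void main(String[] args) {
        // Requirements as produced by getControllerRequirements(jobID) after this patch.
        List<Requirement> base = List.of(
            new AttributeRequirement("hardware", "ram", RequirementOperator.GEQ, "8192"),
            new AttributeRequirement("hardware", "cores", RequirementOperator.GEQ, "4"));
        // Assumed shape of app.getClouds(): cloud id -> region names (empty = no restriction).
        Map<String, Set<String>> clouds = Map.of(
            "aws-1", Set.of("eu-west-1", "eu-north-1"),
            "nrec", Set.of());

        // Same logic as the private perCloudRequirements helper: one requirement list
        // per cloud, each restricted to IaaS nodes of that cloud and, optionally, its regions.
        List<List<Requirement>> perCloud = new ArrayList<>();
        clouds.forEach((id, regions) -> {
            List<Requirement> reqs = new ArrayList<>(base);
            reqs.add(new NodeTypeRequirement(List.of(NodeType.IAAS), "", ""));
            reqs.add(new AttributeRequirement("cloud", "id", RequirementOperator.EQ, id));
            if (!regions.isEmpty()) {
                reqs.add(new AttributeRequirement("location", "name",
                    RequirementOperator.IN, String.join(" ", regions)));
            }
            perCloud.add(reqs);
        });

        // Each inner list becomes one candidate query via ExnConnector#findNodeCandidatesMultiple,
        // replacing the single "cloud id IN ..." requirement that addNebulousRequirements used to add.
        perCloud.forEach(System.out::println);
    }
}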