Extract KubeVela analysis to separate library
The static methods in the new class `eu.nebulouscloud.optimiser.kubevela.KubevelaAnalyzer` are used by multiple components in the NebulOuS Solver. Create a library `nebulous-requirements-extractor` that houses this class. Adapt the container build job to run at the project root, not inside the `optimiser-controller` subdirectory. Add jobs to build the library and upload it to the repository.

Change-Id: Ic61c7064d216201031659bc362e3ed218f9c2b67
This commit is contained in:
parent 12a9e8aaaf
commit 3f75c54bee
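For downstream components, typical use of the extracted class is a parse followed by one of the static analysis calls. A minimal sketch, assuming the library and its SAL dependency are on the classpath; the sample KubeVela document and component names are illustrative:

```java
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import eu.nebulouscloud.optimiser.kubevela.KubevelaAnalyzer;

import java.util.Map;

public class KubevelaAnalyzerDemo {
    public static void main(String[] args) throws JsonProcessingException {
        String yaml = """
            spec:
              components:
                - name: frontend
                  traits:
                    - type: scaler
                      properties:
                        replicas: 3
                - name: backend
            """;
        // Parse once, then reuse the tree for all analyses.
        JsonNode kubevela = KubevelaAnalyzer.parseKubevela(yaml);
        Map<String, Integer> counts = KubevelaAnalyzer.getNodeCount(kubevela);
        System.out.println(counts); // e.g. {backend=1, frontend=3}
    }
}
```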
12 .gitignore (vendored)
@@ -4,20 +4,26 @@ __pycache__/

# Gradle project-specific cache directory
.gradle
# Gradle build output directory
/optimiser-controller/build/

# Various output directories
build/
bin/
/nebulous-requirements-extractor/target/

# generated artefact directory
/optimiser-controller/dist/

# jdtls (Java LSP server) and/or eclipse data files
.classpath
.factorypath
.project
.settings/
/optimiser-controller/bin/

# IntelliJ IDEA configuration files
/.idea/

# Visual Studio Code files
/.vscode/

# Emacs files
/.dir-locals.el
@@ -10,5 +10,5 @@ RUN gradle --no-daemon -Dorg.gradle.logging.level=info clean build
 # Package stage
 #
 FROM docker.io/library/eclipse-temurin:17-jre
-COPY --from=build /home/optimiser-controller/dist/optimiser-controller-all.jar /usr/local/lib/optimiser-controller-all.jar
+COPY --from=build /home/optimiser-controller/optimiser-controller/dist/optimiser-controller-all.jar /usr/local/lib/optimiser-controller-all.jar
 ENTRYPOINT ["java","-jar","/usr/local/lib/optimiser-controller-all.jar", "-vv"]
@@ -27,7 +27,6 @@ To compile, install a JDK (Java Development Kit) version 17 or greater on the bu
 A container can be built and run with the following commands:
 
 ```sh
-cd optimiser-controller
 docker build -t optimiser-controller -f Dockerfile .
 docker run --rm optimiser-controller
 ```
13 nebulous-requirements-extractor/.mvn/local-settings.xml (new file)
@@ -0,0 +1,13 @@
<settings xmlns="http://maven.apache.org/SETTINGS/1.2.0"
          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
          xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.2.0 http://maven.apache.org/xsd/settings-1.2.0.xsd">
  <mirrors>
    <mirror>
      <id>my-repository-http-unblocker</id>
      <mirrorOf>activeeon</mirrorOf>
      <name></name>
      <url>http://repository.activeeon.com/content/groups/proactive/</url>
      <blocked>false</blocked>
    </mirror>
  </mirrors>
</settings>
1 nebulous-requirements-extractor/.mvn/maven.config (new file)
@@ -0,0 +1 @@
--settings .mvn/local-settings.xml
61 nebulous-requirements-extractor/build.gradle (new file)
@@ -0,0 +1,61 @@
/*
 * This file was generated by the Gradle 'init' task.
 *
 * This project uses @Incubating APIs which are subject to change.
 */

plugins {
    // Apply the java-library plugin for API and implementation separation.
    id 'java-library'
    // Use this to check for newer versions of dependency libraries via
    // ./gradlew dependencyUpdates
    id "com.github.ben-manes.versions" version "0.50.0"
    // https://docs.freefair.io/gradle-plugins/8.4/reference/#_lombok
    id "io.freefair.lombok" version "8.4"
}

repositories {
    // Use Maven Central for resolving dependencies.
    mavenCentral()

    // 7bulls, activeeon maven repositories for SAL
    maven {
        url 'http://repository.activeeon.com/content/groups/proactive/'
        allowInsecureProtocol = true
    }
}

dependencies {
    // JSON parsing; exported to consumers
    // https://github.com/FasterXML/jackson
    api 'com.fasterxml.jackson.core:jackson-databind:2.16.1'

    // SAL client library; exported to consumers
    api 'org.ow2.proactive:sal-common:13.1.0-SNAPSHOT'

    // YAML parsing: https://github.com/FasterXML/jackson-dataformats-text
    implementation 'com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:2.16.1'
    // Logging (only the API; backends will be set up by the application)
    implementation 'org.slf4j:slf4j-api:1.7.32'

}

testing {
    suites {
        // Configure the built-in test suite
        test {
            // Use JUnit Jupiter test framework
            useJUnitJupiter('5.10.0')
        }
    }
}

// Apply a specific Java toolchain to ease working on different environments.
java {
    toolchain {
        languageVersion = JavaLanguageVersion.of(17)
    }
}

group = 'eu.nebulouscloud'
version = '1.0'
13 nebulous-requirements-extractor/local-settings.xml (new file)
@@ -0,0 +1,13 @@
<settings xmlns="http://maven.apache.org/SETTINGS/1.2.0"
          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
          xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.2.0 http://maven.apache.org/xsd/settings-1.2.0.xsd">
  <mirrors>
    <mirror>
      <id>my-repository-http-unblocker</id>
      <mirrorOf>activeeon</mirrorOf>
      <name></name>
      <url>http://repository.activeeon.com/content/groups/proactive/</url>
      <blocked>false</blocked>
    </mirror>
  </mirrors>
</settings>
102 nebulous-requirements-extractor/pom.xml (new file)
@@ -0,0 +1,102 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>

  <groupId>eu.nebulouscloud</groupId>
  <artifactId>nebulous-requirements-extractor</artifactId>
  <version>1.0-SNAPSHOT</version>

  <properties>
    <!-- Define properties for versions -->
    <jackson.version>2.16.1</jackson.version>
    <sal.version>13.1.0-SNAPSHOT</sal.version>
    <slf4j.version>1.7.32</slf4j.version>
    <junit.jupiter.version>5.10.0</junit.jupiter.version>
  </properties>

  <dependencies>
    <!-- JSON parsing -->
    <dependency>
      <groupId>com.fasterxml.jackson.core</groupId>
      <artifactId>jackson-databind</artifactId>
      <version>${jackson.version}</version>
    </dependency>
    <dependency>
      <groupId>org.projectlombok</groupId>
      <artifactId>lombok</artifactId>
      <version>1.18.30</version>
    </dependency>
    <!-- SAL client library -->
    <dependency>
      <groupId>org.ow2.proactive</groupId>
      <artifactId>sal-common</artifactId>
      <version>${sal.version}</version>
      <exclusions>
        <exclusion>
          <groupId>org.apache.logging.log4j</groupId>
          <artifactId>log4j</artifactId>
        </exclusion>
      </exclusions>
    </dependency>

    <!-- YAML parsing -->
    <dependency>
      <groupId>com.fasterxml.jackson.dataformat</groupId>
      <artifactId>jackson-dataformat-yaml</artifactId>
      <version>${jackson.version}</version>
    </dependency>

    <!-- Logging API -->
    <dependency>
      <groupId>org.slf4j</groupId>
      <artifactId>slf4j-api</artifactId>
      <version>${slf4j.version}</version>
    </dependency>
  </dependencies>

  <build>
    <plugins>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-compiler-plugin</artifactId>
        <version>3.12.1</version>
        <configuration>
          <source>17</source>
          <target>17</target>
        </configuration>
      </plugin>
    </plugins>
  </build>

  <distributionManagement>
    <snapshotRepository>
      <id>ossrh</id>
      <url>https://s01.oss.sonatype.org/content/repositories/snapshots</url>
    </snapshotRepository>
    <repository>
      <id>ossrh</id>
      <url>https://s01.oss.sonatype.org/service/local/staging/deploy/maven2/</url>
    </repository>
  </distributionManagement>

  <repositories>
    <repository>
      <!-- Maven Central -->
      <id>central</id>
      <url>https://repo.maven.apache.org/maven2</url>
    </repository>
    <repository>
      <!-- Activeeon Repository -->
      <id>activeeon</id>
      <url>http://repository.activeeon.com/content/groups/proactive/</url>
      <name>repository.activeeon</name>
    </repository>
    <repository>
      <!-- Apache Log4j Repository -->
      <id>apache-log4j</id>
      <url>https://repo1.maven.org/maven2</url>
    </repository>
  </repositories>
</project>
210 nebulous-requirements-extractor/src/main/java/eu/nebulouscloud/optimiser/kubevela/KubevelaAnalyzer.java (new file; path inferred from package and standard Maven layout)
@@ -0,0 +1,210 @@
package eu.nebulouscloud.optimiser.kubevela;

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;

import lombok.extern.slf4j.Slf4j;

import org.ow2.proactive.sal.model.AttributeRequirement;
import org.ow2.proactive.sal.model.OperatingSystemFamily;
import org.ow2.proactive.sal.model.Requirement;
import org.ow2.proactive.sal.model.RequirementOperator;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * A collection of methods to extract node requirements from KubeVela files.
 */
@Slf4j
public class KubevelaAnalyzer {

    private static final ObjectMapper yamlMapper = new ObjectMapper(new YAMLFactory());

    /**
     * Given a KubeVela file, extract how many nodes to deploy for each
     * component.  Note that this can be zero when the component should not
     * be deployed at all.  This can happen for example when there is a
     * cloud and an edge version of the component and only one of them
     * should run.<p>
     *
     * We currently look for the following component trait:
     *
     * <pre>{@code
     * traits:
     *  - type: scaler
     *    properties:
     *      replicas: 2
     * }</pre>
     *
     * If this trait is not found for a component, its count will be 1.
     *
     * @param kubevela the parsed KubeVela file.
     * @return A map from component name to number of instances to generate.
     */
    public static Map<String, Integer> getNodeCount(JsonNode kubevela) {
        Map<String, Integer> result = new HashMap<>();
        ArrayNode components = kubevela.withArray("/spec/components");
        for (final JsonNode c : components) {
            result.put(c.get("name").asText(), 1); // default value; might get overwritten
            for (final JsonNode t : c.withArray("/traits")) {
                if (t.at("/type").asText().equals("scaler")
                    && t.at("/properties/replicas").canConvertToExactIntegral())
                {
                    result.put(c.get("name").asText(),
                        t.at("/properties/replicas").asInt());
                }
            }
        }
        return result;
    }

    /**
     * Extract node count from a KubeVela file.
     *
     * @see #getNodeCount(JsonNode)
     * @param kubevela The KubeVela file, as a YAML string.
     * @return A map from component name to number of instances to generate.
     * @throws JsonProcessingException if the argument does not contain valid YAML.
     */
    public static Map<String, Integer> getNodeCount(String kubevela) throws JsonProcessingException {
        return getNodeCount(parseKubevela(kubevela));
    }
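
    // Illustrative note: a scaler trait with "replicas: 0" yields count 0,
    // which callers treat as "do not deploy this component"; a component
    // without a scaler trait defaults to count 1.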

    /**
     * Extract node requirements from a KubeVela file in a form we can send
     * to the SAL `findNodeCandidates` endpoint. <p>
     *
     * We read the following attributes for each component:
     *
     * - `properties.cpu`, `properties.requests.cpu`: round up to next
     *   integer and generate requirement `hardware.cores`
     *
     * - `properties.memory`, `properties.requests.memory`: Handle "200Mi",
     *   "0.2Gi" and bare number, convert to MB and generate requirement
     *   `hardware.memory`
     *
     * Notes:<p>
     *
     * - We add the requirement that OS family == Ubuntu.<p>
     *
     * - For the first version, we specify all requirements as "greater or
     *   equal", i.e., we might not find precisely the node candidates that
     *   are asked for. <p>
     *
     * - Related, KubeVela specifies "cpu" as a fractional value, while SAL
     *   wants the number of cores as a whole number.  We round up to the
     *   nearest integer and ask for "this or more" cores, since we might
     *   end up needing, e.g., 3 cores, which is not a configuration
     *   commonly provided by cloud providers. <p>
     *
     * @param kubevela the parsed KubeVela file.
     * @return a map of component name to (potentially empty, except for OS
     *  family) list of requirements for that component.  No requirements
     *  mean any node will suffice.
     */
    public static Map<String, List<Requirement>> getRequirements(JsonNode kubevela) {
        Map<String, List<Requirement>> result = new HashMap<>();
        ArrayNode components = kubevela.withArray("/spec/components");
        for (final JsonNode c : components) {
            String componentName = c.get("name").asText();
            ArrayList<Requirement> reqs = new ArrayList<>();
            reqs.add(new AttributeRequirement("image", "operatingSystem.family",
                RequirementOperator.IN, OperatingSystemFamily.UBUNTU.toString()));
            JsonNode cpu = c.at("/properties/cpu");
            if (cpu.isMissingNode()) cpu = c.at("/properties/resources/requests/cpu");
            if (!cpu.isMissingNode()) {
                // KubeVela has fractional core / cpu requirements, and the
                // value might be given as a string instead of a number, so
                // parse the string in all cases.
                double kubevela_cpu = -1;
                try {
                    kubevela_cpu = Double.parseDouble(cpu.asText());
                } catch (NumberFormatException e) {
                    log.warn("CPU spec in {} is not a number, value seen is {}",
                        componentName, cpu.asText());
                }
                long sal_cores = Math.round(Math.ceil(kubevela_cpu));
                if (sal_cores > 0) {
                    reqs.add(new AttributeRequirement("hardware", "cores",
                        RequirementOperator.GEQ, Long.toString(sal_cores)));
                } else {
                    // floatValue returns 0.0 if node is not numeric
                    log.warn("CPU of component {} is 0 or not a number, value seen is {}",
                        componentName, cpu.asText());
                }
            }
            JsonNode memory = c.at("/properties/memory");
            if (memory.isMissingNode()) memory = c.at("/properties/resources/requests/memory");
            if (!memory.isMissingNode()) {
                String sal_memory = memory.asText();
                if (sal_memory.endsWith("Mi")) {
                    sal_memory = sal_memory.substring(0, sal_memory.length() - 2);
                } else if (sal_memory.endsWith("Gi")) {
                    // Parse as double so fractional specifications like
                    // "0.2Gi" work as documented; round up to whole MB.
                    sal_memory = String.valueOf((long)Math.ceil(
                        Double.parseDouble(sal_memory.substring(0, sal_memory.length() - 2)) * 1024));
                } else if (!memory.isNumber()) {
                    log.warn("Unsupported memory specification in component {} :{} (wanted 'Mi' or 'Gi') ",
                        componentName, memory.asText());
                    sal_memory = null;
                }
                // Fall-through: we rewrote the KubeVela file and didn't
                // add the "Mi" suffix, but it's a number
                if (sal_memory != null) {
                    reqs.add(new AttributeRequirement("hardware", "memory",
                        RequirementOperator.GEQ, sal_memory));
                }
            }
            for (final JsonNode t : c.withArray("/traits")) {
                // TODO: Check for node affinity / geoLocation / country /
                //   node type (edge or cloud)
            }
            // Finally, add requirements for this job to the map
            result.put(componentName, reqs);
        }
        return result;
    }

    /**
     * Extract node requirements from a KubeVela file.
     *
     * @see #getRequirements(JsonNode)
     * @param kubevela The KubeVela file, as a YAML string.
     * @return a map of component name to (potentially empty, except for OS
     *  family) list of requirements for that component.  No requirements
     *  mean any node will suffice.
     * @throws JsonProcessingException if kubevela does not contain valid YAML.
     */
    public static Map<String, List<Requirement>> getRequirements(String kubevela) throws JsonProcessingException {
        return getRequirements(parseKubevela(kubevela));
    }

    /**
     * Convert YAML KubeVela into a parsed representation.
     *
     * @param kubevela The KubeVela YAML.
     * @return A parsed representation of the KubeVela file.
     * @throws JsonProcessingException if kubevela does not contain valid YAML.
     */
    public static JsonNode parseKubevela(String kubevela) throws JsonProcessingException {
        return yamlMapper.readTree(kubevela);
    }

    /**
     * Convert the parsed representation of a KubeVela file to YAML.
     *
     * @param kubevela The parsed KubeVela file.
     * @return A YAML representation of the KubeVela file.
     * @throws JsonProcessingException if YAML cannot be generated from kubevela.
     */
    public static String generateKubevela(JsonNode kubevela) throws JsonProcessingException {
        return yamlMapper.writeValueAsString(kubevela);
    }
}
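The unit conversions that `getRequirements` performs can be exercised directly. A minimal sketch under the same classpath assumptions as above, with illustrative values:

```java
import com.fasterxml.jackson.core.JsonProcessingException;
import eu.nebulouscloud.optimiser.kubevela.KubevelaAnalyzer;
import org.ow2.proactive.sal.model.Requirement;

import java.util.List;
import java.util.Map;

public class RequirementsDemo {
    public static void main(String[] args) throws JsonProcessingException {
        // cpu 2.5 is rounded up to "hardware.cores >= 3";
        // "512Mi" becomes "hardware.memory >= 512" (megabytes).
        String yaml = """
            spec:
              components:
                - name: db
                  properties:
                    cpu: "2.5"
                    memory: "512Mi"
            """;
        Map<String, List<Requirement>> reqs = KubevelaAnalyzer.getRequirements(yaml);
        // Also contains the fixed "operatingSystem.family IN UBUNTU" requirement.
        reqs.get("db").forEach(System.out::println);
    }
}
```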
7 nebulous-requirements-extractor/src/main/java/eu/nebulouscloud/optimiser/kubevela/package-info.java (new file; path inferred from package and standard Maven layout)
@@ -0,0 +1,7 @@
/**
 * This library provides the class {@link KubevelaAnalyzer}, which factors
 * out common code to extract node requirements from KubeVela files.
 *
 * @author Rudolf Schlatte
 */
package eu.nebulouscloud.optimiser.kubevela;
@@ -57,6 +57,9 @@ dependencies {
     // https://openproject.nebulouscloud.eu/projects/nebulous-collaboration-hub/wiki/asynchronous-messaging-specification
     implementation 'eu.nebulouscloud:exn-connector-java:1.0-SNAPSHOT'
 
+    // Analysing KubeVela
+    implementation project(':nebulous-requirements-extractor')
+
     // Use JUnit Jupiter for testing.
     testImplementation 'org.junit.jupiter:junit-jupiter:5.10.1'
     testRuntimeOnly 'org.junit.platform:junit-platform-launcher:1.10.1'
@@ -1,29 +1,19 @@
package eu.nebulouscloud.optimiser.controller;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

import eu.nebulouscloud.optimiser.kubevela.KubevelaAnalyzer;
import org.ow2.proactive.sal.model.AttributeRequirement;
import org.ow2.proactive.sal.model.CommandsInstallation;
import org.ow2.proactive.sal.model.Communication;
import org.ow2.proactive.sal.model.IaasDefinition;
import org.ow2.proactive.sal.model.JobDefinition;
import org.ow2.proactive.sal.model.JobInformation;
import org.ow2.proactive.sal.model.NodeCandidate;
import org.ow2.proactive.sal.model.NodeType;
import org.ow2.proactive.sal.model.NodeTypeRequirement;
import org.ow2.proactive.sal.model.OperatingSystemFamily;
import org.ow2.proactive.sal.model.Requirement;
import org.ow2.proactive.sal.model.RequirementOperator;
import org.ow2.proactive.sal.model.TaskDefinition;

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
@@ -80,134 +70,6 @@ public class NebulousAppDeployer {
             new AttributeRequirement("hardware", "cpu", RequirementOperator.GEQ, "4"));
     }
 
-    /**
-     * Given a KubeVela file, extract how many nodes to deploy for
-     * each component.  Note that this can be zero, when the component
-     * should not be deployed at all, e.g., when there is a cloud and
-     * an edge version of the component.
-     *
-     * We currently look for the following component trait:
-     *
-     * <pre>{@code
-     * traits:
-     *  - type: scaler
-     *    properties:
-     *      replicas: 2
-     * }</pre>
-     *
-     * @param kubevela the parsed KubeVela file.
-     * @return A map from component name to number of instances to generate.
-     */
-    public static Map<String, Integer> getNodeCountFromKubevela (JsonNode kubevela) {
-        Map<String, Integer> result = new HashMap<>();
-        ArrayNode components = kubevela.withArray("/spec/components");
-        for (final JsonNode c : components) {
-            result.put(c.get("name").asText(), 1); // default value
-            for (final JsonNode t : c.withArray("/traits")) {
-                if (t.at("/type").asText().equals("scaler")
-                    && t.at("/properties/replicas").canConvertToExactIntegral())
-                {
-                    result.put(c.get("name").asText(),
-                        t.at("/properties/replicas").asInt());
-                }
-            }
-        }
-        return result;
-    }
-
-    /**
-     * Given a KubeVela file, extract its VM requirements in a form we can
-     * send to the SAL `findNodeCandidates` endpoint. <p>
-     *
-     * We add the requirement that OS family == Ubuntu.
-     *
-     * We read the following attributes for each component:
-     *
-     * - `properties.cpu`, `properties.requests.cpu`: round up to next integer
-     *   and generate requirement `hardware.cores`
-     *
-     * - `properties.memory`, `properties.requests.memory`: Handle "200Mi",
-     *   "0.2Gi" and bare number, convert to MB and generate requirement
-     *   `hardware.memory`
-     *
-     * Notes:<p>
-     *
-     * - For the first version, we specify all requirements as "greater or
-     *   equal", i.e., we might not find precisely the node candidates that
-     *   are asked for. <p>
-     *
-     * - Related, KubeVela specifies "cpu" as a fractional value, while SAL
-     *   wants the number of cores as a whole number.  We round up to the
-     *   nearest integer and ask for "this or more" cores, since we might end
-     *   up with “strange” numbers of cores. <p>
-     *
-     * @param kubevela the parsed KubeVela file.
-     * @return a map of component name to (potentially empty) list of
-     *  requirements for that component.  No requirements mean any node
-     *  will suffice.
-     */
-    public static Map<String, List<Requirement>> getWorkerRequirementsFromKubevela(JsonNode kubevela) {
-        Map<String, List<Requirement>> result = new HashMap<>();
-        ArrayNode components = kubevela.withArray("/spec/components");
-        for (final JsonNode c : components) {
-            String componentName = c.get("name").asText();
-            ArrayList<Requirement> reqs = new ArrayList<>();
-            reqs.add(new AttributeRequirement("image", "operatingSystem.family",
-                RequirementOperator.IN, OperatingSystemFamily.UBUNTU.toString()));
-            JsonNode cpu = c.at("/properties/cpu");
-            if (cpu.isMissingNode()) cpu = c.at("/properties/resources/requests/cpu");
-            if (!cpu.isMissingNode()) {
-                // KubeVela has fractional core /cpu requirements, and the
-                // value might be given as a string instead of a number, so
-                // parse string in all cases.
-                double kubevela_cpu = -1;
-                try {
-                    kubevela_cpu = Double.parseDouble(cpu.asText());
-                } catch (NumberFormatException e) {
-                    log.warn("CPU spec in {} is not a number, value seen is {}",
-                        componentName, cpu.asText());
-                }
-                long sal_cores = Math.round(Math.ceil(kubevela_cpu));
-                if (sal_cores > 0) {
-                    reqs.add(new AttributeRequirement("hardware", "cores",
-                        RequirementOperator.GEQ, Long.toString(sal_cores)));
-                } else {
-                    // floatValue returns 0.0 if node is not numeric
-                    log.warn("CPU of component {} is 0 or not a number, value seen is {}",
-                        componentName, cpu.asText());
-                }
-            }
-            JsonNode memory = c.at("/properties/memory");
-            if (memory.isMissingNode()) cpu = c.at("/properties/resources/requests/memory");
-            if (!memory.isMissingNode()) {;
-                String sal_memory = memory.asText();
-                if (sal_memory.endsWith("Mi")) {
-                    sal_memory = sal_memory.substring(0, sal_memory.length() - 2);
-                } else if (sal_memory.endsWith("Gi")) {
-                    sal_memory = String.valueOf(Integer.parseInt(sal_memory.substring(0, sal_memory.length() - 2)) * 1024);
-                } else if (!memory.isNumber()) {
-                    log.warn("Unsupported memory specification in component {} :{} (wanted 'Mi' or 'Gi') ",
-                        componentName,
-                        memory.asText());
-                    sal_memory = null;
-                }
-                // Fall-through: we rewrote the KubeVela file and didn't add
-                // the "Mi" suffix, but it's a number
-                if (sal_memory != null) {
-                    reqs.add(new AttributeRequirement("hardware", "memory",
-                        RequirementOperator.GEQ, sal_memory));
-                }
-            }
-            for (final JsonNode t : c.withArray("/traits")) {
-                // TODO: Check for node affinity / geoLocation / country /
-                //   node type (edge or cloud)
-            }
-            // Finally, add requirements for this job to the map
-            result.put(componentName, reqs);
-        }
-        return result;
-    }
-
     /**
      * Produce a fresh KubeVela specification with added node affinity traits.
      *
@@ -280,8 +142,8 @@ public class NebulousAppDeployer {
 
         // ------------------------------------------------------------
         // 1. Extract node requirements
-        Map<String, List<Requirement>> workerRequirements = getWorkerRequirementsFromKubevela(kubevela);
-        Map<String, Integer> nodeCounts = getNodeCountFromKubevela(kubevela);
+        Map<String, List<Requirement>> workerRequirements = KubevelaAnalyzer.getRequirements(kubevela);
+        Map<String, Integer> nodeCounts = KubevelaAnalyzer.getNodeCount(kubevela);
         List<Requirement> controllerRequirements = getControllerRequirements(appUUID);
 
         Main.logFile("worker-requirements-" + appUUID + ".txt", workerRequirements);
@@ -290,25 +152,25 @@ public class NebulousAppDeployer {
         // ----------------------------------------
         // 2. Find node candidates
 
-        ArrayNode controllerCandidates = SalConnector.findNodeCandidates(controllerRequirements, appUUID);
-        if (controllerCandidates.isEmpty()) {
-            log.error("Could not find node candidates for requirements: {}",
-                controllerRequirements, keyValue("appId", appUUID));
-            // Continue here while we don't really deploy
-            // return;
-        }
-        Map<String, ArrayNode> workerCandidates = new HashMap<>();
-        for (Map.Entry<String, List<Requirement>> e : workerRequirements.entrySet()) {
-            String nodeName = e.getKey();
-            List<Requirement> requirements = e.getValue();
-            ArrayNode candidates = SalConnector.findNodeCandidates(requirements, appUUID);
-            if (candidates.isEmpty()) {
-                log.error("Could not find node candidates for requirements: {}", requirements);
-                // Continue here while we don't really deploy
-                // return;
-            }
-            workerCandidates.put(nodeName, candidates);
-        }
+        // ArrayNode controllerCandidates = SalConnector.findNodeCandidates(controllerRequirements, appUUID);
+        // if (controllerCandidates.isEmpty()) {
+        //     log.error("Could not find node candidates for requirements: {}",
+        //         controllerRequirements, keyValue("appId", appUUID));
+        //     // Continue here while we don't really deploy
+        //     // return;
+        // }
+        // Map<String, ArrayNode> workerCandidates = new HashMap<>();
+        // for (Map.Entry<String, List<Requirement>> e : workerRequirements.entrySet()) {
+        //     String nodeName = e.getKey();
+        //     List<Requirement> requirements = e.getValue();
+        //     ArrayNode candidates = SalConnector.findNodeCandidates(requirements, appUUID);
+        //     if (candidates.isEmpty()) {
+        //         log.error("Could not find node candidates for requirements: {}", requirements);
+        //         // Continue here while we don't really deploy
+        //         // return;
+        //     }
+        //     workerCandidates.put(nodeName, candidates);
+        // }
 
         // ------------------------------------------------------------
         // 3. Select node candidates
@@ -334,17 +196,17 @@ public class NebulousAppDeployer {
             // candidate is an edge node, we should select it and fill the
             // rest of the nodes with second-best cloud nodes.
 
-            // TODO: make sure we only choose the same edge node once; it
-            // might be in all node candidate lists :)
-            if (!workerCandidates.get(componentName).isEmpty()) {
-                // should always be true, except currently we don't abort
-                // in Step 2 if we don't find candidates.
-                JsonNode candidate = workerCandidates.get(componentName).get(0);
-                NodeCandidate c = mapper.convertValue(((ObjectNode)candidate).deepCopy()
-                    .remove(List.of("score", "ranking")),
-                    NodeCandidate.class);
-                nodeNameToCandidate.put(nodeName, c);
-            }
+            // // TODO: make sure we only choose the same edge node once; it
+            // // might be in all node candidate lists :)
+            // if (!workerCandidates.get(componentName).isEmpty()) {
+            //     // should always be true, except currently we don't abort
+            //     // in Step 2 if we don't find candidates.
+            //     JsonNode candidate = workerCandidates.get(componentName).get(0);
+            //     NodeCandidate c = mapper.convertValue(((ObjectNode)candidate).deepCopy()
+            //         .remove(List.of("score", "ranking")),
+            //         NodeCandidate.class);
+            //     nodeNameToCandidate.put(nodeName, c);
+            // }
         }
         app.getComponentMachineNames().put(componentName, nodeNames);
     }
@@ -405,8 +267,8 @@ public class NebulousAppDeployer {
 
         // ------------------------------------------------------------
         // 1. Extract node requirements
-        Map<String, List<Requirement>> workerRequirements = getWorkerRequirementsFromKubevela(kubevela);
-        Map<String, Integer> nodeCounts = getNodeCountFromKubevela(kubevela);
+        Map<String, List<Requirement>> workerRequirements = KubevelaAnalyzer.getRequirements(kubevela);
+        Map<String, Integer> nodeCounts = KubevelaAnalyzer.getNodeCount(kubevela);
         List<Requirement> controllerRequirements = getControllerRequirements(appUUID);
 
         Main.logFile("worker-requirements-" + appUUID + ".txt", workerRequirements);
@@ -8,6 +8,8 @@ import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.node.ObjectNode;
 import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
 
+import eu.nebulouscloud.optimiser.kubevela.KubevelaAnalyzer;
+
 import java.io.IOException;
 import java.net.URISyntaxException;
 import java.net.URL;
@@ -89,7 +91,7 @@ public class NebulousAppTests {
         String kubevela_str = Files.readString(getResourcePath("vela-deployment-v2.yml"),
             StandardCharsets.UTF_8);
         JsonNode kubevela = yaml_mapper.readTree(kubevela_str);
-        Map<String, List<Requirement>> requirements = NebulousAppDeployer.getWorkerRequirementsFromKubevela(kubevela);
+        Map<String, List<Requirement>> requirements = KubevelaAnalyzer.getRequirements(kubevela);
         // We could compare the requirements with what is contained in
         // KubeVela, or compare keys with component names, but this would
         // essentially duplicate the method code--so we just make sure the
@@ -109,7 +111,7 @@ public class NebulousAppTests {
         ObjectNode replacements = solutions.withObject("VariableValues");
         ObjectNode kubevela1 = app.rewriteKubevelaWithSolution(replacements);
 
-        Map<String, List<Requirement>> requirements = NebulousAppDeployer.getWorkerRequirementsFromKubevela(kubevela1);
+        Map<String, List<Requirement>> requirements = KubevelaAnalyzer.getRequirements(kubevela1);
         // We could compare the requirements with what is contained in
         // KubeVela, or compare keys with component names, but this would
         // essentially duplicate the method code--so we just make sure the
@@ -12,4 +12,4 @@ plugins {
 
 rootProject.name = 'optimiser-controller'
 
-include('optimiser-controller')
+include('optimiser-controller', 'nebulous-requirements-extractor')
@@ -8,11 +8,11 @@
       - nebulous-optimiser-controller-container-images
     description: Build the container images.
     files: &image_files
-      - ^optimiser-controller/
+      - ^/
     vars: &image_vars
       promote_container_image_job: nebulous-optimiser-controller-upload-container-images
       container_images:
-        - context: optimiser-controller
+        - context: .
           registry: quay.io
           repository: quay.io/nebulous/optimiser-controller
           namespace: nebulous
@@ -44,7 +44,7 @@
     description: Run Hadolint on Dockerfile(s).
     vars:
       dockerfiles:
-        - optimiser-controller/Dockerfile
+        - Dockerfile
 
 - job:
     name: nebulous-optimiser-controller-helm-lint
@@ -70,3 +70,24 @@
     vars:
       helm_charts:
         nebulous-optimiser-controller: ./charts/nebulous-optimiser-controller
+
+- job:
+    name: nebulous-optimiser-controller-java-build-java-libraries
+    parent: nebulous-build-java-libraries
+    provides:
+      - nebulous-optimiser-controller-java-java-libraries
+    description: Build the java libraries.
+    files: &library_files
+      - ^nebulous-requirements-extractor/
+    vars: &library_vars
+      java_libraries:
+        - context: nebulous-requirements-extractor
+
+- job:
+    name: nebulous-optimiser-controller-java-upload-java-libraries
+    parent: nebulous-upload-java-libraries
+    provides:
+      - nebulous-optimiser-controller-java-java-libraries
+    description: Build and upload the java libraries.
+    files: *library_files
+    vars: *library_vars
@@ -6,6 +6,7 @@
       - nebulous-optimiser-controller-build-container-images
       - nebulous-optimiser-controller-hadolint
       - nebulous-platform-apply-helm-charts
+      - nebulous-optimiser-controller-java-build-java-libraries
       - nox-linters
   gate:
     jobs:
@@ -14,7 +15,9 @@
       - nebulous-optimiser-controller-upload-container-images
       - nebulous-optimiser-controller-hadolint
       - nebulous-platform-apply-helm-charts
+      - nebulous-optimiser-controller-java-build-java-libraries
       - nox-linters
   promote:
     jobs:
+      - nebulous-optimiser-controller-java-upload-java-libraries
       - nebulous-optimiser-controller-promote-container-images