Log incoming solution message
Change-Id: I3f111fc76b48c8e97f39c5d5742fa3fdd744ab9b
@@ -180,14 +180,15 @@ public class ExnConnector {
                 // We'll talk a lot with SAL etc, so we should maybe fire up a
                 // thread so as not to block here.
                 try {
+                    ObjectNode json_body = mapper.convertValue(body, ObjectNode.class);
                     String app_id = message.subject();
+                    Main.logFile("solver-solution-" + app_id + ".json", json_body);
                     NebulousApp app = NebulousApps.get(app_id);
                     if (app == null) {
                         log.warn("Received solver solutions for non-existant app {}, discarding.", app_id);
                         return;
                     } else {
                         log.debug("Received solver solutions for app {}", app_id);
-                        ObjectNode json_body = mapper.convertValue(body, ObjectNode.class);
                         app.processSolution(json_body);
                     }
                 } catch (Exception e) {
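Note: converting the message body to json_body before the app lookup lets the incoming solver solution be written to disk even when no matching app exists. Main.logFile itself is not part of this diff; purely as an illustration of the kind of helper being called here (the class name, the logDirectory field and its handling are assumptions, not the repository's actual code), a minimal version could look like:

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;

    public final class LogFileSketch {
        // Hypothetical stand-in for the log directory created during startup;
        // null disables file dumps.
        private static Path logDirectory;

        public static void logFile(String name, Object contents) {
            if (logDirectory == null) return;
            try {
                // Dump the (JSON or text) contents into one file per message.
                Files.writeString(logDirectory.resolve(name),
                    String.valueOf(contents), StandardCharsets.UTF_8);
            } catch (IOException e) {
                // A failed debug dump should not break message handling.
            }
        }
    }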
@@ -92,10 +92,11 @@ public class Main implements Callable<Integer> {
     }
 
     /**
-     * Initialization code shared between main and subcommands. Note that
-     * here we connect to SAL if possible, but (for now) do not start the EXN
-     * ActiveMQ middleware. Each main method needs to call
-     * `activeMQConnector.start`.
+     * Initialization code shared between this class and any
+     * subcommands: set logging level, create log directory and create
+     * the ActiveMQ adapter. Note that we do not start the EXN
+     * ActiveMQ middleware, so each main method needs to call
+     * `activeMQConnector.start` if needed.
      */
     private void init() {
         log.debug("Beginning common startup of optimiser-controller");
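The contract in the new javadoc can be read as: every entry point calls init() for the shared setup, and only entry points that actually talk to the EXN middleware additionally call activeMQConnector.start(). A sketch of such an entry point, assuming the picocli-style call() method that Main itself declares; only init() and activeMQConnector.start() come from the diff, the rest is illustrative:

    @Override
    public Integer call() {
        init();                     // always: logging level, log directory, ActiveMQ adapter
        activeMQConnector.start();  // only if this entry point uses the EXN middleware
        // ... entry-point-specific work ...
        return 0;                   // picocli exit code
    }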
@@ -374,7 +374,7 @@ public class NebulousApp {
             constant.put("Variable", variableName);
             constant.set("Value", value);
         }
-
+        log.info("Sending AMPL file to solver");
         exnConnector.getAmplMessagePublisher().send(mapper.convertValue(msg, Map.class), getUUID(), true);
         Main.logFile("to-solver-" + getUUID() + ".json", msg.toString());
         Main.logFile("to-solver-" + getUUID() + ".ampl", ampl);
@@ -78,15 +78,19 @@ public class NebulousAppDeployer {
     }
 
     /**
-     * Given a KubeVela file, extract how many nodes to deploy for each
-     * component.
+     * Given a KubeVela file, extract how many nodes to deploy for
+     * each component. Note that this can be zero, when the component
+     * should not be deployed at all, e.g., when there is a cloud and
+     * an edge version of the component.
      *
-     * We currently detect replica count with the following component trait:
-     * ---
+     * We currently look for the following component trait:
+     *
+     * <pre>{@code
      * traits:
      *  - type: scaler
      *    properties:
      *      replicas: 2
+     * }</pre>
      *
      * @param kubevela the parsed KubeVela file.
      * @return A map from component name to number of instances to generate.
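The trait lookup the javadoc describes boils down to JSON-pointer navigation over the parsed YAML, which the code in the next hunk does with t.at("/type") and t.at("/properties/replicas"). A self-contained illustration, assuming Jackson's YAML dataformat; the component snippet and class name are made up for the example:

    import com.fasterxml.jackson.databind.JsonNode;
    import com.fasterxml.jackson.databind.ObjectMapper;
    import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;

    public final class ScalerTraitSketch {
        public static void main(String[] args) throws Exception {
            String component =
                  "name: my-component\n"
                + "traits:\n"
                + " - type: scaler\n"
                + "   properties:\n"
                + "     replicas: 2\n";
            JsonNode c = new ObjectMapper(new YAMLFactory()).readTree(component);
            for (JsonNode t : c.get("traits")) {
                if (t.at("/type").asText().equals("scaler")
                    && t.at("/properties/replicas").canConvertToExactIntegral()) {
                    // prints "my-component -> 2"
                    System.out.println(c.get("name").asText()
                        + " -> " + t.at("/properties/replicas").asInt());
                }
            }
        }
    }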
@@ -100,8 +104,6 @@ public class NebulousAppDeployer {
             if (t.at("/type").asText().equals("scaler")
                 && t.at("/properties/replicas").canConvertToExactIntegral())
             {
-                // Note this can be 0, in case we want to balance
-                // between e.g. cloud and edge
                 result.put(c.get("name").asText(),
                     t.at("/properties/replicas").asInt());
             }
@@ -204,8 +206,10 @@ public class NebulousAppDeployer {
     }
 
     /**
-     * Add affinities trait to all components, except for those with a replica
-     * count of 0.
+     * Produce a fresh KubeVela specification with added node affinity traits.
+     *
+     * We add the following trait to all components, except those with
+     * a replica count of 0:
      *
      * <pre>{@code
      * traits:
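For orientation, the rewrite the javadoc describes has the general shape "copy the component list, append one trait per component, skip zero-replica components". The concrete affinity trait is elided in the hunk above, so its type and properties stay as placeholders in this sketch; nodeCounts stands in for the result of getNodeCountFromKubevela, and the class and method names are hypothetical:

    import java.util.Map;

    import com.fasterxml.jackson.databind.JsonNode;
    import com.fasterxml.jackson.databind.node.ArrayNode;
    import com.fasterxml.jackson.databind.node.ObjectNode;

    public final class AffinityRewriteSketch {
        static ArrayNode withExtraTrait(ArrayNode components, Map<String, Integer> nodeCounts) {
            ArrayNode rewritten = components.deepCopy();  // leave the original untouched
            for (JsonNode component : rewritten) {
                String name = component.get("name").asText();
                if (nodeCounts.getOrDefault(name, 0) == 0) continue;  // zero replicas: skip
                ObjectNode trait = ((ObjectNode) component).withArray("traits").addObject();
                trait.put("type", "affinity");   // placeholder type name
                trait.putObject("properties");   // per-component node names would go here
            }
            return rewritten;
        }
    }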
@@ -279,7 +283,7 @@ public class NebulousAppDeployer {
 
         Main.logFile("worker-requirements-" + appUUID + ".txt", workerRequirements);
         Main.logFile("worker-counts-" + appUUID + ".txt", nodeCounts);
-        Main.logFile("contoller-requirements-" + appUUID + ".txt", controllerRequirements);
+        Main.logFile("controller-requirements-" + appUUID + ".txt", controllerRequirements);
         // ----------------------------------------
         // 2. Find node candidates
 
@@ -352,19 +356,30 @@ public class NebulousAppDeployer {
 
         JsonNode rewritten = addNodeAffinities(kubevela, app.getComponentMachineNames());
         String rewritten_kubevela = "---\n# Did not manage to create rewritten KubeVela";
         try {
             rewritten_kubevela = yaml_mapper.writeValueAsString(rewritten);
         } catch (JsonProcessingException e) {
             log.error("Failed to convert KubeVela to YAML; this should never happen", e);
         }
         Main.logFile("rewritten-kubevela-" + appUUID + ".yaml", rewritten_kubevela);
         // TODO: call deployApplication endpoint
     }
 
     /**
-     * Redeploy a running application.
+     * Given a KubeVela file, adapt the running application to its specification.
+     *
+     * The KubeVela file will have been rewritten with updated
+     * information from the solver.
+     *
+     * NOTE: this method is under development, pending the new endpoints.
+     *
+     * @param app the NebulOuS app object.
+     * @param kubevela the KubeVela file to deploy.
      */
     public static void redeployApplication(NebulousApp app, ObjectNode kubevela) {
+        String appUUID = app.getUUID();
+        log.info("Starting redeployment of {}", appUUID);
+
         // The overall flow:
         //
         // 1. Extract node requirements and node counts from the updated
@@ -378,7 +393,18 @@ public class NebulousAppDeployer {
         // 6. Call clusterScaleOut endpoint with list of added nodes
         // 7. Call deployApplication with rewritten KubeVela
         // 8. call clusterScaleIn endpoint with list of removed node names
+        Main.logFile("kubevela-updated-from-solver-" + appUUID + ".yaml", kubevela);
 
+        // ------------------------------------------------------------
+        // 1. Extract node requirements
+        Map<String, List<Requirement>> workerRequirements = getWorkerRequirementsFromKubevela(kubevela);
+        Map<String, Integer> nodeCounts = getNodeCountFromKubevela(kubevela);
+        List<Requirement> controllerRequirements = getControllerRequirements(appUUID);
+
+        Main.logFile("worker-requirements-" + appUUID + ".txt", workerRequirements);
+        Main.logFile("worker-counts-" + appUUID + ".txt", nodeCounts);
+        Main.logFile("controller-requirements-" + appUUID + ".txt", controllerRequirements);
+
     }
 
 }
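Steps 6 and 8 of the flow comments above need the nodes to add and the nodes to retire, which falls out of comparing the per-component counts from step 1 against what is currently deployed. A small self-contained sketch of that comparison; all names are hypothetical, since the real implementation is still pending the new endpoints, as the javadoc notes:

    import java.util.HashMap;
    import java.util.Map;

    public final class NodeCountDeltaSketch {
        /** Positive entry: nodes to add for that component; negative: nodes to remove. */
        static Map<String, Integer> delta(Map<String, Integer> deployedCounts,
                                          Map<String, Integer> newCounts) {
            Map<String, Integer> result = new HashMap<>();
            newCounts.forEach((component, count) ->
                result.put(component, count - deployedCounts.getOrDefault(component, 0)));
            deployedCounts.forEach((component, count) ->
                result.putIfAbsent(component, -count));   // component dropped entirely
            return result;
        }
    }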