update for devices

Change-Id: I64095946049555dd2eb82ac22f9bdae44aa398e9
This commit is contained in:
Gregory Koronakos 2024-04-30 16:56:41 +03:00
parent 064773fbd7
commit 7faee3d41d
5 changed files with 238 additions and 539 deletions

View File

@@ -4,7 +4,7 @@ from scipy.optimize import linprog
from scipy.stats import rankdata

def perform_evaluation(data_table, relative_wr_data, immediate_wr_data, node_names, node_ids):
-   print("Evaluation begun with perform_evaluation():")
+   # print("Evaluation begun with perform_evaluation():")
    # print("Data Table:", data_table)
    # Identify the boolean criteria columns by checking if all values are either 0 or 1
    # boolean_criteria = [criterion for criterion in data_table if set(data_table[criterion]) <= {0, 1}]
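The commented-out boolean check above is a one-line set test over data_table; a minimal sketch of its behavior, assuming data_table maps criterion names to lists of values (the sample input is illustrative):

    # A criterion whose values are all 0 or 1 is treated as boolean
    data_table = {"cores": [2, 4, 8], "gpu_present": [0, 1, 1]}  # illustrative input
    boolean_criteria = [criterion for criterion in data_table if set(data_table[criterion]) <= {0, 1}]
    print(boolean_criteria)  # ['gpu_present']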

View File

@@ -13,6 +13,46 @@ Boolean_Variables = [
    "8cd09fe9-c119-4ccd-b651-0f18334dbbe4", "7147995c-8e68-4106-ab24-f0a7673eb5f5", "c1c5b3c9-6178-4d67-a7e3-0285c2bf98ef"]

# Used to extract_SAL_node_candidate_data from User Side for DataGrid
# def extract_SAL_node_candidate_data_Front(json_data):
# default_criteria_list = ["cores", "ram", "disk", "memoryPrice", "price"]
#
# if isinstance(json_data, dict): # Single node dictionary
# json_data = [json_data] # Wrap it in a list
#
# extracted_data = []
# node_ids = []
# node_names = []
#
# for item in json_data:
# hardware_info = item.get("hardware", {})
# # Extract default criteria values
# default_criteria_values = {criteria: hardware_info.get(criteria, 0.0) if criteria in hardware_info else item.get(criteria, 0.0) for criteria in default_criteria_list}
#
# # Correctly extract the providerName from the cloud information
# cloud_info = item.get("cloud", {}) # get the cloud info or default to an empty dict
# api_info = cloud_info.get("api", {})
# provider_name = api_info.get("providerName", "Unknown Provider")
#
# # each item is now a dictionary
# node_data = {
# "nodeId": item.get("nodeId", ''),
# "id": item.get('id', ''),
# "nodeCandidateType": item.get("nodeCandidateType", ''),
# **default_criteria_values, # Unpack default criteria values into node_data
# "hardware": hardware_info,
# "location": item.get("location", {}),
# "image": item.get("image", {}),
# "providerName": provider_name
# }
# extracted_data.append(node_data)
# node_ids.append(node_data["id"])
#
# # print("Before create_node_name")
# node_names.append(create_node_name(node_data)) # call create_node_name function
# # print("After create_node_name")
#
# return extracted_data, node_ids, node_names
def extract_SAL_node_candidate_data_Front(json_data):
    default_criteria_list = ["cores", "ram", "disk", "memoryPrice", "price"]
@@ -28,10 +68,15 @@ def extract_SAL_node_candidate_data_Front(json_data):
        # Extract default criteria values
        default_criteria_values = {criteria: hardware_info.get(criteria, 0.0) if criteria in hardware_info else item.get(criteria, 0.0) for criteria in default_criteria_list}

-       # Correctly extract the providerName from the cloud information
        cloud_info = item.get("cloud", {})  # get the cloud info or default to an empty dict
-       api_info = cloud_info.get("api", {})
-       provider_name = api_info.get("providerName", "Unknown Provider")
+       node_type = item.get("nodeCandidateType", "")
+
+       # extract the providerName from the cloud information
+       if node_type == "EDGE":
+           provider_name = "-"  # For "EDGE" type, set provider_name as "-"
+       else:
+           api_info = cloud_info.get("api", {})
+           provider_name = api_info.get("providerName", "Unknown Provider")  # For other types, fetch from api_info
        # each item is now a dictionary
        node_data = {
@@ -53,6 +98,7 @@ def extract_SAL_node_candidate_data_Front(json_data):
    return extracted_data, node_ids, node_names
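A hedged usage sketch for extract_SAL_node_candidate_data_Front(), assuming the input is a list of SAL node-candidate dictionaries; the sample candidate below is invented for illustration:

    sample_nodes = [{
        "id": "node-1", "nodeId": "n1", "nodeCandidateType": "EDGE",
        "hardware": {"cores": 2, "ram": 4096, "disk": 32.0},
        "cloud": {},
        "location": {"geoLocation": {"city": "Warsaw", "country": "Poland"}},
        "image": {"operatingSystem": {"operatingSystemFamily": "UBUNTU"}}
    }]
    extracted, ids, names = extract_SAL_node_candidate_data_Front(sample_nodes)
    # EDGE candidates get provider_name "-", since their cloud entry carries no API info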
# Used to create node names for DataGrid # Used to create node names for DataGrid
def create_node_name(node_data):
    node_type = node_data.get("nodeCandidateType", "UNKNOWN_TYPE")
@@ -99,11 +145,12 @@ def create_node_name(node_data):

# Used to extract_SAL_node_candidate_data from App Side working with Optimizer
def extract_SAL_node_candidate_data(json_string):
    # print("Entered in extract_SAL_node_candidate_data")
-   try:
-       json_data = json.loads(json_string)  # Ensure json_data is a list of dictionaries
-   except json.JSONDecodeError as e:
-       print(f"Error parsing JSON: {e}")
-       return [], 0, [], []
+   json_data = json.loads(json_string)
+   # try:
+   #     json_data = json.loads(json_string)  # json_data is a list of dictionaries
+   # except json.JSONDecodeError as e:
+   #     print(f"Error parsing JSON inside extract_SAL_node_candidate_data(): {e}")
+   #     return [], 0, [], []

    extracted_data = []
@@ -300,7 +347,6 @@ def convert_data_table(created_data_table):
    return created_data_table

# Used to Append "Score" and "Rank" for each node in SAL's response JSON
def append_evaluation_results(sal_reply_body, scores_and_ranks):
    # Check if sal_reply_body is a string and convert it to a Python object
@@ -423,173 +469,14 @@ def random_value_based_on_type(data_type, criterion_info=None):
    return round(random.uniform(1, 100), 2)

-# Used to parse Patini's JSON
-def parse_device_info_from_file(file_path):
-    with open(file_path, 'r') as file:
-        json_data = json.load(file)
-    device_names = []
-    device_info = {
-        'id': json_data['_id'],
-        'name': json_data['name'],  # Save the device name
-        'deviceInfo': json_data['deviceInfo'],
-        'creationDate': json_data['creationDate'],
-        'lastUpdateDate': json_data['lastUpdateDate'],
-        'status': json_data['status'],
-        'metrics': {
-            'cpu': json_data['metrics']['metrics']['cpu'],
-            'uptime': json_data['metrics']['metrics']['uptime'],
-            'disk': json_data['metrics']['metrics']['disk'],
-            'ram': json_data['metrics']['metrics']['ram']
-        }
-    }
-    # Example of converting and handling ISODate strings, adjust accordingly
-    device_info['creationDate'] = datetime.fromisoformat(device_info['creationDate'].replace("ISODate('", "").replace("')", ""))
-    device_info['lastUpdateDate'] = datetime.fromisoformat(device_info['lastUpdateDate'].replace("ISODate('", "").replace("')", ""))
-    device_info['creationDate'] = device_info['creationDate'].isoformat()
-    device_info['lastUpdateDate'] = device_info['lastUpdateDate'].isoformat()
-    # Update the global device_names list
-    device_names.append({'id': device_info['id'], 'name': device_info['name']})
-    return device_names, device_info
+# Used to parse Dummy JSON files for Review
+def read_json_file_as_string(file_path):
+    try:
+        with open(file_path, 'r') as file:
+            return file.read()
+    except Exception as e:
+        print(f"Error reading JSON file: {e}")
+        return None
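A short usage sketch for the new read_json_file_as_string() helper; the file name matches the dummy file referenced later in this commit, and the empty-array fallback is an assumption:

    sal_reply_body = read_json_file_as_string('dummySALresponse.json')
    if sal_reply_body is None:
        sal_reply_body = "[]"  # assumed fallback: empty JSON array as a string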
#---------------Read Application Data
# Used to read the saved Data of the Application ONLY for the Nodes returned by SAL
# def read_application_data(app_id, sal_reply_body):
# # Directory path and file path
# app_dir = os.path.join("app_dirs", app_id)
# file_path = os.path.join(app_dir, f"{app_id}_data.json")
#
# # Initialize variables to return in case of no data or an error
# data_table, relative_wr_data, immediate_wr_data, node_names, node_ids = [], [], [], [], []
# # Read data from SAL's reply
# extracted_data_SAL, node_ids_SAL, node_names_SAL = extract_SAL_node_candidate_data(sal_reply_body)
#
# # Check if the file exists
# if os.path.exists(file_path):
# # Read and parse the JSON file
# with open(file_path, 'r', encoding='utf-8') as f:
# data = json.load(f)
#
# # Filter gridData based on Nodes returned by SAL
# filtered_grid_data = [node for node in data.get('gridData', []) if node.get('id') in node_ids_SAL]
#
# if filtered_grid_data: # if there's at least 1 match
# # Create a new JSON structure and call transform_grid_data_to_table
# filtered_json_data = {
# "gridData": filtered_grid_data,
# "relativeWRData": relative_wr_data,
# "immediateWRData": immediate_wr_data,
# "nodeNames": [node.get('name') for node in filtered_grid_data],
# "nodeIds": node_ids_SAL
# }
#
# # Call transform_grid_data_to_table with the filtered JSON data
# # data_table, _, _, node_names, _ = transform_grid_data_to_table(filtered_json_data)
# data_table, relative_wr_data, immediate_wr_data, node_names, node_ids = transform_grid_data_to_table(filtered_json_data)
# if not node_names:
# node_names = node_ids
#
# else: # There is not any node id match - Proceed only with the nodes from SAL's reply
# print("No matching node IDs found in the saved data. Proceed only with data from SAL")
# selected_criteria = ["Number of CPU Cores", "Memory Size"]
# field_mapping = create_criteria_mapping(selected_criteria, extracted_data_SAL)
# data_table = create_data_table(selected_criteria, extracted_data_SAL, field_mapping)
# # Assign relativeWRData and immediateWRData regardless of node ID matches
# relative_wr_data = []
# immediate_wr_data = []
# node_ids = node_ids_SAL
# node_names = node_ids
# if not node_names_SAL:
# node_names = node_ids
# else:
# print(f"No JSON file found for application ID {app_id}.")
#
# # Note: relative_wr_data and immediate_wr_data are returned regardless of the node IDs match
# return data_table, relative_wr_data, immediate_wr_data, node_names, node_ids
# Used to create data table from SAL's response in app_side
# def read_application_data(app_id, sal_reply_body):
# app_dir = os.path.join("app_dirs", app_id)
# file_path = os.path.join(app_dir, f"{app_id}_data.json")
# data_table, relative_wr_data, immediate_wr_data, node_names, node_ids = {}, [], [], [], []
#
# default_list_criteria_mapping = {
# "Operating cost": "price",
# "Memory Price": "memoryPrice",
# "Number of CPU Cores": "cores",
# "Memory Size": "ram",
# "Storage Capacity": "disk"
# }
#
# if isinstance(sal_reply_body, str):
# try:
# sal_reply_body = json.loads(sal_reply_body)
# except json.JSONDecodeError as e:
# print(f"Error parsing JSON: {e}")
# return data_table, relative_wr_data, immediate_wr_data, node_names, node_ids
#
# if os.path.exists(file_path):
# with open(file_path, 'r', encoding='utf-8') as f:
# data = json.load(f)
# selected_criteria = {criterion['title']: criterion for criterion in data.get('selectedCriteria', [])}
#
# for criterion in selected_criteria.keys():
# data_table[criterion] = []
#
# matched_node_ids = set(node['id'] for node in data.get('gridData', [])) & set(node['id'] for node in sal_reply_body)
# unmatched_node_ids = set(node['id'] for node in sal_reply_body) - matched_node_ids
#
# # Ordinal value mapping for MATCHED nodes
# ordinal_value_mapping = {"High": 3, "Medium": 2, "Low": 1}
#
# # Process MATCHED nodes from JSON file
# for node in data.get('gridData', []):
# if node['id'] in matched_node_ids:
# node_ids.append(node['id'])
# # node_names.append(node.get('name', 'Unknown'))
# for criterion, crit_info in selected_criteria.items():
# value = next((c['value'] for c in node['criteria'] if c['title'] == criterion), None)
# if value is not None:
# value = 1 if value is True else (0 if value is False else value)
# else: # Apply default if criterion not found
# value = 0.00001 if crit_info['type'] == 2 else 0
# data_table[criterion].append(value)
#
# # Process UNMATCHED nodes from sal_reply_body
# for node_id in unmatched_node_ids:
# node_data = next((node for node in sal_reply_body if node['id'] == node_id), {})
# node_ids.append(node_id)
# for criterion, crit_info in selected_criteria.items():
# mapped_field = default_list_criteria_mapping.get(criterion, '')
# value = node_data.get(mapped_field, 0.00001 if crit_info['type'] == 2 else False)
# value = 1 if value is True else (0 if value is False else value)
# data_table[criterion].append(value)
#
# # convert True/False to 1/0 in data_table for both boolean and string representations
# for criterion, values in data_table.items():
# data_table[criterion] = [convert_bool(value) for value in values]
# node_names = node_ids
# relative_wr_data, immediate_wr_data = data.get('relativeWRData', []), data.get('immediateWRData', [])
#
# else: # There is not any node id match - Proceed only with the nodes from SAL's reply
# print(f"No JSON file found for application ID {app_id}. Proceed only with data from SAL.")
# extracted_data_SAL, node_ids_SAL, node_names_SAL = extract_SAL_node_candidate_data(sal_reply_body)
# selected_criteria = ["Number of CPU Cores", "Memory Size"]
# field_mapping = create_criteria_mapping(selected_criteria, extracted_data_SAL)
# data_table = create_data_table(selected_criteria, extracted_data_SAL, field_mapping)
# # Assign relativeWRData and immediateWRData regardless of node ID matches
# relative_wr_data = []
# immediate_wr_data = []
# node_ids = node_ids_SAL
# node_names = node_ids
#
# return data_table, relative_wr_data, immediate_wr_data, node_names, node_ids
# Used to transform SAL's response before sending to DataGrid

@@ -628,89 +515,3 @@ def extract_node_candidate_data(json_file_path):
    return extracted_data, node_ids, node_names
# Works for dummy_node_data
# def create_node_name(node_data):
# # dummy_node_data = '''{
# # "id": "8a7481d98e702b64018e702cbe070000",
# # "nodeCandidateType": "EDGE",
# # "jobIdForByon": null,
# # "jobIdForEdge": "FCRnewLight0",
# # "price": 0.0,
# # "cloud": {
# # "id": "edge",
# # "endpoint": null,
# # "cloudType": "EDGE",
# # "api": null,
# # "credential": null,
# # "cloudConfiguration": {
# # "nodeGroup": null,
# # "properties": {}
# # },
# # "owner": "EDGE",
# # "state": null,
# # "diagnostic": null
# # },
# # "location": {
# # "id": "edge-location-KmVf4xDJKL7acBGc",
# # "name": null,
# # "providerId": null,
# # "locationScope": null,
# # "isAssignable": null,
# # "geoLocation": {
# # "city": "Warsaw",
# # "country": "Poland",
# # "latitude": 52.237049,
# # "longitude": 21.017532
# # },
# # "parent": null,
# # "state": null,
# # "owner": null
# # },
# # "image": {
# # "id": "edge-image-KmVf4xDJKL7acBGc",
# # "name": "edge-image-name-UBUNTU-UNKNOWN",
# # "providerId": null,
# # "operatingSystem": {
# # "operatingSystemFamily": "UBUNTU",
# # "operatingSystemArchitecture": "UNKNOWN",
# # "operatingSystemVersion": 1804.00
# # },
# # "location": null,
# # "state": null,
# # "owner": null
# # },
# # "hardware": {
# # "id": "edge-hardware-KmVf4xDJKL7acBGc",
# # "name": null,
# # "providerId": null,
# # "cores": 1,
# # "ram": 1,
# # "disk": 1.0,
# # "fpga": 0,
# # "location": null,
# # "state": null,
# # "owner": null
# # },
# # "pricePerInvocation": 0.0,
# # "memoryPrice": 0.0,
# # "nodeId": null,
# # "environment": null
# # }'''
# # node_data = json.loads(dummy_node_data)
# # print("node_data in create node name")
# # print(node_data)
# node_type = node_data["nodeCandidateType"]
# # print(node_type)
# if node_data["location"]:
# node_location = node_data["location"]["geoLocation"]
# # print(json.dumps(node_location))
# node_city = node_location["city"]
# node_country = node_location["country"]
# else:
# node_city = ""
# node_country = ""
# node_os = node_data["image"]["operatingSystem"]["operatingSystemFamily"]
# node_name = node_type + " - " + node_city + " , " + node_country + " - " + node_os
# # print("node name crated: " + node_name)
# return node_name

View File

@@ -31,14 +31,12 @@ class SyncedHandler(Handler):
        # if address == "topic://eu.nebulouscloud.cfsb.get_node_candidates":
        if key == "OPT-triggering":
            # logging.info("Entered in OPT-triggering'")

            # Save the correlation_id (We do not have it from the app_side)
            uuid.uuid4().hex.encode("utf-8")  # for Correlation id
            correlation_id_optimizer = message.correlation_id
            if not correlation_id_optimizer:
                correlation_id_optimizer = '88334290cad34ad9b21eb468a9f8ff11'  # dummy correlation_id
-           # logging.info(f"Optimizer_correlation_id {message.correlation_id}")
            # print("Optimizer Correlation Id: ", correlation_id_optimizer)

            # application_id_optimizer = message.properties.application  # can be taken also from message.annotations.application
@@ -47,44 +45,55 @@ class SyncedHandler(Handler):
            # print("Application Id: ", application_id_optimizer)

            try:
-               # Read the Message Sent from Optimizer
+               ###--- For Review, use ONLY ONE block, Optimizer's body or Dummy body ----------------------###
+               ###-------- Extract body from Optimizer's message --------###
+               ## Read the Message Sent from Optimizer
                opt_message_data = body
                # print("Whole Message Sent from Optimizer:", opt_message_data)

-               # Extract 'body' from opt_message_data
+               ## Extract 'body' from opt_message_data
                body_sent_from_optimizer = opt_message_data.get('body', {})
+               body_json_string = body_sent_from_optimizer
+               ###-------- Extract body from Optimizer's message --------###

-               ## Example body
+               ###-------- Dummy body for DEMO when we emulate the message sent from Optimizer --------###
                # body_sent_from_optimizer = [
                #     {
                #         "type": "NodeTypeRequirement",
+               #         # "nodeTypes": ["EDGES"]
                #         "nodeTypes": ["IAAS", "PAAS", "FAAS", "BYON", "EDGE", "SIMULATION"]
-               #         # "nodeTypes": ["EDGES"]
                #         # ,"jobIdForEDGE": "FCRnewLight0"
                #     }
                #     # ,{
+               #     #     "type": "AttributeRequirement",
+               #     #     "requirementClass": "hardware",
+               #     #     "requirementAttribute": "cores",
+               #     #     "requirementOperator": "GEQ",
+               #     #     "value": "64"
+               #     # }
+               #     # ,{
                #     #     "type": "AttributeRequirement",
                #     #     "requirementClass": "hardware",
                #     #     "requirementAttribute": "ram",
-               #     #     "requirementOperator": "EQ",
-               #     #     "value": "2"
+               #     #     "requirementOperator": "GEQ",
+               #     #     "value": "131072"
                #     # }
                # ]
-               # logging.info(body_sent_from_optimizer)
+               # body_json_string = json.dumps(body_sent_from_optimizer)  # Convert the body data to a JSON string
+               ###-------- Dummy body for DEMO when we emulate the message sent from Optimizer --------###
+               ###--- For Review, use ONLY ONE block, Optimizer's body or dummy body ----------------------###

+               print("-------------------------------------------------")
                print("Extracted body from Optimizer Message:", body_sent_from_optimizer)

                ## Prepare message to be sent to SAL
-               # Convert the body data to a JSON string
-               # body_json_string = json.dumps(body_sent_from_optimizer)  # For Sender
-               body_json_string = body_sent_from_optimizer  # For Optimizer
                RequestToSal = {  # Dictionary
                    "metaData": {"user": "admin"},  # key [String "metaData"] value [dictionary]
                    "body": body_json_string  # key [String "body"] value [JSON String]
                }
-               # logging.info("RequestToSal: %s", RequestToSal)
-               # print("RequestToSal:", RequestToSal)
+               print("Request to SAL:", RequestToSal)

                # print("Is RequestToSal a valid dictionary:", isinstance(RequestToSal, dict))
                # print("Is the 'body' string in RequestToSal a valid JSON string:", is_json(RequestToSal["body"]))
@@ -99,22 +108,21 @@ class SyncedHandler(Handler):
                    nodes_data = json.loads(sal_body)

                    # Check if there is any error in SAL's reply body
                    if 'key' in nodes_data and any(keyword in nodes_data['key'].lower() for keyword in ['error', 'exception']):
-                       print("Error found in message body:", nodes_data['message'])
+                       print("Error found in SAL's message body:", nodes_data['message'])
                        sal_reply_body = []
                    else:  # No error found in SAL's reply body
                        total_nodes = len(nodes_data)  # Get the total number of nodes
-                       print("Total Nodes in SAL's reply:", total_nodes)
+                       # print("Total Nodes in SAL's reply:", total_nodes)
                        if total_nodes > 400:  # Check if more than 400 nodes received
-                           print("More than 400 nodes returned from SAL.")
+                           # print("More than 400 nodes returned from SAL.")
                            # Filter to only include the first 400 nodes and convert back to JSON string
                            sal_reply_body = json.dumps(nodes_data[:400])
                        elif total_nodes > 0 and total_nodes <= 400:
-                           print(f"Total {total_nodes} nodes returned from SAL. Processing all nodes.")
+                           # print(f"Total {total_nodes} nodes returned from SAL. Processing all nodes.")
                            # Keep sal_reply_body as is since it's already a JSON string
                            sal_reply_body = sal_body
                        else:
+                           print(f"Total {total_nodes} nodes returned from SAL.")
                            sal_reply_body = []

                except json.JSONDecodeError as e:
@@ -122,7 +130,6 @@ class SyncedHandler(Handler):
                    sal_reply_body = []  # Default to an empty JSON array as a string in case of error

                if sal_reply_body:  # Check whether SAL's reply body is empty
-                   # logging.info(f"Reply Received from SAL: {sal_reply}")
                    # print("SAL reply Body:", sal_reply_body)

                    # Check the number of nodes before Evaluation
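The hunk above caps SAL's reply at 400 candidates; a standalone sketch of the same rule (MAX_NODES is an illustrative name, the handler hardcodes 400):

    import json
    MAX_NODES = 400
    nodes_data = json.loads(sal_body)  # sal_body: JSON string returned by SAL
    if len(nodes_data) > MAX_NODES:
        sal_reply_body = json.dumps(nodes_data[:MAX_NODES])  # keep only the first 400
    else:
        sal_reply_body = sal_body  # already a JSON string, keep as is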
@@ -130,20 +137,32 @@ class SyncedHandler(Handler):
                    # Search for application_id, Read JSON and create data to pass to Evaluation
                    if check_json_file_exists(application_id_optimizer):  # Application JSON exists in DB
                        print(f"JSON file for application ID {application_id_optimizer} exists.")

+                       ###-------- Extract data from dummy JSON file --------###
+                       # json_file_path = 'dummySALresponse.json'
+                       # sal_reply_body = read_json_file_as_string(json_file_path)
+                       ###-------- Extract data from dummy JSON file --------###

                        # Check if there are differences in available nodes between saved data in JSON file and SAL's reply
                        data_table, relative_wr_data, immediate_wr_data, node_names, node_ids = read_application_data(application_id_optimizer, sal_reply_body)
+                       # print("sal_reply_body:", sal_reply_body)
-                       # print("data_table filtered from JSON and SAL:", data_table)
-                       # print("node_ids filtered from JSON and SAL:", node_ids)
                        # print("relative_wr_data:", relative_wr_data)
                        # print("immediate_wr_data:", immediate_wr_data)
-                       # print("node_names filtered from JSON and SAL:", node_names)
                    else:  # Application does not exist in directory
-                       print(f"JSON file for application ID {application_id_optimizer} does not exist.")
+                       # print(f"JSON file for application ID {application_id_optimizer} does not exist.")
-                       # Read data from SAL's response by calling the function extract_node_candidate_data()
-                       # extracted_data_SAL, node_ids, node_names = extract_node_candidate_data('SAL_Response_11EdgeDevs.json')
-                       # Extract data from SAL's response
+                       ###-------- Extract data from SAL's response --------###
                        extracted_data_SAL, node_ids, node_names = extract_SAL_node_candidate_data(sal_reply_body)
+                       ###-------- Extract data from SAL's response --------###

+                       ###-------- Extract data from dummy JSON file --------###
+                       # json_file_path = 'dummySALresponse.json'
+                       # sal_reply_body = read_json_file_as_string(json_file_path)
+                       # if sal_reply_body:
+                       #     extracted_data_SAL, node_ids, node_names = extract_SAL_node_candidate_data(sal_reply_body)
+                       ###-------- Extract data from dummy JSON file --------###

                        # print("extracted_data_SAL:", extracted_data_SAL)
                        # print("node_ids:", node_ids)
@@ -155,27 +174,29 @@ class SyncedHandler(Handler):
                        data_table = create_data_table(selected_criteria, extracted_data_SAL, field_mapping)
                        relative_wr_data = []
                        immediate_wr_data = []
+                       # print("created_data_table:", data_table)

                    # Check the number of nodes before Evaluation
                    print("There are " + str(len(node_ids)) + " nodes for Evaluation")

-                       # Convert the original data of RAM and # of Cores, e.g. 1/X, if they are selected
                        print("Original created_data_table:", data_table)
-                       # Convert RAM and Cores
-                       data_table = convert_data_table(data_table)
+                       data_table = convert_data_table(data_table)  # Convert RAM and # of Cores, e.g. 1/X
                        print("Converted created_data_table:", data_table)

                        ## Run evaluation
                        evaluation_results = perform_evaluation(data_table, relative_wr_data, immediate_wr_data, node_names, node_ids)
                        # print("Evaluation Results:", evaluation_results)

-                       ## Extract and save the results
+                       ## Extract and Save the Results
                        # ScoresAndRanks = evaluation_results['results']
                        ScoresAndRanks = evaluation_results.get('results', [])
-                       # print("Scores and Ranks:", ScoresAndRanks)
+                       print("Scores and Ranks:", ScoresAndRanks)

                        # Append the Score and Rank of each node to SAL's Response
                        SAL_and_Scores_Body = append_evaluation_results(sal_reply_body, ScoresAndRanks)
                        # print("SAL_and_Scores_Body:", SAL_and_Scores_Body)
                    else:
                        print("There is only one node!")
                        # Append the Score and Rank of each node to SAL's Response
@@ -193,7 +214,7 @@ class SyncedHandler(Handler):
                    formatted_json = json.dumps(CFSBResponse, indent=4)
                    with open('CFSBResponse.json', 'w') as file:
                        file.write(formatted_json)
-                   print("Formatted JSON has been saved to CFSBResponse.json")
+                   print("Data with Scores and Ranks for Nodes are saved to CFSBResponse.json")
                else:  # SAL's reply body is empty; send an empty body to Optimizer
                    print("No Body in reply from SAL!")
@@ -205,12 +226,13 @@ class SyncedHandler(Handler):
                ## Send message to OPTIMIZER
                context.get_publisher('SendToOPT').send(CFSBResponse, application_id_optimizer, properties={'correlation_id': correlation_id_optimizer}, raw=True)
+               print("Message to Optimizer has been sent")
+               print("-------------------------------------------------")

            except json.JSONDecodeError as e:
                logging.error(f"Failed to parse message body from Optimizer as JSON: {e}")

    def requestSAL(self, RequestToSal):
        sal_reply = Context.publishers['SAL-GET'].send_sync(RequestToSal)
        # Process SAL's Reply
@@ -277,4 +299,3 @@ def is_json(myjson):
    except TypeError as e:  # includes simplejson.decoder.JSONDecodeError
        return False
    return True
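A quick usage sketch for the is_json() helper shown above; the inputs are illustrative:

    print(is_json('{"metaData": {"user": "admin"}}'))  # True
    print(is_json('not json'))                         # False
    print(is_json(None))                               # False, via the TypeError branch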

View File

@@ -10,7 +10,7 @@ import get_data as file
import activemq
import traceback
import logging
-# logging.disable(logging.CRITICAL)
+logging.disable(logging.CRITICAL)

main_routes = Blueprint('main', __name__)
@@ -20,85 +20,6 @@ NoData_Variables = ['attr-security', 'attr-performance-capacity', 'attr-performa
Cont_Variables = ['attr-performance', 'attr-financial', 'attr-performance-capacity-memory',
                  'attr-performance-capacity-memory-speed']
dummy_node_data = {
"id": "8a7481d98e702b64018e702cbe070000",
"nodeCandidateType": "EDGE",
"jobIdForByon": "",
"jobIdForEdge": "FCRnewLight0",
"price": 0.0,
"cloud": {
"id": "edge",
"endpoint": "",
"cloudType": "EDGE",
"api": "",
"credential": "",
"cloudConfiguration": {
"nodeGroup": "",
"properties": {}
},
"owner": "EDGE",
"state": "",
"diagnostic": ""
},
"location": {
"id": "edge-location-KmVf4xDJKL7acBGc",
"name": "",
"providerId": "",
"locationScope": "",
"isAssignable": "",
"geoLocation": {
"city": "Warsaw",
"country": "Poland",
"latitude": 52.237049,
"longitude": 21.017532
},
"parent": "",
"state": "",
"owner": ""
},
"image": {
"id": "edge-image-KmVf4xDJKL7acBGc",
"name": "edge-image-name-UBUNTU-UNKNOWN",
"providerId": "",
"operatingSystem": {
"operatingSystemFamily": "UBUNTU",
"operatingSystemArchitecture": "UNKNOWN",
"operatingSystemVersion": 1804.00
},
"location": "",
"state": "",
"owner": ""
},
"hardware": {
"id": "edge-hardware-KmVf4xDJKL7acBGc",
"name": "",
"providerId": "",
"cores": 1,
"ram": 1,
"disk": 1.0,
"fpga": 0,
"location": "",
"state": "",
"owner": ""
},
"pricePerInvocation": 0.0,
"memoryPrice": 0.0,
"nodeId": "",
"environment": ""
}
# Used in HomePage.vue to save app_id and user_id
# @main_routes.route('/save_ids', methods=['POST'])
# def save_ids():
# data = request.json
# app_id = data.get('app_id')
# user_id = data.get('user_id')
# print("user_id:", user_id)
# # Respond back with a success message
# return jsonify({"message": "IDs received successfully.", "app_id": app_id, "user_id": user_id})
# Used in CriteriaSelection.vue
@main_routes.route('/get_hierarchical_category_list')
@@ -115,115 +36,141 @@ def get_hierarchical_category_list():
@main_routes.route('/process_selected_criteria', methods=['POST'])
def process_selected_criteria():
    try:
+       # Get selected criteria, app_id and user_id sent from Frontend
        data = request.json
        selected_criteria = data.get('selectedItems', [])
+       print("-------------------------------------------------")

-       # application_id = data.get('app_id')
-       # user_id = data.get('user_id')
-       # print("user_id:", user_id)
-       # print("application_id:", application_id)
+       # Get app_id and user_id already obtained in the Frontend from URL
+       application_id = data.get('app_id')
+       user_id = data.get('user_id')
+       print("user_id:", user_id)
+       print("application_id:", application_id)

-       message_for_SAL = [{
-           "type": "NodeTypeRequirement",
-           "nodeTypes": ["IAAS", "PAAS", "FAAS", "BYON", "EDGE", "SIMULATION"]}
-           # ,{
-           #     "type": "AttributeRequirement",
-           #     "requirementClass": "hardware",
-           #     "requirementAttribute": "cores",
-           #     "requirementOperator": "GEQ",
-           #     "value": "64"
-           # },
-           # {
-           #     "type": "AttributeRequirement",
-           #     "requirementClass": "hardware",
-           #     "requirementAttribute": "ram",
-           #     "requirementOperator": "GEQ",
-           #     "value": "33000"
-           # }
-       ]
+       # Prepare message to be sent to SAL
+       message_for_SAL = [
+           {
+               "type": "NodeTypeRequirement",
+               "nodeTypes": ["IAAS", "PAAS", "FAAS", "BYON", "EDGE", "SIMULATION"],
+               # "nodeTypes": ["EDGE"],
+               "jobIdForEDGE": ""
+               # "jobIdForEDGE": "FCRnewLight0"
+           }
+       ]
        body_json_string_for_SAL = json.dumps(message_for_SAL)

        RequestToSal = {
            "metaData": {"user": "admin"},
            "body": body_json_string_for_SAL
        }
-       # print("RequestToSal:", RequestToSal)
+       print("Request to Sal:", RequestToSal)

        sal_reply = activemq.call_publisher(RequestToSal)

        # Parse the JSON string to a Python object
        nodes_data = json.loads(sal_reply) if isinstance(sal_reply, str) else sal_reply
-       # print("nodes_data", nodes_data)
+       print("nodes_data", nodes_data)

-       extracted_data, node_ids, node_names = extract_SAL_node_candidate_data_Front(nodes_data)
-       # print("extracted_data:", extracted_data)
-       field_mapping = create_criteria_mapping()
-       # print("field_mapping", field_mapping)
+       # Check if there is any error in SAL's reply body
+       if 'key' in nodes_data and any(keyword in nodes_data['key'].lower() for keyword in ['error', 'exception']):
+           messageToDataGrid = "Error in SAL's reply" + nodes_data['message']
+           print("Error found in SAL's message body:", messageToDataGrid)
+           node_names = []
+           grid_data_with_names = []
+       else:  # No error found in SAL's reply body
+           ###--- For Review, use ONLY ONE block, SAL's response or JSON file ----------------------###

+           ###-------- Extract data from SAL's response --------###
+           print("Use of SAL's response")
+           extracted_data, node_ids, node_names = extract_SAL_node_candidate_data_Front(nodes_data)
+           print("SAL's extracted_data: ", extracted_data)
+           ###-------- Extract data from SAL's response --------###

+           ###-------- Extract data from dummy JSON file --------###
+           # print("Use of dummy JSON file")
+           # json_file_path = 'dummySALresponse.json'
+           # jsondata = read_json_file_as_string(json_file_path)
+           # nodes_data = json.loads(jsondata)
+           # if nodes_data:
+           #     extracted_data, node_ids, node_names = extract_SAL_node_candidate_data_Front(nodes_data)
+           ###-------- Extract data from dummy JSON file --------###

+           ###--- For Review, use ONLY ONE block, SAL's response or JSON file ----------------------###

+           # print("extracted_data:", extracted_data)
+           field_mapping = create_criteria_mapping()
+           # print("field_mapping", field_mapping)

            default_list_criteria_mapping = {
                # "Cost": "price",
                "Operating cost": "price",
                "Memory Price": "memoryPrice",
                "Number of CPU Cores": "cores",
                "Memory Size": "ram",
                "Storage Capacity": "disk"
            }

            grid_data = {}

            for node_data in extracted_data:
                node_id = node_data.get('id')
                # print("Before create_node_name")
                node_name = create_node_name(node_data) if node_data else "Unknown"
                # print("After create_node_name")

                if node_id and node_id not in grid_data:
                    grid_data[node_id] = {"name": node_name, "criteria": []}

                hardware_info = node_data.get('hardware', {})  # contains the values for criteria coming from SAL
                for criterion_key in selected_criteria:
                    # print("criterion_key:", criterion_key)
                    criterion_info = file.get_subject_data(file.SMI_prefix + criterion_key)  # It contains the titles of the criteria
                    # print("criterion_info:", criterion_info)

                    # Resolve title and then map title to field name
                    criterion_data_type = get_attr_data_type(criterion_key)  # criterion_data_type: {'type': 1, 'values': ['Low', 'Medium', 'High']}
                    # print("criterion_data_type:", criterion_data_type)
                    criterion_title = criterion_info["title"]

                    # Fetch the values of the selected default criteria
                    if criterion_title in default_list_criteria_mapping:
                        SAL_criterion_name = field_mapping.get(criterion_title)  # Map the criterion title with the criterion name in SAL's reply
                        value = hardware_info.get(SAL_criterion_name, "N/A")  # Get the criterion values
                    else:
                        # Handle other criteria (this part may need adjustment based on your actual data structure)
                        # value = "N/A"  # Placeholder for the logic to determine non-default criteria values
                        # Generate random or default values for rest criteria
                        type_value = criterion_data_type['type']
                        # print("type_value:", type_value)
                        if type_value == 1:
                            value = random.choice(["High", "Medium", "Low"])
                        elif type_value == 5:
                            value = random.choice(["True", "False"])
                        else:
                            value = round(random.uniform(1, 100), 2)

                    criterion_data = {
                        "title": criterion_title,
                        "value": value,
                        "data_type": criterion_data_type  # criterion_data_type: {'type': 1, 'values': ['Low', 'Medium', 'High']}
                    }
                    grid_data[node_id]["criteria"].append(criterion_data)

            grid_data_with_names = [{
                'name': data["name"],
                'id': node_id,
                'criteria': data["criteria"]
            } for node_id, data in grid_data.items()]
            # print("grid_data_with_names:", grid_data_with_names)
+           messageToDataGrid = "True"

        return jsonify({
-           'success': True,
+           'success': messageToDataGrid,
            'gridData': grid_data_with_names,
            'NodeNames': node_names
        })
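For reference, a sketch of the payload shape this route returns to DataGrid.vue, derived from the jsonify call above (all values illustrative):

    # {
    #     "success": "True",  # or the SAL error message on the error branch
    #     "gridData": [{"name": "EDGE - Warsaw, Poland - UBUNTU",
    #                   "id": "node-1",
    #                   "criteria": [{"title": "Memory Size", "value": 4096,
    #                                 "data_type": {"type": 2}}]}],
    #     "NodeNames": ["EDGE - Warsaw, Poland - UBUNTU"]
    # }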
@@ -233,79 +180,8 @@ def process_selected_criteria():
        return jsonify({'success': False, 'error': str(e)}), 500
# Works by reading a JSON file with dummy data
# def process_selected_criteria():
# try:
# data = request.json
# # Selected Criteria by the User from the List
# selected_criteria = data.get('selectedItems', [])
# # Extract app_id, user_id
# application_id = data.get('app_id') # Take it from local storage from frontend
# # application_id = 'd535cf554ea66fbebfc415ac837a5828' #dummy application_id_optimizer
# user_id = data.get('user_id') # Take it from local storage from frontend
# print("user_id:", user_id)
# print("application_id:", application_id)
#
# ## Process SAL's Reply
# # extracted_data, number_of_nodes, node_names = extract_node_candidate_data('dummy_data_node_candidates.json')
# extracted_data, node_ids, node_names = extract_node_candidate_data('SAL_Response_11EdgeDevs.json')
# print("extracted_data:", extracted_data)
#
# # Use the create_criteria_mapping() to get the criteria mappings
# field_mapping = create_criteria_mapping(selected_criteria, extracted_data)
# grid_data = {name: [] for name in node_names}
#
# # Prepare the data to be sent to DataGrid.vue
# for node_data in extracted_data:
# node_name = node_data.get('name') # Using name to match
# node_id = node_data.get('id') # Extract the node ID
# grid_data[node_name] = {"id": node_id, "criteria": []}
#
# if node_name in grid_data: # Check if node_name exists in grid_data keys
# for item in selected_criteria:
# criterion_data = {}
# criterion_data["data_type"] = get_attr_data_type(item)
# item_data_dict = file.get_subject_data(file.SMI_prefix + item)
# criterion_data["title"] = item_data_dict["title"]
# field_name = field_mapping.get(criterion_data["title"], item)
#
# # Check if the field_name is a direct key or nested inside 'hardware'
# if field_name in node_data:
# value = node_data[field_name]
# elif 'hardware' in node_data and field_name in node_data['hardware']:
# value = node_data['hardware'][field_name]
# else:
# # Generate random or default values for unmapped criteria or missing data
# item_data_type_value = criterion_data["data_type"].get('type')
# if item_data_type_value == 1:
# value = random.choice(["High", "Medium", "Low"])
# elif item_data_type_value == 5:
# value = random.choice(["True", "False"])
# else:
# value = round(random.uniform(1, 100), 2)
#
# criterion_data["value"] = value if value != 0 else 0.00001
# # grid_data[node_id].append(criterion_data)
# grid_data[node_name]["criteria"].append(criterion_data)
#
# # Conversion to list format remains unchanged
# # grid_data_with_names = [{'name': name, 'criteria': data} for name, data in grid_data.items()]
# grid_data_with_names = [{'name': name, 'id': data["id"], 'criteria': data["criteria"]} for name, data in grid_data.items()]
# print("grid_data_with_names:", grid_data_with_names)
#
# # Send the comprehensive grid_data_with_names to the frontend
# return jsonify({
# 'success': True,
# 'gridData': grid_data_with_names,
# 'NodeNames': node_names
# })
# except Exception as e:
# print(f"Error processing selected items: {e}")
# traceback.print_exc()
# return jsonify({'success': False, 'error': str(e)}), 500
# Used for Evaluating the node candidates
@main_routes.route('/process-evaluation-data', methods=['POST'])
def process_evaluation_data():
    try:
@@ -317,17 +193,18 @@ def process_evaluation_data():
        # Transform grid data to table and get node names directly from the function
        data_table, relative_wr_data, immediate_wr_data, node_names, node_ids = transform_grid_data_to_table(data)
-       # print("data_table FRONT:", data_table)
+       print("data_table Frontend:", data_table)
        # print("relative_wr_data:", relative_wr_data)
        # print("immediate_wr_data:", immediate_wr_data)
        # print("# node_names:", len(node_names))
        # print("# node_ids:", len(node_ids))

        # Convert RAM and Cores
-       data_table = convert_data_table(data_table)
+       data_table = convert_data_table(data_table)  # Convert RAM and # of Cores, e.g. 1/X

        # Run Optimization - Perform evaluation
        results = perform_evaluation(data_table, relative_wr_data, immediate_wr_data, node_names, node_ids)
-       # print(results)
+       print("Results: ", results)
+       print("-------------------------------------------------")

        # Return the results
        return jsonify({'status': 'success', 'results': results})
@@ -336,7 +213,7 @@ def process_evaluation_data():
        return jsonify({'status': 'error', 'message': error_message}), 500

-#Creates a new user
+# Creates a new user
@main_routes.route('/user', methods=['POST'])
def create_user():
    data = request.json

View File

@@ -170,8 +170,8 @@
      const ranks = this.results.map(result => result.Rank);
      this.$nextTick(() => {
-       this.createBarChart(titles, deaScores, 'deascoresChart', 'Fog Node Scores');
-       this.createHorizontalBarChart(titles, ranks, 'ranksChart', 'Fog Node Ranking');
+       this.createBarChart(titles, deaScores, 'deascoresChart', 'Scores');
+       this.createHorizontalBarChart(titles, ranks, 'ranksChart', 'Ranking');
      });
    },
    createBarChart(labels, data, chartId, label) {