Init backend project
Change-Id: I61048bc79b3e1d43111d11f7c0f5f4272e8cb27e
This commit is contained in:
parent
ffa43a0b15
commit
78ce3a87f7
108
cfsb-backend/DEA.py
Normal file
108
cfsb-backend/DEA.py
Normal file
@ -0,0 +1,108 @@
|
||||
import numpy as np
|
||||
from scipy.optimize import linprog
|
||||
from scipy.stats import rankdata
|
||||
|
||||
def perform_evaluation(data_table, wr_data, fog_nodes_titles):
    """Score and rank fog nodes with Data Envelopment Analysis (DEA).

    Args:
        data_table: mapping of criterion title -> list of values, one value
            per fog node (DMU). All lists must have equal length.
        wr_data: weight-restriction constraints, each a dict with keys
            'LHSCriterion', 'RHSCriterion', 'Intense' and 'Operator'
            (1 -> w_lhs >= Intense * w_rhs, -1 -> w_lhs <= Intense * w_rhs,
            0 -> w_lhs == Intense * w_rhs).
        fog_nodes_titles: display names, one per DMU, same order as the
            value lists in data_table.

    Returns:
        List of dicts: {"Title", "DEA Score", "Rank"}; rank 1 is best.
        "DEA Score" is None for a DMU whose LP did not solve.
    """
    criteria_list = list(data_table.keys())
    criterion_index = {criterion: idx for idx, criterion in enumerate(criteria_list)}

    # Inequality system A x <= b and equality system A_eq x = b_eq.
    A = []
    A_eq = []
    b_eq = []

    # One constraint per DMU: weighted sum of its criterion values <= 1.
    for row_values in zip(*data_table.values()):
        A.append(list(row_values))
    # Exactly one RHS entry per DMU row. (The original built b with
    # extend([1] * len(A)) which, if executed inside the loop, produced a
    # quadratic-length b mismatching A.)
    b = [1.0] * len(A)

    # Translate each weight restriction into a linear constraint row.
    for constraint in wr_data:
        lhs_index = criterion_index[constraint['LHSCriterion']]
        rhs_index = criterion_index[constraint['RHSCriterion']]
        intensity = constraint['Intense']

        constraint_row = [0] * len(criteria_list)
        if constraint['Operator'] == 1:  # w_lhs >= intensity * w_rhs
            constraint_row[lhs_index] = -1
            constraint_row[rhs_index] = intensity
            A.append(constraint_row)
            b.append(0)
        elif constraint['Operator'] == -1:  # w_lhs <= intensity * w_rhs
            constraint_row[lhs_index] = 1
            constraint_row[rhs_index] = -intensity
            A.append(constraint_row)
            b.append(0)
        elif constraint['Operator'] == 0:  # w_lhs == intensity * w_rhs
            constraint_row[lhs_index] = -1
            constraint_row[rhs_index] = intensity
            A_eq.append(constraint_row)
            b_eq.append(0)

    # linprog accepts None when there are no equality constraints.
    A = np.array(A, dtype=float)
    b = np.array(b, dtype=float)
    A_eq = np.array(A_eq, dtype=float) if A_eq else None
    b_eq = np.array(b_eq, dtype=float) if b_eq else None

    num_of_dmus = len(next(iter(data_table.values())))
    num_criteria = len(criteria_list)
    epsilon = 0.0001  # strictly positive lower bound for every weight

    DEA_Scores = []
    # Solve one LP per DMU: maximize its own weighted score subject to
    # every DMU scoring at most 1 under the same weights.
    for dmu_index in range(num_of_dmus):
        dmu_values = [values[dmu_index] for values in data_table.values()]

        # linprog minimizes, so negate to maximize the DMU's score.
        c = -np.array(dmu_values)
        bounds = [(epsilon, None)] * num_criteria

        res = linprog(c, A_ub=A, b_ub=b, A_eq=A_eq, b_eq=b_eq,
                      bounds=bounds, method='highs')
        DEA_Scores.append(-res.fun if res.success else None)

    # rankdata cannot rank None; treat failed solves as -inf so they rank last.
    scores_for_ranking = [s if s is not None else float('-inf') for s in DEA_Scores]
    # 'max' tie method, then invert so the highest score gets rank 1.
    DEA_Scores_Ranked = len(scores_for_ranking) - rankdata(scores_for_ranking, method='max') + 1

    results_json = [
        {
            "Title": fog_nodes_titles[i],
            "DEA Score": DEA_Scores[i],
            "Rank": int(DEA_Scores_Ranked[i])
        }
        for i in range(len(fog_nodes_titles))
    ]
    return results_json
|
||||
|
||||
# # Provided data
|
||||
# data_table = {
|
||||
# 'Provider Track record': [44.3, 37.53, 51.91, 86.56, 28.43],
|
||||
# 'Agility': [41.8, 53.69, 91.3, 84.72, 58.37],
|
||||
# 'Reputation': [2, 1, 3, 1, 3],
|
||||
# 'Brand Name': [71.39, 83.11, 20.72, 91.07, 89.49]
|
||||
# }
|
||||
#
|
||||
# wr_data = [
|
||||
# {'LHSCriterion': 'Reputation', 'Operator': 1, 'Intense': 2.5, 'RHSCriterion': 'Brand Name'},
|
||||
# {'LHSCriterion': 'Brand Name', 'Operator': -1, 'Intense': 3, 'RHSCriterion': 'Agility'},
|
||||
# {'LHSCriterion': 'Brand Name', 'Operator': 0, 'Intense': 2, 'RHSCriterion': 'Provider Track record'}
|
||||
# ]
|
||||
#
|
||||
# fog_nodes_titles = ['Fog Node 1', 'Fog Node 2', 'Fog Node 3', 'Fog Node 4', 'Fog Node 5']
|
||||
#
|
||||
# Evaluation_JSON = perform_evaluation(data_table, wr_data,fog_nodes_titles)
|
||||
# print(Evaluation_JSON)
|
||||
# # print("DEA Scores:", DEA_Scores)
|
||||
# # print("Ranked DEA Scores:", DEA_Scores_Ranked)
|
17
cfsb-backend/Dockerfile
Normal file
17
cfsb-backend/Dockerfile
Normal file
@ -0,0 +1,17 @@
|
||||
# Base image: official Python 3.10.
FROM python:3.10

# All subsequent paths are relative to /code.
WORKDIR /code

# Copy requirements first so the dependency layer is cached between builds.
COPY ./requirements.txt /code/requirements.txt

# Install Python dependencies.
RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt

# Copy the application source.
COPY ./ /code

# Serve the Flask app on all interfaces, port 8001.
CMD [ "python3", "-m" , "flask", "run", "--host=0.0.0.0", "--port", "8001"]
|
24
cfsb-backend/README.md
Normal file
24
cfsb-backend/README.md
Normal file
@ -0,0 +1,24 @@
|
||||
# FogBrokerBack
|
||||
|
||||
Backend service providing criteria data
|
||||
|
||||
## Installation
|
||||
|
||||
Install requirements using [pip](https://pip.pypa.io/en/stable/).
|
||||
|
||||
```bash
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
```bash
|
||||
flask --app app.py run
|
||||
```
|
||||
# Endpoints
|
||||
|
||||
#### returns the criteria in a hierarchical list
|
||||
/get_hierarchical_category_list
|
||||
|
||||
#### accepts and returns the selected criteria
|
||||
/process_selected_items
|
183
cfsb-backend/app.py
Normal file
183
cfsb-backend/app.py
Normal file
@ -0,0 +1,183 @@
|
||||
from flask import Flask, request, jsonify, render_template
|
||||
from flask_cors import CORS, cross_origin
|
||||
# import read_file
|
||||
import get_data as file
|
||||
import random
|
||||
import json
|
||||
import data_types as attr_data_types
|
||||
from DEA import perform_evaluation
|
||||
from data_types import get_attr_data_type
|
||||
|
||||
app = Flask(__name__)
|
||||
CORS(app) # This enables CORS for all routes
|
||||
# CORS(app, resources={r"/api/*": {"origins": "http://localhost:8080"}})
|
||||
|
||||
# Store evaluation results globally
|
||||
evaluation_results_global = {}
|
||||
criteria_titles = []
|
||||
|
||||
# Global variable for the number of rows
|
||||
NUMBER_OF_FOG_NODES = 7
|
||||
def create_fog_node_titles(NUMBER_OF_FOG_NODES):
    """Return display titles "Fog Node 1" .. "Fog Node N", in order."""
    titles = []
    for node_number in range(1, NUMBER_OF_FOG_NODES + 1):
        titles.append(f"Fog Node {node_number}")
    return titles
|
||||
|
||||
FOG_NODES_TITLES = create_fog_node_titles(NUMBER_OF_FOG_NODES)
|
||||
|
||||
|
||||
# List of items with Ordinal Data
|
||||
Ordinal_Variables = ['attr-reputation', 'attr-assurance']
|
||||
NoData_Variables = ['attr-security', 'attr-performance-capacity', 'attr-performance-suitability']
|
||||
Cont_Variables = ['attr-performance', 'attr-financial', 'attr-performance-capacity-memory',
|
||||
'attr-performance-capacity-memory-speed']
|
||||
|
||||
# TODO boolean vars random choice generate
|
||||
#Bool_Variables = []
|
||||
|
||||
@app.route('/get_hierarchical_category_list')
def get_hierarchical_category_list():
    """Return the full criteria hierarchy (levels 1-3) as JSON.

    Delegates to get_data.get_level_1_items(), which builds the nested
    structure from the RDF preferences model.
    """
    data = file.get_level_1_items()
    # TODO order by something
    return jsonify(data)
|
||||
|
||||
|
||||
# Receives the Selected Criteria and Generates data
@app.route('/process_selected_items', methods=['POST'])
def process_selected_items():
    """Generate random demo data for the criteria selected in the UI.

    Expects JSON {"selectedItems": [...]} and responds with
    {"success": True, "gridData": {item: {"data_type", "data_values",
    "title"}}} — one generated value per fog node. Also refreshes the
    module-global criteria_titles used by /get-criteria-titles.
    """
    try:
        data = request.json
        selected_items = data.get('selectedItems', [])
        global criteria_titles
        criteria_titles = [file.get_subject_data(file.SMI_prefix + item)["title"] for item in selected_items]

        # Generate values for each selected item.
        grid_data = {}
        for item in selected_items:
            item_data = {}
            item_data["data_type"] = get_attr_data_type(item)
            # The title lookup was duplicated in every branch below; do it once.
            item_data["title"] = file.get_subject_data(file.SMI_prefix + item)["title"]

            if item in Ordinal_Variables:
                item_data["data_values"] = [random.choice(["High", "Medium", "Low"])
                                            for _ in range(NUMBER_OF_FOG_NODES)]
            elif item in NoData_Variables:
                # Intentionally left blank for these items.
                item_data["data_values"] = ['' for _ in range(NUMBER_OF_FOG_NODES)]
            elif item in Cont_Variables:
                item_data["data_values"] = [round(random.uniform(50.5, 312.3), 2)
                                            for _ in range(NUMBER_OF_FOG_NODES)]
            else:
                # Default data generation for all other items.
                item_data["data_values"] = [round(random.uniform(1, 100), 2)
                                            for _ in range(NUMBER_OF_FOG_NODES)]
            grid_data[item] = item_data

        return jsonify({'success': True, 'gridData': grid_data})
    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500
|
||||
|
||||
|
||||
@app.route('/show_selected_items/<items>')
@cross_origin()
def show_selected_items(items):
    """Render the comma-separated items from the URL as an HTML list."""
    return render_template('selected_items.html', items=items.split(','))
|
||||
|
||||
|
||||
@app.route('/get-criteria-titles', methods=['GET'])
def get_criteria_titles():
    """Return the titles stored by the last /process_selected_items call."""
    return jsonify(criteria_titles)
|
||||
|
||||
|
||||
@app.route('/get-fog-nodes-titles', methods=['GET'])
def get_fog_nodes_titles():
    """Return the fixed list of fog node display titles."""
    return jsonify(FOG_NODES_TITLES)
|
||||
|
||||
|
||||
# # Process the Grid Data and the WR Data
|
||||
# @app.route('/process-evaluation-data', methods=['POST'])
|
||||
# def process_evaluation_data():
|
||||
# global evaluation_results_global
|
||||
# try:
|
||||
# data = request.get_json()
|
||||
# data_table, wr_data = transform_grid_data_to_table(data)
|
||||
# print(data_table)
|
||||
# print(wr_data)
|
||||
# evaluation_results_global = perform_evaluation(data_table, wr_data,FOG_NODES_TITLES)
|
||||
# return jsonify({'status': 'success', 'message': 'Evaluation completed successfully'})
|
||||
# except Exception as e:
|
||||
# app.logger.error(f"Error processing evaluation data: {str(e)}")
|
||||
# return jsonify({'status': 'error', 'message': str(e)}), 500
|
||||
|
||||
@app.route('/process-evaluation-data', methods=['POST'])
def process_evaluation_data():
    """Run the DEA evaluation on the grid/WR payload posted by the frontend.

    The result is stored in the module-global evaluation_results_global so
    /get-evaluation-results can serve it later.
    """
    global evaluation_results_global
    try:
        # Log the raw request body before parsing, for debugging.
        request_data = request.get_data(as_text=True)
        app.logger.info(f"Received data: {request_data}")

        data = request.get_json()
        if data is None:
            raise ValueError("Received data is not in JSON format or 'Content-Type' header is not set to 'application/json'")

        app.logger.info(f"Parsed JSON data: {data}")

        # Reshape the frontend payload into DEA inputs.
        data_table, wr_data = transform_grid_data_to_table(data)
        app.logger.info(f"Data table: {data_table}, WR data: {wr_data}")

        evaluation_results_global = perform_evaluation(data_table, wr_data, FOG_NODES_TITLES)
        return jsonify({'status': 'success', 'message': 'Evaluation completed successfully'})
    except Exception as e:
        error_message = str(e)
        app.logger.error(f"Error processing evaluation data: {error_message}")
        return jsonify({'status': 'error', 'message': error_message}), 500
|
||||
|
||||
|
||||
def transform_grid_data_to_table(json_data):
    """Convert the frontend evaluation payload into DEA inputs.

    Args:
        json_data: request body of shape
            {"gridData": {"gridData": {item: {"title", "data_values", ...}}},
             "wrData": [...]}.

    Returns:
        (data_table, wr_data): data_table maps each criterion title to its
        list of numeric values (one per fog node); wr_data is passed through.

    Raises:
        ValueError: if criteria have differing numbers of values.
    """
    grid_data = json_data.get('gridData', {}).get('gridData', {})
    wr_data = json_data.get('wrData', [])

    # if not wr_data:
    #     # return a default value
    #     wr_data = default_wr_data()

    data_table = {}
    row_count = None

    # Map linguistic and boolean strings to numeric scores; values not in the
    # mapping (already-numeric entries, blanks) pass through unchanged.
    # Fix: boolean_value_mapping was previously defined but never applied.
    value_mapping = {"High": 3, "Medium": 2, "Low": 1, "True": 2, "False": 1}

    for key, value in grid_data.items():
        title = value.get('title')
        data_values = value.get('data_values', [])

        numeric_data_values = [value_mapping.get(val, val) for val in data_values]

        # Every criterion must provide the same number of rows (fog nodes).
        if row_count is None:
            row_count = len(numeric_data_values)
        if len(numeric_data_values) != row_count:
            raise ValueError(f"Inconsistent row count for {title}")

        data_table[title] = numeric_data_values

    return data_table, wr_data
|
||||
|
||||
|
||||
# Endpoint to transfer the results to Results.vue
|
||||
@app.route('/get-evaluation-results', methods=['GET'])
|
||||
def get_evaluation_results():
|
||||
return jsonify(evaluation_results_global)
|
||||
|
||||
if __name__ == '__main__':
|
||||
app.run(debug=True)
|
88
cfsb-backend/app_HierList.py
Normal file
88
cfsb-backend/app_HierList.py
Normal file
@ -0,0 +1,88 @@
|
||||
from flask import Flask, request, jsonify, render_template, redirect, url_for
|
||||
from flask_cors import CORS, cross_origin
|
||||
import json
|
||||
app = Flask(__name__, template_folder='templates')
|
||||
#app = Flask(__name__)
|
||||
CORS(app)
|
||||
#CORS(app, resources={r"/get_hierarchical_category_list": {"origins": "http://localhost:8080"}})
|
||||
|
||||
|
||||
hierarchy_data = hierarchical_list = [
|
||||
{
|
||||
"name": "Level 1 - Item 1",
|
||||
"children": [
|
||||
{
|
||||
"name": "Level 2 - Item 1.1",
|
||||
"children": [
|
||||
{"name": "Level 3 - Item 1.1.1"},
|
||||
{"name": "Level 3 - Item 1.1.2"},
|
||||
],
|
||||
},
|
||||
{
|
||||
"name": "Level 2 - Item 1.2",
|
||||
"children": [
|
||||
{"name": "Level 3 - Item 1.2.1"},
|
||||
{"name": "Level 3 - Item 1.2.2"},
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
"name": "Level 1 - Item 2",
|
||||
"children": [
|
||||
{
|
||||
"name": "Level 2 - Item 2.1",
|
||||
"children": [
|
||||
{"name": "Level 3 - Item 2.1.1"},
|
||||
{"name": "Level 3 - Item 2.1.2"},
|
||||
],
|
||||
},
|
||||
{
|
||||
"name": "Level 2 - Item 2.2",
|
||||
"children": [
|
||||
{"name": "Level 3 - Item 2.2.1"},
|
||||
{"name": "Level 3 - Item 2.2.2"},
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
# Add more items as needed
|
||||
]
|
||||
|
||||
# print(json.dumps(hierarchical_list, indent=2))
|
||||
'''
|
||||
def traverse_hierarchy(node, selected_items, required_count):
|
||||
if node['name'] in selected_items:
|
||||
required_count -= 1
|
||||
for child in node.get('children', []):
|
||||
required_count = traverse_hierarchy(child, selected_items, required_count)
|
||||
return required_count
|
||||
'''
|
||||
@app.route('/get_hierarchical_category_list')
def get_hierarchical_category_list():
    """Return the hard-coded demo hierarchy as JSON."""
    return jsonify(hierarchy_data)
|
||||
|
||||
@app.route('/process_selected_items', methods=['POST'])
def process_selected_items():
    """Accept the selected item names and redirect to the display page."""
    try:
        data = request.get_json()
        selected_items = data.get('selectedItems', [])

        # Print selected items for debugging
        print("Selected Items:", selected_items)

        # Redirect to the show_selected_items route with the items
        # joined into a single comma-separated URL segment.
        return redirect(url_for('show_selected_items', items=','.join(selected_items)))
    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500
|
||||
|
||||
@app.route('/show_selected_items/<items>')
@cross_origin()
def show_selected_items(items):
    """Render the comma-separated items from the URL as an HTML list."""
    return render_template('selected_items.html', items=items.split(','))
|
||||
|
||||
if __name__ == '__main__':
|
||||
app.run(debug=True)
|
1302
cfsb-backend/assets/Preferences_Model.ttl
Normal file
1302
cfsb-backend/assets/Preferences_Model.ttl
Normal file
File diff suppressed because it is too large
Load Diff
87
cfsb-backend/data_types.py
Normal file
87
cfsb-backend/data_types.py
Normal file
@ -0,0 +1,87 @@
|
||||
Linguistic = 1
|
||||
Float = 2
|
||||
Seconds = 3
|
||||
Percentage = 4
|
||||
Boolean = 5
|
||||
Integer = 6
|
||||
Linguistic_bad = 7
|
||||
|
||||
# 1 for LOW, 2 for MEDIUM, etc.
|
||||
linguistic_low_choices = ["Low", "Medium", "High"]
|
||||
linguistic_very_low_choices = ["Very low", "LOW", "MEDIUM", "HIGH", "VERY HIGH", "PERFECT"]
|
||||
linguistic_bad_choices = ["BAD", "OK", "GOOD"]
|
||||
boolean_choices = ["True", "False"]
|
||||
|
||||
linguistic_low_attributes = [
|
||||
"attr-accountability-auditability",
|
||||
"attr-78baf8b3-2d1d-4899-88ef-ca74990f07eb",
|
||||
"attr-agility-adaptability",
|
||||
"attr-agility-portability",
|
||||
"attr-assurance-maintainability",
|
||||
"attr-assurance-service-stability",
|
||||
"attr-financial-structure",
|
||||
"attr-performance-accuracy",
|
||||
"attr-usability-installability",
|
||||
"attr-usability-learnability",
|
||||
"attr-usability-operability",
|
||||
"attr-usability-transparency",
|
||||
"attr-usability-understandability",
|
||||
"attr-usability-reusability",
|
||||
"d503cabe-17d7-4b9b-9231-a8b211f3ce11",
|
||||
"attr-reputation-contracting-experience",
|
||||
"attr-reputation-ease-of-doing-business",
|
||||
"attr-reputation-provider-ethicality",
|
||||
"attr-reputation-sustainability-economical-impact",
|
||||
"attr-reputation-sustainability-societal-impact"
|
||||
]
|
||||
|
||||
linguistic_very_low_attributes = [
|
||||
"attr-assurance", # TODO delete this, we keep it for testing
|
||||
"attr-assurance-serviceability-support-satisfaction"
|
||||
]
|
||||
|
||||
linguistic_bad_attributes = [
|
||||
"attr-reputation-brand-name",
|
||||
"attr-reputation-service-reputation",
|
||||
]
|
||||
|
||||
boolean_attributes = [
|
||||
"fd871ec6-d953-430d-a354-f13c66fa8bc9",
|
||||
"dcedb196-2c60-4c29-a66d-0e768cfd698a",
|
||||
"0cf00a53-fd33-4887-bb38-e0bbb04e3f3e",
|
||||
"d95c1dae-1e22-4fb4-9cdc-743e96d0dddc",
|
||||
"8cd09fe9-c119-4ccd-b651-0f18334dbbe4",
|
||||
"7147995c-8e68-4106-ab24-f0a7673eb5f5",
|
||||
"c1c5b3c9-6178-4d67-a7e3-0285c2bf98ef"
|
||||
]
|
||||
|
||||
time_in_seconds_attributes = [
|
||||
"attr-assurance-reliability",
|
||||
]
|
||||
|
||||
percentage_attributes = [
|
||||
"attr-assurance-availability",
|
||||
"attr-reputation-provider-business-stability",
|
||||
"55a60ec3-55f7-48db-83bc-be2875c5210c"
|
||||
]
|
||||
|
||||
|
||||
def get_attr_data_type(attribute):
    """Map an attribute id to its UI data-type descriptor.

    Returns {"type": <int code>, "values": [choices]} for linguistic
    attributes, or {"type": 0} for every other attribute.
    """
    data = {}
    print("get type for " + attribute)
    if attribute in linguistic_low_attributes:
        data["type"] = 1  # Linguistic
        data["values"] = linguistic_low_choices
    elif attribute in linguistic_very_low_attributes:
        data["type"] = 1
        # NOTE(review): returns linguistic_low_choices, not the
        # linguistic_very_low_choices list defined above — confirm intended.
        data["values"] = linguistic_low_choices
    elif attribute in linguistic_bad_attributes:
        data["type"] = 7  # Linguistic_bad
        # NOTE(review): returns linguistic_low_choices, not the
        # linguistic_bad_choices list defined above — confirm intended.
        data["values"] = linguistic_low_choices
    # elif attribute in boolean_attributes:
    #     data["type"] = 5
    #     data["values"] = boolean_choices
    else:
        data["type"] = 0  # all other cases
    print(data)
    return data
|
140
cfsb-backend/get_data.py
Normal file
140
cfsb-backend/get_data.py
Normal file
@ -0,0 +1,140 @@
|
||||
from rdflib import Graph, URIRef
|
||||
|
||||
# Create a new RDF graph
|
||||
g = Graph()
|
||||
|
||||
# Load TTL data into the graph
|
||||
file_path = 'assets/Preferences_Model.ttl'
|
||||
g.parse(file_path, format='turtle')
|
||||
|
||||
# Create variables for predicate names
|
||||
SMI_prefix = "https://www.nebulouscloud.eu/smi/SMI-OBJECT#"
|
||||
a = "http://www.w3.org/1999/02/22-rdf-syntax-ns#type"
|
||||
type = "http://purl.org/dc/elements/1.1/type"
|
||||
terms_URI = "http://purl.org/dc/terms/URI"
|
||||
terms_created = "http://purl.org/dc/terms/created"
|
||||
terms_description = "http://purl.org/dc/terms/description"
|
||||
terms_identifier = "http://purl.org/dc/terms/identifier"
|
||||
terms_modified = "http://purl.org/dc/terms/modified"
|
||||
terms_title = "http://purl.org/dc/terms/title"
|
||||
skos_broader = "http://www.w3.org/2004/02/skos/core#broader"
|
||||
|
||||
|
||||
def get_level_1_items():
    """Collect level-1 criteria (direct children of attr-root) from the RDF
    graph and return the fully-populated 3-level hierarchy."""
    items_list = []
    level_1_items_list = []
    for subject, predicate, object in g:
        # A level-1 item is any subject whose skos:broader points at attr-root.
        if "broader" in predicate and "attr-root" in object:
            item_dict = {}
            # keep only the attribute part - e.g. attr-performance
            attribute = str(subject)
            attribute = attribute.replace(SMI_prefix, '')
            # add it in level_1_items_list for easy search in level 2 items loop
            level_1_items_list.append(attribute)
            item_data_dict = get_subject_data(str(subject))
            item_dict["title"] = item_data_dict["title"]
            item_dict["description"] = item_data_dict["description"]
            item_dict["name"] = attribute
            item_dict["children"] = []
            items_list.append(item_dict)
    # Continue building the hierarchy one level down.
    items_2_list = get_level_2_items(level_1_items_list, items_list)
    return items_2_list
|
||||
|
||||
|
||||
def get_level_2_items(level_1_items_list, level_1_items_dict_list):
    """Collect level-2 items (children of any level-1 item) and continue
    building the hierarchy at level 3."""
    items_list = []
    level_2_items_list = []
    for subject, predicate, object in g:
        if "broader" in predicate:
            # Strip the namespace so the parent can be matched by short name.
            object_str = str(object)
            object_str = object_str.replace(SMI_prefix, '')
            if object_str in level_1_items_list:
                item_dict = {}
                level_2_attribute = str(subject)
                level_2_attribute = level_2_attribute.replace(SMI_prefix, '')
                level_2_items_list.append(level_2_attribute)
                item_data_dict = get_subject_data(str(subject))
                item_dict["title"] = item_data_dict["title"]
                item_dict["description"] = item_data_dict["description"]
                item_dict["parent"] = object_str
                item_dict["name"] = level_2_attribute
                item_dict["children"] = []
                items_list.append(item_dict)
    items_3_list = get_level_3_items(level_2_items_list, items_list, level_1_items_dict_list)
    return items_3_list
|
||||
|
||||
|
||||
def get_level_3_items(level_2_items_list, level_2_items_dict_list, level_1_items_dict_list):
    """Collect level-3 items (children of any level-2 item), then wire the
    whole parent/child structure together."""
    items_list = []
    level_3_items_list = []
    for subject, predicate, object in g:
        if "broader" in predicate:
            # Strip the namespace so the parent can be matched by short name.
            object_str = str(object)
            object_str = object_str.replace(SMI_prefix, '')
            if object_str in level_2_items_list:
                item_dict = {}
                level_3_attribute = str(subject)
                level_3_attribute = level_3_attribute.replace(SMI_prefix, '')
                level_3_items_list.append(level_3_attribute)
                item_data_dict = get_subject_data(str(subject))
                item_dict["title"] = item_data_dict["title"]
                item_dict["description"] = item_data_dict["description"]
                item_dict["parent"] = object_str
                item_dict["name"] = level_3_attribute
                item_dict["children"] = []
                items_list.append(item_dict)
    level_2_children_list = insert_level_2_children(level_1_items_dict_list, level_2_items_dict_list, items_list)
    return level_2_children_list
|
||||
|
||||
|
||||
def insert_level_2_children(level_1_items_dict_list, level_2_items_dict_list, level_3_items_dict_list):
    """Attach each level-3 item to its level-2 parent's "children" list,
    then delegate level-2 -> level-1 attachment."""
    for level_2_item in level_2_items_dict_list:
        level_2_children_list = []
        for level_3_item in level_3_items_dict_list:
            if level_3_item["parent"] == level_2_item["name"]:
                item_dict = {"name": level_3_item["name"]}
                # level_2_children_list.append(item_dict)
                # Append the full level-3 dict, not just its name.
                level_2_children_list.append(level_3_item)
        # here to append the list at the correct position of level_2_items_dict_list
        level_2_item["children"] = level_2_children_list
    items_dict_list = insert_level_1_children(level_1_items_dict_list, level_2_items_dict_list)
    # return level_2_items_dict_list
    return items_dict_list
|
||||
|
||||
|
||||
def insert_level_1_children(level_1_items_dict_list, level_2_items_dict_list):
    """Attach each level-2 item to its level-1 parent's "children" list.

    Mutates the level-1 dicts in place and returns the same list.
    """
    for parent_item in level_1_items_dict_list:
        parent_name = parent_item["name"]
        parent_item["children"] = [
            child for child in level_2_items_dict_list
            if child["parent"] == parent_name
        ]
    return level_1_items_dict_list
|
||||
|
||||
|
||||
def get_subject_data(item_subject):
    """Return {"title", "description"} for one RDF subject URI (as string).

    Falls back to "No description available" and to the short attribute
    name as title when the stored literal is a single space.
    """
    subject_data = {
        "title": "",
        "description": ""
    }
    for subject, predicate, object in g:
        if str(subject) == item_subject:
            # print("checking data for " + item_subject + " and subject is " + subject)
            if str(predicate) == terms_description and not str(object) == " ":
                subject_data["description"] = str(object)
            elif str(predicate) == terms_description:
                subject_data["description"] = "No description available"
            if str(predicate) == terms_title and not str(object) == " ":
                subject_data["title"] = str(object)
            elif str(predicate) == terms_title:
                # No usable title literal: use the short attribute name.
                attr_subject = str(item_subject)
                attr_subject = attr_subject.replace(SMI_prefix, '')
                subject_data["title"] = attr_subject
    return subject_data
|
183
cfsb-backend/read_file.py
Normal file
183
cfsb-backend/read_file.py
Normal file
@ -0,0 +1,183 @@
|
||||
from rdflib import Graph, URIRef
|
||||
# from rdflib.namespace import RDF, RDFS, DC, DCTERMS, SKOS
|
||||
|
||||
# Create a new RDF graph
|
||||
g = Graph()
|
||||
|
||||
# Load TTL data into the graph
|
||||
file_path = 'assets/Preferences_Model.ttl'
|
||||
g.parse(file_path, format='turtle')
|
||||
SMI_prefix = "https://www.nebulouscloud.eu/smi/SMI-OBJECT#"
|
||||
|
||||
a = "http://www.w3.org/1999/02/22-rdf-syntax-ns#type"
|
||||
type = "http://purl.org/dc/elements/1.1/type"
|
||||
terms_URI = "http://purl.org/dc/terms/URI"
|
||||
terms_created = "http://purl.org/dc/terms/created"
|
||||
terms_description = "http://purl.org/dc/terms/description"
|
||||
terms_identifier = "http://purl.org/dc/terms/identifier"
|
||||
terms_modified = "http://purl.org/dc/terms/modified"
|
||||
terms_title = "http://purl.org/dc/terms/title"
|
||||
skos_broader = "http://www.w3.org/2004/02/skos/core#broader"
|
||||
|
||||
subjects = g.subjects()
|
||||
predicates = g.predicates()
|
||||
objects = g.objects()
|
||||
|
||||
# for subject in subjects:
|
||||
# print(subject)
|
||||
# print(g.objects(subject=subject))
|
||||
#
|
||||
# for predicate in predicates:
|
||||
# print(predicate)
|
||||
#
|
||||
# for object in objects:
|
||||
# print("object start")
|
||||
# print(object)
|
||||
# print(g.subject_predicates(object))
|
||||
|
||||
# print(g.serialize(format='turtle'))
|
||||
|
||||
# file_data = g.serialize(format='turtle')
|
||||
|
||||
level_1_items = []
|
||||
level_1_subjects_dict = {}
|
||||
level_2_items = []
|
||||
level_2_subjects_dict = {}
|
||||
level_3_items = []
|
||||
level_3_subjects_dict = {}
|
||||
|
||||
|
||||
def scan_level_1_items():
    """Fill the module-global level_1_items / level_1_subjects_dict with
    every subject whose skos:broader is attr-root; return the dict."""
    for subject, predicate, object in g:
        print("\nloop data for level 1")
        # print(f"Subject: {subject}, Predicate: {predicate}, Object: {object}")
        if "broader" in predicate and "attr-root" in object:
            # Strip the namespace, keeping e.g. "attr-performance".
            attribute = str(subject)
            attribute = attribute.replace(SMI_prefix, '')
            print("\nRoot from predicate type: " + attribute)
            level_1_items.append(attribute)
            level_1_subjects_dict[attribute] = subject
    return level_1_subjects_dict
|
||||
|
||||
|
||||
def scan_level_2_items():
    """Fill the module-global level_2_items / level_2_subjects_dict with
    subjects whose parent (skos:broader) is a level-1 item; return the dict.

    Requires scan_level_1_items() to have run first so level_1_items is set.
    """
    for subject, predicate, object in g:
        print("\nloop data for level 2")
        # print(f"Subject: {subject}, Predicate: {predicate}, Object: {object}")
        if "broader" in predicate:
            object_str = str(object)
            object_str = object_str.replace(SMI_prefix, '')
            if object_str in level_1_items:
                # parent found in level 1
                level_2_attribute = str(subject)
                level_2_attribute = level_2_attribute.replace(SMI_prefix, '')
                print("\nLevel 2 attr: " + level_2_attribute)
                level_2_items.append(level_2_attribute)
                level_2_subjects_dict[level_2_attribute] = subject
                print("for dict 2 key = " + level_2_attribute + " - Value = " + subject)
    return level_2_subjects_dict
|
||||
|
||||
|
||||
def scan_level_3_items():
    """Fill the module-global level_3_items / level_3_subjects_dict with
    subjects whose parent (skos:broader) is a level-2 item; return the dict.

    Requires scan_level_2_items() to have run first so level_2_items is set.
    """
    for subject, predicate, object in g:
        print("\nloop data for level 3")
        print(f"Subject: {subject}, Predicate: {predicate}, Object: {object}")
        if "broader" in predicate:
            object_str = str(object)
            object_str = object_str.replace(SMI_prefix, '')
            if object_str in level_2_items:
                level_3_attribute = str(subject)
                level_3_attribute = level_3_attribute.replace(SMI_prefix, '')
                print("\nLevel 3 attr: " + level_3_attribute)
                level_3_items.append(level_3_attribute)
                level_3_subjects_dict[level_3_attribute] = subject
    return level_3_subjects_dict
|
||||
|
||||
|
||||
print(level_1_items)
|
||||
print(level_1_subjects_dict)
|
||||
print("count level 1: " + str(len(level_1_items)))
|
||||
print(level_2_items)
|
||||
print(level_2_subjects_dict)
|
||||
print("count level 2: " + str(len(level_2_items)))
|
||||
print(level_3_items)
|
||||
print(level_3_subjects_dict)
|
||||
print("count level 3: " + str(len(level_3_items)))
|
||||
|
||||
print("\n------------\n")
|
||||
|
||||
attr_dict = {}
|
||||
|
||||
|
||||
def create_level_1_attr_dict(item, item_subject):
    """Build the metadata dict (level/subject/description/title/parent) for
    one level-1 item and store it in the module-global attr_dict."""
    print("item: " + item)
    attr_data = {}
    for subject, predicate, object in g:
        if subject == item_subject:
            attr_data["level"] = 1
            attr_data["subject"] = subject
            if str(predicate) == terms_description:
                # print("\nDescription found for " + item + " - description: " + str(object))
                attr_data["description"] = str(object)
            if str(predicate) == terms_title:
                # print("\nTitle found for " + item + " - title: " + str(object))
                attr_data["title"] = str(object)
            if str(predicate) == skos_broader:
                # print("\nskos found for " + item + " - Parent: " + str(object))
                attr_data["parent"] = str(object)

    attr_dict[item] = attr_data
    print(attr_data)
|
||||
|
||||
|
||||
def create_attr_dict(item, item_subject):
    """Build the metadata dict for a level-2/level-3 item and store it in
    the module-global attr_dict.

    The level is inferred from which scan dict contains the item's
    skos:broader target (level-1 parent -> level 2, level-2 parent -> level 3).
    """
    attr_data_dict = {}
    for subject, predicate, object in g:
        if subject == item_subject:
            attr_data_dict["subject"] = subject
            if str(predicate) == terms_description:
                print("\nDescription found for " + item + " - description: " + str(object))
                attr_data_dict["description"] = str(object)
            if str(predicate) == terms_title:
                print("\nTitle found for " + item + " - title: " + str(object))
                attr_data_dict["title"] = str(object)
            if str(predicate) == skos_broader:
                print("\nskos found for " + item + " - Parent: " + str(object))
                attr_data_dict["parent"] = str(object)
                if object in level_1_subjects_dict.values():
                    print("found level 2 item")
                    attr_data_dict["level"] = 2
                elif object in level_2_subjects_dict.values():
                    print("found level 3 item")
                    attr_data_dict["level"] = 3
    attr_dict[item] = attr_data_dict
|
||||
|
||||
|
||||
for item, item_subject in level_1_subjects_dict.items():
|
||||
create_level_1_attr_dict(item, item_subject)
|
||||
|
||||
for item, item_subject in level_2_subjects_dict.items():
|
||||
create_attr_dict(item, item_subject)
|
||||
|
||||
for item, item_subject in level_3_subjects_dict.items():
|
||||
create_attr_dict(item, item_subject)
|
||||
|
||||
print(attr_dict)
|
||||
|
||||
|
||||
def get_data():
    """Scan all three hierarchy levels, build attr_dict for every item, and
    return it. Relies on the module-global graph and scan dicts."""
    print("in get data")
    # Scans must run in order: each level depends on the previous one's results.
    scan_level_1_items()
    scan_level_2_items()
    scan_level_3_items()

    for item, item_subject in level_1_subjects_dict.items():
        create_level_1_attr_dict(item, item_subject)

    for item, item_subject in level_2_subjects_dict.items():
        create_attr_dict(item, item_subject)

    for item, item_subject in level_3_subjects_dict.items():
        create_attr_dict(item, item_subject)

    return attr_dict
|
14
cfsb-backend/requirements.txt
Normal file
14
cfsb-backend/requirements.txt
Normal file
@ -0,0 +1,14 @@
|
||||
blinker==1.7.0
|
||||
click==8.1.7
|
||||
Flask==3.0.0
|
||||
Flask-Cors==4.0.0
|
||||
isodate==0.6.1
|
||||
itsdangerous==2.1.2
|
||||
Jinja2==3.1.2
|
||||
MarkupSafe==2.1.3
|
||||
numpy==1.26.3
|
||||
pyparsing==3.1.1
|
||||
rdflib==7.0.0
|
||||
scipy==1.11.4
|
||||
six==1.16.0
|
||||
Werkzeug==3.0.1
|
17
cfsb-backend/templates/selected_items.html
Normal file
17
cfsb-backend/templates/selected_items.html
Normal file
@ -0,0 +1,17 @@
|
||||
<!-- templates/selected_items.html -->
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>Selected Items</title>
|
||||
</head>
|
||||
<body>
|
||||
<h1>Selected Items</h1>
|
||||
<ul>
|
||||
{% for item in items %}
|
||||
<li>{{ item }}</li>
|
||||
{% endfor %}
|
||||
</ul>
|
||||
</body>
|
||||
</html>
|
Loading…
Reference in New Issue
Block a user