Commit 0640dd76 authored by Dorel Butaciu

updated flexibility file structure + clean-up

parent 816fba30
@@ -6,7 +6,6 @@ import ro.tuc.dsrl.catalyst.model.enums.PredictionType;
import ro.tuc.dsrl.catalyst.model.error_handler.ErrorMessageConstants;
import ro.tuc.dsrl.catalyst.model.error_handler.IncorrectParameterException;
import javax.xml.crypto.Data;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
......
@@ -11,7 +11,6 @@ import ro.tuc.dsrl.catalyst.model.dto.EnergyProfileDTO;
import ro.tuc.dsrl.catalyst.model.enums.DataScenario;
import ro.tuc.dsrl.catalyst.model.enums.TopologyComponentType;
import javax.xml.crypto.Data;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
......
@@ -80,30 +80,10 @@ MLP_INTRADAY_COOLING = [
MLP_INTRADAY_SERVER_POZNAN = [
# model 0
MLP_INTRADAY(no_inputs=8, no_outputs=8, no_neurons=28, no_hidden_layers=1, batch_size=16, no_epochs=200),
# model 1
MLP_INTRADAY(no_inputs=8, no_outputs=8, no_neurons=20, no_hidden_layers=1, batch_size=8, no_epochs=100),
# model 2
MLP_INTRADAY(no_inputs=8, no_outputs=8, no_neurons=20, no_hidden_layers=1, batch_size=8, no_epochs=200),
# model 3
MLP_INTRADAY(no_inputs=8, no_outputs=8, no_neurons=20, no_hidden_layers=1, batch_size=8, no_epochs=100),
# model 4
MLP_INTRADAY(no_inputs=8, no_outputs=8, no_neurons=12, no_hidden_layers=1, batch_size=20, no_epochs=250),
# model 5
MLP_INTRADAY(no_inputs=8, no_outputs=8, no_neurons=20, no_hidden_layers=1, batch_size=8, no_epochs=200)
]
MLP_INTRADAY_COOLING_POZNAN = [
# model 0
MLP_INTRADAY(no_inputs=8, no_outputs=8, no_neurons=28, no_hidden_layers=1, batch_size=32, no_epochs=100),
# model 1
MLP_INTRADAY(no_inputs=8, no_outputs=8, no_neurons=20, no_hidden_layers=1, batch_size=8, no_epochs=100),
# model 2
MLP_INTRADAY(no_inputs=8, no_outputs=8, no_neurons=20, no_hidden_layers=1, batch_size=8, no_epochs=100),
# model 3
MLP_INTRADAY(no_inputs=8, no_outputs=8, no_neurons=20, no_hidden_layers=1, batch_size=8, no_epochs=100),
# model 4
MLP_INTRADAY(no_inputs=8, no_outputs=8, no_neurons=20, no_hidden_layers=1, batch_size=8, no_epochs=170),
# model 5
MLP_INTRADAY(no_inputs=8, no_outputs=8, no_neurons=20, no_hidden_layers=1, batch_size=8, no_epochs=200)
]
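
For reference, each entry above is an indexed hyperparameter config (model 0..5); a minimal sketch of reading one entry, assuming MLP_INTRADAY is a namedtuple like LSTM_INTRADAY below (values taken from model 0 of MLP_INTRADAY_SERVER_POZNAN):

# Hypothetical lookup of the model-0 config for the Poznan server room.
cfg = MLP_INTRADAY_SERVER_POZNAN[0]
print(cfg.no_inputs, cfg.no_outputs, cfg.no_neurons, cfg.no_hidden_layers, cfg.batch_size, cfg.no_epochs)
# -> 8 8 28 1 16 200
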
LSTM_INTRADAY = namedtuple('LSTM_INTRADAY', LSTM_PARAMETERS_NAME)
@@ -138,31 +118,11 @@ LSTM_INTRADAY_COOLING = [
LSTM_INTRADAY_SERVER_POZNAN = [
# model 0
LSTM_DAYAHEAD(no_inputs=8, no_outputs=8, no_neurons=16, no_features=1, batch_size=16, no_epochs=300),
# model 1
LSTM_DAYAHEAD(no_inputs=8, no_outputs=8, no_neurons=16, no_features=1, batch_size=12, no_epochs=300),
# model 2
LSTM_DAYAHEAD(no_inputs=8, no_outputs=8, no_neurons=16, no_features=1, batch_size=16, no_epochs=300),
# model 3
LSTM_DAYAHEAD(no_inputs=8, no_outputs=8, no_neurons=16, no_features=1, batch_size=16, no_epochs=300),
# model 4
LSTM_DAYAHEAD(no_inputs=8, no_outputs=8, no_neurons=16, no_features=1, batch_size=16, no_epochs=300),
# model 5
LSTM_DAYAHEAD(no_inputs=8, no_outputs=8, no_neurons=16, no_features=1, batch_size=16, no_epochs=300)
]
LSTM_INTRADAY_COOLING_POZNAN = [
# model 0
LSTM_DAYAHEAD(no_inputs=8, no_outputs=8, no_neurons=16, no_features=1, batch_size=16, no_epochs=230),
# model 1
LSTM_DAYAHEAD(no_inputs=8, no_outputs=8, no_neurons=16, no_features=1, batch_size=16, no_epochs=270),
# model 2
LSTM_DAYAHEAD(no_inputs=8, no_outputs=8, no_neurons=30, no_features=1, batch_size=16, no_epochs=175),
# model 3
LSTM_DAYAHEAD(no_inputs=8, no_outputs=8, no_neurons=20, no_features=1, batch_size=80, no_epochs=400),
# model 4
LSTM_DAYAHEAD(no_inputs=8, no_outputs=8, no_neurons=16, no_features=1, batch_size=12, no_epochs=400),
# model 5
LSTM_DAYAHEAD(no_inputs=8, no_outputs=8, no_neurons=16, no_features=1, batch_size=16, no_epochs=310)
LSTM_DAYAHEAD(no_inputs=8, no_outputs=8, no_neurons=16, no_features=1, batch_size=16, no_epochs=230)
]
......
@@ -7,6 +7,7 @@ RESULTS_PATH = '../src/%s/results'
LOSS_PATH = '../src/%s/loss'
# Functions use DATA_SCENARIO and PREDICTION_TYPE (DA/ID/NRT) constants from this file to construct the requested path.
def get_training_data_path(data_scenario, prediction_type):
    return TRAINING_DATA % (data_scenario, prediction_type)
@@ -28,13 +29,44 @@ def get_loss_path(data_scenario):
# #################################################################################
# Business logic
# ### Flexibility
# Paths
FLEXIBILITY_TRAINING_DATA = '../src/%s/flexibility/training_data/%s'
FLEXIBILITY_TEST_DATA = '../src/%s/flexibility/test_data/%s'
FLEXIBILITY_TRAINED_MODELS = '../src/%s/flexibility/trained_models/%s'
# Results
FLEXIBILITY_RESULTS_PATH = '../src/%s/flexibility/results'
FLEXIBILITY_LOSS_PATH = '../src/%s/flexibility/loss'
# Functions use DATA_SCENARIO & FLEXIBILITY_TYPE (Upper/Lower) constants from this file to construct the requested path.
def get_flexibility_results_path(data_scenario):
    return FLEXIBILITY_RESULTS_PATH % data_scenario

def get_flexibility_loss_path(data_scenario):
    return FLEXIBILITY_LOSS_PATH % data_scenario

def get_flexibility_training_data_path(data_scenario, flexibility_type):
    return FLEXIBILITY_TRAINING_DATA % (data_scenario, flexibility_type)

def get_flexibility_test_data_path(data_scenario, flexibility_type):
    return FLEXIBILITY_TEST_DATA % (data_scenario, flexibility_type)

def get_flexibility_trained_models_path(data_scenario, flexibility_type):
    return FLEXIBILITY_TRAINED_MODELS % (data_scenario, flexibility_type)
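
As a quick check of the new templates, the getters above expand as sketched below; 'paper', 'poznan' and 'UPPER' are the DATA_SCENARIO and FLEXIBILITY_TYPE_UPPER values defined in this file, and the asserts are illustrative rather than part of the module:

assert get_flexibility_training_data_path('poznan', 'UPPER') == '../src/poznan/flexibility/training_data/UPPER'
assert get_flexibility_trained_models_path('poznan', 'UPPER') == '../src/poznan/flexibility/trained_models/UPPER'
assert get_flexibility_test_data_path('poznan', 'UPPER') == '../src/poznan/flexibility/test_data/UPPER'
assert get_flexibility_results_path('paper') == '../src/paper/flexibility/results'
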
# ### Business logic
# Prediction type
DAYAHEAD = 'dayahead'
INTRADAY = 'intraday'
NEAR_REAL_TIME = 'near_real_time'
# Scenario
# Data source scenario
DATA_SCENARIO = [
'paper',
'poznan'
@@ -45,39 +77,14 @@ ALGORITHM_TYPE_MLP = 'mlp'
ALGORITHM_TYPE_LSTM = 'lstm'
ALGORITHM_TYPE_ENSEMBLE = 'ensemble'
# DataCenter components
SERVER_COMPONENT = 'SERVER_ROOM'
COOLING_SYSTEM_COMPONENT = 'COOLING_SYSTEM'
SCENARIO_TRAIN = 'train'
SCENARIO_TEST = 'test'
# Flexibility
# Flexibility type
FLEXIBILITY_TYPE_UPPER = 'UPPER'
FLEXIBILITY_TYPE_LOWER = 'LOWER'
TRAINING_DATA_UPPER_BOUND = '../src/flexibility/training_data/upper'
TRAINING_DATA_LOWER_BOUND = '../src/flexibility/training_data/lower'
TRAINED_MODELS_UPPER_BOUND = '../src/flexibility/trained_models/upper'
TRAINED_MODELS_LOWER_BOUND = '../src/flexibility/trained_models/lower'
TEST_DATA_UPPER_BOUND = '../src/flexibility/test_data/upper'
TEST_DATA_LOWER_BOUND = '../src/flexibility/test_data/lower'
FLEXIBILITY_RESULTS_PATH = '../src/flexibility/results'
FLEXIBILITY_LOSS_PATH = '../src/flexibility/loss'
# POZNAN PATHS
TRAINING_DATA_UPPER_BOUND_POZNAN = '../src/flexibility/poznan_data/training_data/upper'
TRAINING_DATA_LOWER_BOUND_POZNAN = '../src/flexibility/poznan_data/training_data/lower'
TRAINED_MODELS_UPPER_BOUND_POZNAN = '../src/flexibility/poznan_data/trained_models/upper'
TRAINED_MODELS_LOWER_BOUND_POZNAN = '../src/flexibility/poznan_data/trained_models/lower'
TEST_DATA_UPPER_BOUND_POZNAN = '../src/flexibility/poznan_data/test_data/upper'
TEST_DATA_LOWER_BOUND_POZNAN = '../src/flexibility/poznan_data/test_data/lower'
SCENARIO_TRAIN = 'train'
SCENARIO_TEST = 'test'
FLEXIBILITY_RESULTS_PATH_POZNAN = '../src/flexibility/poznan_data/results'
FLEXIBILITY_LOSS_PATH_POZNAN = '../src/flexibility/poznan_data/loss'
@@ -55,23 +55,11 @@ def get_no_models_for_flexibility_algorithm(algorithm_type, prediction_type, fle
def get_flexibility_paths(flexibility_type, data_scenario):
    switch = {
        # Scenario 0 - Paper
        (constants.FLEXIBILITY_TYPE_UPPER, constants.DATA_SCENARIO[0]): (
            constants.TRAINING_DATA_UPPER_BOUND, constants.TRAINED_MODELS_UPPER_BOUND, constants.TEST_DATA_UPPER_BOUND),
        (constants.FLEXIBILITY_TYPE_LOWER, constants.DATA_SCENARIO[0]): (
            constants.TRAINING_DATA_LOWER_BOUND, constants.TRAINED_MODELS_LOWER_BOUND, constants.TEST_DATA_LOWER_BOUND),
        # Scenario 1 - Poznan
        (constants.FLEXIBILITY_TYPE_UPPER, constants.DATA_SCENARIO[1]): (
            constants.TRAINING_DATA_UPPER_BOUND_POZNAN, constants.TRAINED_MODELS_UPPER_BOUND_POZNAN,
            constants.TEST_DATA_UPPER_BOUND_POZNAN),
        (constants.FLEXIBILITY_TYPE_LOWER, constants.DATA_SCENARIO[1]): (
            constants.TRAINING_DATA_LOWER_BOUND_POZNAN, constants.TRAINED_MODELS_LOWER_BOUND_POZNAN,
            constants.TEST_DATA_LOWER_BOUND_POZNAN)
    }
    training_data_path = constants.get_flexibility_training_data_path(data_scenario, flexibility_type)
    trained_models_path = constants.get_flexibility_trained_models_path(data_scenario, flexibility_type)
    test_data_path = constants.get_flexibility_test_data_path(data_scenario, flexibility_type)
    return switch.get((flexibility_type, data_scenario), None)
    return training_data_path, trained_models_path, test_data_path
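
A hedged caller-side sketch of the lookup above; the module alias `mapping` is an assumption, and the unpacking order follows the return statement:

# Hypothetical usage; `mapping` refers to this module, `constants` to the constants module shown earlier.
training_data_path, trained_models_path, test_data_path = mapping.get_flexibility_paths(
    constants.FLEXIBILITY_TYPE_LOWER, constants.DATA_SCENARIO[1])
# With the templates in constants, these resolve to:
#   ../src/poznan/flexibility/training_data/LOWER
#   ../src/poznan/flexibility/trained_models/LOWER
#   ../src/poznan/flexibility/test_data/LOWER
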
def get_config_for_flexibility_algorithm(prediction_type, flexibility_type, data_scenario):
......
@@ -23,14 +23,7 @@ def predict(data, algorithm_type, algorithm_model, trained_models_root_path, top
def predict_flexibility(data, algorithm_type, algorithm_model, topology_component, flexibility_type, data_scenario):
    switch = {
        (constants.FLEXIBILITY_TYPE_LOWER, constants.DATA_SCENARIO[0]): constants.TRAINED_MODELS_LOWER_BOUND,
        (constants.FLEXIBILITY_TYPE_UPPER, constants.DATA_SCENARIO[0]): constants.TRAINED_MODELS_UPPER_BOUND,
        (constants.FLEXIBILITY_TYPE_LOWER, constants.DATA_SCENARIO[1]): constants.TRAINED_MODELS_LOWER_BOUND_POZNAN,
        (constants.FLEXIBILITY_TYPE_UPPER, constants.DATA_SCENARIO[1]): constants.TRAINED_MODELS_UPPER_BOUND_POZNAN
    }
    trained_models_root_path = switch.get((flexibility_type, data_scenario), None)
    trained_models_root_path = constants.get_flexibility_trained_models_path(data_scenario, flexibility_type)
    trained_models_path = "%s/%s/%s" % (trained_models_root_path, algorithm_type, topology_component)
    with keras.backend.get_session().graph.as_default():
@@ -71,8 +64,7 @@ def plot_prediction(predictions, test_data, prediction_type, data_scenario, topo
algorithm_type, data_scenario, algorithm_type, i + 1, no_models, j + 1, no_samples,
topology_component)
else:
root_path = constants.FLEXIBILITY_RESULTS_PATH if data_scenario == constants.DATA_SCENARIO[0] \
else constants.FLEXIBILITY_RESULTS_PATH_POZNAN
root_path = constants.get_flexibility_results_path(data_scenario)
saving_path = "%s/%s/results_%s_sample_%d-outOf-%d_%s.png" % \
(root_path, flexibility_type, algorithm_type, j + 1, no_samples,
topology_component)
@@ -113,7 +105,7 @@ def plot_prediction(predictions, test_data, prediction_type, data_scenario, topo
plt.cla()
plt.close()
if prediction_type is None:
if flexibility_type is None:
print('\n')
print("All samples average prediction MAPE for %s: %0.5f" % (algorithm_type, np.average(mapes)))
print("All samples average prediction MAE for %s: %0.5f" % (algorithm_type, np.average(maes)))
@@ -158,8 +150,7 @@ def plot_loss(histories, algorithm_type, prediction_type, data_scenario, topolog
(constants.get_loss_path(data_scenario), prediction_type, topology_component,
algorithm_type, i + 1, histories_max_index)
else:
root_path = constants.FLEXIBILITY_LOSS_PATH if data_scenario == constants.DATA_SCENARIO[0]\
else constants.FLEXIBILITY_LOSS_PATH_POZNAN
root_path = constants.get_flexibility_loss_path(data_scenario)
saving_path = "%s/%s/loss_%s_sample_%d-outOf-%d.png" % \
(root_path, flexibility_type, algorithm_type, i + 1, histories_max_index)
......
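
To make the new loss-figure location concrete, a minimal expansion sketch of the saving_path above; the algorithm name, sample index and count are illustrative, only the path templates come from this commit:

root_path = constants.get_flexibility_loss_path('paper')  # '../src/paper/flexibility/loss'
saving_path = "%s/%s/loss_%s_sample_%d-outOf-%d.png" % (root_path, 'UPPER', 'mlp', 1, 6)
# -> '../src/paper/flexibility/loss/UPPER/loss_mlp_sample_1-outOf-6.png'
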
@@ -87,14 +87,14 @@ def call_flexibility_model(algorithm_type, prediction_type, data_scenario, topol
prediction_type=prediction_type,
data_scenario=data_scenario,
algorithm_model=algorithm_model,
train_data_path="%s/%s" % (training_data_root_path, utils.constants.SCENARIO_TRAIN),
train_data_path="%s/%s" % (training_data_root_path, constants.SCENARIO_TRAIN),
trained_models_root_path=trained_models_root_path,
topology_component=topology_component,
flexibility_type=flexibility_type)
predict(algorithm_type=algorithm_type,
algorithm_model=algorithm_model,
data_path="%s/%s" % (test_data_root_path, utils.constants.SCENARIO_TEST),
data_path="%s/%s" % (test_data_root_path, constants.SCENARIO_TEST),
trained_models_root_path=trained_models_root_path,
prediction_type=prediction_type,
topology_component=topology_component,
@@ -150,7 +150,7 @@ def call_algorithm_model(algorithm_type, prediction_type, data_scenario, topolog
predict(algorithm_type=algorithm_type,
algorithm_model=algorithm_model,
data_path="%s/%s" % (test_data_root_path, utils.constants.SCENARIO_TEST),
data_path="%s/%s" % (test_data_root_path, constants.SCENARIO_TEST),
trained_models_root_path=trained_models_root_path,
prediction_type=prediction_type,
data_scenario=data_scenario,
@@ -192,9 +192,8 @@ if __name__ == '__main__':
# call_algorithm_model(constants.ALGORITHM_TYPE_ENSEMBLE, constants.INTRADAY, constants.DATA_SCENARIO[0], constants.COOLING_SYSTEM_COMPONENT)
# call_algorithm_model(constants.ALGORITHM_TYPE_ENSEMBLE, constants.NEAR_REAL_TIME, constants.DATA_SCENARIO[0], constants.COOLING_SYSTEM_COMPONENT)
# TODO: update flexibility paths and folder structure
# FLEXIBILITY
call_flexibility_model(constants.ALGORITHM_TYPE_MLP, constants.DAYAHEAD, constants.DATA_SCENARIO[0], constants.SERVER_COMPONENT, constants.FLEXIBILITY_TYPE_UPPER)
# call_flexibility_model(constants.ALGORITHM_TYPE_MLP, constants.DAYAHEAD, constants.DATA_SCENARIO[0], constants.SERVER_COMPONENT, constants.FLEXIBILITY_TYPE_UPPER)
# call_flexibility_model(constants.ALGORITHM_TYPE_MLP, constants.DAYAHEAD, constants.DATA_SCENARIO[0], constants.SERVER_COMPONENT, constants.FLEXIBILITY_TYPE_LOWER)
# ###
......
@@ -44,7 +44,8 @@ def predict_dayahead_flexibility_upper_bounds(algorithm_type, data_scenario, top
energy_values = extract_energy_values(request.get_json())
# take only upper bounds of energy values
upper_bounds = extract_bounds_from_energy_curve(energy_values, constants.FLEXIBILITY_TYPE_UPPER).reshape(1, -1)
upper_bounds = extract_bounds_from_energy_curve(energy_values, constants.FLEXIBILITY_TYPE_UPPER, data_scenario)\
.reshape(1, -1)
algo_config = mapping.get_config_for_flexibility_algorithm(constants.DAYAHEAD, constants.FLEXIBILITY_TYPE_UPPER,
data_scenario)
@@ -80,7 +81,8 @@ def predict_dayahead_flexibility_lower_bounds(algorithm_type, data_scenario, top
energy_values = extract_energy_values(request.get_json())
# take only lower bounds of energy values
lower_bounds = extract_bounds_from_energy_curve(energy_values, constants.FLEXIBILITY_TYPE_LOWER).reshape(1, -1)
lower_bounds = extract_bounds_from_energy_curve(energy_values, constants.FLEXIBILITY_TYPE_LOWER, data_scenario)\
.reshape(1, -1)
algo_config = mapping.get_config_for_flexibility_algorithm(constants.DAYAHEAD, constants.FLEXIBILITY_TYPE_LOWER,
data_scenario)
@@ -105,9 +107,15 @@ def predict_dayahead_flexibility_lower_bounds(algorithm_type, data_scenario, top
return Response(response=json.dumps(predictions), status=200, mimetype='application/json')
def extract_bounds_from_energy_curve(energy_values, flexibility_type):
def extract_bounds_from_energy_curve(energy_values, flexibility_type, data_scenario):
    lower_bounds = np.zeros(24)
    baseline = SERVER_POZNAN_BASELINE
    baseline = None
    if data_scenario == constants.DATA_SCENARIO[0]:  # Scenario 0 - Paper
        baseline = SERVER_BASELINE
    elif data_scenario == constants.DATA_SCENARIO[1]:  # Scenario 1 - Poznan
        baseline = SERVER_POZNAN_BASELINE
    energy_values = energy_values.reshape(24)
    diff = energy_values - baseline
......
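
To illustrate only what this hunk shows (scenario-dependent baseline selection and the element-wise difference), a minimal standalone sketch; the baseline and energy numbers below are invented stand-ins, and the step from diff to bounds continues beyond this hunk:

import numpy as np

# Invented stand-ins for the real SERVER_BASELINE / SERVER_POZNAN_BASELINE arrays.
SERVER_BASELINE = np.full(24, 500.0)
SERVER_POZNAN_BASELINE = np.full(24, 730.0)
DATA_SCENARIO = ['paper', 'poznan']

energy_values = np.linspace(720.0, 745.0, 24).reshape(1, -1)
data_scenario = DATA_SCENARIO[1]  # Scenario 1 - Poznan

baseline = SERVER_BASELINE if data_scenario == DATA_SCENARIO[0] else SERVER_POZNAN_BASELINE
energy_values = energy_values.reshape(24)
diff = energy_values - baseline  # per-hour deviation from the scenario baseline
print(diff)
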
@@ -22,6 +22,8 @@ def predict_near_real_time(algorithm_type, data_scenario, topology_component, ti
def sample_predict_near_real_time(sample, algorithm_type, data_scenario, topology_component):
    data_scenario = data_scenario.lower()
    if algorithm_type == constants.ALGORITHM_TYPE_ENSEMBLE:
        # setup ensemble
        mlp_config, lstm_config = get_config_for_algorithms(constants.NEAR_REAL_TIME, data_scenario, topology_component)
......
,0
0,737.1162790697674
1,736.2558139534884
2,734.1511627906976
3,731.453488372093
4,728.4651162790698
5,725.5
6,723.1744186046511
7,722.3372093023256
8,722.8372093023256
9,725.3604651162791
10,728.9883720930233
11,732.5348837209302
12,735.6162790697674
13,738.3023255813954
14,740.0
15,741.3604651162791
16,742.0232558139535
17,741.9651162790698
18,741.453488372093
19,740.5116279069767
20,739.2790697674419
21,738.7674418604652
22,738.8255813953489
23,729.8139534883721
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
8
11
14
16
16
11
0
0
0
0
0
9
17
10
3
3
7
11
10
9
13
1
0
0
0
0
0
0
0