Commit b4141be4 authored by Tjerk Vreeken

Change solver_output from 2-D to 1-D array

Most arrays like lbg/ubg/x0 are already 1-D NumPy arrays if they are not
a CasADi matrix (MX/DM). It therefore makes sense for the output of the
solver to be a 1-D array as well, so that a single index suffices to get
an element (X[i] instead of X[i, 0]).
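
To illustrate the difference, a minimal sketch with made-up values (only
the indexing pattern mirrors the change):

    import numpy as np

    # Before: the solver output was a 2-D column vector of shape (n, 1),
    # so fetching one element needed two indices.
    X_2d = np.array([[1.0], [2.0], [3.0]])
    element = X_2d[1, 0]

    # After: the solver output is a flat 1-D array of shape (n,),
    # so a single index (or a plain slice) is enough.
    X_1d = X_2d.ravel()
    element = X_1d[1]
    window = X_1d[0:2]  # 1-D slice, no trailing ", 0" needed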

We ensure that the results dictionary does not present views onto the
raw solver_output by making a full copy of it first. This is just a
safeguard though, as what should happen when someone modifies the arrays
in the results like that is undefined anyway.
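
A short sketch of why the copy acts as a safeguard (illustrative values;
only the name solver_output comes from the code): basic NumPy slicing
returns a view, so without the copy a write into a results array would
propagate back into the raw solver output.

    import numpy as np

    solver_output = np.array([1.0, 2.0, 3.0, 4.0])

    # Slicing the raw array yields a view: writes show up in the original.
    view = solver_output[0:2]
    view[0] = 99.0
    assert solver_output[0] == 99.0

    # Slicing a copy leaves the raw array untouched.
    solver_output = np.array([1.0, 2.0, 3.0, 4.0])
    safe = solver_output.copy()[0:2]
    safe[0] = 99.0
    assert solver_output[0] == 1.0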

Contrary to the previous type annotation, the solver output was already
a NumPy array (a 2-D one, see commit 40875b0a).

Closes #1064
parent d3e1d3a8
@@ -1421,15 +1421,15 @@ class CollocatedIntegratedOptimizationProblem(OptimizationProblem, metaclass=ABC
     def extract_controls(self, ensemble_member=0):
         # Solver output
-        X = self.solver_output
+        X = self.solver_output.copy()
         # Extract control inputs
         results = {}
         offset = 0
         for variable in self.controls:
             n_times = len(self.times(variable))
-            results[variable] = np.array(self.variable_nominal(
-                variable) * X[offset:offset + n_times, 0]).ravel()
+            results[variable] = self.variable_nominal(
+                variable) * X[offset:offset + n_times]
             offset += n_times
         # Done
@@ -1692,7 +1692,7 @@ class CollocatedIntegratedOptimizationProblem(OptimizationProblem, metaclass=ABC
     def extract_states(self, ensemble_member=0):
         # Solver output
-        X = self.solver_output
+        X = self.solver_output.copy()
         # Discretization parameters
         control_size = self.__control_size
@@ -1724,8 +1724,7 @@ class CollocatedIntegratedOptimizationProblem(OptimizationProblem, metaclass=ABC
                 offset += 1
             else:
                 n_times = len(self.times(variable))
-                results[variable] = np.array(self.variable_nominal(
-                    variable) * X[offset:offset + n_times, 0]).ravel()
+                results[variable] = self.variable_nominal(variable) * X[offset:offset + n_times]
                 offset += n_times
         # Extract constant input aliases
@@ -1744,14 +1743,13 @@ class CollocatedIntegratedOptimizationProblem(OptimizationProblem, metaclass=ABC
         n_collocation_times = len(self.times())
         for variable in self.path_variables:
             variable = variable.name()
-            results[variable] = np.array(
-                X[offset:offset + n_collocation_times, 0]).ravel()
+            results[variable] = X[offset:offset + n_collocation_times]
             offset += n_collocation_times
         # Extract extra variables
         for k in range(len(self.extra_variables)):
             variable = self.extra_variables[k].name()
-            results[variable] = np.array(X[offset + k, 0]).ravel()
+            results[variable] = X[offset + k].ravel()
         # Done
         return results
@@ -232,13 +232,13 @@ class ControlTreeMixin(OptimizationProblem):
     def extract_controls(self, ensemble_member=0):
         # Solver output
-        X = self.solver_output
+        X = self.solver_output.copy()
         # Extract control inputs
         results = {}
         for variable in self.controls:
-            results[variable] = np.array(self.variable_nominal(
-                variable) * X[self.__control_indices[ensemble_member][variable], 0]).ravel()
+            results[variable] = self.variable_nominal(
+                variable) * X[self.__control_indices[ensemble_member][variable]].ravel()
         # Done
         return results
@@ -108,7 +108,7 @@ class OptimizationProblem(metaclass=ABCMeta):
         # Extract relevant stats
         self.__objective_value = float(results['f'])
-        self.__solver_output = np.array(results['x'])
+        self.__solver_output = np.array(results['x']).ravel()
         self.__solver_stats = solver.stats()
         success, log_level = self.solver_success(self.__solver_stats, log_solver_failure_as_error)
@@ -242,7 +242,7 @@ class OptimizationProblem(metaclass=ABCMeta):
         return self.__objective_value
     @property
-    def solver_output(self) -> ca.DM:
+    def solver_output(self) -> np.ndarray:
         """
         The raw output from the last NLP solver run.
         """
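
As a hedged sketch of the conversion now done when storing the solver
result (the DM values below are made up; only the np.array(...).ravel()
call mirrors the diff): the NLP solver hands back its primal solution as
a CasADi DM column vector, and ravelling the converted array yields the
1-D shape that solver_output now exposes.

    import casadi as ca
    import numpy as np

    x_dm = ca.DM([1.0, 2.0, 3.0])    # CasADi stores vectors as 3x1 matrices
    x_2d = np.array(x_dm)            # shape (3, 1): the old solver_output layout
    x_1d = np.array(x_dm).ravel()    # shape (3,): the new solver_output layout

    assert x_2d.shape == (3, 1)
    assert x_1d.shape == (3,)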