Commit b2dc65d6 authored by Tjerk Vreeken's avatar Tjerk Vreeken

Add support for qpsol and solvers other than IPOPT

We change the solver_options API by removing the workaround for IPOPT to
pass it options. This behavior changed between CasADi 2 and 3. The
workaround makes using solvers other than IPOPT harder, however, so instead
we go back to the old way of just passing through most options directly to
the solver.

This commit also introduces a new option "casadi_solver" for the dictionary
returned by solver_options(). The default value is "nlpsol" for
backwards compatibility. Another typical option would be "qpsol".

It also introduces a new API function "solver_success()" to override
what solver return status should be considered successful, and how to
log said status.

Closes #1006
parent d81c9113
......@@ -128,8 +128,9 @@ class Example(GoalProgrammingMixin, ControlTreeMixin, CSVLookupTableMixin,
# When mumps_scaling is not zero, errors occur. RTC-Tools does its own
# scaling, so mumps scaling is not critical. Proprietary HSL solvers
# do not exhibit this error.
options['mumps_scaling'] = 0
options['print_level'] = 1
solver = options['solver']
options[solver]['mumps_scaling'] = 0
options[solver]['print_level'] = 1
return options
......@@ -147,7 +147,8 @@ class Example(GoalProgrammingMixin, CSVMixin, ModelicaMixin,
# Any solver options can be set here
def solver_options(self):
    """Return solver options with reduced solver output verbosity."""
    options = super().solver_options()
    # Solver-specific options live in a nested dict keyed by the solver's
    # name (e.g. options['ipopt']) under the per-solver options API; the
    # old flat-key assignment is superseded and has been removed.
    solver = options['solver']
    options[solver]['print_level'] = 1
    return options
......@@ -103,7 +103,8 @@ class Example(GoalProgrammingMixin, CSVLookupTableMixin, CSVMixin,
# Any solver options can be set here
def solver_options(self):
    """Return solver options, restricting solver output."""
    options = super().solver_options()
    # The per-solver options API nests solver-specific settings under the
    # solver's own name; the legacy flat 'print_level' key is gone.
    solver = options['solver']
    options[solver]['print_level'] = 1
    return options
......@@ -62,7 +62,8 @@ class Example(CSVMixin, ModelicaMixin, CollocatedIntegratedOptimizationProblem):
def solver_options(self):
    """Return solver options with restricted solver output."""
    options = super().solver_options()
    # Restrict solver output. Solver-specific settings are placed in the
    # nested dict keyed by the solver's name (per-solver options API);
    # the old flat-key form is no longer used.
    solver = options['solver']
    options[solver]['print_level'] = 1
    return options
......@@ -1289,8 +1289,12 @@ class CollocatedIntegratedOptimizationProblem(OptimizationProblem, metaclass=ABC
def solver_options(self):
options = super(CollocatedIntegratedOptimizationProblem,
solver = options['solver']
assert solver in ['bonmin', 'ipopt']
# Set the option in both cases, to avoid one inadvertently remaining in the cache.
options['jac_c_constant'] = 'yes' if self.linear_collocation else 'no'
options[solver]['jac_c_constant'] = 'yes' if self.linear_collocation else 'no'
return options
def integrator_options(self):
......@@ -396,15 +396,19 @@ class GoalProgrammingMixin(OptimizationProblem, metaclass=ABCMeta):
# Call parent
options = super().solver_options()
solver = options['solver']
assert solver in ['bonmin', 'ipopt']
# Make sure constant states, such as min/max timeseries for violation variables,
# are turned into parameters for the final optimization problem.
options['fixed_variable_treatment'] = 'make_parameter'
ipopt_options = options[solver]
ipopt_options['fixed_variable_treatment'] = 'make_parameter'
if not self.goal_programming_options()['mu_reinit']:
options['mu_strategy'] = 'monotone'
options['gather_stats'] = True
ipopt_options['mu_strategy'] = 'monotone'
ipopt_options['gather_stats'] = True
if not self.__first_run:
options['mu_init'] = self.solver_stats['iterations'][
ipopt_options['mu_init'] = self.solver_stats['iterations'][
# Done
......@@ -437,9 +441,10 @@ class GoalProgrammingMixin(OptimizationProblem, metaclass=ABCMeta):
than the specified tolerance. Violated goals are fixed. Use of this option is normally not
The Ipopt barrier parameter ``mu`` is normally re-initialized at every iteration of the goal
programming algorithm, unless mu_reinit is set to ``False``. Use of this option is normally
not required.
When using the default solver (IPOPT), its barrier parameter ``mu`` is
normally re-initialized at every iteration of the goal programming
algorithm, unless mu_reinit is set to ``False``. Use of this option
is normally not required.
If ``fix_minimized_values`` is set to ``True``, goal functions will be set to equal their
optimized values in optimization problems generated during subsequent priorities. Otherwise,
......@@ -79,13 +79,25 @@ class OptimizationProblem(metaclass=ABCMeta):
# Iteration callback
iteration_callback = options.pop('iteration_callback', None)
nlpsol_options = {my_solver: options}
# CasADi solver to use
casadi_solver = options.pop('casadi_solver')
if isinstance(casadi_solver, str):
casadi_solver = getattr(ca, casadi_solver)
nlpsol_options = {**options}
if self.__mixed_integer:
nlpsol_options['discrete'] = discrete
if iteration_callback:
nlpsol_options['iteration_callback'] = iteration_callback
solver = ca.nlpsol('nlp', my_solver, nlp, nlpsol_options)
# Remove ipopt and bonmin defaults if they are not used
if my_solver != 'ipopt':
nlpsol_options.pop('ipopt', None)
if my_solver != 'bonmin':
nlpsol_options.pop('bonmin', None)
solver = casadi_solver('nlp', my_solver, nlp, nlpsol_options)
# Solve NLP"Calling solver")
......@@ -97,29 +109,14 @@ class OptimizationProblem(metaclass=ABCMeta):
self.__solver_output = np.array(results['x'])
self.__solver_stats = solver.stats()
# Get the return status
successful_retvals = ['Solve_Succeeded', 'Solved_To_Acceptable_Level', 'User_Requested_Stop', 'SUCCESS']
if self.__solver_stats['return_status'] in successful_retvals:"Solver succeeded with status {}".format(
success, log_level = self.solver_success(self.__solver_stats, log_solver_failure_as_error)
success = True
elif self.__solver_stats['return_status'] in ['Not_Enough_Degrees_Of_Freedom']:
logger.warning("Solver failed with status {}".format(
if success:
logger.log(log_level, "Solver succeeded with status {}".format(
success = False
if log_solver_failure_as_error:
logger.error("Solver failed with status {}".format(
# In this case we expect some higher level process to deal
# with the solver failure, so we only log it as info here."Solver failed with status {}".format(
success = False
logger.log(log_level, "Solver failed with status {}".format(
# Do any postprocessing
if postprocessing:
......@@ -159,21 +156,65 @@ class OptimizationProblem(metaclass=ABCMeta):
The default solver for continuous problems is `Ipopt <>`_.
The default solver for mixed integer problems is `Bonmin <>`_.
:returns: A dictionary of CasADi :class:`NlpSolver` options. See the CasADi,
Ipopt, and Bonmin documentation for details.
:returns: A dictionary of solver options. See the CasADi and
respective solver documentation for details.
options = {'optimized_num_dir': 3}
options = {'optimized_num_dir': 3,
'casadi_solver': ca.nlpsol}
if self.__mixed_integer:
options['solver'] = 'bonmin'
options['algorithm'] = 'B-BB'
options['nlp_solver'] = 'Ipopt'
options['nlp_log_level'] = 2
options['linear_solver'] = 'mumps'
bonmin_options = options['bonmin'] = {}
bonmin_options['algorithm'] = 'B-BB'
bonmin_options['nlp_solver'] = 'Ipopt'
bonmin_options['nlp_log_level'] = 2
bonmin_options['linear_solver'] = 'mumps'
options['solver'] = 'ipopt'
options['linear_solver'] = 'mumps'
ipopt_options = options['ipopt'] = {}
ipopt_options['linear_solver'] = 'mumps'
return options
def solver_success(self,
                   solver_stats: Dict[str, Union[str, bool]],
                   log_solver_failure_as_error: bool) -> Tuple[bool, int]:
    """
    Translates the returned solver statistics into a boolean and log level
    to indicate whether the solve was successful, and how to log it.

    :param solver_stats: Dictionary containing information about the
        solver status. See explanation below.
    :param log_solver_failure_as_error: Indicates whether a solve failure
        should be logged as an error or info message.

    ``solver_stats`` typically consists of three fields:

    * return_status: ``str``
    * secondary_return_status: ``str``
    * success: ``bool``

    By default we rely on CasADi's interpretation of the return_status
    (and secondary status) to the success variable, with an exception for
    IPOPT (see below).

    The logging level is typically ``logging.INFO`` for success, and
    ``logging.ERROR`` for failure. Only for IPOPT an exception is made for
    Not_Enough_Degrees_Of_Freedom, which returns ``logging.WARNING``
    instead. For example, this can happen when too many goals are
    specified, and lower priority goals cannot improve further on the
    current result.

    :returns: A tuple indicating whether or not the solver has succeeded,
        and what level to log it with.
    """
    # NOTE(review): log_solver_failure_as_error is part of the overridable
    # API; this default implementation does not consult it — subclasses
    # may. Confirm intended.
    success = solver_stats['success']
    log_level = logging.INFO if success else logging.ERROR

    # For IPOPT/BONMIN, running out of degrees of freedom is typically a
    # benign outcome in goal programming, so demote it to a warning.
    if (self.solver_options()['solver'].lower() in ['bonmin', 'ipopt']
            and solver_stats['return_status'] in ['Not_Enough_Degrees_Of_Freedom']):
        log_level = logging.WARNING

    return success, log_level
def solver_input(self) -> ca.MX:
import logging
import numpy as np
from test_case import TestCase
from .test_modelica_mixin import TestProblemAlgebraic
logger = logging.getLogger("rtctools")
class TestProblemCLP(TestProblemAlgebraic):
    """Variant of the algebraic test problem solved with CLP via qpsol."""

    def solver_options(self):
        # Start from the parent's options, then switch to the CLP solver
        # through CasADi's qpsol interface.
        options = super().solver_options()
        options['casadi_solver'] = 'qpsol'
        options['solver'] = 'clp'
        return options
class TestSolverCLP(TestCase):
def setUp(self):
self.problem = TestProblemCLP()
self.results = self.problem.extract_results()
self.tolerance = 1e-6
def test_solver_clp(self):
self.assertAlmostEqual(self.results['y'] + self.results['u'],
np.ones(len(self.problem.times())) * 1.0,
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment