Commit 19ee78df authored by Jorn Baayen's avatar Jorn Baayen

Merge branch 'homotopy'

parents 8f21bb1b 37424771
......@@ -14,3 +14,4 @@ Contents:
optimization/lookup_tables
optimization/multi_objective
optimization/forecast_uncertainty
optimization/homotopy
Homotopy
========
.. autoclass:: rtctools.optimization.homotopy_mixin.HomotopyMixin
:members: homotopy_options
:show-inheritance:
\ No newline at end of file
......@@ -19,13 +19,13 @@ logger = logging.getLogger("rtctools")
class Goal(object):
"""
Base class for lexicographic goal programming goals.
Base class for lexicographic goal programming goals.
A goal is defined by overriding the :func:`function` method, and setting at least the
``function_range`` class variable.
:cvar function_range: Range of goal function. *Required*.
:cvar function_nominal: Nominal value of function. Used for scaling. Default is ``1``.
:cvar function_nominal: Nominal value of function. Used for scaling. Default is ``1``.
:cvar target_min: Desired lower bound for goal function. Default is ``numpy.nan``.
:cvar target_max: Desired upper bound for goal function. Default is ``numpy.nan``.
:cvar priority: Integer priority of goal. Default is ``1``.
......@@ -34,7 +34,7 @@ class Goal(object):
:cvar critical: If ``True``, the algorithm will abort if this goal cannot be fully met. Default is ``False``.
The target bounds indicate the range within the function should stay, *if possible*. Goals
are, in that sense, *soft*, as opposed to standard hard constraints.
are, in that sense, *soft*, as opposed to standard hard constraints.
Four types of goals can be created:
......@@ -62,9 +62,9 @@ class Goal(object):
m \\leq f \\leq M
Lower priority goals take precedence over higher priority goals.
Lower priority goals take precedence over higher priority goals.
Goals with the same priority are weighted off against each other in a
Goals with the same priority are weighted off against each other in a
single objective function.
The goal violation value is taken to the order'th power in the objective function of the final
......@@ -93,7 +93,7 @@ class Goal(object):
target_min = 1.1
priority = 2
Note that for path goals, the ensemble member index is not passed to the call
Note that for path goals, the ensemble member index is not passed to the call
to :func:`OptimizationProblem.state`. This call returns a time-independent symbol
that is also independent of the active ensemble member. Path goals are
applied to all times and all ensemble members simultaneously.
......@@ -107,7 +107,7 @@ class Goal(object):
"""
This method returns a CasADi :class:`MX` object describing the goal function.
:returns: A CasADi :class:`MX` object.
:returns: A CasADi :class:`MX` object.
"""
pass
......@@ -291,13 +291,13 @@ class GoalProgrammingMixin(OptimizationProblem):
+---------------------------+-----------+---------------+
| ``violation_tolerance`` | ``float`` | ``1.0`` |
+---------------------------+-----------+---------------+
| ``mu_reinit`` | ``bool`` | ``True`` |
| ``mu_reinit`` | ``bool`` | ``True`` |
+---------------------------+-----------+---------------+
Constraints generated by the goal programming algorithm are relaxed by applying the specified relaxation.
Use of this option is normally not required.
A goal is considered to be violated if the violation, scaled between 0 and 1, is greater than the specified tolerance.
A goal is considered to be violated if the violation, scaled between 0 and 1, is greater than the specified tolerance.
Violated goals are fixed. Use of this option is normally not required.
The Ipopt barrier parameter ``mu`` is normally re-initialized at every iteration of the goal programming algorithm, unless
......@@ -569,7 +569,7 @@ class GoalProgrammingMixin(OptimizationProblem):
self._subproblem_path_constraints[ensemble_member][
goal.get_dependency_key()] = new_constraints
def optimize(self):
def optimize(self, preprocessing=True, postprocessing=True):
# Do pre-processing
self.pre()
......
# cython: embedsignature=True
from rtctools.optimization.optimization_problem import OptimizationProblem
from rtctools.optimization.timeseries import Timeseries
import logging
logger = logging.getLogger("rtctools")
class HomotopyMixin(OptimizationProblem):
    """
    Adds homotopy to your optimization problem.  A homotopy is a continuous transformation between
    two optimization problems, parametrized by a single parameter :math:`\\theta \\in [0, 1]`.

    Homotopy may be used to solve non-convex optimization problems, by starting with a convex
    approximation at :math:`\\theta = 0.0` and ending with the non-convex problem at
    :math:`\\theta = 1.0`.

    .. note::

        It is advised to look for convex reformulations of your problem, before resorting to a use
        of the (potentially expensive) homotopy process.

    """

    def seed(self, ensemble_member):
        """
        Returns the seed for the given ensemble member.

        For the initial (convex) subproblem at :math:`\\theta = 0.0` the
        default seed is used; every subsequent subproblem is seeded with the
        results of the previous, successfully solved, subproblem.

        :param ensemble_member: The ensemble member index.

        :returns: A dictionary of variable names and seed time series.
        """
        if self._theta == 0.0:
            seed = super(HomotopyMixin, self).seed(ensemble_member)
        else:
            # Seed with the results of the previous homotopy step.
            seed = {}
            for key, result in self._results[ensemble_member].items():
                seed[key] = Timeseries(self.times(key), result)
        return seed

    def parameters(self, ensemble_member):
        """
        Returns the model parameters for the given ensemble member, with the
        homotopy parameter set to the current value of :math:`\\theta`.

        :param ensemble_member: The ensemble member index.

        :returns: A dictionary of parameter names and values.
        """
        parameters = super(HomotopyMixin, self).parameters(ensemble_member)

        # Inject the current homotopy parameter value into the model parameters.
        options = self.homotopy_options()
        parameters[options['homotopy_parameter']] = self._theta

        return parameters

    def homotopy_options(self):
        """
        Returns a dictionary of options controlling the homotopy process.

        +------------------------+------------+---------------+
        | Option                 | Type       | Default value |
        +========================+============+===============+
        | ``delta_theta_0``      | ``float``  | ``1.0``       |
        +------------------------+------------+---------------+
        | ``delta_theta_min``    | ``float``  | ``0.01``      |
        +------------------------+------------+---------------+
        | ``homotopy_parameter`` | ``string`` | ``theta``     |
        +------------------------+------------+---------------+

        The homotopy process is controlled by the homotopy parameter in the model, specified
        by the option ``homotopy_parameter``.  The homotopy parameter is initialized to ``0.0``,
        and increases to a value of ``1.0`` with a dynamically changing step size.  This step
        size is initialized with the value of the option ``delta_theta_0``.  If this step
        size is too large, i.e., if the problem with the increased homotopy parameter fails to
        converge, the step size is halved.  The process of halving terminates when the step size falls
        below the minimum value specified by the option ``delta_theta_min``.

        :returns: A dictionary of homotopy options.
        """
        return {'delta_theta_0'     : 1.0,
                'delta_theta_min'   : 0.01,
                'homotopy_parameter': 'theta'}

    def optimize(self, preprocessing=True, postprocessing=True):
        """
        Performs a homotopy loop of optimization runs, starting at
        :math:`\\theta = 0.0` and stepping towards :math:`\\theta = 1.0`.

        :param preprocessing:  True to enable pre-processing.  Default is ``True``.
        :param postprocessing: True to enable post-processing.  Default is ``True``.

        :returns: ``True`` on success.
        """
        # Do pre-processing, unless an outer algorithm (e.g. goal programming)
        # has already done so and disabled it via the keyword argument.
        if preprocessing:
            self.pre()

        # Homotopy loop
        self._theta = 0.0

        options = self.homotopy_options()
        delta_theta = options['delta_theta_0']

        while self._theta <= 1.0:
            logger.info("Solving with homotopy parameter theta = {}.".format(self._theta))

            # The inner solves must not re-run pre-/post-processing.
            success = super(HomotopyMixin, self).optimize(preprocessing=False, postprocessing=False)
            if success:
                # Store results to seed the next homotopy step with.
                self._results = [self.extract_results(ensemble_member)
                                 for ensemble_member in range(self.ensemble_size)]
            else:
                if self._theta == 0.0:
                    # The initial convex problem failed; there is no smaller
                    # step to retry with.
                    break

                # Step back, and retry with half the step size.
                self._theta -= delta_theta
                delta_theta /= 2

                if delta_theta < options['delta_theta_min']:
                    break

            self._theta += delta_theta

        # Do post-processing, unless disabled via the keyword argument.
        if postprocessing:
            self.post()

        return success
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment