Rosenbrock banana

Here, we perform optimization for the Rosenbrock banana function, which does not require an AMICI model. In particular, we try several ways of specifying derivative information.
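
For reference: the N-dimensional Rosenbrock function is f(x) = sum_{i=1}^{N-1} (100 (x_{i+1} - x_i^2)^2 + (1 - x_i)^2), and attains its global minimum f = 0 at x = (1, ..., 1).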

[1]:
import pypesto
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

%matplotlib inline

Define the objective and problem

[2]:
# first type of objective: separate callables for
# function value, gradient and Hessian
# record the optimizer trace via the objective options
objective_options = pypesto.ObjectiveOptions(trace_record=True,
                                             trace_all=False,
                                             trace_save_iter=1)
objective1 = pypesto.Objective(fun=sp.optimize.rosen,
                               grad=sp.optimize.rosen_der,
                               hess=sp.optimize.rosen_hess,
                               options=objective_options)

# second type of objective: a single callable returning
# function value, gradient and Hessian at once
def rosen2(x):
    return (sp.optimize.rosen(x),
            sp.optimize.rosen_der(x),
            sp.optimize.rosen_hess(x))
objective2 = pypesto.Objective(fun=rosen2, grad=True, hess=True)

dim_full = 10
lb = -5 * np.ones((dim_full, 1))
ub = 5 * np.ones((dim_full, 1))

problem1 = pypesto.Problem(objective=objective1, lb=lb, ub=ub)
problem2 = pypesto.Problem(objective=objective2, lb=lb, ub=ub)
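
Both objectives should evaluate identically, and the supplied gradient should match finite differences. A quick sanity check (a sketch using scipy's check_grad helper at an arbitrary test point):

x_test = np.full(dim_full, 0.5)
# both ways of specifying the objective should give the same value
assert np.isclose(objective1(x_test, (0,)), objective2(x_test, (0,)))
# distance between analytic gradient and finite differences
# (should be close to zero)
print(sp.optimize.check_grad(sp.optimize.rosen, sp.optimize.rosen_der, x_test))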

Illustration

[3]:
x = np.arange(-2, 2, 0.1)
y = np.arange(-2, 2, 0.1)
x, y = np.meshgrid(x, y)
z = np.zeros_like(x)
# evaluate the objective on the grid;
# sensi_orders=(0,) requests only the function value
for j in range(0, x.shape[0]):
    for k in range(0, x.shape[1]):
        z[j, k] = objective1([x[j, k], y[j, k]], (0,))
[4]:
fig = plt.figure()
fig.set_size_inches(14, 10)
ax = plt.axes(projection='3d')
ax.plot_surface(X=x, Y=y, Z=z)
plt.xlabel('x axis')
plt.ylabel('y axis')
ax.set_title('cost function values')
[4]:
Text(0.5, 0.92, 'cost function values')
../_images/example_rosenbrock_7_1.png

Run optimization

[5]:
# create different optimizers
optimizer_bfgs = pypesto.ScipyOptimizer(method='L-BFGS-B')
optimizer_tnc = pypesto.ScipyOptimizer(method='TNC')
# dogleg is a trust-region method requiring gradient and Hessian
optimizer_dogleg = pypesto.ScipyOptimizer(method='dogleg')

# set number of starts
n_starts = 20

# run optimizations with the different optimizers
result1_bfgs = pypesto.minimize(problem=problem1, optimizer=optimizer_bfgs, n_starts=n_starts)
result1_tnc = pypesto.minimize(problem=problem1, optimizer=optimizer_tnc, n_starts=n_starts)
result1_dogleg = pypesto.minimize(problem=problem1, optimizer=optimizer_dogleg, n_starts=n_starts)

# Optimize second type of objective
result2 = pypesto.minimize(problem=problem2, optimizer=optimizer_tnc, n_starts=n_starts)
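
Before visualizing, we can quickly compare the best objective value each optimizer found (a sketch; the starts within an optimize_result are sorted by function value, so index 0 is the best start):

for label, result in [('L-BFGS-B', result1_bfgs),
                      ('TNC', result1_tnc),
                      ('dogleg', result1_dogleg)]:
    # best function value over all starts
    print(label, result.optimize_result.get_for_key('fval')[0])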

Visualize and compare optimization results

[6]:
import pypesto.visualize

# plot separated waterfalls
pypesto.visualize.waterfall(result1_bfgs, size=(15,6))
pypesto.visualize.waterfall(result1_tnc, size=(15,6))
pypesto.visualize.waterfall(result1_dogleg, size=(15,6))
[6]:
<matplotlib.axes._subplots.AxesSubplot at 0x132d1ec10>
../_images/example_rosenbrock_11_1.png
../_images/example_rosenbrock_11_2.png
../_images/example_rosenbrock_11_3.png

We can now take a closer look at which method performed better. Let's first compare L-BFGS-B and TNC, since both methods gave good results. What does their fine convergence look like?

[7]:
# plot the best 10 starts of both results in one waterfall plot
pypesto.visualize.waterfall([result1_bfgs, result1_tnc],
                            legends=['L-BFGS-B', 'TNC'],
                            start_indices=10,
                            scale_y='lin')
[7]:
<matplotlib.axes._subplots.AxesSubplot at 0x135795e10>
../_images/example_rosenbrock_13_1.png
[8]:
# retrieve second optimum
all_x = result1_bfgs.optimize_result.get_for_key('x')
all_fval = result1_bfgs.optimize_result.get_for_key('fval')
x = all_x[19]
fval = all_fval[19]
print('Second optimum at: ' + str(fval))

# create a reference point from it
ref = {'x': x, 'fval': fval, 'color': [
    0.2, 0.4, 1., 1.], 'legend': 'second optimum'}
ref = pypesto.visualize.create_references(ref)

# new waterfall plot with reference point for second optimum
pypesto.visualize.waterfall(result1_dogleg, size=(15,6),
                            scale_y='lin', y_limits=[-1, 101],
                            reference=ref, colors=[0., 0., 0., 1.])
Second optimum at: 3.986579113637416
[8]:
<matplotlib.axes._subplots.AxesSubplot at 0x135d87450>
../_images/example_rosenbrock_14_2.png

Visualize parameters

There seems to be a second local optimum. We want to see whether it was also found by the dogleg method.

[9]:
pypesto.visualize.parameters([result1_bfgs, result1_tnc],
                             legends=['L-BFGS-B', 'TNC'],
                             balance_alpha=False)
pypesto.visualize.parameters(result1_dogleg,
                             legends='dogleg',
                             reference=ref,
                             size=(15,10),
                             start_indices=[0, 1, 2, 3, 4, 5],
                             balance_alpha=False)
[9]:
<matplotlib.axes._subplots.AxesSubplot at 0x136971bd0>
../_images/example_rosenbrock_17_1.png
../_images/example_rosenbrock_17_2.png

Optimizer history

Let’s compare optimizer progress over time.

[10]:
# plot optimizer history for L-BFGS-B and TNC
pypesto.visualize.optimizer_history([result1_bfgs, result1_tnc],
                                    legends=['L-BFGS-B', 'TNC'],
                                    reference=ref)
# plot optimizer history for the dogleg method
pypesto.visualize.optimizer_history(result1_dogleg,
                                    reference=ref)
[10]:
<matplotlib.axes._subplots.AxesSubplot at 0x136924050>
../_images/example_rosenbrock_20_1.png
../_images/example_rosenbrock_20_2.png

We can also visualize this using other scalings or offsets.

[11]:
# plot optimizer history without an offset on the y-axis
pypesto.visualize.optimizer_history([result1_bfgs, result1_tnc],
                                    legends=['L-BFGS-B', 'TNC'],
                                    reference=ref,
                                    offset_y=0.)

# plot optimizer history on a linear scale with fixed y-limits
pypesto.visualize.optimizer_history([result1_bfgs, result1_tnc],
                                    legends=['L-BFGS-B', 'TNC'],
                                    reference=ref,
                                    scale_y='lin',
                                    y_limits=[-1., 11.])
[11]:
<matplotlib.axes._subplots.AxesSubplot at 0x132cf5c50>
../_images/example_rosenbrock_22_1.png
../_images/example_rosenbrock_22_2.png

Compute profiles

The profiling routine needs a problem, a result object and an optimizer.

Moreover, it accepts an array of integers (profile_index), indicating for each parameter whether (1) or not (0) a profile should be computed.

Finally, an integer (result_index) can be passed to specify the local optimum from which profiling should be started.
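
For example, the following indicator array (a sketch for our 10-dimensional problem) would request profiles for the first three parameters only:

profile_index = np.array([1, 1, 1, 0, 0, 0, 0, 0, 0, 0])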

[12]:
# compute profiles
profile_options = pypesto.ProfileOptions(min_step_size=0.0005,
                                         delta_ratio_max=0.05,
                                         default_step_size=0.005,
                                         ratio_min=0.03)

result1_tnc = pypesto.parameter_profile(
    problem=problem1,
    result=result1_tnc,
    optimizer=optimizer_tnc,
    profile_index=np.array([1, 1, 1, 0, 0, 1, 0, 1, 0, 0]),
    result_index=0,
    profile_options=profile_options)

# compute profiles from second optimum
result1_tnc = pypesto.parameter_profile(
    problem=problem1,
    result=result1_tnc,
    optimizer=optimizer_tnc,
    profile_index=np.array([1, 1, 1, 0, 0, 1, 0, 1, 0, 0]),
    result_index=19,
    profile_options=profile_options)

Visualize and analyze results

pypesto offers easy-to-use visualization routines. Each call to parameter_profile above appended its profiles as a separate profile list, which is selected via profile_list_id:

[13]:
# specify the parameters for which profiles should be plotted
ax = pypesto.visualize.profiles(result1_tnc, profile_indices=[0, 1, 2, 5],
                                reference=ref, profile_list_id=0)
# plot the profiles again, now from the second optimum
ax = pypesto.visualize.profiles(result1_tnc, profile_indices=[0, 1, 2, 5],
                                reference=ref, profile_list_id=1)
../_images/example_rosenbrock_28_0.png
../_images/example_rosenbrock_28_1.png

If the result needs to be examined in more detail, it can easily be exported as a pandas.DataFrame:

[14]:
result1_tnc.optimize_result.as_dataframe(['fval', 'n_fval', 'n_grad',
                                          'n_hess', 'n_res', 'n_sres', 'time'])
[14]:
fval n_fval n_grad n_hess n_res n_sres time
0 6.209909e-13 182 182 0 0 0 3.018615
1 3.347752e-12 201 201 0 0 0 3.129823
2 6.678543e-12 199 199 0 0 0 3.283912
3 7.053040e-12 220 220 0 0 0 4.203216
4 3.774925e-11 108 108 0 0 0 1.765140
5 3.801535e-11 189 189 0 0 0 4.939764
6 4.257708e-11 215 215 0 0 0 3.341858
7 6.523019e-11 184 184 0 0 0 2.943149
8 7.130051e-11 183 183 0 0 0 4.247466
9 1.235387e-10 216 216 0 0 0 3.459202
10 3.527061e-10 195 195 0 0 0 3.148446
11 3.810705e-10 211 211 0 0 0 3.243232
12 1.265625e-09 189 189 0 0 0 3.081982
13 3.046493e-09 165 165 0 0 0 2.597345
14 1.748569e-08 186 186 0 0 0 2.902532
15 2.042323e-08 159 159 0 0 0 2.793006
16 2.076666e-08 205 205 0 0 0 3.255512
17 3.986579e+00 173 173 0 0 0 2.814030
18 3.986579e+00 172 172 0 0 0 2.740488
19 3.986579e+00 199 199 0 0 0 3.038311
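
The returned object is a plain pandas.DataFrame, so it can be post-processed with the usual pandas tooling, e.g. written to disk (a sketch; the output path is hypothetical):

df = result1_tnc.optimize_result.as_dataframe(
    ['fval', 'n_fval', 'n_grad', 'n_hess', 'n_res', 'n_sres', 'time'])
df.to_csv('rosenbrock_optimization_results.csv')  # hypothetical path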