forked from quaquel/EMAworkbench
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathoptimization_lake_model_dps.py
183 lines (150 loc) · 5.11 KB
/
optimization_lake_model_dps.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
"""
This example replicates Quinn, J.D., Reed, P.M., Keller, K. (2017)
Direct policy search for robust multi-objective management of deeply
uncertain socio-ecological tipping points. Environmental Modelling &
Software 92, 125-141.
It also showcases how the workbench can be used to apply the MORDM extension
suggested by Watson, A.A., Kasprzyk, J.R. (2017) Incorporating deeply uncertain
factors into the many objective search process. Environmental Modelling &
Software 89, 159-171.
"""
import math
import numpy as np
from scipy.optimize import brentq
from ema_workbench import (
Model,
RealParameter,
ScalarOutcome,
Constant,
ema_logging,
MultiprocessingEvaluator,
CategoricalParameter,
Scenario,
)
from ema_workbench.em_framework.optimization import ArchiveLogger, EpsilonProgress
# Created on 1 Jun 2017
#
# .. codeauthor::jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>
def get_antropogenic_release(xt, c1, c2, r1, r2, w1):
    """Return the anthropogenic pollution release for lake state ``xt``.

    The release policy is a cubic radial-basis-function (RBF) rule with two
    RBFs, following Quinn et al. (2017). The raw rule output is clamped to
    the feasible release range [0.01, 0.1].

    Parameters
    ----------
    xt : float
        pollution in lake at time t
    c1 : float
        center rbf 1
    c2 : float
        center rbf 2
    r1 : float
        radius rbf 1
    r2 : float
        radius rbf 2
    w1 : float
        weight of rbf 1

    Returns
    -------
    float
        release decision, clamped to [0.01, 0.1]

    note:: w2 = 1 - w1
    """
    rule = w1 * (abs(xt - c1) / r1) ** 3 + (1 - w1) * (abs(xt - c2) / r2) ** 3
    # clamp the raw RBF output to the feasible release interval
    return min(max(rule, 0.01), 0.1)
def lake_problem(
    b=0.42,  # decay rate for P in lake (0.42 = irreversible)
    q=2.0,  # recycling exponent
    mean=0.02,  # mean of natural inflows
    stdev=0.001,  # standard deviation of natural inflows
    delta=0.98,  # future utility discount rate
    alpha=0.4,  # utility from pollution
    nsamples=100,  # Monte Carlo sampling of natural inflows
    myears=1,  # the runtime of the simulation model
    c1=0.25,
    c2=0.25,
    r1=0.5,
    r2=0.5,
    w1=0.5,
):
    """Simulate the lake pollution model under a closed-loop RBF release policy.

    Direct-policy-search variant of the lake problem (Quinn et al., 2017):
    each year the release decision is derived from the current lake state via
    ``get_antropogenic_release``, and the dynamics are averaged over
    ``nsamples`` stochastic realizations of the lognormal natural inflows.

    Returns
    -------
    tuple of float
        (max_P, utility, inertia, reliability) where

        * max_P: maximum of the time series of mean pollution levels
        * utility: discounted expected utility from pollution releases
        * inertia: expected fraction of steps with decision change < 0.02
        * reliability: expected fraction of steps with P below the
          critical threshold
    """
    # critical P level: the unstable equilibrium separating the recoverable
    # and eutrophic basins of the lake dynamics
    Pcrit = brentq(lambda x: x**q / (1 + x**q) - b * x, 0.01, 1.5)

    X = np.zeros(myears)
    average_daily_P = np.zeros(myears)
    reliability = 0.0
    inertia = 0
    utility = 0

    for _ in range(nsamples):
        X[0] = 0.0
        decision = 0.1

        decisions = np.zeros(myears)
        decisions[0] = decision

        # lognormal parameterized so the inflows have the requested
        # arithmetic mean and standard deviation
        natural_inflows = np.random.lognormal(
            math.log(mean**2 / math.sqrt(stdev**2 + mean**2)),
            math.sqrt(math.log(1.0 + stdev**2 / mean**2)),
            size=myears,
        )

        for t in range(1, myears):
            # here we use the decision rule
            decision = get_antropogenic_release(X[t - 1], c1, c2, r1, r2, w1)
            decisions[t] = decision

            X[t] = (
                (1 - b) * X[t - 1]
                + X[t - 1] ** q / (1 + X[t - 1] ** q)
                + decision
                + natural_inflows[t - 1]
            )
            average_daily_P[t] += X[t] / nsamples

        reliability += np.sum(X < Pcrit) / (nsamples * myears)
        # BUGFIX: the absolute value must be taken of the decision
        # differences, not of the boolean comparison (the original
        # np.absolute(np.diff(decisions) < 0.02) was a no-op on booleans and
        # counted any decrease, however large, as inertia)
        inertia += np.sum(np.absolute(np.diff(decisions)) < 0.02) / (nsamples * myears)
        utility += np.sum(alpha * decisions * np.power(delta, np.arange(myears))) / nsamples
    max_P = np.max(average_daily_P)

    return max_P, utility, inertia, reliability
if __name__ == "__main__":
    ema_logging.log_to_stderr(ema_logging.INFO)

    # instantiate the model
    lake_model = Model("lakeproblem", function=lake_problem)

    # specify uncertainties (ranges explored during robustness analysis)
    lake_model.uncertainties = [
        RealParameter("b", 0.1, 0.45),
        RealParameter("q", 2.0, 4.5),
        RealParameter("mean", 0.01, 0.05),
        RealParameter("stdev", 0.001, 0.005),
        RealParameter("delta", 0.93, 0.99),
    ]

    # set levers: the five parameters of the RBF release rule
    lake_model.levers = [
        RealParameter("c1", -2, 2),
        RealParameter("c2", -2, 2),
        RealParameter("r1", 0, 2),
        RealParameter("r2", 0, 2),
        CategoricalParameter("w1", np.linspace(0, 1, 10)),
    ]

    # specify outcomes; directions match the four objectives of the
    # DPS lake problem formulation
    lake_model.outcomes = [
        ScalarOutcome("max_P", kind=ScalarOutcome.MINIMIZE),
        ScalarOutcome("utility", kind=ScalarOutcome.MAXIMIZE),
        ScalarOutcome("inertia", kind=ScalarOutcome.MAXIMIZE),
        ScalarOutcome("reliability", kind=ScalarOutcome.MAXIMIZE),
    ]

    # override some of the defaults of the model
    lake_model.constants = [
        Constant("alpha", 0.41),
        Constant("nsamples", 100),
        Constant("myears", 100),
    ]

    # reference is optional, but can be used to implement search for
    # various user specified scenarios along the lines suggested by
    # Watson and Kasprzyk (2017)
    reference = Scenario("reference", b=0.4, q=2, mean=0.02, stdev=0.01)

    convergence_metrics = [
        # snapshot the archive during search so convergence can be inspected
        # afterwards; NOTE(review): assumes "./data" exists — confirm
        ArchiveLogger(
            "./data",
            [l.name for l in lake_model.levers],
            [o.name for o in lake_model.outcomes],
            base_filename="lake_model_dps_archive.tar.gz",
        ),
        EpsilonProgress(),
    ]

    # many-objective search over the levers, evaluated under the single
    # reference scenario, parallelized across worker processes
    with MultiprocessingEvaluator(lake_model) as evaluator:
        results, convergence = evaluator.optimize(
            searchover="levers",
            nfe=100000,
            epsilons=[0.1] * len(lake_model.outcomes),
            reference=reference,
            convergence=convergence_metrics,
        )