@@ -19,7 +19,6 @@
 
 # the rankine cycle is a directory above this one, so modify path
 import json
-import time
 from functools import partial
 import numpy as np
 import pandas as pd
@@ -51,45 +50,49 @@
 # path for folder that has surrogate models
 re_nn_dir = Path("/Users/dguittet/Projects/Dispatches/NN_models/steady_state_new")
 
-# load scaling and bounds for NN surrogates (rev and # of startups)
-
-with open(re_nn_dir / "dispatch_frequency" / "static_clustering_wind_pmax.pkl", 'rb') as f:
-    model = pickle.load(f)
-centers = model.cluster_centers_
-dispatch_clusters_mean = centers[:, 0]
-pem_clusters_mean = centers[:, 1]
-resource_clusters_mean = centers[:, 2]
-
-with open(re_nn_dir / "revenue" / "RE_revenue_params_3layers.json", 'rb') as f:
-    rev_data = json.load(f)
-
-# load keras neural networks
-# Input variables are PEM bid price, PEM MW, Reserve Factor and Load Shed Price
-nn_rev = keras.models.load_model(re_nn_dir / "revenue" / "RE_revenue_3layers")
-nn_dispatch = keras.models.load_model(re_nn_dir / "dispatch_frequency" / "ss_surrogate_model_wind_pmax")
-
-with open(re_nn_dir / "dispatch_frequency" / "ss_surrogate_param_wind_pmax.json", 'r') as f:
-    dispatch_data = json.load(f)
-
-# load keras models and create OMLT NetworkDefinition objects
-# Revenue model definition
-input_bounds_rev = {i: (rev_data['xmin'][i], rev_data['xmax'][i]) for i in range(len(rev_data['xmin']))}
-scaling_object_rev = omlt.OffsetScaling(offset_inputs=rev_data['xm_inputs'],
-                                        factor_inputs=rev_data['xstd_inputs'],
-                                        offset_outputs=[rev_data['y_mean']],
-                                        factor_outputs=[rev_data['y_std']])
-net_rev_defn = load_keras_sequential(nn_rev, scaling_object_rev, input_bounds_rev)
-
-# the dispatch frequency surrogate
-input_bounds_dispatch = {i: (dispatch_data['xmin'][i], dispatch_data['xmax'][i]) for i in range(len(dispatch_data['xmin']))}
-scaling_object_dispatch = omlt.OffsetScaling(offset_inputs=dispatch_data['xm_inputs'],
                                             factor_inputs=dispatch_data['xstd_inputs'],
-                                             offset_outputs=dispatch_data['ws_mean'],
-                                             factor_outputs=dispatch_data['ws_std'])
-net_frequency_defn = load_keras_sequential(nn_dispatch, scaling_object_dispatch, input_bounds_dispatch)
-
-
-def conceptual_design_dynamic_RE(input_params, num_rep_days, PEM_bid=None, PEM_MW=None, verbose=False):
+def load_surrogate_model(re_nn_dir):
+    # load scaling and bounds for NN surrogates (rev and # of startups)
+    with open(re_nn_dir / "dispatch_frequency" / "static_clustering_wind_pmax.pkl", 'rb') as f:
+        model = pickle.load(f)
+    centers = model.cluster_centers_
+    dispatch_clusters_mean = centers[:, 0]
+    pem_clusters_mean = centers[:, 1]
+    resource_clusters_mean = centers[:, 2]
+
+    with open(re_nn_dir / "revenue" / "RE_revenue_params_3layers.json", 'rb') as f:
+        rev_data = json.load(f)
+
+    # load keras neural networks
+    # Input variables are PEM bid price, PEM MW, Reserve Factor and Load Shed Price
+    nn_rev = keras.models.load_model(re_nn_dir / "revenue" / "RE_revenue_3layers")
+    nn_dispatch = keras.models.load_model(re_nn_dir / "dispatch_frequency" / "ss_surrogate_model_wind_pmax")
+
+    with open(re_nn_dir / "dispatch_frequency" / "ss_surrogate_param_wind_pmax.json", 'r') as f:
+        dispatch_data = json.load(f)
+
+    # load keras models and create OMLT NetworkDefinition objects
+    # Revenue model definition
+    input_bounds_rev = {i: (rev_data['xmin'][i], rev_data['xmax'][i]) for i in range(len(rev_data['xmin']))}
+    scaling_object_rev = omlt.OffsetScaling(offset_inputs=rev_data['xm_inputs'],
+                                            factor_inputs=rev_data['xstd_inputs'],
+                                            offset_outputs=[rev_data['y_mean']],
+                                            factor_outputs=[rev_data['y_std']])
+    net_rev_defn = load_keras_sequential(nn_rev, scaling_object_rev, input_bounds_rev)
+
+    # the dispatch frequency surrogate
+    input_bounds_dispatch = {i: (dispatch_data['xmin'][i], dispatch_data['xmax'][i]) for i in range(len(dispatch_data['xmin']))}
+    scaling_object_dispatch = omlt.OffsetScaling(offset_inputs=dispatch_data['xm_inputs'],
+                                                 factor_inputs=dispatch_data['xstd_inputs'],
+                                                 offset_outputs=dispatch_data['ws_mean'],
+                                                 factor_outputs=dispatch_data['ws_std'])
+    net_frequency_defn = load_keras_sequential(nn_dispatch, scaling_object_dispatch, input_bounds_dispatch)
+    return net_rev_defn, net_frequency_defn, dispatch_clusters_mean, pem_clusters_mean, resource_clusters_mean
+
+def conceptual_design_dynamic_RE(input_params, PEM_bid=None, PEM_MW=None, verbose=False):
+
+    net_rev_defn, net_frequency_defn, dispatch_clusters_mean, pem_clusters_mean, resource_clusters_mean = load_surrogate_model(re_nn_dir)
+
+    num_rep_days = len(dispatch_clusters_mean)
 
     m = ConcreteModel(name='RE_Conceptual_Design_full_surrogates')
 
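Note: the `NetworkDefinition` objects returned by `load_surrogate_model` are embedded into the Pyomo model further down, outside this hunk. A minimal sketch of that step, assuming OMLT's standard `OmltBlock` API with a smooth full-space formulation (the formulation class actually used in this file is not visible in the diff):

```python
from pyomo.environ import ConcreteModel
from omlt import OmltBlock
from omlt.neuralnet import FullSpaceSmoothNNFormulation

m = ConcreteModel()
m.rev_surrogate = OmltBlock()
# encode the scaled, bounded keras revenue network as algebraic constraints
m.rev_surrogate.build_formulation(FullSpaceSmoothNNFormulation(net_rev_defn))
# m.rev_surrogate.inputs / m.rev_surrogate.outputs are Pyomo variables that can
# be tied to the design variables (PEM bid price, PEM MW, ...) with Constraints
```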
@@ -275,7 +278,7 @@ def record_result(m, num_rep_days):
 
 
 def run_design(PEM_bid=None, PEM_size=None):
-    model = conceptual_design_dynamic_RE(default_input_params, num_rep_days=n_rep_days, PEM_bid=PEM_bid, PEM_MW=PEM_size, verbose=False)
+    model = conceptual_design_dynamic_RE(default_input_params, PEM_bid=PEM_bid, PEM_MW=PEM_size, verbose=False)
     nlp_solver = SolverFactory('ipopt')
     nlp_solver.options['max_iter'] = 8000
     nlp_solver.options['acceptable_tol'] = 1e-8
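The remainder of `run_design` falls outside this hunk. A hypothetical continuation following the usual Pyomo solve idiom (the actual solve call and status handling are not shown in the diff):

```python
from pyomo.environ import check_optimal_termination

status = nlp_solver.solve(model, tee=False)
if not check_optimal_termination(status):
    print("IPOPT stopped without an acceptable solution")
```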
@@ -320,9 +323,6 @@ def run_design(PEM_bid=None, PEM_size=None):
         "pem_var_cost": pem_var_cost
     }
 
-start_time = time.time()
-n_rep_days = centers.shape[0]
-
 
 if __name__ == "__main__":
     # result = run_design()
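Dropping the module-level `n_rep_days = centers.shape[0]` is behavior-preserving: `conceptual_design_dynamic_RE` now computes the same count internally from the cluster centers.

```python
# model.cluster_centers_ has shape (k, 3): one row per representative day,
# columns = (dispatch, PEM, resource) means, so
# centers.shape[0] == len(centers[:, 0]) == len(dispatch_clusters_mean) == k
```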
@@ -339,4 +339,4 @@ def run_design(PEM_bid=None, PEM_size=None):
         res = p.starmap(run_design, inputs)
 
     df = pd.DataFrame(res)
-    df.to_csv("surrogate_results_ss.csv")
+    # df.to_csv("surrogate_results_ss.csv")
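For context, a minimal sketch of the parallel sweep that the surrounding `__main__` block implies, with hypothetical bid/size grids (the real `inputs` list and pool size sit outside this diff):

```python
from itertools import product
from multiprocessing import Pool

import pandas as pd

if __name__ == "__main__":
    pem_bids = [15.0, 20.0, 25.0]   # $/MWh, illustrative values only
    pem_sizes = [100.0, 200.0]      # MW, illustrative values only
    inputs = list(product(pem_bids, pem_sizes))
    with Pool(processes=4) as p:
        res = p.starmap(run_design, inputs)   # one run_design(bid, size) per grid point
    df = pd.DataFrame(res)
    # df.to_csv("surrogate_results_ss.csv")   # kept commented out, as in this commit
```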