diff --git a/.gitignore b/.gitignore
index b4ecdf8c0..0461e217f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,7 +1,7 @@
-*pyc
+!.gitignore
+.*
+*.pyc
 **parse_table_datacmds.py
-.idea
-.DS_Store
 gurobi.log
 switch_model.egg-info/
 venv
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 203dc5195..8f934146e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,14 +1,29 @@
+-------------------------------------------------------------------------------
+Switch 2.0.7
+-------------------------------------------------------------------------------
+Note that as of version 5.7, Pyomo no longer supports "inf", "infinity" or "nan"
+values in the input files, so Switch will likely report errors if you have
+those.
+
+Changed Pyomo dependencies to versions 5.5.1-6.4.2. Note that Pyomo versions
+before 5.7.3 don't work with CPLEX version 12.10 or later (see
+https://github.com/Pyomo/pyomo/pull/1792). If using a recent version of CPLEX,
+you should ensure that you also use version 5.7.3 or later of Pyomo.
+
+Renamed outputs/dispatch-wide.csv to outputs/dispatch_wide.csv
+
 -------------------------------------------------------------------------------
 Switch 2.0.6
 -------------------------------------------------------------------------------
-This release fixes a bug where the fixed costs from all possible build years
-were applied each period, instead of only using the build years that are still
-in service in the current period. This increased the apparent cost of storage by
-approximately (study length) / (storage life). This bug was introduced in
-version 2.0.0b3 and persisted through version 2.0.5, so results from earlier
-models will need to be updated.
+This release fixes a bug where the fixed costs of storage energy capacity (the
+MWh part of storage) from all possible build years were mistakenly applied each
+period, instead of only using the build years that are still in service in the
+current period. This increased the apparent cost of storage by approximately
+(study length) / (storage life). This bug was introduced in version 2.0.0b3 and
+persisted through version 2.0.5, so results from earlier models will need to be
+updated.
 
-This will be the last version of Switch to work in Python 2.  It requires at
+This will be the last version of Switch to work in Python 2. It requires at
 least Python 2.7.12 and also works with Python 3.
 
 -------------------------------------------------------------------------------
diff --git a/INSTALL.md b/INSTALL.md
index 5837d7bc6..63b627ade 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -108,7 +108,7 @@ that they also use to solve models. This supports a number of activities:
 To work this way, first install Switch as described above (this will install
 all the Switch dependencies, even though you will later reinstall Switch
 itself).
-Then, in a terminal window or Anaconda command prompt Anaconda command prompt,
+Then, in a terminal window or Anaconda command prompt,
 use the `cd` and `mkdir` commands to create and/or enter the directory where
 you would like to store the Switch model code and examples.
 Once you are in that directory, run the following commands (don't type the comments that start with
diff --git a/LICENSE b/LICENSE
index 733d69919..6dac1daff 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,4 +1,6 @@
-The Pyomo implementation of the SWITCH model for planning least-cost low-emission electricity grids is licensed under Apache License v2.0 which can be obtained at http://www.apache.org/licenses/LICENSE-2.0
+The Switch model for planning least-cost low-emission electricity grids is
+licensed under Apache License v2.0 which can be obtained at
+http://www.apache.org/licenses/LICENSE-2.0.
 
 
                                  Apache License
@@ -201,4 +203,4 @@ The Pyomo implementation of the SWITCH model for planning least-cost low-emissio
     distributed under the License is distributed on an "AS IS" BASIS,
     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     See the License for the specific language governing permissions and
-    limitations under the License.
\ No newline at end of file
+    limitations under the License.
diff --git a/examples/3zone_toy/inputs/balancing_areas.csv b/examples/3zone_toy/inputs/balancing_areas.csv
deleted file mode 100644
index b3d2ee48b..000000000
--- a/examples/3zone_toy/inputs/balancing_areas.csv
+++ /dev/null
@@ -1,3 +0,0 @@
-BALANCING_AREAS,quickstart_res_load_frac,quickstart_res_wind_frac,quickstart_res_solar_frac,spinning_res_load_frac,spinning_res_wind_frac,spinning_res_solar_frac
-NorthCentral,0.04,0.05,0.05,0.02,0.05,0.05
-South,0.04,0.06,0.06,0.02,0.07,0.07
diff --git a/examples/3zone_toy/inputs/fuel_supply_curves.csv b/examples/3zone_toy/inputs/fuel_supply_curves.csv
index 747e53b66..a799d5e8f 100644
--- a/examples/3zone_toy/inputs/fuel_supply_curves.csv
+++ b/examples/3zone_toy/inputs/fuel_supply_curves.csv
@@ -1,10 +1,10 @@
 regional_fuel_market,period,tier,unit_cost,max_avail_at_cost
-All_DistOil,2020,0,21.9802,inf
+All_DistOil,2020,0,21.9802,.
 All_DistOil,2030,0,24.5216,100000.0
 All_NG,2020,0,4.4647,1950514555.0
-All_NG,2020,1,5.0709,inf
+All_NG,2020,1,5.0709,.
 All_NG,2030,0,5.925,2368354558.0
-All_NG,2030,1,5.925,inf
+All_NG,2030,1,5.925,.
 North_Bio,2020,0,1.7102,6864985.0
 North_Bio,2020,1,3.3941,6782413.0
 North_Bio,2030,0,2.0438,6064415.0
diff --git a/examples/3zone_toy/inputs/gen_build_predetermined.csv b/examples/3zone_toy/inputs/gen_build_predetermined.csv
index 1dbc91204..fc9182464 100644
--- a/examples/3zone_toy/inputs/gen_build_predetermined.csv
+++ b/examples/3zone_toy/inputs/gen_build_predetermined.csv
@@ -1,4 +1,4 @@
-GENERATION_PROJECT,build_year,gen_predetermined_cap
+GENERATION_PROJECT,build_year,build_gen_predetermined
 N-Coal_ST,1995,2
 N-Geothermal,2000,1
 N-NG_CC,2008,2
diff --git a/examples/3zone_toy/inputs/generation_projects_info.csv b/examples/3zone_toy/inputs/gen_info.csv
similarity index 100%
rename from examples/3zone_toy/inputs/generation_projects_info.csv
rename to examples/3zone_toy/inputs/gen_info.csv
diff --git a/examples/3zone_toy/inputs/load_zones.csv b/examples/3zone_toy/inputs/load_zones.csv
index af45f83ac..542f08220 100644
--- a/examples/3zone_toy/inputs/load_zones.csv
+++ b/examples/3zone_toy/inputs/load_zones.csv
@@ -1,4 +1,4 @@
-LOAD_ZONE,dbid,existing_local_td,local_td_annual_cost_per_mw
-North,1,5.5,66406.5
-Central,2,3.5,61663.4
-South,3,9.5,128040.0
+LOAD_ZONE,dbid,existing_local_td,local_td_annual_cost_per_mw,local_td_loss_rate
+North,1,5.5,66406.5,0.053
+Central,2,3.5,61663.4,0.053
+South,3,9.5,128040.0,0.053
diff --git a/examples/3zone_toy/inputs/switch_inputs_version.txt b/examples/3zone_toy/inputs/switch_inputs_version.txt
index 157e54f3e..f1547e6d1 100644
--- a/examples/3zone_toy/inputs/switch_inputs_version.txt
+++ b/examples/3zone_toy/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.6
+2.0.7
diff --git a/examples/3zone_toy/inputs/trans_params.csv b/examples/3zone_toy/inputs/trans_params.csv
index 6d8350120..b42a53794 100644
--- a/examples/3zone_toy/inputs/trans_params.csv
+++ b/examples/3zone_toy/inputs/trans_params.csv
@@ -1,2 +1,2 @@
-trans_capital_cost_per_mw_km,trans_lifetime_yrs,trans_fixed_om_fraction,distribution_loss_rate
-1000.0,20,0.03,0.053
+trans_capital_cost_per_mw_km,trans_lifetime_yrs,trans_fixed_om_fraction
+1000.0,20,0.03
diff --git a/examples/3zone_toy/outputs/total_cost.txt b/examples/3zone_toy/outputs/total_cost.txt
index 7db8d1c71..7ae87fd4b 100644
--- a/examples/3zone_toy/outputs/total_cost.txt
+++ b/examples/3zone_toy/outputs/total_cost.txt
@@ -1 +1 @@
-134733088.429
+134733088.42929107
diff --git a/examples/3zone_toy_stochastic_PySP/PySPInputGenerator.py b/examples/3zone_toy_stochastic_PySP/PySPInputGenerator.py
index 26b59969d..5b30f1b8c 100644
--- a/examples/3zone_toy_stochastic_PySP/PySPInputGenerator.py
+++ b/examples/3zone_toy_stochastic_PySP/PySPInputGenerator.py
@@ -48,6 +48,7 @@
 """
 from __future__ import print_function
+
 # Inputs directory relative to the location of this script.
 inputs_dir = "inputs"
 
 # ScenarioStructure.dat and RootNode.dat will be saved to a
@@ -58,11 +59,10 @@
 stage_list = ["Investment", "Operation"]
 stage_vars = {
     "Investment": ["BuildGen", "BuildLocalTD", "BuildTx"],
-    "Operation": ["DispatchGen", "GenFuelUseRate"]
+    "Operation": ["DispatchGen", "GenFuelUseRate"],
 }
 # List of scenario names
-scenario_list = [
-    "LowFuelCosts", "MediumFuelCosts", "HighFuelCosts"]
+scenario_list = ["LowFuelCosts", "MediumFuelCosts", "HighFuelCosts"]
 
 
 ###########################################################
@@ -82,6 +82,7 @@
 instance = model.load_inputs(inputs_dir=inputs_dir)
 print("inputs successfully loaded...")
 
+
 def save_dat_files():
 
     if not os.path.exists(os.path.join(inputs_dir, pysp_subdir)):
@@ -92,8 +93,9 @@ def save_dat_files():
 
     dat_file = os.path.join(inputs_dir, pysp_subdir, "RootNode.dat")
     print("creating and saving {}...".format(dat_file))
-    utilities.save_inputs_as_dat(model, instance, save_path=dat_file,
-        sorted_output=model.options.sorted_output)
+    utilities.save_inputs_as_dat(
+        model, instance, save_path=dat_file, sorted_output=model.options.sorted_output
+    )
 
     #######################
     # ScenarioStructure.dat
@@ -117,7 +119,7 @@ def save_dat_files():
 
         f.write("param NodeStage := RootNode {}\n".format(stage_list[0]))
         for s in scenario_list:
-            f.write(" {scen} {st}\n".format(scen=s,st=stage_list[1]))
+            f.write(" {scen} {st}\n".format(scen=s, st=stage_list[1]))
         f.write(";\n\n")
 
         f.write("set Children[RootNode] := ")
@@ -127,7 +129,7 @@
 
         f.write("param ConditionalProbability := RootNode 1.0")
         # All scenarios have the same probability in this example
-        probs = [1.0/len(scenario_list)] * (len(scenario_list) - 1)
+        probs = [1.0 / len(scenario_list)] * (len(scenario_list) - 1)
         # The remaining probability is lumped in the last scenario to avoid rounding issues
         probs.append(1.0 - sum(probs))
         for (s, p) in zip(scenario_list, probs):
@@ -150,14 +152,16 @@ def write_var_name(f, cname):
     if hasattr(instance, cname):
         dimen = getattr(instance, cname).index_set().dimen
         if dimen == 0:
-            f.write("  {cn}\n".format(cn=cname))
+            f.write(" {cn}\n".format(cn=cname))
         else:
-            indexing = (",".join(["*"]*dimen))
+            indexing = ",".join(["*"] * dimen)
             f.write(" {cn}[{dim}]\n".format(cn=cname, dim=indexing))
     else:
         raise ValueError(
-            "Variable '{}' is not a component of the model. Did you make a typo?".
-            format(cname))
+            "Variable '{}' is not a component of the model. Did you make a typo?".format(
+                cname
+            )
+        )
 
         for st in stage_list:
             f.write("set StageVariables[{}] := \n".format(st))
@@ -171,8 +175,9 @@ def write_var_name(f, cname):
 
         f.write(" Operation OperationCost\n")
         f.write(";")
 
+
 ####################
-if __name__ == '__main__':
+if __name__ == "__main__":
     # If the script is executed on the command line, then the .dat files are created.
     save_dat_files()
diff --git a/examples/3zone_toy_stochastic_PySP/ReferenceModel.py b/examples/3zone_toy_stochastic_PySP/ReferenceModel.py
index 04fc67d36..c5a00910d 100644
--- a/examples/3zone_toy_stochastic_PySP/ReferenceModel.py
+++ b/examples/3zone_toy_stochastic_PySP/ReferenceModel.py
@@ -37,7 +37,7 @@
 
 # Ideally, we would use the main codebase to generate the model, but the
 # mandatory switch argument parser is interfering with pysp's command line tools
-#model = switch_model.solve.main(return_model=True)
+# model = switch_model.solve.main(return_model=True)
 
 module_list = switch_model.solve.get_module_list(args=None)
 model = utilities.create_model(module_list, args=[])
@@ -53,14 +53,19 @@
 # are nested inside another function in the financials module, they can't
 # be called from this script.
 
+
 def calc_tp_costs_in_period(m, t):
-    return sum(
-        getattr(m, tp_cost)[t] * m.tp_weight_in_year[t]
-        for tp_cost in m.Cost_Components_Per_TP)
+    return sum(
+        getattr(m, tp_cost)[t] * m.tp_weight_in_year[t]
+        for tp_cost in m.Cost_Components_Per_TP
+    )
+
+
 def calc_annual_costs_in_period(m, p):
-    return sum(
-        getattr(m, annual_cost)[p]
-        for annual_cost in m.Cost_Components_Per_Period)
+    return sum(
+        getattr(m, annual_cost)[p] for annual_cost in m.Cost_Components_Per_Period
+    )
+
 
 # In the current version of Switch, all annual costs are defined
 # by First Stage decision variables, such as fixed O&M and capital
@@ -73,14 +78,19 @@ def calc_annual_costs_in_period(m, p):
 # decisions in this example.
 # Further comments on this are written in the Readme file.
 
-model.InvestmentCost = Expression(rule=lambda m: sum(
-    calc_annual_costs_in_period(m, p) * m.bring_annual_costs_to_base_year[p]
-    for p in m.PERIODS))
-
-model.OperationCost = Expression(rule=lambda m:
-    sum(
-        sum(calc_tp_costs_in_period(m, t) for t in m.TPS_IN_PERIOD[p]
-        ) * m.bring_annual_costs_to_base_year[p]
-        for p in m.PERIODS))
+model.InvestmentCost = Expression(
+    rule=lambda m: sum(
+        calc_annual_costs_in_period(m, p) * m.bring_annual_costs_to_base_year[p]
+        for p in m.PERIODS
+    )
+)
+
+model.OperationCost = Expression(
+    rule=lambda m: sum(
+        sum(calc_tp_costs_in_period(m, t) for t in m.TPS_IN_PERIOD[p])
+        * m.bring_annual_costs_to_base_year[p]
+        for p in m.PERIODS
+    )
+)
 
 print("model successfully loaded...")
diff --git a/examples/3zone_toy_stochastic_PySP/inputs/balancing_areas.csv b/examples/3zone_toy_stochastic_PySP/inputs/balancing_areas.csv
deleted file mode 100644
index b3d2ee48b..000000000
--- a/examples/3zone_toy_stochastic_PySP/inputs/balancing_areas.csv
+++ /dev/null
@@ -1,3 +0,0 @@
-BALANCING_AREAS,quickstart_res_load_frac,quickstart_res_wind_frac,quickstart_res_solar_frac,spinning_res_load_frac,spinning_res_wind_frac,spinning_res_solar_frac
-NorthCentral,0.04,0.05,0.05,0.02,0.05,0.05
-South,0.04,0.06,0.06,0.02,0.07,0.07
diff --git a/examples/3zone_toy_stochastic_PySP/inputs/gen_build_predetermined.csv b/examples/3zone_toy_stochastic_PySP/inputs/gen_build_predetermined.csv
index 1dbc91204..fc9182464 100644
--- a/examples/3zone_toy_stochastic_PySP/inputs/gen_build_predetermined.csv
+++ b/examples/3zone_toy_stochastic_PySP/inputs/gen_build_predetermined.csv
@@ -1,4 +1,4 @@
-GENERATION_PROJECT,build_year,gen_predetermined_cap
+GENERATION_PROJECT,build_year,build_gen_predetermined
 N-Coal_ST,1995,2
 N-Geothermal,2000,1
 N-NG_CC,2008,2
diff --git a/examples/3zone_toy_stochastic_PySP/inputs/generation_projects_info.csv b/examples/3zone_toy_stochastic_PySP/inputs/gen_info.csv
similarity index 100%
rename from examples/3zone_toy_stochastic_PySP/inputs/generation_projects_info.csv
rename to examples/3zone_toy_stochastic_PySP/inputs/gen_info.csv
diff --git a/examples/3zone_toy_stochastic_PySP/inputs/switch_inputs_version.txt b/examples/3zone_toy_stochastic_PySP/inputs/switch_inputs_version.txt
index 157e54f3e..f1547e6d1 100644
--- a/examples/3zone_toy_stochastic_PySP/inputs/switch_inputs_version.txt
+++ b/examples/3zone_toy_stochastic_PySP/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.6
+2.0.7
diff --git a/examples/3zone_toy_stochastic_PySP/pha_bounds_cfg.py b/examples/3zone_toy_stochastic_PySP/pha_bounds_cfg.py
index b54d7d840..d891b3de4 100644
--- a/examples/3zone_toy_stochastic_PySP/pha_bounds_cfg.py
+++ b/examples/3zone_toy_stochastic_PySP/pha_bounds_cfg.py
@@ -4,20 +4,24 @@
 # Use this by adding terms like the following to the runph command:
 # --linearize-nonbinary-penalty-terms=5 --bounds-cfgfile=pha_bounds_cfg.py
 
+
 def pysp_boundsetter_callback(self, scenario_tree, scenario):
-    m = scenario._instance # see pyomo/pysp/scenariotree/tree_structure.py
+    m = scenario._instance  # see pyomo/pysp/scenariotree/tree_structure.py
 
     # BuildLocalTD
     for p in m.PERIODS:
         for lz in m.LOAD_ZONES:
-            m.BuildLocalTD[lz, p].setub(2 * m.zone_expected_coincident_peak_demand[lz, p])
+            m.BuildLocalTD[lz, p].setub(
+                2 * m.zone_expected_coincident_peak_demand[lz, p]
+            )
 
     # Estimate an upper bound of system peak demand for limiting generation unit
     # & transmission line builds
     system_wide_peak = {}
     for p in m.PERIODS:
         system_wide_peak[p] = sum(
-            m.zone_expected_coincident_peak_demand[lz, p] for lz in m.LOAD_ZONES)
+            m.zone_expected_coincident_peak_demand[lz, p] for lz in m.LOAD_ZONES
+        )
 
     # BuildGen
     for g, bld_yr in m.NEW_GEN_BLD_YRS:
@@ -27,6 +31,7 @@ def pysp_boundsetter_callback(self, scenario_tree, scenario):
     for tx, bld_yr in m.TRANS_BLD_YRS:
         m.BuildTx[tx, bld_yr].setub(5 * system_wide_peak[bld_yr])
 
+
 # For some reason runph looks for pysp_boundsetter_callback when run in
 # single-thread mode and ph_boundsetter_callback when called from mpirun with
 # remote execution via pyro, so we map both names to the same function.
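The comment above notes that runph looks for pysp_boundsetter_callback in single-thread mode but ph_boundsetter_callback under mpirun with pyro. The alias that accomplishes this lives in the unchanged tail of pha_bounds_cfg.py, outside the hunks shown; a minimal sketch of the pattern it describes, assuming only the function defined above:

```python
# Sketch of the mapping the comment above describes (not part of the hunks
# shown): expose the same bound-setter under both names runph may look for.
ph_boundsetter_callback = pysp_boundsetter_callback
```
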
diff --git a/examples/3zone_toy_stochastic_PySP/rhosetter.py b/examples/3zone_toy_stochastic_PySP/rhosetter.py
index 5d8499472..fd3969e7d 100644
--- a/examples/3zone_toy_stochastic_PySP/rhosetter.py
+++ b/examples/3zone_toy_stochastic_PySP/rhosetter.py
@@ -18,17 +18,22 @@
 from switch_model.utilities import iteritems
 
 try:
-    from pyomo.repn import generate_standard_repn # Pyomo >=5.6
+    from pyomo.repn import generate_standard_repn  # Pyomo >=5.6
+
     newPyomo = True
 except ImportError:
-    from pyomo.repn import generate_canonical_repn # Pyomo <=5.6
+    from pyomo.repn import generate_canonical_repn  # Pyomo <=5.6
+
     newPyomo = False
 
+
 def ph_rhosetter_callback(ph, scenario_tree, scenario):
-    # Derive coefficients from active objective
-    cost_expr = next(scenario._instance.component_data_objects(
-        Objective, active=True, descend_into=True
-    ))
+    # Derive coefficients from active objective
+    cost_expr = next(
+        scenario._instance.component_data_objects(
+            Objective, active=True, descend_into=True
+        )
+    )
 
     set_rho_values(ph, scenario_tree, scenario, cost_expr)
 
@@ -57,8 +62,7 @@ def set_rho_values(ph, scenario_tree, scenario, cost_expr):
 
     cost_coefficients = {}
     var_names = {}
-    for (variable, coef) in \
-            zip(standard_repn.linear_vars, standard_repn.linear_coefs):
+    for (variable, coef) in zip(standard_repn.linear_vars, standard_repn.linear_coefs):
         variable_id = symbol_map.getSymbol(variable)
         cost_coefficients[variable_id] = coef
         var_names[variable_id] = variable.name
@@ -72,11 +76,13 @@ def set_rho_values(ph, scenario_tree, scenario, cost_expr):
                 tree_node,
                 scenario,
                 variable_id,
-                cost_coefficients[variable_id] * rho_coefficient)
+                cost_coefficients[variable_id] * rho_coefficient,
+            )
             set_rho = True
             break
         if not set_rho:
             print(
-                "Warning! Could not find tree node for variable {}; rho not set."
-                .format(var_names[variable_id])
+                "Warning! Could not find tree node for variable {}; rho not set.".format(
+                    var_names[variable_id]
+                )
             )
diff --git a/examples/3zone_toy_stochastic_PySP/rhosetter_FS_only.py b/examples/3zone_toy_stochastic_PySP/rhosetter_FS_only.py
index 7b06dab14..5b86c1bef 100644
--- a/examples/3zone_toy_stochastic_PySP/rhosetter_FS_only.py
+++ b/examples/3zone_toy_stochastic_PySP/rhosetter_FS_only.py
@@ -25,6 +25,7 @@
 # The rhosetter module should be in the same directory as this file.
 from rhosetter import set_rho_values
 
+
 def ph_rhosetter_callback(ph, scenario_tree, scenario):
     # This component name must match the expression used for first stage
     # costs defined in the ReferenceModel.
diff --git a/examples/carbon_cap/inputs/balancing_areas.csv b/examples/carbon_cap/inputs/balancing_areas.csv
deleted file mode 100644
index b3d2ee48b..000000000
--- a/examples/carbon_cap/inputs/balancing_areas.csv
+++ /dev/null
@@ -1,3 +0,0 @@
-BALANCING_AREAS,quickstart_res_load_frac,quickstart_res_wind_frac,quickstart_res_solar_frac,spinning_res_load_frac,spinning_res_wind_frac,spinning_res_solar_frac
-NorthCentral,0.04,0.05,0.05,0.02,0.05,0.05
-South,0.04,0.06,0.06,0.02,0.07,0.07
diff --git a/examples/carbon_cap/inputs/fuel_supply_curves.csv b/examples/carbon_cap/inputs/fuel_supply_curves.csv
index 747e53b66..a799d5e8f 100644
--- a/examples/carbon_cap/inputs/fuel_supply_curves.csv
+++ b/examples/carbon_cap/inputs/fuel_supply_curves.csv
@@ -1,10 +1,10 @@
 regional_fuel_market,period,tier,unit_cost,max_avail_at_cost
-All_DistOil,2020,0,21.9802,inf
+All_DistOil,2020,0,21.9802,.
 All_DistOil,2030,0,24.5216,100000.0
 All_NG,2020,0,4.4647,1950514555.0
-All_NG,2020,1,5.0709,inf
+All_NG,2020,1,5.0709,.
 All_NG,2030,0,5.925,2368354558.0
-All_NG,2030,1,5.925,inf
+All_NG,2030,1,5.925,.
 North_Bio,2020,0,1.7102,6864985.0
 North_Bio,2020,1,3.3941,6782413.0
 North_Bio,2030,0,2.0438,6064415.0
diff --git a/examples/carbon_cap/inputs/gen_build_predetermined.csv b/examples/carbon_cap/inputs/gen_build_predetermined.csv
index 1dbc91204..fc9182464 100644
--- a/examples/carbon_cap/inputs/gen_build_predetermined.csv
+++ b/examples/carbon_cap/inputs/gen_build_predetermined.csv
@@ -1,4 +1,4 @@
-GENERATION_PROJECT,build_year,gen_predetermined_cap
+GENERATION_PROJECT,build_year,build_gen_predetermined
 N-Coal_ST,1995,2
 N-Geothermal,2000,1
 N-NG_CC,2008,2
diff --git a/examples/carbon_cap/inputs/generation_projects_info.csv b/examples/carbon_cap/inputs/gen_info.csv
similarity index 100%
rename from examples/carbon_cap/inputs/generation_projects_info.csv
rename to examples/carbon_cap/inputs/gen_info.csv
diff --git a/examples/carbon_cap/inputs/switch_inputs_version.txt b/examples/carbon_cap/inputs/switch_inputs_version.txt
index 157e54f3e..f1547e6d1 100644
--- a/examples/carbon_cap/inputs/switch_inputs_version.txt
+++ b/examples/carbon_cap/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.6
+2.0.7
diff --git a/examples/carbon_cap/outputs/total_cost.txt b/examples/carbon_cap/outputs/total_cost.txt
index fe42aeac8..ed027f1e7 100644
--- a/examples/carbon_cap/outputs/total_cost.txt
+++ b/examples/carbon_cap/outputs/total_cost.txt
@@ -1 +1 @@
-139541670.129
+139541670.12902084
diff --git a/examples/ccs/inputs/gen_build_predetermined.csv b/examples/ccs/inputs/gen_build_predetermined.csv
index b8fa3b9bd..cb69b5ed2 100644
--- a/examples/ccs/inputs/gen_build_predetermined.csv
+++ b/examples/ccs/inputs/gen_build_predetermined.csv
@@ -1,4 +1,4 @@
-GENERATION_PROJECT,build_year,gen_predetermined_cap
+GENERATION_PROJECT,build_year,build_gen_predetermined
 S-NG_CC,2000,5
 S-Central_PV-1,2000,1
 S-Geothermal,1998,1
diff --git a/examples/ccs/inputs/generation_projects_info.csv b/examples/ccs/inputs/gen_info.csv
similarity index 100%
rename from examples/ccs/inputs/generation_projects_info.csv
rename to examples/ccs/inputs/gen_info.csv
diff --git a/examples/ccs/inputs/switch_inputs_version.txt b/examples/ccs/inputs/switch_inputs_version.txt
index 157e54f3e..f1547e6d1 100644
--- a/examples/ccs/inputs/switch_inputs_version.txt
+++ b/examples/ccs/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.6
+2.0.7
diff --git a/examples/ccs/outputs/total_cost.txt b/examples/ccs/outputs/total_cost.txt
index ae32cd34a..d0bf5c701 100644
--- a/examples/ccs/outputs/total_cost.txt
+++ b/examples/ccs/outputs/total_cost.txt
@@ -1 +1 @@
-19897963.4619
+19897963.461891923
diff --git a/examples/copperplate0/inputs/gen_build_predetermined.csv b/examples/copperplate0/inputs/gen_build_predetermined.csv
index b8fa3b9bd..cb69b5ed2 100644
--- a/examples/copperplate0/inputs/gen_build_predetermined.csv
+++ b/examples/copperplate0/inputs/gen_build_predetermined.csv
@@ -1,4 +1,4 @@
-GENERATION_PROJECT,build_year,gen_predetermined_cap
+GENERATION_PROJECT,build_year,build_gen_predetermined
 S-NG_CC,2000,5
 S-Central_PV-1,2000,1
 S-Geothermal,1998,1
diff --git a/examples/copperplate0/inputs/generation_projects_info.csv b/examples/copperplate0/inputs/gen_info.csv
similarity index 100%
rename from examples/copperplate0/inputs/generation_projects_info.csv
rename to examples/copperplate0/inputs/gen_info.csv
diff --git a/examples/copperplate0/inputs/switch_inputs_version.txt b/examples/copperplate0/inputs/switch_inputs_version.txt
index 157e54f3e..f1547e6d1 100644
--- a/examples/copperplate0/inputs/switch_inputs_version.txt
+++ b/examples/copperplate0/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.6
+2.0.7
diff --git a/examples/copperplate0/outputs/BuildGen.tab b/examples/copperplate0/outputs/BuildGen.tab
deleted file mode 100644
index aafa93867..000000000
--- a/examples/copperplate0/outputs/BuildGen.tab
+++ /dev/null
@@ -1,7 +0,0 @@
-GEN_BLD_YRS_1 GEN_BLD_YRS_2 BuildGen
-S-Geothermal 1998 1.0
-S-Central_PV-1 2020 0.0
-S-Central_PV-1 2000 1.0
-S-NG_CC 2000 5.0
-S-NG_CC 2020 2.34276595744681
-S-Geothermal 2020 0.0
diff --git a/examples/copperplate0/outputs/BuildMinGenCap.tab b/examples/copperplate0/outputs/BuildMinGenCap.tab
deleted file mode 100644
index c8e45c155..000000000
--- a/examples/copperplate0/outputs/BuildMinGenCap.tab
+++ /dev/null
@@ -1 +0,0 @@
-NEW_GEN_WITH_MIN_BUILD_YEARS_1 NEW_GEN_WITH_MIN_BUILD_YEARS_2 BuildMinGenCap
diff --git a/examples/copperplate0/outputs/DispatchBaseloadByPeriod.tab b/examples/copperplate0/outputs/DispatchBaseloadByPeriod.tab
deleted file mode 100644
index 18a41f8e2..000000000
--- a/examples/copperplate0/outputs/DispatchBaseloadByPeriod.tab
+++ /dev/null
@@ -1,2 +0,0 @@
-DispatchBaseloadByPeriod_index_1 DispatchBaseloadByPeriod_index_2 DispatchBaseloadByPeriod
-S-Geothermal 2020 0.5
diff --git a/examples/copperplate0/outputs/DispatchGen.tab b/examples/copperplate0/outputs/DispatchGen.tab
deleted file mode 100644
index 6ba424751..000000000
--- a/examples/copperplate0/outputs/DispatchGen.tab
+++ /dev/null
@@ -1,7 +0,0 @@
-GEN_TPS_1 GEN_TPS_2 DispatchGen
-S-Geothermal 2 0.5
-S-Geothermal 1 0.5
-S-NG_CC 1 6.9022
-S-NG_CC 2 0.0
-S-Central_PV-1 2 0.0
-S-Central_PV-1 1 0.5978
diff --git a/examples/copperplate0/outputs/GenFuelUseRate.tab b/examples/copperplate0/outputs/GenFuelUseRate.tab
deleted file mode 100644
index 096f1b8bc..000000000
--- a/examples/copperplate0/outputs/GenFuelUseRate.tab
+++ /dev/null
@@ -1,3 +0,0 @@
-GEN_TP_FUELS_1 GEN_TP_FUELS_2 GEN_TP_FUELS_3 GenFuelUseRate
-S-NG_CC 1 NaturalGas 46.279251
-S-NG_CC 2 NaturalGas 0.0
diff --git a/examples/copperplate0/outputs/costs_itemized.csv b/examples/copperplate0/outputs/costs_itemized.csv
deleted file mode 100644
index 2645a4aac..000000000
--- a/examples/copperplate0/outputs/costs_itemized.csv
+++ /dev/null
@@ -1,4 +0,0 @@
-PERIOD,Component,AnnualCost_NPV,AnnualCost_Real,Component_type
-2020,TotalGenFixedCosts,11213737.1287,1601083.86234,annual
-2020,GenVariableOMCostsInTP,1608195.12658,229616.160528,timepoint
-2020,FuelCostsPerTP,5682691.43038,811367.828532,timepoint
diff --git a/examples/copperplate0/outputs/dispatch-wide.txt b/examples/copperplate0/outputs/dispatch-wide.txt
deleted file mode 100644
index 020a89a8c..000000000
--- a/examples/copperplate0/outputs/dispatch-wide.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-timestamp S-Central_PV-1 S-Geothermal S-NG_CC
-2025011512 0.5978 0.5 6.9022
-2025011600 0 0.5 0
diff --git a/examples/copperplate0/outputs/dispatch.csv b/examples/copperplate0/outputs/dispatch.csv
deleted file mode 100644
index d99ff1162..000000000
--- a/examples/copperplate0/outputs/dispatch.csv
+++ /dev/null
@@ -1,7 +0,0 @@
-generation_project,timestamp,DispatchEmissions_tCO2_per_typical_yr,DispatchGen_MW,Energy_GWh_typical_yr,VariableCost_per_yr,gen_dbid,gen_energy_source,gen_load_zone,gen_tech,period,tp_weight_in_year_hrs
-S-Geothermal,2025011600,,0.5,2.1915,63180.945,33,Geothermal,South,Geothermal,2020,4383.0
-S-Geothermal,2025011512,,0.5,2.1915,63180.945,33,Geothermal,South,Geothermal,2020,4383.0
-S-NG_CC,2025011512,10762.7942455,6.9022,30.2523426,103254.270528,34,NaturalGas,South,NG_CC,2020,4383.0
-S-NG_CC,2025011600,0.0,0.0,0.0,0.0,34,NaturalGas,South,NG_CC,2020,4383.0
-S-Central_PV-1,2025011600,,0.0,0.0,0.0,41,Solar,South,Central_PV,2020,4383.0
-S-Central_PV-1,2025011512,,0.5978,2.6201574,0.0,41,Solar,South,Central_PV,2020,4383.0
diff --git a/examples/copperplate0/outputs/dispatch_annual_summary.csv b/examples/copperplate0/outputs/dispatch_annual_summary.csv
deleted file mode 100644
index bd42917d1..000000000
--- a/examples/copperplate0/outputs/dispatch_annual_summary.csv
+++ /dev/null
@@ -1,4 +0,0 @@
-gen_tech,gen_energy_source,period,Energy_GWh_typical_yr,VariableCost_per_yr,DispatchEmissions_tCO2_per_typical_yr
-Central_PV,Solar,2020,2.6201574,0.0,
-Geothermal,Geothermal,2020,4.383,126361.89,
-NG_CC,NaturalGas,2020,30.2523426,103254.270528,10762.7942455
diff --git a/examples/copperplate0/outputs/dispatch_zonal_annual_summary.csv b/examples/copperplate0/outputs/dispatch_zonal_annual_summary.csv
deleted file mode 100644
index 886d58705..000000000
--- a/examples/copperplate0/outputs/dispatch_zonal_annual_summary.csv
+++ /dev/null
@@ -1,4 +0,0 @@
-gen_tech,gen_load_zone,gen_energy_source,period,Energy_GWh_typical_yr,VariableCost_per_yr,DispatchEmissions_tCO2_per_typical_yr
-Central_PV,South,Solar,2020,2.6201574,0.0,
-Geothermal,South,Geothermal,2020,4.383,126361.89,
-NG_CC,South,NaturalGas,2020,30.2523426,103254.270528,10762.7942455
diff --git a/examples/copperplate0/outputs/electricity_cost.csv b/examples/copperplate0/outputs/electricity_cost.csv
deleted file mode 100644
index 37d774119..000000000
--- a/examples/copperplate0/outputs/electricity_cost.csv
+++ /dev/null
@@ -1,2 +0,0 @@
-PERIOD,EnergyCostReal_per_MWh,SystemCostPerPeriod_NPV,SystemCostPerPeriod_Real,SystemDemand_MWh
-2020,7.09175249668,18504623.6857,2642067.8514,372555.0
diff --git a/examples/copperplate0/outputs/gen_cap.txt b/examples/copperplate0/outputs/gen_cap.txt
deleted file mode 100644
index a6ce4024b..000000000
--- a/examples/copperplate0/outputs/gen_cap.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-GENERATION_PROJECT PERIOD gen_tech gen_load_zone gen_energy_source GenCapacity GenCapitalCosts GenFixedOMCosts
-S-Central_PV-1 2020 Central_PV South Solar 1 227410 41850
-S-NG_CC 2020 NG_CC South NaturalGas 7.34277 832743 43089.6
-S-Geothermal 2020 Geothermal South Geothermal 1 455992 0
diff --git a/examples/copperplate0/outputs/load_balance.txt b/examples/copperplate0/outputs/load_balance.txt
deleted file mode 100644
index 0fb2e9455..000000000
--- a/examples/copperplate0/outputs/load_balance.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-load_zone timestamp ZoneTotalCentralDispatch ZoneTotalDistributedDispatch zone_demand_mw
-South 2025011512 8 0 8
-South 2025011600 0.5 0 0.5
diff --git a/examples/copperplate0/outputs/results.pickle b/examples/copperplate0/outputs/results.pickle
deleted file mode 100644
index 1184d3901..000000000
Binary files a/examples/copperplate0/outputs/results.pickle and /dev/null differ
diff --git a/examples/copperplate0/outputs/total_cost.txt b/examples/copperplate0/outputs/total_cost.txt
index 950fc10cc..9d8f915eb 100644
--- a/examples/copperplate0/outputs/total_cost.txt
+++ b/examples/copperplate0/outputs/total_cost.txt
@@ -1 +1 @@
-18504623.6857
+18504623.68565862
diff --git a/examples/copperplate1/inputs/fuel_supply_curves.csv b/examples/copperplate1/inputs/fuel_supply_curves.csv
index ea7bea186..a7f984f35 100644
--- a/examples/copperplate1/inputs/fuel_supply_curves.csv
+++ b/examples/copperplate1/inputs/fuel_supply_curves.csv
@@ -1,5 +1,5 @@
 regional_fuel_market,period,tier,unit_cost,max_avail_at_cost
 All_NG,2020,0,4.4647,1950514555.0
-All_NG,2020,1,5.0709,inf
+All_NG,2020,1,5.0709,.
 South_Bio,2020,0,1.7115,26427258.0
 South_Bio,2020,1,17.1714,486066.0
diff --git a/examples/copperplate1/inputs/gen_build_predetermined.csv b/examples/copperplate1/inputs/gen_build_predetermined.csv
index c27ab15b3..a1a9a59f6 100644
--- a/examples/copperplate1/inputs/gen_build_predetermined.csv
+++ b/examples/copperplate1/inputs/gen_build_predetermined.csv
@@ -1,4 +1,4 @@
-GENERATION_PROJECT,build_year,gen_predetermined_cap
+GENERATION_PROJECT,build_year,build_gen_predetermined
 S-Geothermal,1998,3
 S-NG_CC,2000,5
 S-NG_GT,1990,3
diff --git a/examples/copperplate1/inputs/generation_projects_info.csv b/examples/copperplate1/inputs/gen_info.csv
similarity index 100%
rename from examples/copperplate1/inputs/generation_projects_info.csv
rename to examples/copperplate1/inputs/gen_info.csv
diff --git a/examples/copperplate1/inputs/switch_inputs_version.txt b/examples/copperplate1/inputs/switch_inputs_version.txt
index 157e54f3e..f1547e6d1 100644
--- a/examples/copperplate1/inputs/switch_inputs_version.txt
+++ b/examples/copperplate1/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.6
+2.0.7
diff --git a/examples/copperplate1/outputs/total_cost.txt b/examples/copperplate1/outputs/total_cost.txt
index 43fdd86ca..b37c0e30b 100644
--- a/examples/copperplate1/outputs/total_cost.txt
+++ b/examples/copperplate1/outputs/total_cost.txt
@@ -1 +1 @@
-33700474.3049
+33700474.30486566
diff --git a/examples/custom_extension/inputs/gen_build_predetermined.csv b/examples/custom_extension/inputs/gen_build_predetermined.csv
index b8fa3b9bd..cb69b5ed2 100644
--- a/examples/custom_extension/inputs/gen_build_predetermined.csv
+++ b/examples/custom_extension/inputs/gen_build_predetermined.csv
@@ -1,4 +1,4 @@
-GENERATION_PROJECT,build_year,gen_predetermined_cap
+GENERATION_PROJECT,build_year,build_gen_predetermined
 S-NG_CC,2000,5
 S-Central_PV-1,2000,1
 S-Geothermal,1998,1
diff --git a/examples/custom_extension/inputs/generation_projects_info.csv b/examples/custom_extension/inputs/gen_info.csv
similarity index 100%
rename from examples/custom_extension/inputs/generation_projects_info.csv
rename to examples/custom_extension/inputs/gen_info.csv
diff --git a/examples/custom_extension/inputs/switch_inputs_version.txt b/examples/custom_extension/inputs/switch_inputs_version.txt
index 157e54f3e..f1547e6d1 100644
--- a/examples/custom_extension/inputs/switch_inputs_version.txt
+++ b/examples/custom_extension/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.6
+2.0.7
diff --git a/examples/custom_extension/outputs/total_cost.txt b/examples/custom_extension/outputs/total_cost.txt
index 46cf7eac1..cc7dc73d9 100644
--- a/examples/custom_extension/outputs/total_cost.txt
+++ b/examples/custom_extension/outputs/total_cost.txt
@@ -1 +1 @@
-25508464.8913
+25508464.89126843
diff --git a/examples/custom_extension/sunk_costs.py b/examples/custom_extension/sunk_costs.py
index 32986070f..133955ad5 100644
--- a/examples/custom_extension/sunk_costs.py
+++ b/examples/custom_extension/sunk_costs.py
@@ -28,6 +28,6 @@
 def define_components(mod):
 
     mod.administration_fees = Param(
-        mod.PERIODS,
-        initialize=lambda m, p: 1000000)
-    mod.Cost_Components_Per_Period.append('administration_fees')
+        mod.PERIODS, initialize=lambda m, p: 1000000, within=Any
+    )
+    mod.Cost_Components_Per_Period.append("administration_fees")
diff --git a/examples/diagnose_infeasibility/README.md b/examples/diagnose_infeasibility/README.md
new file mode 100644
index 000000000..e27a97e21
--- /dev/null
+++ b/examples/diagnose_infeasibility/README.md
@@ -0,0 +1,24 @@
+SYNOPSIS
+    switch solve --verbose --log-run --exclude-module switch_model.balancing.diagnose_infeasibility
+    switch solve --verbose --log-run
+
+This case is used to test the diagnose_infeasibility module and possibly to
+ensure Switch reports infeasible models correctly with various solvers.
+
+The diagnose_infeasibility module ignores normal costs and minimizes constraint
+violations instead. This model has too little capacity available to meet demand.
+To ensure it has a unique violation-minimizing solution, we include the
+transmission.local_td module, which creates losses between the generation node
+and the load node. Consequently, constraint violations can be minimized by
+violating the load-balancing constraint at the load node, rather than over-
+producing upstream. (Various upstream overproduction options would produce the
+same amount of violation of the construction-dispatch balance but with
+different costs, so different solvers could report different total_cost values,
+interfering with the test suite.)
+
+This model is based on "new_builds_only", but with caps on construction
+that make the model infeasible. Geothermal has also been made non-baseload so
+it can be ramped down when needed. (In earlier versions, if geothermal was
+forced on, it created equal-violation choices between oversupply in one hour vs
+undersupply in the other, which have different costs.)
+
diff --git a/examples/diagnose_infeasibility/inputs/financials.csv b/examples/diagnose_infeasibility/inputs/financials.csv
new file mode 100644
index 000000000..a40129d05
--- /dev/null
+++ b/examples/diagnose_infeasibility/inputs/financials.csv
@@ -0,0 +1,2 @@
+base_financial_year,discount_rate,interest_rate
+2015,0.05,0.07
diff --git a/examples/diagnose_infeasibility/inputs/fuel_cost.csv b/examples/diagnose_infeasibility/inputs/fuel_cost.csv
new file mode 100644
index 000000000..7783a8744
--- /dev/null
+++ b/examples/diagnose_infeasibility/inputs/fuel_cost.csv
@@ -0,0 +1,2 @@
+load_zone,fuel,period,fuel_cost
+South,NaturalGas,2020,4
diff --git a/examples/diagnose_infeasibility/inputs/fuels.csv b/examples/diagnose_infeasibility/inputs/fuels.csv
new file mode 100644
index 000000000..54dfca062
--- /dev/null
+++ b/examples/diagnose_infeasibility/inputs/fuels.csv
@@ -0,0 +1,2 @@
+fuel,co2_intensity,upstream_co2_intensity
+NaturalGas,0.05306,0
diff --git a/examples/diagnose_infeasibility/inputs/gen_build_costs.csv b/examples/diagnose_infeasibility/inputs/gen_build_costs.csv
new file mode 100644
index 000000000..844230405
--- /dev/null
+++ b/examples/diagnose_infeasibility/inputs/gen_build_costs.csv
@@ -0,0 +1,3 @@
+GENERATION_PROJECT,build_year,gen_overnight_cost,gen_fixed_om
+S-Geothermal,2020,5524200,0.0
+S-Central_PV-1,2020,2334300,41850.0
diff --git a/examples/diagnose_infeasibility/inputs/gen_info.csv b/examples/diagnose_infeasibility/inputs/gen_info.csv
new file mode 100644
index 000000000..d9a0532b4
--- /dev/null
+++ b/examples/diagnose_infeasibility/inputs/gen_info.csv
@@ -0,0 +1,4 @@
+GENERATION_PROJECT,gen_dbid,gen_tech,gen_load_zone,gen_connect_cost_per_mw,gen_capacity_limit_mw,gen_variable_om,gen_max_age,gen_min_build_capacity,gen_scheduled_outage_rate,gen_forced_outage_rate,gen_is_variable,gen_is_baseload,gen_is_cogen,gen_energy_source,gen_full_load_heat_rate
+S-Geothermal,33,Geothermal,South,134222.0,1.0,28.83,30,0,0.0075,0.0241,0,0,0,Geothermal,.
+S-NG_CC,34,NG_CC,South,57566.6,4.0,3.4131,20,0,0.04,0.06,0,0,0,NaturalGas,6.705
+S-Central_PV-1,41,Central_PV,South,74881.9,2.0,0.0,20,0,0.0,0.02,1,0,0,Solar,.
diff --git a/examples/diagnose_infeasibility/inputs/load_zones.csv b/examples/diagnose_infeasibility/inputs/load_zones.csv
new file mode 100644
index 000000000..9810665f2
--- /dev/null
+++ b/examples/diagnose_infeasibility/inputs/load_zones.csv
@@ -0,0 +1,2 @@
+LOAD_ZONE,cost_multipliers,ccs_distance_km,dbid
+South,1,0,3
diff --git a/examples/diagnose_infeasibility/inputs/loads.csv b/examples/diagnose_infeasibility/inputs/loads.csv
new file mode 100644
index 000000000..84c59e19a
--- /dev/null
+++ b/examples/diagnose_infeasibility/inputs/loads.csv
@@ -0,0 +1,3 @@
+LOAD_ZONE,TIMEPOINT,zone_demand_mw
+South,1,8.0
+South,2,0.5
diff --git a/examples/diagnose_infeasibility/inputs/modules.txt b/examples/diagnose_infeasibility/inputs/modules.txt
new file mode 100644
index 000000000..ba060c8f6
--- /dev/null
+++ b/examples/diagnose_infeasibility/inputs/modules.txt
@@ -0,0 +1,14 @@
+# Core Modules
+switch_model
+switch_model.timescales
+switch_model.financials
+switch_model.balancing.load_zones
+switch_model.energy_sources.properties
+switch_model.generators.core.build
+switch_model.generators.core.dispatch
+switch_model.reporting
+# Custom Modules
+switch_model.generators.core.no_commit
+switch_model.energy_sources.fuel_costs.simple
+switch_model.transmission.local_td
+switch_model.balancing.diagnose_infeasibility
diff --git a/examples/diagnose_infeasibility/inputs/non_fuel_energy_sources.csv b/examples/diagnose_infeasibility/inputs/non_fuel_energy_sources.csv
new file mode 100644
index 000000000..c37ec3d8e
--- /dev/null
+++ b/examples/diagnose_infeasibility/inputs/non_fuel_energy_sources.csv
@@ -0,0 +1,3 @@
+energy_source
+Solar
+Geothermal
diff --git a/examples/diagnose_infeasibility/inputs/periods.csv b/examples/diagnose_infeasibility/inputs/periods.csv
new file mode 100644
index 000000000..27c58e07f
--- /dev/null
+++ b/examples/diagnose_infeasibility/inputs/periods.csv
@@ -0,0 +1,2 @@
+INVESTMENT_PERIOD,period_start,period_end
+2020,2017,2026
diff --git a/examples/diagnose_infeasibility/inputs/switch_inputs_version.txt b/examples/diagnose_infeasibility/inputs/switch_inputs_version.txt
new file mode 100644
index 000000000..f1547e6d1
--- /dev/null
+++ b/examples/diagnose_infeasibility/inputs/switch_inputs_version.txt
@@ -0,0 +1 @@
+2.0.7
diff --git a/examples/diagnose_infeasibility/inputs/timepoints.csv b/examples/diagnose_infeasibility/inputs/timepoints.csv
new file mode 100644
index 000000000..54d33b02a
--- /dev/null
+++ b/examples/diagnose_infeasibility/inputs/timepoints.csv
@@ -0,0 +1,3 @@
+timepoint_id,timestamp,timeseries
+1,2025011512,2020_all
+2,2025011600,2020_all
diff --git a/examples/diagnose_infeasibility/inputs/timeseries.csv b/examples/diagnose_infeasibility/inputs/timeseries.csv
new file mode 100644
index 000000000..14e1fdeb0
--- /dev/null
+++ b/examples/diagnose_infeasibility/inputs/timeseries.csv
@@ -0,0 +1,2 @@
+TIMESERIES,ts_period,ts_duration_of_tp,ts_num_tps,ts_scale_to_period
+2020_all,2020,12,2,3652.5
diff --git a/examples/diagnose_infeasibility/inputs/variable_capacity_factors.csv b/examples/diagnose_infeasibility/inputs/variable_capacity_factors.csv
new file mode 100644
index 000000000..d8898d1c1
--- /dev/null
+++ b/examples/diagnose_infeasibility/inputs/variable_capacity_factors.csv
@@ -0,0 +1,3 @@
+GENERATION_PROJECT,timepoint,gen_max_capacity_factor
+S-Central_PV-1,1,0.61
+S-Central_PV-1,2,0.0
diff --git a/examples/diagnose_infeasibility/outputs/total_cost.txt b/examples/diagnose_infeasibility/outputs/total_cost.txt
new file mode 100644
index 000000000..4970deea9
--- /dev/null
+++ b/examples/diagnose_infeasibility/outputs/total_cost.txt
@@ -0,0 +1 @@
+8296364.004649366
diff --git a/examples/discrete_and_min_build/inputs/fuel_supply_curves.csv b/examples/discrete_and_min_build/inputs/fuel_supply_curves.csv
index 355fd5498..7287aa349 100644
--- a/examples/discrete_and_min_build/inputs/fuel_supply_curves.csv
+++ b/examples/discrete_and_min_build/inputs/fuel_supply_curves.csv
@@ -1,2 +1,2 @@
 regional_fuel_market,period,tier,unit_cost,max_avail_at_cost
-All_NG,2020,0,4,inf
+All_NG,2020,0,4,.
diff --git a/examples/discrete_and_min_build/inputs/gen_build_predetermined.csv b/examples/discrete_and_min_build/inputs/gen_build_predetermined.csv
index b8fa3b9bd..cb69b5ed2 100644
--- a/examples/discrete_and_min_build/inputs/gen_build_predetermined.csv
+++ b/examples/discrete_and_min_build/inputs/gen_build_predetermined.csv
@@ -1,4 +1,4 @@
-GENERATION_PROJECT,build_year,gen_predetermined_cap
+GENERATION_PROJECT,build_year,build_gen_predetermined
 S-NG_CC,2000,5
 S-Central_PV-1,2000,1
 S-Geothermal,1998,1
diff --git a/examples/discrete_and_min_build/inputs/generation_projects_info.csv b/examples/discrete_and_min_build/inputs/gen_info.csv
similarity index 100%
rename from examples/discrete_and_min_build/inputs/generation_projects_info.csv
rename to examples/discrete_and_min_build/inputs/gen_info.csv
diff --git a/examples/discrete_and_min_build/inputs/switch_inputs_version.txt b/examples/discrete_and_min_build/inputs/switch_inputs_version.txt
index 157e54f3e..f1547e6d1 100644
--- a/examples/discrete_and_min_build/inputs/switch_inputs_version.txt
+++ b/examples/discrete_and_min_build/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.6
+2.0.7
diff --git a/examples/discrete_and_min_build/outputs/total_cost.txt b/examples/discrete_and_min_build/outputs/total_cost.txt
index 2f49a9f88..9d336e752 100644
--- a/examples/discrete_and_min_build/outputs/total_cost.txt
+++ b/examples/discrete_and_min_build/outputs/total_cost.txt
@@ -1 +1 @@
-30633194.2142
+30633194.214195732
diff --git a/examples/discrete_build/inputs/fuel_supply_curves.csv b/examples/discrete_build/inputs/fuel_supply_curves.csv
index 355fd5498..7287aa349 100644
--- a/examples/discrete_build/inputs/fuel_supply_curves.csv
+++ b/examples/discrete_build/inputs/fuel_supply_curves.csv
@@ -1,2 +1,2 @@
 regional_fuel_market,period,tier,unit_cost,max_avail_at_cost
-All_NG,2020,0,4,inf
+All_NG,2020,0,4,.
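The diagnose_infeasibility example introduced above works by dropping the normal cost objective and minimizing total constraint violations instead. A minimal, self-contained Pyomo sketch of that relax-and-minimize idea (an illustration only, not the actual switch_model.balancing.diagnose_infeasibility implementation; the Dispatch/UnderSupply/OverSupply names are hypothetical):

```python
from pyomo.environ import (
    ConcreteModel, Var, Constraint, Objective, NonNegativeReals, minimize,
)

m = ConcreteModel()
# Only 5 MW of capacity but 8 MW of load: infeasible as a plain equality.
m.Dispatch = Var(within=NonNegativeReals, bounds=(0, 5))
load_mw = 8.0

# Relax the load balance with nonnegative slack variables in both directions.
m.UnderSupply = Var(within=NonNegativeReals)
m.OverSupply = Var(within=NonNegativeReals)
m.LoadBalance = Constraint(
    expr=m.Dispatch + m.UnderSupply - m.OverSupply == load_mw
)

# Ignore normal costs; minimize total violation instead. Any nonzero slack in
# the solution identifies a constraint that cannot be satisfied as written.
m.TotalViolation = Objective(expr=m.UnderSupply + m.OverSupply, sense=minimize)
```

Solving this with any LP solver drives OverSupply to zero and UnderSupply to 3, flagging a 3 MW shortfall in the load balance instead of failing with an opaque "infeasible" status.
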
diff --git a/examples/discrete_build/inputs/gen_build_predetermined.csv b/examples/discrete_build/inputs/gen_build_predetermined.csv
index b8fa3b9bd..cb69b5ed2 100644
--- a/examples/discrete_build/inputs/gen_build_predetermined.csv
+++ b/examples/discrete_build/inputs/gen_build_predetermined.csv
@@ -1,4 +1,4 @@
-GENERATION_PROJECT,build_year,gen_predetermined_cap
+GENERATION_PROJECT,build_year,build_gen_predetermined
 S-NG_CC,2000,5
 S-Central_PV-1,2000,1
 S-Geothermal,1998,1
diff --git a/examples/discrete_build/inputs/generation_projects_info.csv b/examples/discrete_build/inputs/gen_info.csv
similarity index 100%
rename from examples/discrete_build/inputs/generation_projects_info.csv
rename to examples/discrete_build/inputs/gen_info.csv
diff --git a/examples/discrete_build/inputs/switch_inputs_version.txt b/examples/discrete_build/inputs/switch_inputs_version.txt
index 157e54f3e..f1547e6d1 100644
--- a/examples/discrete_build/inputs/switch_inputs_version.txt
+++ b/examples/discrete_build/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.6
+2.0.7
diff --git a/examples/discrete_build/outputs/total_cost.txt b/examples/discrete_build/outputs/total_cost.txt
index f87d555c3..d27612d00 100644
--- a/examples/discrete_build/outputs/total_cost.txt
+++ b/examples/discrete_build/outputs/total_cost.txt
@@ -1 +1 @@
-28962382.4039
+28962382.40385321
diff --git a/examples/dr_simple/inputs/fuel_supply_curves.csv b/examples/dr_simple/inputs/fuel_supply_curves.csv
index ea7bea186..a7f984f35 100644
--- a/examples/dr_simple/inputs/fuel_supply_curves.csv
+++ b/examples/dr_simple/inputs/fuel_supply_curves.csv
@@ -1,5 +1,5 @@
 regional_fuel_market,period,tier,unit_cost,max_avail_at_cost
 All_NG,2020,0,4.4647,1950514555.0
-All_NG,2020,1,5.0709,inf
+All_NG,2020,1,5.0709,.
 South_Bio,2020,0,1.7115,26427258.0
 South_Bio,2020,1,17.1714,486066.0
diff --git a/examples/dr_simple/inputs/gen_build_predetermined.csv b/examples/dr_simple/inputs/gen_build_predetermined.csv
index e036cc252..285ebad7b 100644
--- a/examples/dr_simple/inputs/gen_build_predetermined.csv
+++ b/examples/dr_simple/inputs/gen_build_predetermined.csv
@@ -1,4 +1,4 @@
-GENERATION_PROJECT,build_year,gen_predetermined_cap
+GENERATION_PROJECT,build_year,build_gen_predetermined
 S-Geothermal,1998,1.05
 S-NG_CC,2000,5.0
 S-NG_GT,1990,3.0
diff --git a/examples/dr_simple/inputs/generation_projects_info.csv b/examples/dr_simple/inputs/gen_info.csv
similarity index 100%
rename from examples/dr_simple/inputs/generation_projects_info.csv
rename to examples/dr_simple/inputs/gen_info.csv
diff --git a/examples/dr_simple/inputs/switch_inputs_version.txt b/examples/dr_simple/inputs/switch_inputs_version.txt
index 157e54f3e..f1547e6d1 100644
--- a/examples/dr_simple/inputs/switch_inputs_version.txt
+++ b/examples/dr_simple/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.6
+2.0.7
diff --git a/examples/dr_simple/outputs/total_cost.txt b/examples/dr_simple/outputs/total_cost.txt
index c295fa37b..b03ba2320 100644
--- a/examples/dr_simple/outputs/total_cost.txt
+++ b/examples/dr_simple/outputs/total_cost.txt
@@ -1 +1 @@
-26054604.0406
+26054604.04064445
diff --git a/examples/hydro_simple/inputs/gen_build_predetermined.csv b/examples/hydro_simple/inputs/gen_build_predetermined.csv
index 5a2abe4dc..d9443f070 100644
--- a/examples/hydro_simple/inputs/gen_build_predetermined.csv
+++ b/examples/hydro_simple/inputs/gen_build_predetermined.csv
@@ -1,4 +1,4 @@
-GENERATION_PROJECT,build_year,gen_predetermined_cap
+GENERATION_PROJECT,build_year,build_gen_predetermined
 S-NG_CC,2000.0,5.0
 S-Central_PV-1,2000.0,1.0
 S-Geothermal,1998.0,1.0
diff --git a/examples/hydro_simple/inputs/generation_projects_info.csv b/examples/hydro_simple/inputs/gen_info.csv
similarity index 100%
rename from examples/hydro_simple/inputs/generation_projects_info.csv
rename to examples/hydro_simple/inputs/gen_info.csv
diff --git a/examples/hydro_simple/inputs/switch_inputs_version.txt b/examples/hydro_simple/inputs/switch_inputs_version.txt
index 157e54f3e..f1547e6d1 100644
--- a/examples/hydro_simple/inputs/switch_inputs_version.txt
+++ b/examples/hydro_simple/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.6
+2.0.7
diff --git a/examples/hydro_simple/outputs/total_cost.txt b/examples/hydro_simple/outputs/total_cost.txt
index f178d5cd9..756f6402a 100644
--- a/examples/hydro_simple/outputs/total_cost.txt
+++ b/examples/hydro_simple/outputs/total_cost.txt
@@ -1 +1 @@
-27519709.7687
+27519709.768722422
diff --git a/examples/hydro_system/inputs/gen_build_predetermined.csv b/examples/hydro_system/inputs/gen_build_predetermined.csv
index c5490c573..ab95843dd 100644
--- a/examples/hydro_system/inputs/gen_build_predetermined.csv
+++ b/examples/hydro_system/inputs/gen_build_predetermined.csv
@@ -1,4 +1,4 @@
-GENERATION_PROJECT,build_year,gen_predetermined_cap
+GENERATION_PROJECT,build_year,build_gen_predetermined
 Unique-NG_CC,2015,10
 Unique-Central_PV,2015,6
 Unique-Hydro_Dam_1,2015,8
diff --git a/examples/hydro_system/inputs/generation_projects_info.csv b/examples/hydro_system/inputs/gen_info.csv
similarity index 100%
rename from examples/hydro_system/inputs/generation_projects_info.csv
rename to examples/hydro_system/inputs/gen_info.csv
diff --git a/examples/hydro_system/inputs/switch_inputs_version.txt b/examples/hydro_system/inputs/switch_inputs_version.txt
index 157e54f3e..f1547e6d1 100644
--- a/examples/hydro_system/inputs/switch_inputs_version.txt
+++ b/examples/hydro_system/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.6
+2.0.7
diff --git a/examples/hydro_system/outputs/total_cost.txt b/examples/hydro_system/outputs/total_cost.txt
index 0c3601df5..81ebd2532 100644
--- a/examples/hydro_system/outputs/total_cost.txt
+++ b/examples/hydro_system/outputs/total_cost.txt
@@ -1 +1 @@
-8116934.47803
+8116934.478032047
diff --git a/examples/new_builds_only/inputs/generation_projects_info.csv b/examples/new_builds_only/inputs/gen_info.csv
similarity index 100%
rename from examples/new_builds_only/inputs/generation_projects_info.csv
rename to examples/new_builds_only/inputs/gen_info.csv
diff --git a/examples/new_builds_only/inputs/switch_inputs_version.txt b/examples/new_builds_only/inputs/switch_inputs_version.txt
index 157e54f3e..f1547e6d1 100644
--- a/examples/new_builds_only/inputs/switch_inputs_version.txt
+++ b/examples/new_builds_only/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.6
+2.0.7
diff --git a/examples/new_builds_only/outputs/total_cost.txt b/examples/new_builds_only/outputs/total_cost.txt
index 13e74c8a8..a3f62d3e8 100644
--- a/examples/new_builds_only/outputs/total_cost.txt
+++ b/examples/new_builds_only/outputs/total_cost.txt
@@ -1 +1 @@
-14998608.8293
+14998608.829253994
diff --git a/examples/planning_reserves/inputs/balancing_areas.csv b/examples/planning_reserves/inputs/balancing_areas.csv
deleted file mode 100644
index b3d2ee48b..000000000
--- a/examples/planning_reserves/inputs/balancing_areas.csv
+++ /dev/null
@@ -1,3 +0,0 @@
-BALANCING_AREAS,quickstart_res_load_frac,quickstart_res_wind_frac,quickstart_res_solar_frac,spinning_res_load_frac,spinning_res_wind_frac,spinning_res_solar_frac
-NorthCentral,0.04,0.05,0.05,0.02,0.05,0.05
-South,0.04,0.06,0.06,0.02,0.07,0.07
diff --git a/examples/planning_reserves/inputs/fuel_supply_curves.csv b/examples/planning_reserves/inputs/fuel_supply_curves.csv
index 747e53b66..a799d5e8f 100644
--- a/examples/planning_reserves/inputs/fuel_supply_curves.csv
+++ b/examples/planning_reserves/inputs/fuel_supply_curves.csv
@@ -1,10 +1,10 @@
 regional_fuel_market,period,tier,unit_cost,max_avail_at_cost
-All_DistOil,2020,0,21.9802,inf
+All_DistOil,2020,0,21.9802,.
 All_DistOil,2030,0,24.5216,100000.0
 All_NG,2020,0,4.4647,1950514555.0
-All_NG,2020,1,5.0709,inf
+All_NG,2020,1,5.0709,.
 All_NG,2030,0,5.925,2368354558.0
-All_NG,2030,1,5.925,inf
+All_NG,2030,1,5.925,.
 North_Bio,2020,0,1.7102,6864985.0
 North_Bio,2020,1,3.3941,6782413.0
 North_Bio,2030,0,2.0438,6064415.0
diff --git a/examples/planning_reserves/inputs/gen_build_predetermined.csv b/examples/planning_reserves/inputs/gen_build_predetermined.csv
index 1dbc91204..fc9182464 100644
--- a/examples/planning_reserves/inputs/gen_build_predetermined.csv
+++ b/examples/planning_reserves/inputs/gen_build_predetermined.csv
@@ -1,4 +1,4 @@
-GENERATION_PROJECT,build_year,gen_predetermined_cap
+GENERATION_PROJECT,build_year,build_gen_predetermined
 N-Coal_ST,1995,2
 N-Geothermal,2000,1
 N-NG_CC,2008,2
diff --git a/examples/planning_reserves/inputs/generation_projects_info.csv b/examples/planning_reserves/inputs/gen_info.csv
similarity index 100%
rename from examples/planning_reserves/inputs/generation_projects_info.csv
rename to examples/planning_reserves/inputs/gen_info.csv
diff --git a/examples/planning_reserves/inputs/switch_inputs_version.txt b/examples/planning_reserves/inputs/switch_inputs_version.txt
index 157e54f3e..f1547e6d1 100644
--- a/examples/planning_reserves/inputs/switch_inputs_version.txt
+++ b/examples/planning_reserves/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.6
+2.0.7
diff --git a/examples/planning_reserves/outputs/total_cost.txt b/examples/planning_reserves/outputs/total_cost.txt
index 1058d1428..03dcd0614 100644
--- a/examples/planning_reserves/outputs/total_cost.txt
+++ b/examples/planning_reserves/outputs/total_cost.txt
@@ -1 +1 @@
-135901915.109
+135901915.10941112
diff --git a/examples/production_cost_models/1plant/inputs/gen_build_predetermined.csv b/examples/production_cost_models/1plant/inputs/gen_build_predetermined.csv
index ada550094..5a735df72 100644
--- a/examples/production_cost_models/1plant/inputs/gen_build_predetermined.csv
+++ b/examples/production_cost_models/1plant/inputs/gen_build_predetermined.csv
@@ -1,2 +1,2 @@
-GENERATION_PROJECT,build_year,gen_predetermined_cap
+GENERATION_PROJECT,build_year,build_gen_predetermined
 S-NG_CC,2000,9
diff --git a/examples/production_cost_models/1plant/inputs/generation_projects_info.csv b/examples/production_cost_models/1plant/inputs/gen_info.csv
similarity index 100%
rename from examples/production_cost_models/1plant/inputs/generation_projects_info.csv
rename to examples/production_cost_models/1plant/inputs/gen_info.csv
diff --git a/examples/production_cost_models/1plant/inputs/switch_inputs_version.txt b/examples/production_cost_models/1plant/inputs/switch_inputs_version.txt
index 157e54f3e..f1547e6d1 100644
--- a/examples/production_cost_models/1plant/inputs/switch_inputs_version.txt
+++ b/examples/production_cost_models/1plant/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.6
+2.0.7
diff --git a/examples/production_cost_models/1plant/outputs/total_cost.txt b/examples/production_cost_models/1plant/outputs/total_cost.txt
index e94a53ab4..2f3ab097c 100644
--- a/examples/production_cost_models/1plant/outputs/total_cost.txt
+++ b/examples/production_cost_models/1plant/outputs/total_cost.txt
@@ -1 +1 @@
-28415619.1709
+28415619.170866918
diff --git a/examples/production_cost_models/3plants/inputs/gen_build_predetermined.csv b/examples/production_cost_models/3plants/inputs/gen_build_predetermined.csv
index c27ab15b3..a1a9a59f6 100644
--- a/examples/production_cost_models/3plants/inputs/gen_build_predetermined.csv
+++ b/examples/production_cost_models/3plants/inputs/gen_build_predetermined.csv
@@ -1,4 +1,4 @@
-GENERATION_PROJECT,build_year,gen_predetermined_cap
+GENERATION_PROJECT,build_year,build_gen_predetermined
 S-Geothermal,1998,3
 S-NG_CC,2000,5
 S-NG_GT,1990,3
diff --git a/examples/production_cost_models/3plants/inputs/generation_projects_info.csv b/examples/production_cost_models/3plants/inputs/gen_info.csv
similarity index 100%
rename from examples/production_cost_models/3plants/inputs/generation_projects_info.csv
rename to examples/production_cost_models/3plants/inputs/gen_info.csv
diff --git a/examples/production_cost_models/3plants/inputs/switch_inputs_version.txt b/examples/production_cost_models/3plants/inputs/switch_inputs_version.txt
index 157e54f3e..f1547e6d1 100644
--- a/examples/production_cost_models/3plants/inputs/switch_inputs_version.txt
+++ b/examples/production_cost_models/3plants/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.6
+2.0.7
diff --git a/examples/production_cost_models/3plants/outputs/total_cost.txt b/examples/production_cost_models/3plants/outputs/total_cost.txt
index 9740cc06b..eb617f6b7 100644
--- a/examples/production_cost_models/3plants/outputs/total_cost.txt
+++ b/examples/production_cost_models/3plants/outputs/total_cost.txt
@@ -1 +1 @@
-37439925.1309
+37439925.13089197
diff --git a/examples/production_cost_models/4plants/inputs/gen_build_predetermined.csv b/examples/production_cost_models/4plants/inputs/gen_build_predetermined.csv
index 5be435c9b..f79cdaff7 100644
--- a/examples/production_cost_models/4plants/inputs/gen_build_predetermined.csv
+++ b/examples/production_cost_models/4plants/inputs/gen_build_predetermined.csv
@@ -1,4 +1,4 @@
-GENERATION_PROJECT,build_year,gen_predetermined_cap
+GENERATION_PROJECT,build_year,build_gen_predetermined
 S-Geothermal,1998.0,2.0
 S-NG_CC,2000.0,5.0
 S-NG_GT,1990.0,3.0
diff --git a/examples/production_cost_models/4plants/inputs/generation_projects_info.csv b/examples/production_cost_models/4plants/inputs/gen_info.csv
similarity index 100%
rename from examples/production_cost_models/4plants/inputs/generation_projects_info.csv
rename to examples/production_cost_models/4plants/inputs/gen_info.csv
diff --git a/examples/production_cost_models/4plants/inputs/switch_inputs_version.txt b/examples/production_cost_models/4plants/inputs/switch_inputs_version.txt
index 157e54f3e..f1547e6d1 100644
--- a/examples/production_cost_models/4plants/inputs/switch_inputs_version.txt
+++ b/examples/production_cost_models/4plants/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.6
+2.0.7
diff --git a/examples/production_cost_models/4plants/outputs/total_cost.txt b/examples/production_cost_models/4plants/outputs/total_cost.txt
index ee7e4ee32..da9b76c01 100644
--- a/examples/production_cost_models/4plants/outputs/total_cost.txt
+++ b/examples/production_cost_models/4plants/outputs/total_cost.txt
@@ -1 +1 @@
-34328152.2273
+34328152.227295786
diff --git a/examples/production_cost_models/4plants_with_unserved_load/inputs/gen_build_predetermined.csv b/examples/production_cost_models/4plants_with_unserved_load/inputs/gen_build_predetermined.csv
index 5be435c9b..f79cdaff7 100644
--- a/examples/production_cost_models/4plants_with_unserved_load/inputs/gen_build_predetermined.csv
+++ b/examples/production_cost_models/4plants_with_unserved_load/inputs/gen_build_predetermined.csv
@@ -1,4 +1,4 @@
-GENERATION_PROJECT,build_year,gen_predetermined_cap
+GENERATION_PROJECT,build_year,build_gen_predetermined
 S-Geothermal,1998.0,2.0
 S-NG_CC,2000.0,5.0
 S-NG_GT,1990.0,3.0
diff --git a/examples/production_cost_models/4plants_with_unserved_load/inputs/generation_projects_info.csv b/examples/production_cost_models/4plants_with_unserved_load/inputs/gen_info.csv
similarity index 100%
rename from examples/production_cost_models/4plants_with_unserved_load/inputs/generation_projects_info.csv
rename to examples/production_cost_models/4plants_with_unserved_load/inputs/gen_info.csv
diff --git a/examples/production_cost_models/4plants_with_unserved_load/inputs/switch_inputs_version.txt b/examples/production_cost_models/4plants_with_unserved_load/inputs/switch_inputs_version.txt
index 157e54f3e..f1547e6d1 100644
--- a/examples/production_cost_models/4plants_with_unserved_load/inputs/switch_inputs_version.txt
+++ b/examples/production_cost_models/4plants_with_unserved_load/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.6
+2.0.7
diff --git a/examples/production_cost_models/4plants_with_unserved_load/outputs/total_cost.txt b/examples/production_cost_models/4plants_with_unserved_load/outputs/total_cost.txt
index 6221598a7..e792c1b43 100644
--- a/examples/production_cost_models/4plants_with_unserved_load/outputs/total_cost.txt
+++ b/examples/production_cost_models/4plants_with_unserved_load/outputs/total_cost.txt
@@ -1 +1 @@
-34069676.0948
+34034308.6269113
diff --git a/examples/production_cost_models/discrete_unit_commit/inputs/gen_build_predetermined.csv b/examples/production_cost_models/discrete_unit_commit/inputs/gen_build_predetermined.csv
index 5be435c9b..f79cdaff7 100644
--- a/examples/production_cost_models/discrete_unit_commit/inputs/gen_build_predetermined.csv
+++ b/examples/production_cost_models/discrete_unit_commit/inputs/gen_build_predetermined.csv
@@ -1,4 +1,4 @@
-GENERATION_PROJECT,build_year,gen_predetermined_cap
+GENERATION_PROJECT,build_year,build_gen_predetermined
 S-Geothermal,1998.0,2.0
 S-NG_CC,2000.0,5.0
 S-NG_GT,1990.0,3.0
diff --git a/examples/production_cost_models/discrete_unit_commit/inputs/generation_projects_info.csv b/examples/production_cost_models/discrete_unit_commit/inputs/gen_info.csv
similarity index 100%
rename from examples/production_cost_models/discrete_unit_commit/inputs/generation_projects_info.csv
rename to examples/production_cost_models/discrete_unit_commit/inputs/gen_info.csv
diff --git a/examples/production_cost_models/discrete_unit_commit/inputs/switch_inputs_version.txt b/examples/production_cost_models/discrete_unit_commit/inputs/switch_inputs_version.txt
index 157e54f3e..f1547e6d1 100644
--- a/examples/production_cost_models/discrete_unit_commit/inputs/switch_inputs_version.txt
+++ b/examples/production_cost_models/discrete_unit_commit/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.6
+2.0.7
diff --git a/examples/production_cost_models/discrete_unit_commit/outputs/total_cost.txt b/examples/production_cost_models/discrete_unit_commit/outputs/total_cost.txt
index 973e8d511..627684725 100644
--- a/examples/production_cost_models/discrete_unit_commit/outputs/total_cost.txt
+++ b/examples/production_cost_models/discrete_unit_commit/outputs/total_cost.txt
@@ -1 +1 @@
-34524511.6092
+34524511.60922097
diff --git a/examples/production_cost_models/spinning_reserves/inputs/gen_build_predetermined.csv b/examples/production_cost_models/spinning_reserves/inputs/gen_build_predetermined.csv
index 0ba241572..0fb41b583 100644
--- a/examples/production_cost_models/spinning_reserves/inputs/gen_build_predetermined.csv
+++ b/examples/production_cost_models/spinning_reserves/inputs/gen_build_predetermined.csv
@@ -1,4 +1,4 @@
-GENERATION_PROJECT,build_year,gen_predetermined_cap
+GENERATION_PROJECT,build_year,build_gen_predetermined
 S-Geothermal,1998,2.0
 S-NG_CC,2000,7.0
 S-NG_GT,1990,3.0
diff --git a/examples/production_cost_models/spinning_reserves/inputs/generation_projects_info.csv b/examples/production_cost_models/spinning_reserves/inputs/gen_info.csv
similarity index 100%
rename from examples/production_cost_models/spinning_reserves/inputs/generation_projects_info.csv
rename to examples/production_cost_models/spinning_reserves/inputs/gen_info.csv
diff --git a/examples/production_cost_models/spinning_reserves/inputs/switch_inputs_version.txt b/examples/production_cost_models/spinning_reserves/inputs/switch_inputs_version.txt
index 157e54f3e..f1547e6d1 100644
--- a/examples/production_cost_models/spinning_reserves/inputs/switch_inputs_version.txt
+++ b/examples/production_cost_models/spinning_reserves/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.6
+2.0.7
diff --git a/examples/production_cost_models/spinning_reserves/outputs/total_cost.txt b/examples/production_cost_models/spinning_reserves/outputs/total_cost.txt
index c3df921a6..d7d9a1f62 100644
--- a/examples/production_cost_models/spinning_reserves/outputs/total_cost.txt
+++ b/examples/production_cost_models/spinning_reserves/outputs/total_cost.txt
@@ -1 +1 @@
-36406402.3685
+36406402.36853148
diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/gen_build_predetermined.csv b/examples/production_cost_models/spinning_reserves_advanced/inputs/gen_build_predetermined.csv
index 0ba241572..0fb41b583 100644
--- a/examples/production_cost_models/spinning_reserves_advanced/inputs/gen_build_predetermined.csv
+++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/gen_build_predetermined.csv
@@ -1,4 +1,4 @@
-GENERATION_PROJECT,build_year,gen_predetermined_cap
+GENERATION_PROJECT,build_year,build_gen_predetermined
 S-Geothermal,1998,2.0
 S-NG_CC,2000,7.0
 S-NG_GT,1990,3.0
diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/generation_projects_info.csv b/examples/production_cost_models/spinning_reserves_advanced/inputs/gen_info.csv
similarity index 100%
rename from examples/production_cost_models/spinning_reserves_advanced/inputs/generation_projects_info.csv
rename to examples/production_cost_models/spinning_reserves_advanced/inputs/gen_info.csv
diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/switch_inputs_version.txt b/examples/production_cost_models/spinning_reserves_advanced/inputs/switch_inputs_version.txt
index 157e54f3e..f1547e6d1 100644
--- a/examples/production_cost_models/spinning_reserves_advanced/inputs/switch_inputs_version.txt
+++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.6
+2.0.7
diff --git
a/examples/production_cost_models/spinning_reserves_advanced/outputs/total_cost.txt b/examples/production_cost_models/spinning_reserves_advanced/outputs/total_cost.txt index c3df921a6..d7d9a1f62 100644 --- a/examples/production_cost_models/spinning_reserves_advanced/outputs/total_cost.txt +++ b/examples/production_cost_models/spinning_reserves_advanced/outputs/total_cost.txt @@ -1 +1 @@ -36406402.3685 +36406402.36853148 diff --git a/examples/production_cost_models/unit_commit/inputs/gen_build_predetermined.csv b/examples/production_cost_models/unit_commit/inputs/gen_build_predetermined.csv index 5be435c9b..f79cdaff7 100644 --- a/examples/production_cost_models/unit_commit/inputs/gen_build_predetermined.csv +++ b/examples/production_cost_models/unit_commit/inputs/gen_build_predetermined.csv @@ -1,4 +1,4 @@ -GENERATION_PROJECT,build_year,gen_predetermined_cap +GENERATION_PROJECT,build_year,build_gen_predetermined S-Geothermal,1998.0,2.0 S-NG_CC,2000.0,5.0 S-NG_GT,1990.0,3.0 diff --git a/examples/production_cost_models/unit_commit/inputs/generation_projects_info.csv b/examples/production_cost_models/unit_commit/inputs/gen_info.csv similarity index 100% rename from examples/production_cost_models/unit_commit/inputs/generation_projects_info.csv rename to examples/production_cost_models/unit_commit/inputs/gen_info.csv diff --git a/examples/production_cost_models/unit_commit/inputs/switch_inputs_version.txt b/examples/production_cost_models/unit_commit/inputs/switch_inputs_version.txt index 157e54f3e..f1547e6d1 100644 --- a/examples/production_cost_models/unit_commit/inputs/switch_inputs_version.txt +++ b/examples/production_cost_models/unit_commit/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.6 +2.0.7 diff --git a/examples/production_cost_models/unit_commit/outputs/load_balance.txt b/examples/production_cost_models/unit_commit/outputs/load_balance.txt deleted file mode 100644 index cf9294cd4..000000000 --- a/examples/production_cost_models/unit_commit/outputs/load_balance.txt +++ /dev/null @@ -1,5 +0,0 @@ -load_zone timestamp LZ_NetDispatch lz_demand_mw DumpPower distribution_losses -South 2010011500 3.6376723 3 0.478672300000001 0.159 -South 2010011506 8.424 8 0.0 0.424 -South 2010011512 10.530000000000001 10 0.0 0.53 -South 2010011518 7.371 7 0.0 0.371 diff --git a/examples/production_cost_models/unit_commit/outputs/total_cost.txt b/examples/production_cost_models/unit_commit/outputs/total_cost.txt index f25a42166..d69182935 100644 --- a/examples/production_cost_models/unit_commit/outputs/total_cost.txt +++ b/examples/production_cost_models/unit_commit/outputs/total_cost.txt @@ -1 +1 @@ -35131346.3429 +35131346.34291913 diff --git a/examples/rps_simple/inputs/balancing_areas.csv b/examples/rps_simple/inputs/balancing_areas.csv deleted file mode 100644 index b3d2ee48b..000000000 --- a/examples/rps_simple/inputs/balancing_areas.csv +++ /dev/null @@ -1,3 +0,0 @@ -BALANCING_AREAS,quickstart_res_load_frac,quickstart_res_wind_frac,quickstart_res_solar_frac,spinning_res_load_frac,spinning_res_wind_frac,spinning_res_solar_frac -NorthCentral,0.04,0.05,0.05,0.02,0.05,0.05 -South,0.04,0.06,0.06,0.02,0.07,0.07 diff --git a/examples/rps_simple/inputs/fuel_supply_curves.csv b/examples/rps_simple/inputs/fuel_supply_curves.csv index 747e53b66..a799d5e8f 100644 --- a/examples/rps_simple/inputs/fuel_supply_curves.csv +++ b/examples/rps_simple/inputs/fuel_supply_curves.csv @@ -1,10 +1,10 @@ regional_fuel_market,period,tier,unit_cost,max_avail_at_cost -All_DistOil,2020,0,21.9802,inf 
+All_DistOil,2020,0,21.9802,. All_DistOil,2030,0,24.5216,100000.0 All_NG,2020,0,4.4647,1950514555.0 -All_NG,2020,1,5.0709,inf +All_NG,2020,1,5.0709,. All_NG,2030,0,5.925,2368354558.0 -All_NG,2030,1,5.925,inf +All_NG,2030,1,5.925,. North_Bio,2020,0,1.7102,6864985.0 North_Bio,2020,1,3.3941,6782413.0 North_Bio,2030,0,2.0438,6064415.0 diff --git a/examples/rps_simple/inputs/gen_build_predetermined.csv b/examples/rps_simple/inputs/gen_build_predetermined.csv index 1dbc91204..fc9182464 100644 --- a/examples/rps_simple/inputs/gen_build_predetermined.csv +++ b/examples/rps_simple/inputs/gen_build_predetermined.csv @@ -1,4 +1,4 @@ -GENERATION_PROJECT,build_year,gen_predetermined_cap +GENERATION_PROJECT,build_year,build_gen_predetermined N-Coal_ST,1995,2 N-Geothermal,2000,1 N-NG_CC,2008,2 diff --git a/examples/rps_simple/inputs/generation_projects_info.csv b/examples/rps_simple/inputs/gen_info.csv similarity index 100% rename from examples/rps_simple/inputs/generation_projects_info.csv rename to examples/rps_simple/inputs/gen_info.csv diff --git a/examples/rps_simple/inputs/switch_inputs_version.txt b/examples/rps_simple/inputs/switch_inputs_version.txt index 157e54f3e..f1547e6d1 100644 --- a/examples/rps_simple/inputs/switch_inputs_version.txt +++ b/examples/rps_simple/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.6 +2.0.7 diff --git a/examples/rps_simple/outputs/total_cost.txt b/examples/rps_simple/outputs/total_cost.txt index 1f97e0ad2..56fcdbf08 100644 --- a/examples/rps_simple/outputs/total_cost.txt +++ b/examples/rps_simple/outputs/total_cost.txt @@ -1 +1 @@ -140343019.693 +140343019.69266725 diff --git a/examples/storage/inputs/gen_build_costs.csv b/examples/storage/inputs/gen_build_costs.csv index 0a8f690d1..43608acff 100644 --- a/examples/storage/inputs/gen_build_costs.csv +++ b/examples/storage/inputs/gen_build_costs.csv @@ -4,3 +4,4 @@ S-Geothermal,1998,5524200.0,0.0,. S-Geothermal,2020,5524200.0,0.0,. S-Central_PV-1,2020,2334300.0,41850.0,. Battery_Storage,2020,10000.0,100.0,1000.0 +Battery_Storage,2010,10000.0,100.0,1000.0 diff --git a/examples/storage/inputs/gen_build_predetermined.csv b/examples/storage/inputs/gen_build_predetermined.csv index 3925858cb..c5ff177d0 100644 --- a/examples/storage/inputs/gen_build_predetermined.csv +++ b/examples/storage/inputs/gen_build_predetermined.csv @@ -1,3 +1,4 @@ -GENERATION_PROJECT,build_year,gen_predetermined_cap -S-Central_PV-1,2000,1 -S-Geothermal,1998,1 +GENERATION_PROJECT,build_year,build_gen_predetermined,build_gen_energy_predetermined +S-Central_PV-1,2000,1,. +S-Geothermal,1998,1,. 
+Battery_Storage,2010,1,4.0 diff --git a/examples/storage/inputs/generation_projects_info.csv b/examples/storage/inputs/gen_info.csv similarity index 100% rename from examples/storage/inputs/generation_projects_info.csv rename to examples/storage/inputs/gen_info.csv diff --git a/examples/storage/inputs/switch_inputs_version.txt b/examples/storage/inputs/switch_inputs_version.txt index 157e54f3e..f1547e6d1 100644 --- a/examples/storage/inputs/switch_inputs_version.txt +++ b/examples/storage/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.6 +2.0.7 diff --git a/examples/storage/outputs/BuildGen.tab b/examples/storage/outputs/BuildGen.tab deleted file mode 100644 index 914111b3f..000000000 --- a/examples/storage/outputs/BuildGen.tab +++ /dev/null @@ -1,6 +0,0 @@ -GEN_BLD_YRS_1 GEN_BLD_YRS_2 BuildGen -Battery_Storage 2020 4.02460641399417 -S-Geothermal 1998 1.0 -S-Geothermal 2020 3.58827442700496 -S-Central_PV-1 2020 0.0 -S-Central_PV-1 2000 1.0 diff --git a/examples/storage/outputs/BuildMinGenCap.tab b/examples/storage/outputs/BuildMinGenCap.tab deleted file mode 100644 index c8e45c155..000000000 --- a/examples/storage/outputs/BuildMinGenCap.tab +++ /dev/null @@ -1 +0,0 @@ -NEW_GEN_WITH_MIN_BUILD_YEARS_1 NEW_GEN_WITH_MIN_BUILD_YEARS_2 BuildMinGenCap diff --git a/examples/storage/outputs/BuildStorageEnergy.tab b/examples/storage/outputs/BuildStorageEnergy.tab deleted file mode 100644 index f4071b998..000000000 --- a/examples/storage/outputs/BuildStorageEnergy.tab +++ /dev/null @@ -1,2 +0,0 @@ -STORAGE_GEN_BLD_YRS_1 STORAGE_GEN_BLD_YRS_2 BuildStorageEnergy -Battery_Storage 2020 35.4970285714286 diff --git a/examples/storage/outputs/ChargeStorage.tab b/examples/storage/outputs/ChargeStorage.tab deleted file mode 100644 index 27685e3be..000000000 --- a/examples/storage/outputs/ChargeStorage.tab +++ /dev/null @@ -1,3 +0,0 @@ -STORAGE_GEN_TPS_1 STORAGE_GEN_TPS_2 ChargeStorage -Battery_Storage 1 0.0 -Battery_Storage 2 3.94411428571429 diff --git a/examples/storage/outputs/DispatchBaseloadByPeriod.tab b/examples/storage/outputs/DispatchBaseloadByPeriod.tab deleted file mode 100644 index 074caa7d0..000000000 --- a/examples/storage/outputs/DispatchBaseloadByPeriod.tab +++ /dev/null @@ -1,2 +0,0 @@ -DispatchBaseloadByPeriod_index_1 DispatchBaseloadByPeriod_index_2 DispatchBaseloadByPeriod -S-Geothermal 2020 4.44411428571429 diff --git a/examples/storage/outputs/DispatchGen.tab b/examples/storage/outputs/DispatchGen.tab deleted file mode 100644 index 2ffa0f993..000000000 --- a/examples/storage/outputs/DispatchGen.tab +++ /dev/null @@ -1,7 +0,0 @@ -GEN_TPS_1 GEN_TPS_2 DispatchGen -S-Geothermal 2 4.44411428571429 -S-Geothermal 1 4.44411428571429 -S-Central_PV-1 2 0.0 -Battery_Storage 1 2.95808571428571 -S-Central_PV-1 1 0.5978 -Battery_Storage 2 0.0 diff --git a/examples/storage/outputs/GenFuelUseRate.tab b/examples/storage/outputs/GenFuelUseRate.tab deleted file mode 100644 index b82bc6b09..000000000 --- a/examples/storage/outputs/GenFuelUseRate.tab +++ /dev/null @@ -1 +0,0 @@ -GEN_TP_FUELS_1 GEN_TP_FUELS_2 GEN_TP_FUELS_3 GenFuelUseRate diff --git a/examples/storage/outputs/StateOfCharge.tab b/examples/storage/outputs/StateOfCharge.tab deleted file mode 100644 index faf3e3d56..000000000 --- a/examples/storage/outputs/StateOfCharge.tab +++ /dev/null @@ -1,3 +0,0 @@ -STORAGE_GEN_TPS_1 STORAGE_GEN_TPS_2 StateOfCharge -Battery_Storage 1 0.0 -Battery_Storage 2 35.4970285714286 diff --git a/examples/storage/outputs/costs_itemized.csv b/examples/storage/outputs/costs_itemized.csv deleted file mode 100644 index 
a97969c06..000000000 --- a/examples/storage/outputs/costs_itemized.csv +++ /dev/null @@ -1,5 +0,0 @@ -PERIOD,Component,AnnualCost_NPV,AnnualCost_Real,Component_type -2020,TotalGenFixedCosts,16582355.87,2367608.77112,annual -2020,StorageEnergyInstallCosts,35397.2613454,5053.97828224,annual -2020,GenVariableOMCostsInTP,7867155.78173,1123263.01393,timepoint -2020,FuelCostsPerTP,0.0,0.0,timepoint diff --git a/examples/storage/outputs/dispatch-wide.txt b/examples/storage/outputs/dispatch-wide.txt deleted file mode 100644 index 7b03161d4..000000000 --- a/examples/storage/outputs/dispatch-wide.txt +++ /dev/null @@ -1,3 +0,0 @@ -timestamp Battery_Storage S-Central_PV-1 S-Geothermal -2025011512 2.95809 0.5978 4.44411 -2025011600 0 0 4.44411 diff --git a/examples/storage/outputs/dispatch.csv b/examples/storage/outputs/dispatch.csv deleted file mode 100644 index c7c17058a..000000000 --- a/examples/storage/outputs/dispatch.csv +++ /dev/null @@ -1,7 +0,0 @@ -generation_project,timestamp,DispatchEmissions_tCO2_per_typical_yr,DispatchGen_MW,Energy_GWh_typical_yr,VariableCost_per_yr,gen_dbid,gen_energy_source,gen_load_zone,gen_tech,period,tp_weight_in_year_hrs -S-Geothermal,2025011600,0,4.44411428571429,19.478552914285736,561566.6805188577,33.0,Geothermal,South,Geothermal,2020,4383.0 -S-Geothermal,2025011512,0,4.44411428571429,19.478552914285736,561566.6805188577,33.0,Geothermal,South,Geothermal,2020,4383.0 -S-Central_PV-1,2025011600,0,0.0,0.0,0.0,41.0,Solar,South,Central_PV,2020,4383.0 -Battery_Storage,2025011512,0,2.95808571428571,12.965289685714268,129.65289685714268,Battery_Storage,Electricity,South,Battery_Storage,2020,4383.0 -S-Central_PV-1,2025011512,0,0.5978,2.6201574,0.0,41.0,Solar,South,Central_PV,2020,4383.0 -Battery_Storage,2025011600,0,0.0,0.0,0.0,Battery_Storage,Electricity,South,Battery_Storage,2020,4383.0 diff --git a/examples/storage/outputs/dispatch_annual_summary.csv b/examples/storage/outputs/dispatch_annual_summary.csv deleted file mode 100644 index 5cb9777d8..000000000 --- a/examples/storage/outputs/dispatch_annual_summary.csv +++ /dev/null @@ -1,4 +0,0 @@ -gen_tech,gen_energy_source,period,Energy_GWh_typical_yr,VariableCost_per_yr,DispatchEmissions_tCO2_per_typical_yr -Battery_Storage,Electricity,2020,12.965289685714268,129.65289685714268,0 -Central_PV,Solar,2020,2.6201574,0.0,0 -Geothermal,Geothermal,2020,38.95710582857147,1123133.3610377153,0 diff --git a/examples/storage/outputs/dispatch_zonal_annual_summary.csv b/examples/storage/outputs/dispatch_zonal_annual_summary.csv deleted file mode 100644 index fa537899a..000000000 --- a/examples/storage/outputs/dispatch_zonal_annual_summary.csv +++ /dev/null @@ -1,4 +0,0 @@ -gen_tech,gen_load_zone,gen_energy_source,period,Energy_GWh_typical_yr,VariableCost_per_yr,DispatchEmissions_tCO2_per_typical_yr -Battery_Storage,South,Electricity,2020,12.965289685714268,129.65289685714268,0 -Central_PV,South,Solar,2020,2.6201574,0.0,0 -Geothermal,South,Geothermal,2020,38.95710582857147,1123133.3610377153,0 diff --git a/examples/storage/outputs/electricity_cost.csv b/examples/storage/outputs/electricity_cost.csv deleted file mode 100644 index f88f20123..000000000 --- a/examples/storage/outputs/electricity_cost.csv +++ /dev/null @@ -1,2 +0,0 @@ -PERIOD,EnergyCostReal_per_MWh,SystemCostPerPeriod_NPV,SystemCostPerPeriod_Real,SystemDemand_MWh -2020,9.3836501009,24484908.913,3495925.76334,372555.0 diff --git a/examples/storage/outputs/gen_cap.txt b/examples/storage/outputs/gen_cap.txt deleted file mode 100644 index b90584f09..000000000 --- 
a/examples/storage/outputs/gen_cap.txt +++ /dev/null @@ -1,4 +0,0 @@ -GENERATION_PROJECT PERIOD gen_tech gen_load_zone gen_energy_source GenCapacity GenCapitalCosts GenFixedOMCosts -Battery_Storage 2020 Battery_Storage South Electricity 4.02461 5730.71 402.461 -S-Central_PV-1 2020 Central_PV South Solar 1 227410 41850 -S-Geothermal 2020 Geothermal South Geothermal 4.58827 2.09222e+06 0 diff --git a/examples/storage/outputs/load_balance.txt b/examples/storage/outputs/load_balance.txt deleted file mode 100644 index 28ac334e7..000000000 --- a/examples/storage/outputs/load_balance.txt +++ /dev/null @@ -1,3 +0,0 @@ -load_zone timestamp ZoneTotalCentralDispatch ZoneTotalDistributedDispatch zone_demand_mw StorageNetCharge -South 2025011512 8 0 8 0 -South 2025011600 4.44411 0 0.5 3.94411 diff --git a/examples/storage/outputs/results.pickle b/examples/storage/outputs/results.pickle deleted file mode 100644 index 5439b604f..000000000 Binary files a/examples/storage/outputs/results.pickle and /dev/null differ diff --git a/examples/storage/outputs/storage_builds.txt b/examples/storage/outputs/storage_builds.txt deleted file mode 100644 index 4d10d9abd..000000000 --- a/examples/storage/outputs/storage_builds.txt +++ /dev/null @@ -1,2 +0,0 @@ -project period load_zone IncrementalPowerCapacityMW IncrementalEnergyCapacityMWh OnlinePowerCapacityMW OnlineEnergyCapacityMWh -Battery_Storage 2020 South 4.02461 35.497 4.02461 35.497 diff --git a/examples/storage/outputs/storage_dispatch.txt b/examples/storage/outputs/storage_dispatch.txt deleted file mode 100644 index 978444337..000000000 --- a/examples/storage/outputs/storage_dispatch.txt +++ /dev/null @@ -1,3 +0,0 @@ -project timepoint load_zone ChargeMW DischargeMW StateOfCharge -Battery_Storage 2025011512 South 0 2.95809 0 -Battery_Storage 2025011600 South 3.94411 0 35.497 diff --git a/examples/storage/outputs/total_cost.txt b/examples/storage/outputs/total_cost.txt index 39d8cdcbd..5cf7013c1 100644 --- a/examples/storage/outputs/total_cost.txt +++ b/examples/storage/outputs/total_cost.txt @@ -1 +1 @@ -24484908.913 +24484908.913032416 diff --git a/run_tests.py b/run_tests.py index a39cdc11c..b59eab943 100755 --- a/run_tests.py +++ b/run_tests.py @@ -13,18 +13,19 @@ class TestLoader(unittest.TestLoader): # unittest.main does not allow multiple "--start-directory" # options, but we can make it scan multiple separate directories # by overriding discover(). This allows us to have a "tests" - # directory that's separate from "switch_mod". + # directory that's separate from "switch_model". # # We don't want to scan for *.py files in the parent directory in # case any of those are throwaway scripts that have unexpected # effects when imported. def discover(self, start_dir, pattern, top_level_dir): test_suite = unittest.TestSuite() - for subdir in ('switch_model', 'tests'): + for subdir in ("switch_model", "tests"): test_suite.addTests( super(TestLoader, self).discover( - os.path.join(top_level_dir, subdir), - pattern, top_level_dir)) + os.path.join(top_level_dir, subdir), pattern, top_level_dir + ) + ) return test_suite # The unittest module does not have built-in support for finding @@ -37,7 +38,7 @@ def loadTestsFromModule(self, module, **kwargs): if not docstring: # Work around a misfeature whereby doctest complains if a # module contains no docstrings. - module.__doc__ = 'Placeholder docstring' + module.__doc__ = "Placeholder docstring" test_suite.addTests(doctest.DocTestSuite(module)) if not docstring: # Restore the original, in case this matters. 
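For reference, here is a minimal self-contained sketch of the placeholder-docstring workaround used in loadTestsFromModule above. example_mod is a hypothetical stand-in module, not part of this patch; in the Python versions this release supports, doctest.DocTestSuite raises ValueError for a module that contains no docstrings, and the temporary placeholder avoids that:

import doctest
import types
import unittest

# Hypothetical stand-in for a real module that has no docstrings.
example_mod = types.ModuleType("example_mod")

suite = unittest.TestSuite()
docstring = example_mod.__doc__
if not docstring:
    # Work around the misfeature: DocTestSuite complains (ValueError)
    # if the module contains no docstrings at all.
    example_mod.__doc__ = "Placeholder docstring"
suite.addTests(doctest.DocTestSuite(example_mod))
if not docstring:
    example_mod.__doc__ = docstring  # restore the original
print("collected %d doctest cases" % suite.countTestCases())  # no ValueError raised

Restoring the original docstring afterward keeps the trick from leaking into any other code that inspects module docstrings.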
@@ -48,12 +49,16 @@ def loadTestsFromModule(self, module, **kwargs): def main(): script_dir = os.path.join(os.getcwd(), os.path.dirname(__file__)) # print('old argv: {}'.format(sys.argv)) - argv = [sys.argv[0], - 'discover', - '--top-level-directory', script_dir, - '--pattern', '*.py'] + sys.argv[1:] + argv = [ + sys.argv[0], + "discover", + "--top-level-directory", + script_dir, + "--pattern", + "*.py", + ] + sys.argv[1:] unittest.TestProgram(testLoader=TestLoader(), argv=argv, module=None) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/setup.py b/setup.py index 87b63dce5..b974aa6ef 100644 --- a/setup.py +++ b/setup.py @@ -16,75 +16,83 @@ from setuptools import setup, find_packages # Get the version number. Strategy #3 from https://packaging.python.org/single_source_version/ -version_path = os.path.join(os.path.dirname(__file__), 'switch_model', 'version.py') +version_path = os.path.join(os.path.dirname(__file__), "switch_model", "version.py") version = {} with open(version_path) as f: exec(f.read(), version) -__version__ = version['__version__'] +__version__ = version["__version__"] + def read(*rnames): return open(os.path.join(os.path.dirname(__file__), *rnames)).read() + setup( - name='switch_model', + name="switch_model", version=__version__, - maintainer='Switch Authors', - maintainer_email='authors@switch-model.org', - url='http://switch-model.org', - license='Apache License 2.0', + maintainer="Switch Authors", + maintainer_email="authors@switch-model.org", + url="http://switch-model.org", + license="Apache License 2.0", platforms=["any"], - description='Switch Power System Planning Model', - long_description=read('README'), + description="Switch Power System Planning Model", + long_description=read("README"), long_description_content_type="text/markdown", classifiers=[ # from https://pypi.org/classifiers/ - 'Development Status :: 5 - Production/Stable', - 'Environment :: Console', - 'Intended Audience :: Education', - 'Intended Audience :: End Users/Desktop', - 'Intended Audience :: Science/Research', - 'License :: OSI Approved :: Apache Software License', - 'Natural Language :: English', - 'Operating System :: Microsoft :: Windows', - 'Operating System :: MacOS :: MacOS X', - 'Operating System :: POSIX :: Linux', - 'Operating System :: Unix', - 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3', - 'Topic :: Scientific/Engineering', - 'Topic :: Software Development :: Libraries :: Python Modules' + "Development Status :: 5 - Production/Stable", + "Environment :: Console", + "Intended Audience :: Education", + "Intended Audience :: End Users/Desktop", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: Apache Software License", + "Natural Language :: English", + "Operating System :: Microsoft :: Windows", + "Operating System :: MacOS :: MacOS X", + "Operating System :: POSIX :: Linux", + "Operating System :: Unix", + "Programming Language :: Python :: 2.7", + "Programming Language :: Python :: 3", + "Topic :: Scientific/Engineering", + "Topic :: Software Development :: Libraries :: Python Modules", ], - packages=find_packages(include=['switch_model', 'switch_model.*']), + packages=find_packages(include=["switch_model", "switch_model.*"]), keywords=[ - 'renewable', 'power', 'energy', 'electricity', - 'production cost', 'capacity expansion', - 'planning', 'optimization' + "renewable", + "power", + "energy", + "electricity", + "production cost", + "capacity expansion", + "planning", + "optimization", ], - 
python_requires='>=2.7.12', + python_requires=">=3.7.0", install_requires=[ - # Pyomo 4.4.1+ works with glpk 4.60+ - 'Pyomo >=4.4.1, <=5.6.8', - # pyutilib 6.0 breaks compatibility, and earlier versions of Pyomo - # will cheerfully install it, so we explicitly block it - 'pyutilib <=5.7.3', - 'pint', # needed by Pyomo when we run our tests, but not included - 'testfixtures', # used for standard tests - 'pandas', # used for input upgrades and testing that functionality + # 4.4.1+ works with glpk 4.60+; 5.6.9 gives warning and 5.7 gives error + "Pyomo >=5.5.1, <=6.4.2", + # by default, incompatible 6.0 gets installed with Pyomo 5.6.* + "pyutilib >=5.6.3, <=6.0.0", + # needed by Pyomo when we run our tests, but not included + "pint", + # used for standard tests + "testfixtures", + # used for input upgrades and some reporting + "pandas", ], extras_require={ # packages used for advanced demand response, progressive hedging # note: rpy2 discontinued support for Python 2 as of rpy2 2.9.0 - 'advanced': [ - 'numpy', 'scipy', + "advanced": [ + "numpy", + "scipy", 'rpy2<2.9.0;python_version<"3.0"', 'rpy2;python_version>="3.0"', - 'sympy' + "sympy", ], - 'dev': ['ipdb'], - 'plotting': ['ggplot'], - 'database_access': ['psycopg2-binary'] - }, - entry_points={ - 'console_scripts': ['switch = switch_model.main:main'] + "dev": ["ipdb"], + "plotting": ["plotnine"], + "database_access": ["psycopg2-binary"], }, + entry_points={"console_scripts": ["switch = switch_model.main:main"]}, ) diff --git a/switch_model/__init__.py b/switch_model/__init__.py index 66804cc85..f9ae223c1 100644 --- a/switch_model/__init__.py +++ b/switch_model/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2015-2019 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. """ @@ -18,10 +18,12 @@ transmission, local_td, reserves, etc. 
""" from .version import __version__ + core_modules = [ - 'switch_model.timescales', - 'switch_model.financials', - 'switch_model.balancing.load_zones', - 'switch_model.energy_sources.properties', - 'switch_model.generators.core', - 'switch_model.reporting'] + "switch_model.timescales", + "switch_model.financials", + "switch_model.balancing.load_zones", + "switch_model.energy_sources.properties", + "switch_model.generators.core", + "switch_model.reporting", +] diff --git a/switch_model/balancing/demand_response/iterative/__init__.py b/switch_model/balancing/demand_response/iterative/__init__.py index 8bc133efa..d05e6711d 100644 --- a/switch_model/balancing/demand_response/iterative/__init__.py +++ b/switch_model/balancing/demand_response/iterative/__init__.py @@ -24,6 +24,7 @@ import os, sys, time from pprint import pprint from pyomo.environ import * + try: from pyomo.repn import generate_standard_repn except ImportError: @@ -31,23 +32,54 @@ from pyomo.repn import generate_canonical_repn as generate_standard_repn import switch_model.utilities as utilities + # TODO: move part of the reporting back into Hawaii module and eliminate these dependencies from switch_model.hawaii.save_results import DispatchGenByFuel import switch_model.hawaii.util as util -demand_module = None # will be set via command-line options +demand_module = None # will be set via command-line options + def define_arguments(argparser): - argparser.add_argument("--dr-flat-pricing", action='store_true', default=False, - help="Charge a constant (average) price for electricity, rather than varying hour by hour") - argparser.add_argument("--dr-demand-module", default=None, + argparser.add_argument( + "--dr-flat-pricing", + action="store_true", + default=False, + help="Charge a constant (average) price for electricity, rather than varying hour by hour", + ) + argparser.add_argument( + "--dr-demand-module", + default=None, help="Name of module to use for demand-response bids. This should also be " "specified in the modules list, and should provide calibrate() and bid() functions. " "Pre-written options include constant_elasticity_demand_system or r_demand_system. " - "Specify one of these in the modules list and use --help again to see module-specific options.") + "Specify one of these in the modules list and use --help again to see module-specific options.", + ) + argparser.add_argument( + "--demand-response-reserve-types", + nargs="+", + default=[], + help="Type(s) of reserves to provide from demand response (e.g., 'contingency' or 'regulation'). " + "Specify 'none' to disable. Default is 'spinning' if an operating reserve module is used, " + "otherwise it is 'none'.", + ) + def define_components(m): + # load scipy.optimize; this is done here to avoid loading it during unit tests + try: + global scipy + import scipy.optimize + except ImportError: + print("=" * 80) + print( + "Unable to load scipy package, which is used by the demand response system." + ) + print("Please install this via 'conda install scipy' or 'pip install scipy'.") + print("=" * 80) + raise + ################### # Choose the right demand module. # NOTE: we assume only one model will be run at a time, so it's safe to store @@ -68,22 +100,10 @@ def define_components(m): "Demand module {mod} cannot be used because it has not been loaded. " "Please add this module to the modules list (usually modules.txt) " "or specify --include-module {mod} in options.txt, scenarios.txt or " - "on the command line." 
- .format(mod=m.options.dr_demand_module) + "on the command line.".format(mod=m.options.dr_demand_module) ) demand_module = sys.modules[m.options.dr_demand_module] - # load scipy.optimize for use later - try: - global scipy - import scipy.optimize - except ImportError: - print("="*80) - print("Unable to load scipy package, which is used by the demand response system.") - print("Please install this via 'conda install scipy' or 'pip install scipy'.") - print("="*80) - raise - # Make sure the model has dual and rc suffixes if not hasattr(m, "dual"): m.dual = Suffix(direction=Suffix.IMPORT) @@ -96,27 +116,33 @@ def define_components(m): ################## # cost per MWh for unserved load (high) - m.dr_unserved_load_penalty_per_mwh = Param(initialize=10000) + m.dr_unserved_load_penalty_per_mwh = Param( + initialize=10000, within=NonNegativeReals + ) # amount of unserved load during each timepoint m.DRUnservedLoad = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=NonNegativeReals) # total cost for unserved load - m.DR_Unserved_Load_Penalty = Expression(m.TIMEPOINTS, rule=lambda m, tp: - sum(m.DRUnservedLoad[z, tp] * m.dr_unserved_load_penalty_per_mwh for z in m.LOAD_ZONES) + m.DR_Unserved_Load_Penalty = Expression( + m.TIMEPOINTS, + rule=lambda m, tp: sum( + m.DRUnservedLoad[z, tp] * m.dr_unserved_load_penalty_per_mwh + for z in m.LOAD_ZONES + ), ) # add unserved load to the zonal energy balance - m.Zone_Power_Injections.append('DRUnservedLoad') + m.Zone_Power_Injections.append("DRUnservedLoad") # add the unserved load penalty to the model's objective function - m.Cost_Components_Per_TP.append('DR_Unserved_Load_Penalty') + m.Cost_Components_Per_TP.append("DR_Unserved_Load_Penalty") # list of products (commodities and reserves) that can be bought or sold - m.DR_PRODUCTS = Set(initialize=['energy', 'energy up', 'energy down']) + m.DR_PRODUCTS = Set(dimen=1, initialize=["energy", "energy up", "energy down"]) ################### # Price Responsive Demand bids ################## # list of all bids that have been received from the demand system - m.DR_BID_LIST = Set(initialize = [], ordered=True) + m.DR_BID_LIST = Set(dimen=1, initialize=[], ordered=True) # we need an explicit indexing set for everything that depends on DR_BID_LIST # so we can reconstruct it (and them) each time we add an element to DR_BID_LIST # (not needed, and actually doesn't work -- reconstruct() fails for sets) @@ -126,16 +152,34 @@ def define_components(m): # data for the individual bids; each load_zone gets one bid for each timeseries, # and each bid covers all the timepoints in that timeseries. So we just record # the bid for each timepoint for each load_zone. 
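+ # (the Params below are declared mutable so that new bid values can be assigned after the model is constructed, as fresh bids arrive each iteration)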
- m.dr_bid = Param(m.DR_BID_LIST, m.LOAD_ZONES, m.TIMEPOINTS, m.DR_PRODUCTS, mutable=True) + m.dr_bid = Param( + m.DR_BID_LIST, + m.LOAD_ZONES, + m.TIMEPOINTS, + m.DR_PRODUCTS, + mutable=True, + within=NonNegativeReals, + ) # price used to get this bid (only kept for reference) - m.dr_price = Param(m.DR_BID_LIST, m.LOAD_ZONES, m.TIMEPOINTS, m.DR_PRODUCTS, mutable=True) + m.dr_price = Param( + m.DR_BID_LIST, + m.LOAD_ZONES, + m.TIMEPOINTS, + m.DR_PRODUCTS, + mutable=True, + within=NonNegativeReals, + ) # the private benefit of serving each bid - m.dr_bid_benefit = Param(m.DR_BID_LIST, m.LOAD_ZONES, m.TIMESERIES, mutable=True) + m.dr_bid_benefit = Param( + m.DR_BID_LIST, m.LOAD_ZONES, m.TIMESERIES, mutable=True, within=NonNegativeReals + ) # weights to assign to the bids for each timeseries when constructing an optimal demand profile - m.DRBidWeight = Var(m.DR_BID_LIST, m.LOAD_ZONES, m.TIMESERIES, within=NonNegativeReals) + m.DRBidWeight = Var( + m.DR_BID_LIST, m.LOAD_ZONES, m.TIMESERIES, within=NonNegativeReals + ) # def DR_Convex_Bid_Weight_rule(m, z, ts): # if len(m.DR_BID_LIST) == 0: @@ -146,9 +190,12 @@ def define_components(m): # return (sum(m.DRBidWeight[b, z, ts] for b in m.DR_BID_LIST) == 1) # # choose a convex combination of bids for each zone and timeseries - m.DR_Convex_Bid_Weight = Constraint(m.LOAD_ZONES, m.TIMESERIES, rule=lambda m, z, ts: - Constraint.Skip if len(m.DR_BID_LIST) == 0 - else (sum(m.DRBidWeight[b, z, ts] for b in m.DR_BID_LIST) == 1) + m.DR_Convex_Bid_Weight = Constraint( + m.LOAD_ZONES, + m.TIMESERIES, + rule=lambda m, z, ts: Constraint.Skip + if len(m.DR_BID_LIST) == 0 + else (sum(m.DRBidWeight[b, z, ts] for b in m.DR_BID_LIST) == 1), ) # Since we don't have differentiated prices for each zone, we have to use the same @@ -157,8 +204,11 @@ def define_components(m): # Note: LOAD_ZONES is not an ordered set, so we have to use a trick to get a single # arbitrary one to refer to (list(m.LOAD_ZONES)[0] would also work). m.DR_Load_Zone_Shared_Bid_Weight = Constraint( - m.DR_BID_LIST, m.LOAD_ZONES, m.TIMESERIES, rule=lambda m, b, z, ts: - m.DRBidWeight[b, z, ts] == m.DRBidWeight[b, next(iter(m.LOAD_ZONES)), ts] + m.DR_BID_LIST, + m.LOAD_ZONES, + m.TIMESERIES, + rule=lambda m, b, z, ts: m.DRBidWeight[b, z, ts] + == m.DRBidWeight[b, next(iter(m.LOAD_ZONES)), ts], ) # For flat-price models, we have to use the same weight for all timeseries within the @@ -166,64 +216,56 @@ def define_components(m): # induce different adjustments in individual timeseries. 
if m.options.dr_flat_pricing: m.DR_Flat_Bid_Weight = Constraint( - m.DR_BID_LIST, m.LOAD_ZONES, m.TIMESERIES, rule=lambda m, b, z, ts: - m.DRBidWeight[b, z, ts] - == m.DRBidWeight[b, z, m.tp_ts[m.TPS_IN_PERIOD[m.ts_period[ts]].first()]] + m.DR_BID_LIST, + m.LOAD_ZONES, + m.TIMESERIES, + rule=lambda m, b, z, ts: m.DRBidWeight[b, z, ts] + == m.DRBidWeight[b, z, m.tp_ts[m.TPS_IN_PERIOD[m.ts_period[ts]].first()]], ) - # Optimal level of demand, calculated from available bids (negative, indicating consumption) - m.FlexibleDemand = Expression(m.LOAD_ZONES, m.TIMEPOINTS, + m.FlexibleDemand = Expression( + m.LOAD_ZONES, + m.TIMEPOINTS, rule=lambda m, z, tp: sum( - m.DRBidWeight[b, z, m.tp_ts[tp]] * m.dr_bid[b, z, tp, 'energy'] + m.DRBidWeight[b, z, m.tp_ts[tp]] * m.dr_bid[b, z, tp, "energy"] for b in m.DR_BID_LIST - ) + ), ) - # provide up and down reserves (from supply perspective, so "up" means less load) - # note: the bids are negative quantities, indicating _production_ of reserves; - # they contribute to the reserve requirement with opposite sign - m.DemandUpReserves = Expression(m.LOAD_ZONES, m.TIMEPOINTS, + + # calculate available slack from demand response for use as reserves (from + # supply perspective, so "up" means less load), then register spinning + # reserves + m.DemandUpReserveSales = Expression( + m.LOAD_ZONES, + m.TIMEPOINTS, rule=lambda m, z, tp: -sum( - m.DRBidWeight[b, z, m.tp_ts[tp]] * m.dr_bid[b, z, tp, 'energy up'] + m.DRBidWeight[b, z, m.tp_ts[tp]] * m.dr_bid[b, z, tp, "energy up"] for b in m.DR_BID_LIST - ) + ), ) - m.DemandDownReserves = Expression(m.LOAD_ZONES, m.TIMEPOINTS, + m.DemandDownReserveSales = Expression( + m.LOAD_ZONES, + m.TIMEPOINTS, rule=lambda m, z, tp: -sum( - m.DRBidWeight[b, z, m.tp_ts[tp]] * m.dr_bid[b, z, tp, 'energy down'] + m.DRBidWeight[b, z, m.tp_ts[tp]] * m.dr_bid[b, z, tp, "energy down"] for b in m.DR_BID_LIST - ) + ), ) - # Register with spinning reserves if it is available - if hasattr(m, 'Spinning_Reserve_Up_Provisions'): - m.DemandSpinningReserveUp = Expression( + if hasattr(m, "ZONES_IN_BALANCING_AREA"): + m.DemandResponseSlackUp = Expression( m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, b, t: sum( - m.DemandUpReserves[z, t] for z in m.ZONES_IN_BALANCING_AREA[b] - ) + rule=lambda m, ba, tp: sum( + m.DemandUpReserveSales[z, tp] for z in m.ZONES_IN_BALANCING_AREA[ba] + ), ) - m.Spinning_Reserve_Up_Provisions.append('DemandSpinningReserveUp') - - m.DemandSpinningReserveDown = Expression( + m.DemandResponseSlackDown = Expression( m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, b, t: sum( - m.DemandDownReserves[z, t] for z in m.ZONES_IN_BALANCING_AREA[b] - ) + rule=lambda m, ba, tp: sum( + m.DemandDownReserveSales[z, tp] for z in m.ZONES_IN_BALANCING_AREA[ba] + ), ) - m.Spinning_Reserve_Down_Provisions.append('DemandSpinningReserveDown') - if hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'): - # User has spacified advanced formulation with different reserve types. - # Code needs to be added to support this if needed (see simple.py - # for an example). This is not hard, but it gets messy to support - # both simple and advanced formulations. Eventually we should just - # standardize on the advanced formulation, and then the code will be - # fairly simple. - raise NotImplementedError( - "The {} module does not yet support provision of multiple reserve types. " - "Please contact the Switch team if you need this feature." 
- .format(__name__) - ) - + register_demand_response_reserves(m) # replace zone_demand_mw with FlexibleDemand in the energy balance constraint # note: the first two lines are simpler than the method I use, but my approach @@ -231,23 +273,28 @@ def define_components(m): # a certain ordering. # m.Zone_Power_Withdrawals.remove('zone_demand_mw') # m.Zone_Power_Withdrawals.append('FlexibleDemand') - idx = m.Zone_Power_Withdrawals.index('zone_demand_mw') - m.Zone_Power_Withdrawals[idx] = 'FlexibleDemand' + idx = m.Zone_Power_Withdrawals.index("zone_demand_mw") + m.Zone_Power_Withdrawals[idx] = "FlexibleDemand" # private benefit of the electricity consumption # (i.e., willingness to pay for the current electricity supply) # reported as negative cost, i.e., positive benefit # also divide by number of timepoints in the timeseries # to convert from a cost per timeseries to a cost per timepoint. - m.DR_Welfare_Cost = Expression(m.TIMEPOINTS, rule=lambda m, tp: - (-1.0) - * sum(m.DRBidWeight[b, z, m.tp_ts[tp]] * m.dr_bid_benefit[b, z, m.tp_ts[tp]] - for b in m.DR_BID_LIST for z in m.LOAD_ZONES) - * m.tp_duration_hrs[tp] / m.ts_num_tps[m.tp_ts[tp]] + m.DR_Welfare_Cost = Expression( + m.TIMEPOINTS, + rule=lambda m, tp: (-1.0) + * sum( + m.DRBidWeight[b, z, m.tp_ts[tp]] * m.dr_bid_benefit[b, z, m.tp_ts[tp]] + for b in m.DR_BID_LIST + for z in m.LOAD_ZONES + ) + * m.tp_duration_hrs[tp] + / m.ts_num_tps[m.tp_ts[tp]], ) # add the private benefit to the model's objective function - m.Cost_Components_Per_TP.append('DR_Welfare_Cost') + m.Cost_Components_Per_TP.append("DR_Welfare_Cost") # variable to store the baseline data m.base_data = None @@ -294,6 +341,10 @@ def define_components(m): # # m.tp_flat_pricing_block = Param(m.TIMEPOINTS, within=m.FLAT_PRICING_START_TIMES, initialize=rule) + # provide up and down reserves (from supply perspective, so "up" means less load) + # note: the bids are negative quantities, indicating _production_ of reserves; + # they contribute to the reserve requirement with opposite sign + def pre_iterate(m): # could all prev values be stored in post_iterate? @@ -310,21 +361,30 @@ def pre_iterate(m): # model hasn't been solved yet m.prev_marginal_cost = { (z, tp, prod): None - for z in m.LOAD_ZONES for tp in m.TIMEPOINTS for prod in m.DR_PRODUCTS + for z in m.LOAD_ZONES + for tp in m.TIMEPOINTS + for prod in m.DR_PRODUCTS } m.prev_demand = { - (z, tp, prod): None for z in m.LOAD_ZONES for tp in m.TIMEPOINTS for prod in m.DR_PRODUCTS + (z, tp, prod): None + for z in m.LOAD_ZONES + for tp in m.TIMEPOINTS + for prod in m.DR_PRODUCTS } m.prev_SystemCost = None else: # get values from previous solution m.prev_marginal_cost = { (z, tp, prod): electricity_marginal_cost(m, z, tp, prod) - for z in m.LOAD_ZONES for tp in m.TIMEPOINTS for prod in m.DR_PRODUCTS + for z in m.LOAD_ZONES + for tp in m.TIMEPOINTS + for prod in m.DR_PRODUCTS } m.prev_demand = { (z, tp, prod): electricity_demand(m, z, tp, prod) - for z in m.LOAD_ZONES for tp in m.TIMEPOINTS for prod in m.DR_PRODUCTS + for z in m.LOAD_ZONES + for tp in m.TIMEPOINTS + for prod in m.DR_PRODUCTS } m.prev_SystemCost = value(m.SystemCost) @@ -346,23 +406,27 @@ def pre_iterate(m): # different solution that would be much better than the one we have now. # This ignores other solutions far away, where an integer variable is flipped, # but that's OK. (?) 
- prev_direct_cost = value(sum( - ( - sum( - m.prev_marginal_cost[z, tp, prod] * m.prev_demand[z, tp, prod] - for z in m.LOAD_ZONES for prod in m.DR_PRODUCTS + prev_direct_cost = value( + sum( + ( + sum( + m.prev_marginal_cost[z, tp, prod] * m.prev_demand[z, tp, prod] + for z in m.LOAD_ZONES + for prod in m.DR_PRODUCTS + ) ) - ) * m.bring_timepoint_costs_to_base_year[tp] - for ts in m.TIMESERIES - for tp in m.TPS_IN_TS[ts] - )) - prev_welfare_cost = value(sum( - ( - m.DR_Welfare_Cost[tp] - ) * m.bring_timepoint_costs_to_base_year[tp] - for ts in m.TIMESERIES - for tp in m.TPS_IN_TS[ts] - )) + * m.bring_timepoint_costs_to_base_year[tp] + for ts in m.TIMESERIES + for tp in m.TPS_IN_TS[ts] + ) + ) + prev_welfare_cost = value( + sum( + (m.DR_Welfare_Cost[tp]) * m.bring_timepoint_costs_to_base_year[tp] + for ts in m.TIMESERIES + for tp in m.TPS_IN_TS[ts] + ) + ) prev_cost = prev_direct_cost + prev_welfare_cost # prev_cost = value(sum( @@ -377,8 +441,8 @@ def pre_iterate(m): # )) print("") - print('previous direct cost: ${:,.0f}'.format(prev_direct_cost)) - print('previous welfare cost: ${:,.0f}'.format(prev_welfare_cost)) + print("previous direct cost: ${:,.0f}".format(prev_direct_cost)) + print("previous welfare cost: ${:,.0f}".format(prev_welfare_cost)) print("") # get the next bid and attach it to the model @@ -388,13 +452,15 @@ def pre_iterate(m): # get an estimate of best possible net cost of serving load # (if we could completely serve the last bid at the prices we quoted, # that would be an optimum; the actual cost may be higher but never lower) - b = m.DR_BID_LIST.last() # current bid number + b = m.DR_BID_LIST.last() # current bid number best_direct_cost = value( sum( sum( m.prev_marginal_cost[z, tp, prod] * m.dr_bid[b, z, tp, prod] - for z in m.LOAD_ZONES for prod in m.DR_PRODUCTS - ) * m.bring_timepoint_costs_to_base_year[tp] + for z in m.LOAD_ZONES + for prod in m.DR_PRODUCTS + ) + * m.bring_timepoint_costs_to_base_year[tp] for ts in m.TIMESERIES for tp in m.TPS_IN_TS[ts] ) @@ -402,9 +468,11 @@ def pre_iterate(m): best_bid_benefit = value( sum( ( - - sum(m.dr_bid_benefit[b, z, ts] for z in m.LOAD_ZONES) - * m.tp_duration_hrs[tp] / m.ts_num_tps[ts] - ) * m.bring_timepoint_costs_to_base_year[tp] + -sum(m.dr_bid_benefit[b, z, ts] for z in m.LOAD_ZONES) + * m.tp_duration_hrs[tp] + / m.ts_num_tps[ts] + ) + * m.bring_timepoint_costs_to_base_year[tp] for ts in m.TIMESERIES for tp in m.TPS_IN_TS[ts] ) @@ -425,14 +493,17 @@ def pre_iterate(m): # )) print("") - print('best direct cost: ${:,.0f}'.format(best_direct_cost)) - print('best bid benefit: ${:,.0f}'.format(best_bid_benefit)) + print("best direct cost: ${:,.0f}".format(best_direct_cost)) + print("best bid benefit: ${:,.0f}".format(best_bid_benefit)) print("") - print("lower bound=${:,.0f}, previous cost=${:,.0f}, optimality gap (vs direct cost)={}" \ - .format(best_cost, prev_cost, (prev_cost-best_cost)/abs(prev_direct_cost))) + print( + "lower bound=${:,.0f}, previous cost=${:,.0f}, optimality gap (vs direct cost)={}".format( + best_cost, prev_cost, (prev_cost - best_cost) / abs(prev_direct_cost) + ) + ) if prev_cost < best_cost: - print ( + print( "WARNING: final cost is below reported lower bound; " "there is probably a problem with the demand system." ) @@ -479,28 +550,36 @@ def pre_iterate(m): # TODO: index this to the direct costs, rather than the direct costs minus benefits # as it stands, it converges with about $50,000,000 optimality gap, which is about # 3% of direct costs. 
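+ # i.e., declare convergence once the gap between the previous solution's cost and this lower bound is within 0.01% of the previous direct cost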
- converged = (m.iteration_number > 0 and (prev_cost - best_cost)/abs(prev_direct_cost) <= 0.0001) + converged = ( + m.iteration_number > 0 + and (prev_cost - best_cost) / abs(prev_direct_cost) <= 0.0001 + ) return converged + def post_iterate(m): print("\n\n=======================================================") print("Solved model") print("=======================================================") print("Total cost: ${v:,.0f}".format(v=value(m.SystemCost))) - # TODO: # maybe calculate prices for the next round here and attach them to the # model, so they can be reported as final prices (currently we don't # report the final prices, only the prices prior to the final model run) - SystemCost = value(m.SystemCost) # calculate once to save time + SystemCost = value(m.SystemCost) # calculate once to save time if m.prev_SystemCost is None: - print("prev_SystemCost=, SystemCost={:,.0f}, ratio=".format(SystemCost)) + print( + "prev_SystemCost=, SystemCost={:,.0f}, ratio=".format(SystemCost) + ) else: - print("prev_SystemCost={:,.0f}, SystemCost={:,.0f}, ratio={}" \ - .format(m.prev_SystemCost, SystemCost, SystemCost/m.prev_SystemCost)) + print( + "prev_SystemCost={:,.0f}, SystemCost={:,.0f}, ratio={}".format( + m.prev_SystemCost, SystemCost, SystemCost / m.prev_SystemCost + ) + ) tag = m.options.scenario_name outputs_dir = m.options.outputs_dir @@ -509,52 +588,63 @@ def post_iterate(m): if m.iteration_number == 0: util.create_table( output_file=os.path.join(outputs_dir, "bid_{t}.csv".format(t=tag)), - headings= - ( - "bid_num", "load_zone", "timeseries", "timepoint", - ) + tuple("marginal_cost " + prod for prod in m.DR_PRODUCTS) - + tuple("price " + prod for prod in m.DR_PRODUCTS) - + tuple("bid " + prod for prod in m.DR_PRODUCTS) - + ( - "wtp", "base_price", "base_load" - ) + headings=("bid_num", "load_zone", "timeseries", "timepoint") + + tuple("marginal_cost " + prod for prod in m.DR_PRODUCTS) + + tuple("price " + prod for prod in m.DR_PRODUCTS) + + tuple("bid " + prod for prod in m.DR_PRODUCTS) + + ("wtp", "base_price", "base_load"), ) - b = m.DR_BID_LIST.last() # current bid + b = m.DR_BID_LIST.last() # current bid util.append_table( - m, m.LOAD_ZONES, m.TIMEPOINTS, + m, + m.LOAD_ZONES, + m.TIMEPOINTS, output_file=os.path.join(outputs_dir, "bid_{t}.csv".format(t=tag)), - values=lambda m, z, tp: - ( - b, - z, - m.tp_ts[tp], - m.tp_timestamp[tp], - ) - + tuple(m.prev_marginal_cost[z, tp, prod] for prod in m.DR_PRODUCTS) - + tuple(m.dr_price[b, z, tp, prod] for prod in m.DR_PRODUCTS) - + tuple(m.dr_bid[b, z, tp, prod] for prod in m.DR_PRODUCTS) - + ( - m.dr_bid_benefit[b, z, m.tp_ts[tp]], - m.base_data_dict[z, tp][1], - m.base_data_dict[z, tp][0], - ) + values=lambda m, z, tp: (b, z, m.tp_ts[tp], m.tp_timestamp[tp]) + + tuple(m.prev_marginal_cost[z, tp, prod] for prod in m.DR_PRODUCTS) + + tuple(m.dr_price[b, z, tp, prod] for prod in m.DR_PRODUCTS) + + tuple(m.dr_bid[b, z, tp, prod] for prod in m.DR_PRODUCTS) + + ( + m.dr_bid_benefit[b, z, m.tp_ts[tp]], + m.base_data_dict[z, tp][1], + m.base_data_dict[z, tp][0], + ), ) # store the current bid weights for future reference if m.iteration_number == 0: util.create_table( output_file=os.path.join(outputs_dir, "bid_weights_{t}.csv".format(t=tag)), - headings=("iteration", "load_zone", "timeseries", "bid_num", "weight") + headings=("iteration", "load_zone", "timeseries", "bid_num", "weight"), ) - util.append_table(m, m.LOAD_ZONES, m.TIMESERIES, m.DR_BID_LIST, + util.append_table( + m, + m.LOAD_ZONES, + m.TIMESERIES, + m.DR_BID_LIST, 
output_file=os.path.join(outputs_dir, "bid_weights_{t}.csv".format(t=tag)), - values=lambda m, z, ts, b: (len(m.DR_BID_LIST), z, ts, b, m.DRBidWeight[b, z, ts]) + values=lambda m, z, ts, b: ( + len(m.DR_BID_LIST), + z, + ts, + b, + m.DRBidWeight[b, z, ts], + ), ) # if m.iteration_number % 5 == 0: # # save time by only writing results every 5 iterations # write_results(m) + # Stop if there are no duals. This is an efficient point to check, and + # otherwise the errors later are pretty cryptic. + if not m.dual: + raise RuntimeError( + "No dual values have been calculated. Check that your solver is " + "able to provide duals for integer programs. If using cplex, you " + "may need to specify --retrieve-cplex-mip-duals." + ) + write_dual_costs(m) write_results(m) write_batch_results(m) @@ -570,17 +660,25 @@ def update_demand(m): and marginal costs to calibrate the demand system, and then replaces the fixed demand with the flexible demand system. """ - first_run = (m.base_data is None) + first_run = m.base_data is None print("attaching new demand bid to model") if first_run: calibrate_model(m) - else: # not first run + else: # not first run if m.options.verbose: print("m.DRBidWeight:") - pprint([(z, ts, [(b, value(m.DRBidWeight[b, z, ts])) for b in m.DR_BID_LIST]) - for z in m.LOAD_ZONES - for ts in m.TIMESERIES]) + pprint( + [ + ( + z, + ts, + [(b, value(m.DRBidWeight[b, z, ts])) for b in m.DR_BID_LIST], + ) + for z in m.LOAD_ZONES + for ts in m.TIMESERIES + ] + ) # get new bids from the demand system at the current prices bids = get_bids(m) @@ -617,44 +715,57 @@ def total_direct_costs_per_year(m, period): in each zone.) """ return value( - sum(getattr(m, annual_cost)[period] for annual_cost in m.Cost_Components_Per_Period) + sum( + getattr(m, annual_cost)[period] + for annual_cost in m.Cost_Components_Per_Period + ) + sum( getattr(m, tp_cost)[t] * m.tp_weight_in_year[t] for t in m.TPS_IN_PERIOD[period] - for tp_cost in m.Cost_Components_Per_TP - if tp_cost != "DR_Welfare_Cost" + for tp_cost in m.Cost_Components_Per_TP + if tp_cost != "DR_Welfare_Cost" ) ) + def electricity_marginal_cost(m, z, tp, prod): """Return marginal cost of providing product prod in load_zone z during timepoint tp.""" - if prod == 'energy': + if hasattr(m, "zone_balancing_area"): + ba = m.zone_balancing_area[z] + if prod == "energy": component = m.Zone_Energy_Balance[z, tp] - elif prod == 'energy up': - component = m.Satisfy_Spinning_Reserve_Up_Requirement[m.zone_balancing_area[z], tp] - elif prod == 'energy down': - component = m.Satisfy_Spinning_Reserve_Down_Requirement[m.zone_balancing_area[z], tp] + elif prod == "energy up": + if hasattr(m, "Limit_DemandResponseSpinningReserveUp"): + component = m.Limit_DemandResponseSpinningReserveUp[ba, tp] + else: + component = m.Satisfy_Spinning_Reserve_Up_Requirement[ba, tp] + elif prod == "energy down": + if hasattr(m, "Limit_DemandResponseSpinningReserveUp"): + component = m.Limit_DemandResponseSpinningReserveDown[ba, tp] + else: + component = m.Satisfy_Spinning_Reserve_Down_Requirement[ba, tp] else: - raise ValueError('Unrecognized electricity product: {}.'.format(prod)) - return m.dual[component]/m.bring_timepoint_costs_to_base_year[tp] + raise ValueError("Unrecognized electricity product: {}.".format(prod)) + return m.dual[component] / m.bring_timepoint_costs_to_base_year[tp] + def electricity_demand(m, z, tp, prod): """Return total consumption of product prod in load_zone z during timepoint tp (negative if customers supply product).""" - if prod == 'energy': - if 
len(m.DR_BID_LIST)==0: + if prod == "energy": + if len(m.DR_BID_LIST) == 0: # use zone_demand_mw (base demand) if no bids have been received yet # (needed to find flat prices before solving the model the first time) demand = m.zone_demand_mw[z, tp] else: demand = m.FlexibleDemand[z, tp] - elif prod == 'energy up': + elif prod == "energy up": # note: reserves have positive sign when provided by demand side, # but that should be shown as negative demand - demand = -value(m.DemandUpReserves[z, tp]) - elif prod == 'energy down': - demand = -value(m.DemandDownReserves[z, tp]) + demand = -value(m.DemandUpReserveSales[z, tp]) + elif prod == "energy down": + demand = -value(m.DemandDownReserveSales[z, tp]) else: - raise ValueError('Unrecognized electricity product: {}.'.format(prod)) + raise ValueError("Unrecognized electricity product: {}.".format(prod)) return demand @@ -676,24 +787,30 @@ def calibrate_model(m): # For now, we just assume the base price was $180/MWh, which is HECO's average price in # 2007 according to EIA form 826. # TODO: add in something for the fixed costs, to make marginal cost commensurate with the base_price - #baseCosts = [m.dual[m.EnergyBalance[z, tp]] for z in m.LOAD_ZONES for tp in m.TIMEPOINTS] + # baseCosts = [m.dual[m.EnergyBalance[z, tp]] for z in m.LOAD_ZONES for tp in m.TIMEPOINTS] base_price = 180 # average retail price for 2007 ($/MWh) - m.base_data = [( - z, - ts, - [m.zone_demand_mw[z, tp] for tp in m.TPS_IN_TS[ts]], - [base_price] * len(m.TPS_IN_TS[ts]) - ) for z in m.LOAD_ZONES for ts in m.TIMESERIES] + m.base_data = [ + ( + z, + ts, + [m.zone_demand_mw[z, tp] for tp in m.TPS_IN_TS[ts]], + [base_price] * len(m.TPS_IN_TS[ts]), + ) + for z in m.LOAD_ZONES + for ts in m.TIMESERIES + ] # make a dict of base_data, indexed by load_zone and timepoint, for later reference m.base_data_dict = { (z, tp): (m.zone_demand_mw[z, tp], base_price) - for z in m.LOAD_ZONES for tp in m.TIMEPOINTS + for z in m.LOAD_ZONES + for tp in m.TIMEPOINTS } # calibrate the demand module demand_module.calibrate(m, m.base_data) + def get_prices(m, flat_revenue_neutral=True): """Calculate appropriate prices for each day, based on the current state of the model.""" @@ -704,21 +821,26 @@ def get_prices(m, flat_revenue_neutral=True): marginal_costs = { (z, ts): { prod: ( - [m.base_data_dict[z, tp][1] for tp in m.TPS_IN_TS[ts]] if prod == 'energy' - else [0.0]*len(m.TPS_IN_TS[ts]) + [m.base_data_dict[z, tp][1] for tp in m.TPS_IN_TS[ts]] + if prod == "energy" + else [0.0] * len(m.TPS_IN_TS[ts]) ) for prod in m.DR_PRODUCTS } - for z in m.LOAD_ZONES for ts in m.TIMESERIES + for z in m.LOAD_ZONES + for ts in m.TIMESERIES } else: # use marginal costs from last solution marginal_costs = { (z, ts): { - prod: [electricity_marginal_cost(m, z, tp, prod) for tp in m.TPS_IN_TS[ts]] + prod: [ + electricity_marginal_cost(m, z, tp, prod) for tp in m.TPS_IN_TS[ts] + ] for prod in m.DR_PRODUCTS } - for z in m.LOAD_ZONES for ts in m.TIMESERIES + for z in m.LOAD_ZONES + for ts in m.TIMESERIES } if m.options.dr_flat_pricing: @@ -731,6 +853,7 @@ def get_prices(m, flat_revenue_neutral=True): return prices + def get_bids(m): """Get bids from the demand system showing quantities at the current prices and willingness-to-pay for those quantities call bid() with dictionary of prices for different products @@ -746,17 +869,19 @@ def get_bids(m): for z in m.LOAD_ZONES: for ts in m.TIMESERIES: demand, wtp = demand_module.bid(m, z, ts, prices[z, ts]) + # import pdb; pdb.set_trace() if m.options.dr_flat_pricing: # assume 
demand side will not provide reserves, even if they offered some # (at zero price) for (k, v) in demand.items(): - if k != 'energy': + if k != "energy": for i in range(len(v)): v[i] = 0.0 bids.append((z, ts, prices[z, ts], demand, wtp)) return bids + # def zone_period_average_marginal_cost(m, load_zone, period): # avg_cost = value( # sum( @@ -783,34 +908,34 @@ def find_flat_prices(m, marginal_costs, revenue_neutral): # now selling to the LSE rather than directly to the customers # # LSE iterates in sub-loop (scipy.optimize.newton) to find flat price: - # set price (e.g., simple average of MC or avg weighted by expected demand) - # offer price to demand side - # receive bids - # calc revenue balance for LSE (q*price - q.MC) - # if > 0: decrease price (q will go up across the board) - # if < 0: increase price (q will go down across the board) but + # set price (e.g., simple average of MC or avg weighted by expected demand) + # offer price to demand side + # receive bids + # calc revenue balance for LSE (q*price - q.MC) + # if > 0: decrease price (q will go up across the board) + # if < 0: increase price (q will go down across the board) but flat_prices = dict() for z in m.LOAD_ZONES: for p in m.PERIODS: price_guess = value( sum( - marginal_costs[z, ts]['energy'][i] - * electricity_demand(m, z, tp, 'energy') + marginal_costs[z, ts]["energy"][i] + * electricity_demand(m, z, tp, "energy") * m.tp_weight_in_year[tp] - for ts in m.TS_IN_PERIOD[p] for i, tp in enumerate(m.TPS_IN_TS[ts]) + for ts in m.TS_IN_PERIOD[p] + for i, tp in enumerate(m.TPS_IN_TS[ts]) + ) + / sum( + electricity_demand(m, z, tp, "energy") * m.tp_weight_in_year[tp] + for tp in m.TPS_IN_PERIOD[p] ) - / - sum(electricity_demand(m, z, tp, 'energy') * m.tp_weight_in_year[tp] - for tp in m.TPS_IN_PERIOD[p]) ) if revenue_neutral: # find a flat price that produces revenue equal to marginal costs flat_prices[z, p] = scipy.optimize.newton( - revenue_imbalance, - price_guess, - args=(m, z, p, marginal_costs) + revenue_imbalance, price_guess, args=(m, z, p, marginal_costs) ) else: # used in final round, when LSE is considered to have @@ -820,12 +945,14 @@ def find_flat_prices(m, marginal_costs, revenue_neutral): # construct a collection of flat prices with the right structure final_prices = { - (z, ts): - { - prod: [flat_prices[z, p] if prod=='energy' else 0.0] * len(m.TPS_IN_TS[ts]) - for prod in m.DR_PRODUCTS - } - for z in m.LOAD_ZONES for p in m.PERIODS for ts in m.TS_IN_PERIOD[p] + (z, ts): { + prod: [flat_prices[z, p] if prod == "energy" else 0.0] + * len(m.TPS_IN_TS[ts]) + for prod in m.DR_PRODUCTS + } + for z in m.LOAD_ZONES + for p in m.PERIODS + for ts in m.TS_IN_PERIOD[p] } return final_prices @@ -837,7 +964,7 @@ def revenue_imbalance(flat_price, m, load_zone, period, dynamic_prices): dynamic_price_revenue = 0.0 for ts in m.TS_IN_PERIOD[period]: prices = { - prod: [flat_price if prod=='energy' else 0.0] * len(m.TPS_IN_TS[ts]) + prod: [flat_price if prod == "energy" else 0.0] * len(m.TPS_IN_TS[ts]) for prod in m.DR_PRODUCTS } demand, wtp = demand_module.bid(m, load_zone, ts, prices) @@ -847,15 +974,19 @@ def revenue_imbalance(flat_price, m, load_zone, period, dynamic_prices): # ) flat_price_revenue += flat_price * sum( d * m.ts_duration_of_tp[ts] * m.ts_scale_to_year[ts] - for d in demand['energy'] + for d in demand["energy"] ) dynamic_price_revenue += sum( p * d * m.ts_duration_of_tp[ts] * m.ts_scale_to_year[ts] - for p, d in zip(dynamic_prices[load_zone, ts]['energy'], demand['energy']) + for p, d in 
zip(dynamic_prices[load_zone, ts]["energy"], demand["energy"])
         )
     imbalance = dynamic_price_revenue - flat_price_revenue
-    print("{}, {}: price ${} produces revenue imbalance of ${}/year".format(load_zone, period, flat_price, imbalance))
+    print(
+        "{}, {}: price ${} produces revenue imbalance of ${}/year".format(
+            load_zone, period, flat_price, imbalance
+        )
+    )
     return imbalance
@@ -894,11 +1025,18 @@ def add_bids(m, bids):
     m.DRBidWeight.reconstruct()
     m.DR_Convex_Bid_Weight.reconstruct()
     m.DR_Load_Zone_Shared_Bid_Weight.reconstruct()
-    if hasattr(m, 'DR_Flat_Bid_Weight'):
+    if hasattr(m, "DR_Flat_Bid_Weight"):
         m.DR_Flat_Bid_Weight.reconstruct()
     m.FlexibleDemand.reconstruct()
-    m.DemandUpReserves.reconstruct()
-    m.DemandDownReserves.reconstruct()
+    m.DemandUpReserveSales.reconstruct()
+    m.DemandDownReserveSales.reconstruct()
+    if hasattr(m, "DemandResponseSlackUp"):
+        m.DemandResponseSlackUp.reconstruct()
+        m.DemandResponseSlackDown.reconstruct()
+    if hasattr(m, "Limit_DemandResponseSpinningReserveUp"):
+        m.Limit_DemandResponseSpinningReserveUp.reconstruct()
+        m.Limit_DemandResponseSpinningReserveDown.reconstruct()
+
     m.DR_Welfare_Cost.reconstruct()
     # it seems like we have to reconstruct the higher-level components that depend on these
    # ones (even though these are Expressions), because otherwise they refer to objects that
@@ -907,15 +1045,16 @@
     # (i.e., Energy_Balance refers to the items returned by FlexibleDemand instead of referring
     # to FlexibleDemand itself)
     m.Zone_Energy_Balance.reconstruct()
-    if hasattr(m, 'SpinningReservesUpAvailable'):
-        m.SpinningReservesUpAvailable.reconstruct()
-        m.SpinningReservesDownAvailable.reconstruct()
+    if hasattr(m, "Aggregate_Spinning_Reserve_Details"):
+        m.Aggregate_Spinning_Reserve_Details.reconstruct()
+    if hasattr(m, "Satisfy_Spinning_Reserve_Up_Requirement"):
         m.Satisfy_Spinning_Reserve_Up_Requirement.reconstruct()
         m.Satisfy_Spinning_Reserve_Down_Requirement.reconstruct()
     # reconstruct_energy_balance(m)
     m.SystemCostPerPeriod.reconstruct()
     m.SystemCost.reconstruct()
 
+
 def reconstruct_energy_balance(m):
     """Reconstruct Energy_Balance constraint, preserving dual values (if present)."""
     # copy the existing Energy_Balance object
@@ -929,6 +1068,72 @@
         m.dual[m.Zone_Energy_Balance[k]] = m.dual.pop(old_Energy_Balance[k])
 
 
+def register_demand_response_reserves(m):
+    if m.options.demand_response_reserve_types == []:
+        # default to spinning reserves if a spinning reserve module is in use
+        if hasattr(m, "Spinning_Reserve_Up_Provisions"):
+            m.options.demand_response_reserve_types = ["spinning"]
+        else:
+            m.options.demand_response_reserve_types = ["none"]
+
+    if [rt.lower() for rt in m.options.demand_response_reserve_types] != ["none"]:
+        # Register with spinning reserves
+        if not hasattr(m, "Spinning_Reserve_Up_Provisions"):
+            raise ValueError(
+                "--demand-response-reserve-types is set to a value other than "
+                "'none' ({}). 
This requires that a spinning reserve module be " + "specified in modules.txt.".format( + m.options.demand_response_reserve_types + ) + ) + + if hasattr(m, "GEN_SPINNING_RESERVE_TYPES"): + # using advanced formulation, index by reserve type, balancing area, timepoint + # define variables for each type of reserves to be provided + # choose how to allocate the slack between the different reserve products + m.DR_SPINNING_RESERVE_TYPES = Set( + dimen=1, initialize=m.options.demand_response_reserve_types + ) + m.DemandResponseSpinningReserveUp = Var( + m.DR_SPINNING_RESERVE_TYPES, + m.BALANCING_AREA_TIMEPOINTS, + within=NonNegativeReals, + ) + m.DemandResponseSpinningReserveDown = Var( + m.DR_SPINNING_RESERVE_TYPES, + m.BALANCING_AREA_TIMEPOINTS, + within=NonNegativeReals, + ) + # constrain reserve provision within available slack + m.Limit_DemandResponseSpinningReserveUp = Constraint( + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, ba, tp: sum( + m.DemandResponseSpinningReserveUp[rt, ba, tp] + for rt in m.DR_SPINNING_RESERVE_TYPES + ) + <= m.DemandResponseSlackUp[ba, tp], + ) + m.Limit_DemandResponseSpinningReserveDown = Constraint( + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, ba, tp: sum( + m.DemandResponseSpinningReserveDown[rt, ba, tp] + for rt in m.DR_SPINNING_RESERVE_TYPES + ) + <= m.DemandResponseSlackDown[ba, tp], + ) + m.Spinning_Reserve_Up_Provisions.append("DemandResponseSpinningReserveUp") + m.Spinning_Reserve_Down_Provisions.append( + "DemandResponseSpinningReserveDown" + ) + else: + # using older formulation, only one type of spinning reserves, indexed by balancing area, timepoint + if m.options.demand_response_reserve_types != ["spinning"]: + raise ValueError( + 'Unable to use reserve types other than "spinning" with simple spinning reserves module.' 
+ ) + m.Spinning_Reserve_Up_Provisions.append("DemandResponseSlackUp") + m.Spinning_Reserve_Down_Provisions.append("DemandResponseSlackDown") + + def write_batch_results(m): # append results to the batch results file, creating it if needed output_file = os.path.join(m.options.outputs_dir, "demand_response_summary.csv") @@ -943,36 +1148,49 @@ def write_batch_results(m): util.append_table(m, output_file=output_file, values=lambda m: summary_values(m)) + def summary_headers(m): return ( ("tag", "iteration", "total_cost") - +tuple('total_direct_costs_per_year_'+str(p) for p in m.PERIODS) - +tuple('DR_Welfare_Cost_'+str(p) for p in m.PERIODS) - +tuple(prod + ' payment ' + str(p) for prod in m.DR_PRODUCTS for p in m.PERIODS) - +tuple(prod + ' sold ' + str(p) for prod in m.DR_PRODUCTS for p in m.PERIODS) + + tuple("total_direct_costs_per_year_" + str(p) for p in m.PERIODS) + + tuple("DR_Welfare_Cost_" + str(p) for p in m.PERIODS) + + tuple( + prod + " payment " + str(p) for prod in m.DR_PRODUCTS for p in m.PERIODS + ) + + tuple(prod + " sold " + str(p) for prod in m.DR_PRODUCTS for p in m.PERIODS) ) + def summary_values(m): demand_components = [ - c for c in ('zone_demand_mw', 'ShiftDemand', 'ChargeEVs', 'FlexibleDemand') if hasattr(m, c) + c + for c in ("zone_demand_mw", "ShiftDemand", "ChargeEVs", "FlexibleDemand") + if hasattr(m, c) ] values = [] # tag (configuration) - values.extend([ - m.options.scenario_name, - m.iteration_number, - m.SystemCost # total cost (all periods) - ]) + values.extend( + [ + m.options.scenario_name, + m.iteration_number, + m.SystemCost, # total cost (all periods) + ] + ) # direct costs (including "other") values.extend([total_direct_costs_per_year(m, p) for p in m.PERIODS]) # DR_Welfare_Cost - values.extend([ - sum(m.DR_Welfare_Cost[t] * m.tp_weight_in_year[t] for t in m.TPS_IN_PERIOD[p]) - for p in m.PERIODS - ]) + values.extend( + [ + sum( + m.DR_Welfare_Cost[t] * m.tp_weight_in_year[t] + for t in m.TPS_IN_PERIOD[p] + ) + for p in m.PERIODS + ] + ) # payments by customers ([expected demand] * [price offered for that demand]) # note: this uses the final MC to set the final price, rather than using the @@ -984,41 +1202,53 @@ def summary_values(m): # as the customer payment during iteration 0, since m.dr_price[last_bid, z, tp, prod] # may not be defined yet. 
last_bid = m.DR_BID_LIST.last() - values.extend([ - sum( - # we assume customers pay final marginal cost, so we don't artificially - # electricity_demand(m, z, tp, prod) * m.dr_price[last_bid, z, tp, prod] * m.tp_weight_in_year[tp] - electricity_demand(m, z, tp, prod) - * electricity_marginal_cost(m, z, tp, prod) - * m.tp_weight_in_year[tp] - for z in m.LOAD_ZONES for tp in m.TPS_IN_PERIOD[p] - ) - for prod in m.DR_PRODUCTS for p in m.PERIODS - ]) + values.extend( + [ + sum( + # we assume customers pay final marginal cost, so we don't artificially + # electricity_demand(m, z, tp, prod) * m.dr_price[last_bid, z, tp, prod] * m.tp_weight_in_year[tp] + electricity_demand(m, z, tp, prod) + * electricity_marginal_cost(m, z, tp, prod) + * m.tp_weight_in_year[tp] + for z in m.LOAD_ZONES + for tp in m.TPS_IN_PERIOD[p] + ) + for prod in m.DR_PRODUCTS + for p in m.PERIODS + ] + ) # import pdb; pdb.set_trace() # total quantities bought (or sold) by customers each year - values.extend([ - sum( - electricity_demand(m, z, tp, prod) * m.tp_weight_in_year[tp] - for z in m.LOAD_ZONES for tp in m.TPS_IN_PERIOD[p] - ) - for prod in m.DR_PRODUCTS for p in m.PERIODS - ]) + values.extend( + [ + sum( + electricity_demand(m, z, tp, prod) * m.tp_weight_in_year[tp] + for z in m.LOAD_ZONES + for tp in m.TPS_IN_PERIOD[p] + ) + for prod in m.DR_PRODUCTS + for p in m.PERIODS + ] + ) return values + def get(component, idx, default): try: return component[idx] except KeyError: return default -def write_results(m): + +def write_results(m, include_iter_num=True): outputs_dir = m.options.outputs_dir - tag = filename_tag(m) + tag = filename_tag(m, include_iter_num) - avg_ts_scale = float(sum(m.ts_scale_to_year[ts] for ts in m.TIMESERIES))/len(m.TIMESERIES) + avg_ts_scale = float(sum(m.ts_scale_to_year[ts] for ts in m.TIMESERIES)) / len( + m.TIMESERIES + ) last_bid = m.DR_BID_LIST.last() # get final prices that will be charged to customers (not necessarily @@ -1033,10 +1263,12 @@ def write_results(m): for prod in m.DR_PRODUCTS } final_quantities = { - (lz, tp, prod): value(sum( - m.DRBidWeight[b, lz, ts] * m.dr_bid[b, lz, tp, prod] - for b in m.DR_BID_LIST - )) + (lz, tp, prod): value( + sum( + m.DRBidWeight[b, lz, ts] * m.dr_bid[b, lz, tp, prod] + for b in m.DR_BID_LIST + ) + ) for lz in m.LOAD_ZONES for ts in m.TIMESERIES for tp in m.TPS_IN_TS[ts] @@ -1071,50 +1303,55 @@ def write_results(m): # } util.write_table( - m, m.LOAD_ZONES, m.TIMEPOINTS, + m, + m.LOAD_ZONES, + m.TIMEPOINTS, output_file=os.path.join(outputs_dir, "energy_sources{t}.csv".format(t=tag)), - headings= - ("load_zone", "period", "timepoint_label") - +tuple(m.FUELS) - +tuple(m.NON_FUEL_ENERGY_SOURCES) - +tuple("curtail_"+s for s in m.NON_FUEL_ENERGY_SOURCES) - +tuple(m.Zone_Power_Injections) - +tuple(m.Zone_Power_Withdrawals) - +tuple("offered price "+prod for prod in m.DR_PRODUCTS) - +tuple("bid q "+prod for prod in m.DR_PRODUCTS) - +tuple("final mc "+prod for prod in m.DR_PRODUCTS) - +tuple("final price "+prod for prod in m.DR_PRODUCTS) - +tuple("final q "+prod for prod in m.DR_PRODUCTS) - +("peak_day", "base_load", "base_price"), - values=lambda m, z, t: - (z, m.tp_period[t], m.tp_timestamp[t]) - +tuple( - sum(DispatchGenByFuel(m, p, t, f) for p in m.GENS_BY_FUEL[f]) - for f in m.FUELS - ) - +tuple( - sum(get(m.DispatchGen, (p, t), 0.0) for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s]) - for s in m.NON_FUEL_ENERGY_SOURCES - ) - +tuple( - sum( - get(m.DispatchUpperLimit, (p, t), 0.0) - get(m.DispatchGen, (p, t), 0.0) - for p in 
m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s] - ) - for s in m.NON_FUEL_ENERGY_SOURCES + headings=("load_zone", "period", "timepoint_label") + + tuple(m.FUELS) + + tuple(m.NON_FUEL_ENERGY_SOURCES) + + tuple("curtail_" + s for s in m.NON_FUEL_ENERGY_SOURCES) + + tuple(m.Zone_Power_Injections) + + tuple(m.Zone_Power_Withdrawals) + + tuple("offered price " + prod for prod in m.DR_PRODUCTS) + + tuple("bid q " + prod for prod in m.DR_PRODUCTS) + + tuple("final mc " + prod for prod in m.DR_PRODUCTS) + + tuple("final price " + prod for prod in m.DR_PRODUCTS) + + tuple("final q " + prod for prod in m.DR_PRODUCTS) + + ("peak_day", "base_load", "base_price"), + values=lambda m, z, t: (z, m.tp_period[t], m.tp_timestamp[t]) + + tuple( + sum(DispatchGenByFuel(m, p, t, f) for p in m.GENS_BY_FUEL[f]) + for f in m.FUELS + ) + + tuple( + sum( + get(m.DispatchGen, (p, t), 0.0) + for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s] ) - +tuple(getattr(m, component)[z, t] for component in m.Zone_Power_Injections) - +tuple(getattr(m, component)[z, t] for component in m.Zone_Power_Withdrawals) - +tuple(m.dr_price[last_bid, z, t, prod] for prod in m.DR_PRODUCTS) - +tuple(m.dr_bid[last_bid, z, t, prod] for prod in m.DR_PRODUCTS) - +tuple(electricity_marginal_cost(m, z, t, prod) for prod in m.DR_PRODUCTS) - +tuple(final_prices[z, t, prod] for prod in m.DR_PRODUCTS) - +tuple(final_quantities[z, t, prod] for prod in m.DR_PRODUCTS) - +( - 'peak' if m.ts_scale_to_year[m.tp_ts[t]] < 0.5*avg_ts_scale else 'typical', - m.base_data_dict[z, t][0], - m.base_data_dict[z, t][1], + for s in m.NON_FUEL_ENERGY_SOURCES + ) + + tuple( + sum( + get(m.DispatchUpperLimit, (p, t), 0.0) - get(m.DispatchGen, (p, t), 0.0) + for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s] ) + for s in m.NON_FUEL_ENERGY_SOURCES + ) + + tuple(getattr(m, component)[z, t] for component in m.Zone_Power_Injections) + + tuple(getattr(m, component)[z, t] for component in m.Zone_Power_Withdrawals) + + tuple(m.dr_price[last_bid, z, t, prod] for prod in m.DR_PRODUCTS) + + tuple(m.dr_bid[last_bid, z, t, prod] for prod in m.DR_PRODUCTS) + + tuple(electricity_marginal_cost(m, z, t, prod) for prod in m.DR_PRODUCTS) + + tuple(final_prices[z, t, prod] for prod in m.DR_PRODUCTS) + + tuple(final_quantities[z, t, prod] for prod in m.DR_PRODUCTS) + + ( + "peak" + if m.ts_scale_to_year[m.tp_ts[t]] < 0.5 * avg_ts_scale + else "typical", + m.base_data_dict[z, t][0], + m.base_data_dict[z, t][1], + ), ) # import pprint @@ -1122,9 +1359,10 @@ def write_results(m): # bt=set(x[3] for x in b) # technologies # pprint([(t, sum(x[2] for x in b if x[3]==t), sum(x[4] for x in b if x[3]==t)/sum(1.0 for x in b if x[3]==t)) for t in bt]) -def write_dual_costs(m): + +def write_dual_costs(m, include_iter_num=True): outputs_dir = m.options.outputs_dir - tag = filename_tag(m) + tag = filename_tag(m, include_iter_num) # with open(os.path.join(outputs_dir, "producer_surplus{t}.csv".format(t=tag)), 'w') as f: # for g, per in m.Max_Build_Potential: @@ -1142,9 +1380,9 @@ def write_dual_costs(m): outfile = os.path.join(outputs_dir, "dual_costs{t}.csv".format(t=tag)) dual_data = [] start_time = time.time() - print("Writing {} ... ".format(outfile), end=' ') + print("Writing {} ... 
".format(outfile), end=" ") - def add_dual(const, lbound, ubound, duals, prefix='', offset=0.0): + def add_dual(const, lbound, ubound, duals, prefix="", offset=0.0): if const in duals: dual = duals[const] if dual >= 0.0: @@ -1156,12 +1394,23 @@ def add_dual(const, lbound, ubound, duals, prefix='', offset=0.0): if bound is None: # Variable is unbounded; dual should be 0.0 or possibly a tiny non-zero value. if not (-1e-5 < dual < 1e-5): - raise ValueError("{} has no {} bound but has a non-zero dual value {}.".format( - const.name, "lower" if dual > 0 else "upper", dual)) + raise ValueError( + "{} has no {} bound but has a non-zero dual value {}.".format( + const.name, "lower" if dual > 0 else "upper", dual + ) + ) else: total_cost = dual * (bound + offset) if total_cost != 0.0: - dual_data.append((prefix+const.name, direction, (bound+offset), dual, total_cost)) + dual_data.append( + ( + prefix + const.name, + direction, + (bound + offset), + dual, + total_cost, + ) + ) for comp in m.component_objects(ctype=Var): for idx in comp: @@ -1169,7 +1418,7 @@ def add_dual(const, lbound, ubound, duals, prefix='', offset=0.0): if var.value is not None: # ignore vars that weren't used in the model if var.is_integer() or var.is_binary(): # integrality constraint sets upper and lower bounds - add_dual(var, value(var), value(var), m.rc, prefix='integer: ') + add_dual(var, value(var), value(var), m.rc, prefix="integer: ") else: add_dual(var, var.lb, var.ub, m.rc) for comp in m.component_objects(ctype=Constraint): @@ -1183,25 +1432,39 @@ def add_dual(const, lbound, ubound, duals, prefix='', offset=0.0): standard_constraint = generate_standard_repn(constr.body) if standard_constraint.constant is not None: offset = -standard_constraint.constant - add_dual(constr, value(constr.lower), value(constr.upper), m.dual, offset=offset) + add_dual( + constr, + value(constr.lower), + value(constr.upper), + m.dual, + offset=offset, + ) + + dual_data.sort(key=lambda r: (not r[0].startswith("DR_Convex_"), r[3] >= 0) + r) - dual_data.sort(key=lambda r: (not r[0].startswith('DR_Convex_'), r[3] >= 0)+r) + with open(outfile, "w") as f: + f.write( + ",".join(["constraint", "direction", "bound", "dual", "total_cost"]) + "\n" + ) + f.writelines(",".join(map(str, r)) + "\n" for r in dual_data) + print("time taken: {dur:.2f}s".format(dur=time.time() - start_time)) - with open(outfile, 'w') as f: - f.write(','.join(['constraint', 'direction', 'bound', 'dual', 'total_cost']) + '\n') - f.writelines(','.join(map(str, r)) + '\n' for r in dual_data) - print("time taken: {dur:.2f}s".format(dur=time.time()-start_time)) -def filename_tag(m): +def filename_tag(m, include_iter_num=True): + tag = "" if m.options.scenario_name: - t = m.options.scenario_name + "_" - else: - t = "" - t = t + "_".join(map(str, m.iteration_node)) - if t: - t = "_" + t - return t - -# def post_solve(m, outputs_dir): -# # report the dual costs -# write_dual_costs(m) + tag += "_" + m.options.scenario_name + if include_iter_num: + if m.options.max_iter is None: + n_digits = 4 + else: + n_digits = len(str(m.options.max_iter - 1)) + tag += "".join(f"_{t:0{n_digits}d}" for t in m.iteration_node) + return tag + + +def post_solve(m, outputs_dir): + # report final results, possibly after smoothing, + # and without the iteration number + write_dual_costs(m, include_iter_num=False) + write_results(m, include_iter_num=False) diff --git a/switch_model/balancing/demand_response/iterative/constant_elasticity_demand_system.py 
b/switch_model/balancing/demand_response/iterative/constant_elasticity_demand_system.py index 47e345e60..b0e094d75 100644 --- a/switch_model/balancing/demand_response/iterative/constant_elasticity_demand_system.py +++ b/switch_model/balancing/demand_response/iterative/constant_elasticity_demand_system.py @@ -1,4 +1,6 @@ from __future__ import division + + def calibrate(base_data, dr_elasticity_scenario=3): """Accept a list of tuples showing [base hourly loads], and [base hourly prices] for each location (load_zone) and date (time_series). Store these for later reference by bid(). @@ -20,6 +22,7 @@ def calibrate(base_data, dr_elasticity_scenario=3): } elasticity_scenario = dr_elasticity_scenario + def bid(load_zone, time_series, prices): """Accept a vector of current prices, for a particular location (load_zone) and day (time_series). Return a tuple showing hourly load levels and willingness to pay for those loads (relative to the @@ -30,7 +33,7 @@ def bid(load_zone, time_series, prices): in total volume, but schedules itself to the cheapest hours (this part is called "shiftable load").""" elasticity = 0.1 - shiftable_share = 0.1 * elasticity_scenario # 1-3 + shiftable_share = 0.1 * elasticity_scenario # 1-3 # convert prices to a numpy vector, and make non-zero # to avoid errors when raising to a negative power @@ -40,10 +43,9 @@ def bid(load_zone, time_series, prices): bl = base_load_dict[load_zone, time_series] bp = base_price_dict[load_zone, time_series] - # spread shiftable load among all minimum-cost hours, # shaped like the original load during those hours (so base prices result in base loads) - mins = (p == np.min(p)) + mins = p == np.min(p) shiftable_load = np.zeros(len(p)) shiftable_load[mins] = bl[mins] * shiftable_share * np.sum(bl) / sum(bl[mins]) @@ -52,12 +54,14 @@ def bid(load_zone, time_series, prices): shiftable_load_wtp = 0 elastic_base_load = (1.0 - shiftable_share) * bl - elastic_load = elastic_base_load * (p/bp) ** (-elasticity) + elastic_load = elastic_base_load * (p / bp) ** (-elasticity) # _relative_ consumer surplus for the elastic load is the integral # of the load (quantity) function from p to bp; note: the hours are independent. # if p < bp, consumer surplus decreases as we move from p to bp, so cs_p - cs_p0 # (given by this integral) is positive. - elastic_load_cs_diff = np.sum((1 - (p/bp)**(1-elasticity)) * bp * elastic_base_load / (1-elasticity)) + elastic_load_cs_diff = np.sum( + (1 - (p / bp) ** (1 - elasticity)) * bp * elastic_base_load / (1 - elasticity) + ) # _relative_ amount actually paid for elastic load under current price, vs base price base_elastic_load_paid = np.sum(bp * elastic_base_load) elastic_load_paid = np.sum(p * elastic_load) diff --git a/switch_model/balancing/demand_response/iterative/r_demand_system.py b/switch_model/balancing/demand_response/iterative/r_demand_system.py index b5a06ce2c..14f22c7d8 100644 --- a/switch_model/balancing/demand_response/iterative/r_demand_system.py +++ b/switch_model/balancing/demand_response/iterative/r_demand_system.py @@ -10,14 +10,24 @@ returned by the python calibrate() function and attached to the model. 
""" from __future__ import print_function +from switch_model.utilities import unique_list + def define_arguments(argparser): - argparser.add_argument("--dr-elasticity-scenario", type=int, default=3, - help="Choose a scenario of customer elasticity to be used by R script") - argparser.add_argument("--dr-r-script", default=None, + argparser.add_argument( + "--dr-elasticity-scenario", + type=int, + default=3, + help="Choose a scenario of customer elasticity to be used by R script", + ) + argparser.add_argument( + "--dr-r-script", + default=None, help="Name of R script to use for preparing demand response bids. " "Only takes effect when using --dr-demand-module=r_demand_system. " - "This script should provide calibrate() and bid() functions. ") + "This script should provide calibrate() and bid() functions. ", + ) + def define_components(m): # load modules for use later (import is delayed to avoid interfering with unit tests) @@ -25,20 +35,24 @@ def define_components(m): global np import numpy as np except ImportError: - print("="*80) - print("Unable to load numpy package, which is used by the r_demand_system module.") + print("=" * 80) + print( + "Unable to load numpy package, which is used by the r_demand_system module." + ) print("Please install this via 'conda install numpy' or 'pip install numpy'.") - print("="*80) + print("=" * 80) raise try: global rpy2 # not actually needed outside this function import rpy2.robjects import rpy2.robjects.numpy2ri except ImportError: - print("="*80) - print("Unable to load rpy2 package, which is used by the r_demand_system module.") + print("=" * 80) + print( + "Unable to load rpy2 package, which is used by the r_demand_system module." + ) print("Please install this via 'conda install rpy2' or 'pip install rpy2'.") - print("="*80) + print("=" * 80) raise # initialize the R environment global r @@ -61,6 +75,7 @@ def define_components(m): ) r.source(m.options.dr_r_script) + def calibrate(m, base_data): """Accept a list of tuples showing load_zone, time_series, [base hourly loads], [base hourly prices] for each load_zone and time_series (day). Perform any calibration needed in the demand system @@ -68,21 +83,23 @@ def calibrate(m, base_data): Also accept an allocation among different elasticity classes (defined in the R module.) 
""" base_load_dict = { - (z, ts): base_loads - for (z, ts, base_loads, base_prices) in base_data + (z, ts): base_loads for (z, ts, base_loads, base_prices) in base_data } base_price_dict = { - (z, ts): base_prices - for (z, ts, base_loads, base_prices) in base_data + (z, ts): base_prices for (z, ts, base_loads, base_prices) in base_data } load_zones = unique_list(z for (z, ts, base_loads, base_prices) in base_data) time_series = unique_list(ts for (z, ts, base_loads, base_prices) in base_data) # maybe this should use the hour of day from the model, but this is good enough for now - hours_of_day = list(range(1, 1+len(base_data[0][2]))) + hours_of_day = list(range(1, 1 + len(base_data[0][2]))) # create r arrays of base loads and prices, with indices = (hour of day, time series, load zone) - base_loads = make_r_value_array(base_load_dict, hours_of_day, time_series, load_zones) - base_prices = make_r_value_array(base_price_dict, hours_of_day, time_series, load_zones) + base_loads = make_r_value_array( + base_load_dict, hours_of_day, time_series, load_zones + ) + base_prices = make_r_value_array( + base_price_dict, hours_of_day, time_series, load_zones + ) # calibrate the demand system within R r.calibrate(base_loads, base_prices, m.options.dr_elasticity_scenario) @@ -93,18 +110,19 @@ def bid(m, load_zone, timeseries, prices): Return a tuple showing hourly load levels and willingness to pay for those loads.""" bid = r.bid( - str(load_zone), str(timeseries), - np.array(prices['energy']), - np.array(prices['energy up']), - np.array(prices['energy down']), - m.options.dr_elasticity_scenario + str(load_zone), + str(timeseries), + np.array(prices["energy"]), + np.array(prices["energy up"]), + np.array(prices["energy down"]), + m.options.dr_elasticity_scenario, ) demand = { - 'energy': list(bid[0]), - 'energy up': list(bid[1]), - 'energy down': list(bid[2]), + "energy": list(bid[0]), + "energy up": list(bid[1]), + "energy down": list(bid[2]), } - wtp = bid[3][0] # everything is a vector in R, so we have to take the first element + wtp = bid[3][0] # everything is a vector in R, so we have to take the first element return (demand, wtp) @@ -112,7 +130,7 @@ def bid(m, load_zone, timeseries, prices): def test_calib(): """Test calibration routines with sample data. 
Results should match r.test_calib().""" base_data = [ - ("oahu", 100, [ 500, 1000, 1500], [0.35, 0.35, 0.35]), + ("oahu", 100, [500, 1000, 1500], [0.35, 0.35, 0.35]), ("oahu", 200, [2000, 2500, 3000], [0.35, 0.35, 0.35]), ("maui", 100, [3500, 4000, 4500], [0.35, 0.35, 0.35]), ("maui", 200, [5000, 5500, 6000], [0.35, 0.35, 0.35]), @@ -121,16 +139,11 @@ def test_calib(): r.print_calib() -def unique_list(seq): - # from http://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-in-python-whilst-preserving-order - seen = set() - return [x for x in seq if not (x in seen or seen.add(x))] - def make_r_value_array(base_value_dict, hours_of_day, time_series, load_zones): # create a numpy array with indices = (hour of day, time series, load zone) arr = np.array( - [ [base_value_dict[(z, ts)] for ts in time_series] for z in load_zones], - dtype=float + [[base_value_dict[(z, ts)] for ts in time_series] for z in load_zones], + dtype=float, ).transpose() # convert to an r array with dimnames, using R's standard array function # (it might be slightly neater to use rinterface to build r_array entirely @@ -143,7 +156,7 @@ def make_r_value_array(base_value_dict, hours_of_day, time_series, load_zones): dimnames=r.list( np.array(hours_of_day, dtype=str), np.array(time_series, dtype=str), - np.array(load_zones, dtype=str) - ) + np.array(load_zones, dtype=str), + ), ) return r_array diff --git a/switch_model/balancing/demand_response/simple.py b/switch_model/balancing/demand_response/simple.py index 81cf22c42..53dfce201 100644 --- a/switch_model/balancing/demand_response/simple.py +++ b/switch_model/balancing/demand_response/simple.py @@ -1,4 +1,4 @@ -# Copyright 2017 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2, which is in the LICENSE file. 
""" @@ -13,8 +13,8 @@ import os from pyomo.environ import * -dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones' -optional_dependencies = 'switch_model.transmission.local_td' +dependencies = "switch_model.timescales", "switch_model.balancing.load_zones" +optional_dependencies = "switch_model.transmission.local_td" def define_components(mod): @@ -49,32 +49,35 @@ def define_components(mod): """ mod.dr_shift_down_limit = Param( - mod.LOAD_ZONES, mod.TIMEPOINTS, - default= 0.0, + mod.LOAD_ZONES, + mod.TIMEPOINTS, + default=0.0, within=NonNegativeReals, - validate=lambda m, value, z, t: value <= m.zone_demand_mw[z, t]) + validate=lambda m, value, z, t: value <= m.zone_demand_mw[z, t], + ) mod.dr_shift_up_limit = Param( - mod.LOAD_ZONES, mod.TIMEPOINTS, - default= float('inf'), - within=NonNegativeReals) + mod.LOAD_ZONES, mod.TIMEPOINTS, default=float("inf"), within=NonNegativeReals + ) mod.ShiftDemand = Var( - mod.LOAD_ZONES, mod.TIMEPOINTS, + mod.LOAD_ZONES, + mod.TIMEPOINTS, within=Reals, - bounds=lambda m, z, t: - ( - (-1.0) * m.dr_shift_down_limit[z,t], - m.dr_shift_up_limit[z,t] - )) + bounds=lambda m, z, t: ( + (-1.0) * m.dr_shift_down_limit[z, t], + m.dr_shift_up_limit[z, t], + ), + ) mod.DR_Shift_Net_Zero = Constraint( - mod.LOAD_ZONES, mod.TIMESERIES, - rule=lambda m, z, ts: - sum(m.ShiftDemand[z, t] for t in m.TPS_IN_TS[ts]) == 0.0) + mod.LOAD_ZONES, + mod.TIMESERIES, + rule=lambda m, z, ts: sum(m.ShiftDemand[z, t] for t in m.TPS_IN_TS[ts]) == 0.0, + ) try: - mod.Distributed_Power_Withdrawals.append('ShiftDemand') + mod.Distributed_Power_Withdrawals.append("ShiftDemand") except AttributeError: - mod.Zone_Power_Withdrawals.append('ShiftDemand') + mod.Zone_Power_Withdrawals.append("ShiftDemand") def load_inputs(mod, switch_data, inputs_dir): @@ -89,6 +92,6 @@ def load_inputs(mod, switch_data, inputs_dir): switch_data.load_aug( optional=True, - filename=os.path.join(inputs_dir, 'dr_data.csv'), - autoselect=True, - param=(mod.dr_shift_down_limit, mod.dr_shift_up_limit)) + filename=os.path.join(inputs_dir, "dr_data.csv"), + param=(mod.dr_shift_down_limit, mod.dr_shift_up_limit), + ) diff --git a/switch_model/balancing/diagnose_infeasibility.py b/switch_model/balancing/diagnose_infeasibility.py new file mode 100755 index 000000000..95520b56b --- /dev/null +++ b/switch_model/balancing/diagnose_infeasibility.py @@ -0,0 +1,228 @@ +""" +Relax constraints to help diagnose data problems in infeasible models + +This module adds relaxation terms to all constraints in the model, which makes +every model feasible. It then minimizes the simple sum of the relaxation +variables (i.e., total violation of all constraints) instead of the normal cost +function. Then it report which constraints were violated and by how much. + +Users can experiment by specifying `--no-relax` for some constraints, to find +out which constraints cannot be met on their own or cannot be met in combination +with other constraints (e.g., if specifying `--no-relax Constraint1` causes +`Constraint2` to be violated instead, then we know these are related. Then if +the model becomes infeasible when specifying `--no-relax Constraint1 +Constraint2`, we know that Constraint1 and Constraint2 cannot be satisfied at +the same time. Users should then look for inconsistencies in the data used for +these two constraints. 
+""" +from switch_model.utilities import make_iterable +import pyomo.environ as pyo + +relax_var_prefix = "Relax" +relax_var_dir = {1: "up", -1: "down"} + +# TODO: look for a way to do all this from the pre_solve() step and in a more +# documented way. For example, deactivate all the existing constraints +# and create a new constraint list that has the relaxed versions of all of them, +# so we can work with the constructed constraints instead of the rules, and we +# can just check to see which deactivated constraints are violated after solving +# the model. + +# Note: this module mostly doesn't distinguish between indexed and scalar +# constraints, but Pyomo generally presents scalar constraints as having an +# indexing set of [None], which can then be used as an index on the scalar +# variable, so it generally works out OK automatically. + + +def define_arguments(argparser): + argparser.add_argument( + "--no-relax", + nargs="+", + default=[], + action="extend", + help="Names of one or more constraints that should not be relaxed by " + "the {} module. " + "It is often helpful to solve once, observe contraints " + "that are violated, then solve again without relaxing those " + "constraints and observe which other constraints are violated " + "instead. By repeating this process, you can identify a set of " + "constraints that cannot all be satisfied simultaneously. " + "(Note that this module never relaxes bounds on variables.)".format(__name__), + ) + + +def relaxable_constraints(m): + for c in list(m.component_objects(pyo.Constraint)): + if c.name not in m.options.no_relax: + # skip the "--no-relax" constraints + yield c + + +def define_dynamic_components(m): + # loop over an explicit list, otherwise the generator gets altered by the loop + for c in list(relaxable_constraints(m)): + # Define relaxation variables for all indices of this constraint + # in both directions (up or down), so we can handle ==, <= or >= + # constraints. + # These variables are initialized as zero, since many of them will be + # paired with Skip constraints and so never get sent to the solver. + for direction in [1, -1]: + var_name = relax_var_name(c, direction) + relax_var = pyo.Var( + c.index_set(), within=pyo.NonNegativeReals, initialize=0 + ) + setattr(m, var_name, relax_var) + # Make sure the relaxation variable is constructed before the + # constraint but after the constraint's indexing set. (This is why + # we define different relaxation variables for every constraint, + # instead of a single variable with indexes for all constraints.) + move_component_above(relax_var, c) + + # relax the constraint + relax_constraint(c) + m.logger.info(f"relaxed constraint {c.name}") + + +def pre_solve(m): + assign_relaxation_prices(m) + + +def post_solve(m, outputs_dir): + # report any constraints that were violated + unsatisfied_constraints = [] + for constraint in relaxable_constraints(m): + constraint_name = constraint.name + for key, c in constraint.items(): + for direction in [-1, 1]: + # get the matching relaxation variable + relax_var = getattr(m, relax_var_name(constraint, direction)) + val = relax_var[key].value + if val is not None and val >= 1e-9: + # We could use name = c.name here, but for it is slow to + # access constraints later in the model (see + # https://github.com/Pyomo/pyomo/issues/2560). Using repr() + # also gives a more copy-pastable representation of the + # constraint, which can be useful for debugging. 
+                    name = constraint_name
+                    if key is not None:
+                        name += repr(list(key))
+                    unsatisfied_constraints.append([name, direction * val])
+
+    # We report results using logger.info, so users must set log-level to
+    # info to see them. This is because these are diagnostic messages, not
+    # errors, and because it prevents chatter from the test suite.
+    if unsatisfied_constraints:
+        for name, val in unsatisfied_constraints:
+            m.logger.info(
+                "WARNING: Constraint {} violated by {:.4g} units.".format(name, val)
+            )
+    else:
+        m.logger.info(
+            "\nCongratulations, the model is feasible. Please solve again "
+            "without using the {} module to obtain the optimal solution.\n".format(
+                __name__
+            )
+        )
+
+
+def relax_var_name(constraint, direction):
+    return "_".join(
+        [
+            relax_var_prefix,
+            constraint.name,
+            relax_var_dir[direction],
+        ]
+    )
+
+
+def relax_constraint(c):
+    def new_rule(m, *idx):
+        # note: we use getattr(m, c.name) instead of just c, because
+        # c is an object in the AbstractModel and this rule will be called on
+        # a concrete instance.
+        expr = getattr(m, c.name).original_rule(m, *idx)
+        if expr is not pyo.Constraint.Skip and expr is not pyo.Constraint.Infeasible:
+            # pyomo provides a .args argument but it is not editable.
+            # some versions provide ._args and some provide ._args_, so we use
+            # what is available
+            a = "_args" if hasattr(expr, "_args") else "_args_"
+            args = list(getattr(expr, a))  # make mutable
+            # add up and down relaxation vars to an arbitrary point in the
+            # inequality (usually works out as high side)
+            for direction in [1, -1]:
+                relax_var = getattr(m, relax_var_name(c, direction))
+                # next line uses idx if supplied, otherwise treats var as scalar
+                args[1] += direction * (relax_var[idx] if idx else relax_var)
+            setattr(
+                expr, a, type(getattr(expr, a))(args)
+            )  # convert back to original type
+        return expr
+
+    # older versions of pyomo store the user's original rule function in the
+    # `rule` attribute of the constraint, but newer versions (beginning sometime
+    # between 5.4 and 6.4) convert the rule into an IndexedCallInitializer object.
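+    # checking for the wrapper's _fcn attribute distinguishes the two cases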
+ if hasattr(c.rule, "_fcn"): + c.original_rule = c.rule._fcn + c.rule._fcn = new_rule + else: # older Pyomo + c.original_rule = c.rule + c.rule = new_rule + + +def move_component_above(new_component, old_component): + # move new component above old component within their parent block + block = new_component.parent_block() + if block is not old_component.parent_block(): + raise ValueError( + "Cannot move component {} above {} because they are declared in different blocks.".format( + new_component.name, old_component.name + ) + ) + old_idx = block._decl[old_component.name] + new_idx = block._decl[new_component.name] + if new_idx < old_idx: + # new_component is already above old_component + return + else: + # reorder components + # see https://groups.google.com/d/msg/pyomo-forum/dLbD2ly_hZo/5-INUaECNBkJ + # remove all components from this block + block_components = [c[0] for c in block._decl_order] + # import pdb; pdb.set_trace() + for c in block_components: + if c is not None: + block.del_component(c) + # move the new component above the old one + block_components.insert(old_idx, block_components.pop(new_idx)) + # add components back to the block + for c in block_components: + if c is not None: + block.add_component(c.name, c) + # the code below does the same thing, but seems a little too undocumented + # new_cmp_entry = block._decl_order.pop(new_idx) + # block._decl_order.insert(old_idx, new_cmp_entry) + # # renumber block._decl to match new indexes + # for i in range(old_idx, new_idx+1): + # block._decl[block._decl_order[i][0].name] = i + + +def assign_relaxation_prices(m): + # Assign costs to the constraint relaxation variables + def cost_rule(m): + violations = [] + for constraint in relaxable_constraints(m): + for direction in [1, -1]: + var_name = relax_var_name(constraint, direction) + for key, c in constraint.items(): + var = getattr(m, var_name)[key] # matching relaxation var + violations.append(var) + return sum(violations) + + # note: we create a new objective function that ignores all the normal costs, + # since we are focused only on minimizing constraint violations (possibly to + # zero). Once it is feasible, the model should be re-solved without this + # module to get a real solution. In principle we could use a high + # multiplier on the violations and then add in the standard costs, but that + # is not very useful and makes solutions much slower. + m.Total_Constraint_Relaxations = pyo.Objective(rule=cost_rule, sense=pyo.minimize) + m.Minimize_System_Cost.deactivate() diff --git a/switch_model/balancing/load_zones.py b/switch_model/balancing/load_zones.py index 4e04a255c..5c0356304 100644 --- a/switch_model/balancing/load_zones.py +++ b/switch_model/balancing/load_zones.py @@ -1,4 +1,4 @@ -# Copyright (c) 2015-2019 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. """ @@ -8,8 +8,9 @@ from pyomo.environ import * from switch_model.reporting import write_table -dependencies = 'switch_model.timescales' -optional_dependencies = 'switch_model.transmission.local_td' +dependencies = "switch_model.timescales" +optional_dependencies = "switch_model.transmission.local_td" + def define_dynamic_lists(mod): """ @@ -65,10 +66,10 @@ def define_components(mod): zone_expected_coincident_peak_demand[z,p] is an optional parameter than can be used to externally specify peak load planning requirements in MW. 
Currently local_td and planning_reserves determine capacity requirements - use zone_expected_coincident_peak_demand as well as load timeseries. Do not - specify this parameter if you wish for the model to endogenously determine - capacity requirements after accounting for both load and Distributed - Energy Resources (DER). + based on zone_expected_coincident_peak_demand as well as load timeseries. Do + not specify this parameter if you wish for the model to endogenously + determine capacity requirements after accounting for both load and + Distributed Energy Resources (DER). Derived parameters: @@ -77,38 +78,39 @@ def define_components(mod): """ - mod.LOAD_ZONES = Set() - mod.ZONE_TIMEPOINTS = Set(dimen=2, + mod.LOAD_ZONES = Set(dimen=1) + mod.ZONE_TIMEPOINTS = Set( + dimen=2, initialize=lambda m: m.LOAD_ZONES * m.TIMEPOINTS, - doc="The cross product of load zones and timepoints, used for indexing.") - mod.zone_demand_mw = Param( - mod.ZONE_TIMEPOINTS, - within=NonNegativeReals) + doc="The cross product of load zones and timepoints, used for indexing.", + ) + mod.zone_demand_mw = Param(mod.ZONE_TIMEPOINTS, within=NonNegativeReals) mod.zone_ccs_distance_km = Param( - mod.LOAD_ZONES, - within=NonNegativeReals, - default=0.0) - mod.zone_dbid = Param( - mod.LOAD_ZONES, - default=lambda m, z: z) - mod.min_data_check('LOAD_ZONES', 'zone_demand_mw') + mod.LOAD_ZONES, within=NonNegativeReals, default=0.0 + ) + mod.zone_dbid = Param(mod.LOAD_ZONES, default=lambda m, z: z, within=Any) + mod.min_data_check("LOAD_ZONES", "zone_demand_mw") try: - mod.Distributed_Power_Withdrawals.append('zone_demand_mw') + mod.Distributed_Power_Withdrawals.append("zone_demand_mw") except AttributeError: - mod.Zone_Power_Withdrawals.append('zone_demand_mw') + mod.Zone_Power_Withdrawals.append("zone_demand_mw") mod.EXTERNAL_COINCIDENT_PEAK_DEMAND_ZONE_PERIODS = Set( - dimen=2, within=mod.LOAD_ZONES * mod.PERIODS, - doc="Zone-Period combinations with zone_expected_coincident_peak_demand data.") + dimen=2, + within=mod.LOAD_ZONES * mod.PERIODS, + doc="Zone-Period combinations with zone_expected_coincident_peak_demand data.", + ) mod.zone_expected_coincident_peak_demand = Param( - mod.EXTERNAL_COINCIDENT_PEAK_DEMAND_ZONE_PERIODS, - within=NonNegativeReals) + mod.EXTERNAL_COINCIDENT_PEAK_DEMAND_ZONE_PERIODS, within=NonNegativeReals + ) mod.zone_total_demand_in_period_mwh = Param( - mod.LOAD_ZONES, mod.PERIODS, + mod.LOAD_ZONES, + mod.PERIODS, within=NonNegativeReals, initialize=lambda m, z, p: ( - sum(m.zone_demand_mw[z, t] * m.tp_weight[t] - for t in m.TPS_IN_PERIOD[p]))) + sum(m.zone_demand_mw[z, t] * m.tp_weight[t] for t in m.TPS_IN_PERIOD[p]) + ), + ) def define_dynamic_components(mod): @@ -129,12 +131,12 @@ def define_dynamic_components(mod): mod.Zone_Energy_Balance = Constraint( mod.ZONE_TIMEPOINTS, rule=lambda m, z, t: ( - sum( - getattr(m, component)[z, t] - for component in m.Zone_Power_Injections - ) == sum( - getattr(m, component)[z, t] - for component in m.Zone_Power_Withdrawals))) + sum(getattr(m, component)[z, t] for component in m.Zone_Power_Injections) + == sum( + getattr(m, component)[z, t] for component in m.Zone_Power_Withdrawals + ) + ), + ) def load_inputs(mod, switch_data, inputs_dir): @@ -162,20 +164,19 @@ def load_inputs(mod, switch_data, inputs_dir): # column names, be indifferent to column order, and throw an error # message if some columns are not found. 
switch_data.load_aug( - filename=os.path.join(inputs_dir, 'load_zones.csv'), - auto_select=True, + filename=os.path.join(inputs_dir, "load_zones.csv"), index=mod.LOAD_ZONES, - param=(mod.zone_ccs_distance_km, mod.zone_dbid)) + param=(mod.zone_ccs_distance_km, mod.zone_dbid), + ) switch_data.load_aug( - filename=os.path.join(inputs_dir, 'loads.csv'), - auto_select=True, - param=(mod.zone_demand_mw)) + filename=os.path.join(inputs_dir, "loads.csv"), param=(mod.zone_demand_mw) + ) switch_data.load_aug( optional=True, - filename=os.path.join(inputs_dir, 'zone_coincident_peak_demand.csv'), + filename=os.path.join(inputs_dir, "zone_coincident_peak_demand.csv"), index=mod.EXTERNAL_COINCIDENT_PEAK_DEMAND_ZONE_PERIODS, - select=('LOAD_ZONE', 'PERIOD', 'zone_expected_coincident_peak_demand'), - param=(mod.zone_expected_coincident_peak_demand)) + param=(mod.zone_expected_coincident_peak_demand), + ) def post_solve(instance, outdir): @@ -189,13 +190,15 @@ def post_solve(instance, outdir): """ write_table( - instance, instance.LOAD_ZONES, instance.TIMEPOINTS, + instance, + instance.LOAD_ZONES, + instance.TIMEPOINTS, output_file=os.path.join(outdir, "load_balance.csv"), - headings=("load_zone", "timestamp",) + tuple( - instance.Zone_Power_Injections + - instance.Zone_Power_Withdrawals), - values=lambda m, z, t: (z, m.tp_timestamp[t],) + tuple( + headings=("load_zone", "timestamp") + + tuple(instance.Zone_Power_Injections + instance.Zone_Power_Withdrawals), + values=lambda m, z, t: (z, m.tp_timestamp[t]) + + tuple( getattr(m, component)[z, t] - for component in ( - m.Zone_Power_Injections + - m.Zone_Power_Withdrawals))) + for component in (m.Zone_Power_Injections + m.Zone_Power_Withdrawals) + ), + ) diff --git a/switch_model/balancing/operating_reserves/areas.py b/switch_model/balancing/operating_reserves/areas.py index 053965fa2..e434d7a22 100644 --- a/switch_model/balancing/operating_reserves/areas.py +++ b/switch_model/balancing/operating_reserves/areas.py @@ -1,4 +1,4 @@ -# Copyright (c) 2015-2019 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. 
""" @@ -6,8 +6,10 @@ """ import os from pyomo.environ import * +from switch_model.utilities import unique_list + +dependencies = "switch_model.timescales", "switch_model.balancing.load_zones" -dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones' def define_components(mod): """ @@ -30,15 +32,25 @@ def define_components(mod): """ - mod.zone_balancing_area = Param(mod.LOAD_ZONES, default='system_wide_balancing_area') - mod.BALANCING_AREAS = Set(initialize=lambda m: set( - m.zone_balancing_area[z] for z in m.LOAD_ZONES)) + mod.zone_balancing_area = Param( + mod.LOAD_ZONES, default="system_wide_balancing_area", within=Any + ) + mod.BALANCING_AREAS = Set( + dimen=1, + initialize=lambda m: unique_list( + m.zone_balancing_area[z] for z in m.LOAD_ZONES + ), + ) mod.ZONES_IN_BALANCING_AREA = Set( mod.BALANCING_AREAS, + dimen=1, initialize=lambda m, b: ( - z for z in m.LOAD_ZONES if m.zone_balancing_area[z] == b)) + z for z in m.LOAD_ZONES if m.zone_balancing_area[z] == b + ), + ) mod.BALANCING_AREA_TIMEPOINTS = Set( - initialize=mod.BALANCING_AREAS * mod.TIMEPOINTS) + dimen=2, initialize=mod.BALANCING_AREAS * mod.TIMEPOINTS + ) def load_inputs(mod, switch_data, inputs_dir): @@ -54,6 +66,6 @@ def load_inputs(mod, switch_data, inputs_dir): # column names, be indifferent to column order, and throw an error # message if some columns are not found. switch_data.load_aug( - filename=os.path.join(inputs_dir, 'load_zones.csv'), - auto_select=True, - param=(mod.zone_balancing_area)) + filename=os.path.join(inputs_dir, "load_zones.csv"), + param=(mod.zone_balancing_area), + ) diff --git a/switch_model/balancing/operating_reserves/spinning_reserves.py b/switch_model/balancing/operating_reserves/spinning_reserves.py index 0d302ee1d..67e2a1ec1 100644 --- a/switch_model/balancing/operating_reserves/spinning_reserves.py +++ b/switch_model/balancing/operating_reserves/spinning_reserves.py @@ -1,4 +1,4 @@ -# Copyright (c) 2015-2019 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. """ A simple and flexible model of spinning reserves that tracks the state of unit @@ -90,45 +90,57 @@ from pyomo.environ import * dependencies = ( - 'switch_model.timescales', - 'switch_model.balancing.load_zones', - 'switch_model.balancing.operating_reserves.areas', - 'switch_model.financials', - 'switch_model.energy_sources.properties', - 'switch_model.generators.core.build', - 'switch_model.generators.core.dispatch', - 'switch_model.generators.core.commit.operate', + "switch_model.timescales", + "switch_model.balancing.load_zones", + "switch_model.balancing.operating_reserves.areas", + "switch_model.financials", + "switch_model.energy_sources.properties", + "switch_model.generators.core.build", + "switch_model.generators.core.dispatch", + "switch_model.generators.core.commit.operate", ) def define_arguments(argparser): group = argparser.add_argument_group(__name__) - group.add_argument('--unit-contingency', default=False, - dest='unit_contingency', action='store_true', - help=("This will enable an n-1 contingency based on a single unit of " - "a generation project falling offline. 
Note: This create a new " - "binary variable for each project and timepoint that has a " - "proj_unit_size specified.") + group.add_argument( + "--unit-contingency", + default=False, + dest="unit_contingency", + action="store_true", + help=( + "This will enable an n-1 contingency based on a single unit of " + "a generation project falling offline. Note: This create a new " + "binary variable for each project and timepoint that has a " + "proj_unit_size specified." + ), ) - group.add_argument('--project-contingency', default=False, - dest='project_contingency', action='store_true', - help=("This will enable an n-1 contingency based on the entire " - "committed capacity of a generation project falling offline. " - "Unlike unit contingencies, this is a purely linear expression.") + group.add_argument( + "--project-contingency", + default=False, + dest="project_contingency", + action="store_true", + help=( + "This will enable an n-1 contingency based on the entire " + "committed capacity of a generation project falling offline. " + "Unlike unit contingencies, this is a purely linear expression." + ), ) - group.add_argument('--spinning-requirement-rule', default=None, - dest='spinning_requirement_rule', - choices = ["Hawaii", "3+5"], - help=("Choose rules for spinning reserves requirements as a function " - "of variable renewable power and load. Hawaii uses rules " - "bootstrapped from the GE RPS study, and '3+5' requires 3% of " - "load and 5% of variable renewable output, based on the heuristic " - "described in the 2010 Western Wind and Solar Integration Study.") + group.add_argument( + "--spinning-requirement-rule", + default=None, + dest="spinning_requirement_rule", + choices=["Hawaii", "3+5"], + help=( + "Choose rules for spinning reserves requirements as a function " + "of variable renewable power and load. Hawaii uses rules " + "bootstrapped from the GE RPS study, and '3+5' requires 3%% of " + "load and 5%% of variable renewable output, based on the heuristic " + "described in the 2010 Western Wind and Solar Integration Study." + ), ) - - def define_dynamic_lists(m): """ Spinning_Reserve_Up_Requirements and Spinning_Reserve_Down_Requirements @@ -189,37 +201,46 @@ def gen_unit_contingency(m): # justify the duplication because I don't think discrete unit commitment # should be a prerequisite for this functionality. m.UNIT_CONTINGENCY_DISPATCH_POINTS = Set( + dimen=2, initialize=m.GEN_TPS, - filter=lambda m, g, tp: g in m.DISCRETELY_SIZED_GENS + filter=lambda m, g, tp: g in m.DISCRETELY_SIZED_GENS, ) m.GenIsCommitted = Var( m.UNIT_CONTINGENCY_DISPATCH_POINTS, within=Binary, - doc="Stores the status of unit committment as a binary variable." 
+ doc="Stores the status of unit committment as a binary variable.", ) m.Enforce_GenIsCommitted = Constraint( m.UNIT_CONTINGENCY_DISPATCH_POINTS, - rule=lambda m, g, tp: - m.CommitGen[g, tp] <= m.GenIsCommitted[g, tp] * ( - m._gen_max_cap_for_binary_constraints - if g not in m.CAPACITY_LIMITED_GENS - else m.gen_capacity_limit_mw[g] - ) + rule=lambda m, g, tp: m.CommitGen[g, tp] + <= m.GenIsCommitted[g, tp] + * ( + m._gen_max_cap_for_binary_constraints + if g not in m.CAPACITY_LIMITED_GENS + else m.gen_capacity_limit_mw[g] + ), ) m.GenUnitLargestContingency = Var( m.BALANCING_AREA_TIMEPOINTS, - doc="Largest generating unit that could drop offline.") + doc="Largest generating unit that could drop offline.", + ) + def Enforce_GenUnitLargestContingency_rule(m, g, t): b = m.zone_balancing_area[m.gen_load_zone[g]] - return (m.GenUnitLargestContingency[b,t] >= - m.GenIsCommitted[g, t] * m.gen_unit_size[g]) + return ( + m.GenUnitLargestContingency[b, t] + >= m.GenIsCommitted[g, t] * m.gen_unit_size[g] + ) + m.Enforce_GenUnitLargestContingency = Constraint( m.UNIT_CONTINGENCY_DISPATCH_POINTS, rule=Enforce_GenUnitLargestContingency_rule, - doc=("Force GenUnitLargestContingency to be at least as big as the " - "maximum unit contingency.") + doc=( + "Force GenUnitLargestContingency to be at least as big as the " + "maximum unit contingency." + ), ) - m.Spinning_Reserve_Contingencies.append('GenUnitLargestContingency') + m.Spinning_Reserve_Contingencies.append("GenUnitLargestContingency") def gen_project_contingency(m): @@ -245,21 +266,28 @@ def gen_project_contingency(m): """ m.GenProjectLargestContingency = Var( m.BALANCING_AREA_TIMEPOINTS, - doc="Largest generating project that could drop offline.") + doc="Largest generating project that could drop offline.", + ) + def Enforce_GenProjectLargestContingency_rule(m, g, t): b = m.zone_balancing_area[m.gen_load_zone[g]] if m.gen_can_provide_spinning_reserves[g]: - return m.GenProjectLargestContingency[b, t] >= \ - m.DispatchGen[g, t] + m.CommitGenSpinningReservesUp[g, t] + return ( + m.GenProjectLargestContingency[b, t] + >= m.DispatchGen[g, t] + m.CommitGenSpinningReservesUp[g, t] + ) else: return m.GenProjectLargestContingency[b, t] >= m.DispatchGen[g, t] + m.Enforce_GenProjectLargestContingency = Constraint( m.GEN_TPS, rule=Enforce_GenProjectLargestContingency_rule, - doc=("Force GenProjectLargestContingency to be at least as big as the " - "maximum generation project contingency.") + doc=( + "Force GenProjectLargestContingency to be at least as big as the " + "maximum generation project contingency." + ), ) - m.Spinning_Reserve_Contingencies.append('GenProjectLargestContingency') + m.Spinning_Reserve_Contingencies.append("GenProjectLargestContingency") def hawaii_spinning_reserve_requirements(m): @@ -273,21 +301,28 @@ def hawaii_spinning_reserve_requirements(m): # fit_renewable_reserves.ipynb ) # TODO: supply these parameters in input files m.var_gen_power_reserve = Param( - m.VARIABLE_GENS, default=1.0, - doc=("Spinning reserves required to back up variable renewable " - "generators, as fraction of potential output.") + m.VARIABLE_GENS, + default=1.0, + within=NonNegativeReals, + doc=( + "Spinning reserves required to back up variable renewable " + "generators, as fraction of potential output." 
+ ), ) + def var_gen_cap_reserve_limit_default(m, g): - if m.gen_energy_source[g] == 'Solar': + if m.gen_energy_source[g] == "Solar": return 0.21288916 - elif m.gen_energy_source[g] == 'Wind': + elif m.gen_energy_source[g] == "Wind": return 0.21624407 else: raise RuntimeError() + m.var_gen_cap_reserve_limit = Param( m.VARIABLE_GENS, default=var_gen_cap_reserve_limit_default, - doc="Maximum spinning reserves required, as fraction of installed capacity" + within=NonNegativeReals, + doc="Maximum spinning reserves required, as fraction of installed capacity", ) m.HawaiiVarGenUpSpinningReserveRequirement = Expression( m.BALANCING_AREA_TIMEPOINTS, @@ -295,25 +330,33 @@ def var_gen_cap_reserve_limit_default(m, g): m.GenCapacityInTP[g, t] * min( m.var_gen_power_reserve[g] * m.gen_max_capacity_factor[g, t], - m.var_gen_cap_reserve_limit[g] + m.var_gen_cap_reserve_limit[g], ) for g in m.VARIABLE_GENS - if (g, t) in m.VARIABLE_GEN_TPS and b == m.zone_balancing_area[m.gen_load_zone[g]]), - doc="The spinning reserves for backing up variable generation with Hawaii rules." + if (g, t) in m.VARIABLE_GEN_TPS + and b == m.zone_balancing_area[m.gen_load_zone[g]] + ), + doc="The spinning reserves for backing up variable generation with Hawaii rules.", + ) + m.Spinning_Reserve_Up_Requirements.append( + "HawaiiVarGenUpSpinningReserveRequirement" ) - m.Spinning_Reserve_Up_Requirements.append('HawaiiVarGenUpSpinningReserveRequirement') def HawaiiLoadDownSpinningReserveRequirement_rule(m, b, t): try: load = m.WithdrawFromCentralGrid except AttributeError: load = m.lz_demand_mw - return 0.10 * sum(load[z, t] for z in m.LOAD_ZONES if b == m.zone_balancing_area[z]) + return 0.10 * sum( + load[z, t] for z in m.LOAD_ZONES if b == m.zone_balancing_area[z] + ) + m.HawaiiLoadDownSpinningReserveRequirement = Expression( - m.BALANCING_AREA_TIMEPOINTS, - rule=HawaiiLoadDownSpinningReserveRequirement_rule + m.BALANCING_AREA_TIMEPOINTS, rule=HawaiiLoadDownSpinningReserveRequirement_rule + ) + m.Spinning_Reserve_Down_Requirements.append( + "HawaiiLoadDownSpinningReserveRequirement" ) - m.Spinning_Reserve_Down_Requirements.append('HawaiiLoadDownSpinningReserveRequirement') def nrel_3_5_spinning_reserve_requirements(m): @@ -327,22 +370,28 @@ def nrel_3_5_spinning_reserve_requirements(m): be set to WithdrawFromCentralGrid. Otherwise load will be set to lz_demand_mw. 
""" + def NREL35VarGenSpinningReserveRequirement_rule(m, b, t): try: load = m.WithdrawFromCentralGrid except AttributeError: load = m.lz_demand_mw - return (0.03 * sum(load[z, t] for z in m.LOAD_ZONES - if b == m.zone_balancing_area[z]) - + 0.05 * sum(m.DispatchGen[g, t] for g in m.VARIABLE_GENS - if (g, t) in m.VARIABLE_GEN_TPS and - b == m.zone_balancing_area[m.gen_load_zone[g]])) + return 0.03 * sum( + load[z, t] for z in m.LOAD_ZONES if b == m.zone_balancing_area[z] + ) + 0.05 * sum( + m.DispatchGen[g, t] + for g in m.VARIABLE_GENS + if (g, t) in m.VARIABLE_GEN_TPS + and b == m.zone_balancing_area[m.gen_load_zone[g]] + ) + m.NREL35VarGenSpinningReserveRequirement = Expression( - m.BALANCING_AREA_TIMEPOINTS, - rule=NREL35VarGenSpinningReserveRequirement_rule + m.BALANCING_AREA_TIMEPOINTS, rule=NREL35VarGenSpinningReserveRequirement_rule + ) + m.Spinning_Reserve_Up_Requirements.append("NREL35VarGenSpinningReserveRequirement") + m.Spinning_Reserve_Down_Requirements.append( + "NREL35VarGenSpinningReserveRequirement" ) - m.Spinning_Reserve_Up_Requirements.append('NREL35VarGenSpinningReserveRequirement') - m.Spinning_Reserve_Down_Requirements.append('NREL35VarGenSpinningReserveRequirement') def define_components(m): @@ -365,87 +414,114 @@ def define_components(m): corresponding variable for downward spinning reserves. CommitGenSpinningReservesUp_Limit[(g,t) in SPINNING_RESERVE_GEN_TPS] and - CommitGenSpinningReservesDown_Limit constraint the CommitGenSpinningReserves - variables based on DispatchSlackUp and DispatchSlackDown. + CommitGenSpinningReservesDown_Limit constrain the + CommitGenSpinningReserves variables based on DispatchSlackUp and + DispatchSlackDown (and ChargeStorage, as applicable). CommittedSpinningReserveUp[(b,t) in BALANCING_AREA_TIMEPOINTS] and CommittedSpinningReserveDown are expressions summarizing the CommitGenSpinningReserves variables for generators within each balancing area. + CommitGenSpinningReservesUp and CommitGenSpinningReservesDown are + variables instead of aliases to DispatchSlackUp & DispatchSlackDown + because they may need to take on lower values to reduce the + project-level contigencies, especially when discrete unit commitment is + enabled, and committed capacity may exceed the amount of capacity that + is strictly needed. Having these as variables also flags them for + automatic export in model dumps and tab files, and opens up the + possibility of further customizations like adding variable costs for + spinning reserve provision. + Depending on the configuration parameters unit_contingency, project_contingency and spinning_requirement_rule, other components may be added by other functions which are documented above. """ - m.contingency_safety_factor = Param(default=2.0, - doc=("The spinning reserve requiremet will be set to this value " - "times the maximum contingency. This defaults to 2 to ensure " - "that the largest generator cannot be providing contingency " - "reserves for itself.")) + m.contingency_safety_factor = Param( + default=2.0, + within=NonNegativeReals, + doc=( + "The spinning reserve requiremet will be set to this value " + "times the maximum contingency. This defaults to 2 to ensure " + "that the largest generator cannot be providing contingency " + "reserves for itself." 
+ ), + ) m.gen_can_provide_spinning_reserves = Param( m.GENERATION_PROJECTS, within=Boolean, default=True ) m.SPINNING_RESERVE_GEN_TPS = Set( dimen=2, initialize=m.GEN_TPS, - filter=lambda m, g, t: m.gen_can_provide_spinning_reserves[g]) - # CommitGenSpinningReservesUp and CommitGenSpinningReservesDown are - # variables instead of aliases to DispatchSlackUp & DispatchSlackDown - # because they may need to take on lower values to reduce the - # project-level contigencies, especially when discrete unit commitment is - # enabled, and committed capacity may exceed the amount of capacity that - # is strictly needed. Having these as variables also flags them for - # automatic export in model dumps and tab files, and opens up the - # possibility of further customizations like adding variable costs for - # spinning reserve provision. + filter=lambda m, g, t: m.gen_can_provide_spinning_reserves[g], + ) m.CommitGenSpinningReservesUp = Var( - m.SPINNING_RESERVE_GEN_TPS, - within=NonNegativeReals + m.SPINNING_RESERVE_GEN_TPS, within=NonNegativeReals ) m.CommitGenSpinningReservesDown = Var( + m.SPINNING_RESERVE_GEN_TPS, within=NonNegativeReals + ) + m.CommitGenSpinningReservesSlackUp = Var( m.SPINNING_RESERVE_GEN_TPS, - within=NonNegativeReals + within=NonNegativeReals, + doc="Denotes the upward slack in spinning reserves that could be used " + "for quickstart reserves, or possibly other reserve products.", ) m.CommitGenSpinningReservesUp_Limit = Constraint( m.SPINNING_RESERVE_GEN_TPS, - rule=lambda m, g, t: \ - m.CommitGenSpinningReservesUp[g,t] <= m.DispatchSlackUp[g, t] + rule=lambda m, g, t: ( + m.CommitGenSpinningReservesUp[g, t] + + m.CommitGenSpinningReservesSlackUp[g, t] + == m.DispatchSlackUp[g, t] + + # storage can give more up response by stopping charging + (m.ChargeStorage[g, t] if g in getattr(m, "STORAGE_GENS", []) else 0.0) + ), ) m.CommitGenSpinningReservesDown_Limit = Constraint( m.SPINNING_RESERVE_GEN_TPS, - rule=lambda m, g, t: \ - m.CommitGenSpinningReservesDown[g,t] <= m.DispatchSlackDown[g, t] + rule=lambda m, g, t: m.CommitGenSpinningReservesDown[g, t] + <= m.DispatchSlackDown[g, t] + + # storage could give more down response by raising ChargeStorage + # to the maximum rate + ( + ( + m.DispatchUpperLimit[g, t] * m.gen_store_to_release_ratio[g] + - m.ChargeStorage[g, t] + ) + if g in getattr(m, "STORAGE_GENS", []) + else 0.0 + ), ) # Sum of spinning reserve capacity per balancing area and timepoint.. 
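For intuition about the storage terms in the two limit constraints above: a storage project can offer extra upward response by curtailing charging, and extra downward response by charging up to its maximum rate. A minimal standalone sketch of that headroom accounting (plain Python, not part of this diff; all numbers are hypothetical):

    # Hypothetical operating point for one storage project in one timepoint.
    dispatch_slack_up = 20.0    # MW of committed capacity above current dispatch
    dispatch_slack_down = 30.0  # MW of dispatch above the minimum-load point
    charge_storage = 15.0       # MW currently drawn from the grid to charge
    max_charge_rate = 50.0      # DispatchUpperLimit * gen_store_to_release_ratio

    # Up limit: reserves + slack == DispatchSlackUp + ChargeStorage, so the total
    # upward headroom split between the two variables is:
    up_headroom = dispatch_slack_up + charge_storage  # 35.0 MW
    # Down limit: reserves <= DispatchSlackDown + unused charging room:
    down_headroom = dispatch_slack_down + (max_charge_rate - charge_storage)  # 65.0 MW
    assert (up_headroom, down_headroom) == (35.0, 65.0)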
m.CommittedSpinningReserveUp = Expression( m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, b, t: \ - sum(m.CommitGenSpinningReservesUp[g, t] - for z in m.ZONES_IN_BALANCING_AREA[b] - for g in m.GENS_IN_ZONE[z] - if (g,t) in m.SPINNING_RESERVE_GEN_TPS - ) + rule=lambda m, b, t: sum( + m.CommitGenSpinningReservesUp[g, t] + for z in m.ZONES_IN_BALANCING_AREA[b] + for g in m.GENS_IN_ZONE[z] + if (g, t) in m.SPINNING_RESERVE_GEN_TPS + ), ) - m.Spinning_Reserve_Up_Provisions.append('CommittedSpinningReserveUp') + m.Spinning_Reserve_Up_Provisions.append("CommittedSpinningReserveUp") m.CommittedSpinningReserveDown = Expression( m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, b, t: \ - sum(m.CommitGenSpinningReservesDown[g, t] - for z in m.ZONES_IN_BALANCING_AREA[b] - for g in m.GENS_IN_ZONE[z] - if (g,t) in m.SPINNING_RESERVE_GEN_TPS - ) + rule=lambda m, b, t: sum( + m.CommitGenSpinningReservesDown[g, t] + for z in m.ZONES_IN_BALANCING_AREA[b] + for g in m.GENS_IN_ZONE[z] + if (g, t) in m.SPINNING_RESERVE_GEN_TPS + ), ) - m.Spinning_Reserve_Down_Provisions.append('CommittedSpinningReserveDown') + m.Spinning_Reserve_Down_Provisions.append("CommittedSpinningReserveDown") if m.options.unit_contingency: gen_unit_contingency(m) if m.options.project_contingency: gen_project_contingency(m) - if m.options.spinning_requirement_rule == 'Hawaii': + if m.options.spinning_requirement_rule == "Hawaii": hawaii_spinning_reserve_requirements(m) - elif m.options.spinning_requirement_rule == '3+5': + elif m.options.spinning_requirement_rule == "3+5": nrel_3_5_spinning_reserve_requirements(m) @@ -475,41 +551,48 @@ def define_dynamic_components(m): """ m.MaximumContingency = Var( m.BALANCING_AREA_TIMEPOINTS, - doc=("Maximum of the registered Spinning_Reserve_Contingencies, after " - "multiplying by contingency_safety_factor.") + doc=( + "Maximum of the registered Spinning_Reserve_Contingencies, after " + "multiplying by contingency_safety_factor." + ), ) m.BALANCING_AREA_TIMEPOINT_CONTINGENCIES = Set( + dimen=3, initialize=m.BALANCING_AREA_TIMEPOINTS * m.Spinning_Reserve_Contingencies, - doc=("The set of spinning reserve contingencies, copied from the " - "dynamic list Spinning_Reserve_Contingencies to simplify the " - "process of defining one constraint per contingency in the list.") + doc=( + "The set of spinning reserve contingencies, copied from the " + "dynamic list Spinning_Reserve_Contingencies to simplify the " + "process of defining one constraint per contingency in the list." 
+ ), ) m.Enforce_MaximumContingency = Constraint( m.BALANCING_AREA_TIMEPOINT_CONTINGENCIES, - rule=lambda m, b, t, contingency: - m.MaximumContingency[b, t] >= m.contingency_safety_factor * getattr(m, contingency)[b, t] + rule=lambda m, b, t, contingency: m.MaximumContingency[b, t] + >= m.contingency_safety_factor * getattr(m, contingency)[b, t], ) - m.Spinning_Reserve_Up_Requirements.append('MaximumContingency') + m.Spinning_Reserve_Up_Requirements.append("MaximumContingency") m.Satisfy_Spinning_Reserve_Up_Requirement = Constraint( m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, b, t: \ - sum(getattr(m, requirement)[b,t] - for requirement in m.Spinning_Reserve_Up_Requirements - ) <= - sum(getattr(m, provision)[b,t] - for provision in m.Spinning_Reserve_Up_Provisions - ) + rule=lambda m, b, t: sum( + getattr(m, requirement)[b, t] + for requirement in m.Spinning_Reserve_Up_Requirements + ) + <= sum( + getattr(m, provision)[b, t] + for provision in m.Spinning_Reserve_Up_Provisions + ), ) m.Satisfy_Spinning_Reserve_Down_Requirement = Constraint( m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, b, t: \ - sum(getattr(m, requirement)[b,t] - for requirement in m.Spinning_Reserve_Down_Requirements - ) <= - sum(getattr(m, provision)[b,t] - for provision in m.Spinning_Reserve_Down_Provisions - ) + rule=lambda m, b, t: sum( + getattr(m, requirement)[b, t] + for requirement in m.Spinning_Reserve_Down_Requirements + ) + <= sum( + getattr(m, provision)[b, t] + for provision in m.Spinning_Reserve_Down_Provisions + ), ) @@ -517,7 +600,7 @@ def load_inputs(m, switch_data, inputs_dir): """ All files & columns are optional. - generation_projects_info.csv + gen_info.csv GENERATION_PROJECTS, ... gen_can_provide_spinning_reserves spinning_reserve_params.csv may override the default value of @@ -525,13 +608,12 @@ header row and one data row. """ switch_data.load_aug( - filename=os.path.join(inputs_dir, 'generation_projects_info.csv'), - auto_select=True, - optional_params=['gen_can_provide_spinning_reserves'], - param=(m.gen_can_provide_spinning_reserves) + filename=os.path.join(inputs_dir, "gen_info.csv"), + optional_params=["gen_can_provide_spinning_reserves"], + param=(m.gen_can_provide_spinning_reserves), ) switch_data.load_aug( - filename=os.path.join(inputs_dir, 'spinning_reserve_params.csv'), - optional=True, auto_select=True, - param=(m.contingency_safety_factor,) + filename=os.path.join(inputs_dir, "spinning_reserve_params.csv"), + optional=True, + param=(m.contingency_safety_factor,), ) diff --git a/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py b/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py index 30dc6deb1..2bd919d6d 100644 --- a/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py +++ b/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py @@ -1,76 +1,94 @@ -# Copyright (c) 2015-2019 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. """ This is an advanced version of the basic spinning_reserves reserves module, and can be used in place of it (not in addition to). + +Specifically, this module can differentiate spinning reserve products into regulating reserves, contingency reserves, and potentially other reserve types.
""" import os from collections import defaultdict from pyomo.environ import * -from switch_model.utilities import iteritems +from switch_model.utilities import iteritems, unique_list dependencies = ( - 'switch_model.timescales', - 'switch_model.balancing.load_zones', - 'switch_model.balancing.operating_reserves.areas', - 'switch_model.financials', - 'switch_model.energy_sources.properties', - 'switch_model.generators.core.build', - 'switch_model.generators.core.dispatch', - 'switch_model.generators.core.commit.operate', + "switch_model.timescales", + "switch_model.balancing.load_zones", + "switch_model.balancing.operating_reserves.areas", + "switch_model.financials", + "switch_model.energy_sources.properties", + "switch_model.generators.core.build", + "switch_model.generators.core.dispatch", + "switch_model.generators.core.commit.operate", ) def define_arguments(argparser): group = argparser.add_argument_group(__name__) - group.add_argument('--unit-contingency', default=False, action='store_true', - help=("This will enable an n-1 contingency based on a single unit of " - "a generation project falling offline. Note: This create a new " - "binary variable for each timepoint for each generation project " - "that has a gen_unit_size specified.") - ) - group.add_argument('--project-contingency', default=False, action='store_true', - help=("This will enable an n-1 contingency based on the entire " - "committed capacity of a generation project falling offline. " - "Unlike unit contingencies, this is a purely linear expression.") - ) - group.add_argument('--fixed-contingency', type=float, default=0.0, - help=("Add a fixed generator contingency reserve margin, specified in MW. " - "This can be used alone or in combination with the other " - "contingency options.") - ) - group.add_argument('--spinning-requirement-rule', default=None, - choices = ["Hawaii", "3+5", "none"], - help=("Choose rules for spinning reserves requirements as a function " - "of variable renewable power and load. Hawaii uses rules " - "bootstrapped from the GE RPS study, and '3+5' requires 3%% of " - "load and 5%% of variable renewable output, based on the heuristic " - "described in the 2010 Western Wind and Solar Integration Study. " - "Specify 'none' if applying your own rules instead. " - ) + group.add_argument( + "--unit-contingency", + default=False, + action="store_true", + help=( + "This will enable an n-1 contingency based on a single unit of " + "a generation project falling offline. Note: This create a new " + "binary variable for each timepoint for each generation project " + "that has a gen_unit_size specified." + ), + ) + group.add_argument( + "--project-contingency", + default=False, + action="store_true", + help=( + "This will enable an n-1 contingency based on the entire " + "committed capacity of a generation project falling offline. " + "Unlike unit contingencies, this is a purely linear expression." + ), + ) + group.add_argument( + "--fixed-contingency", + type=float, + default=0.0, + help=( + "Add a fixed generator contingency reserve margin, specified in MW. " + "This can be used alone or in combination with the other " + "contingency options." + ), + ) + group.add_argument( + "--spinning-requirement-rule", + default=None, + choices=["Hawaii", "3+5", "none"], + help=( + "Choose rules for spinning reserves requirements as a function " + "of variable renewable power and load. 
Hawaii uses rules " + "bootstrapped from the GE RPS study, and '3+5' requires 3%% of " + "load and 5%% of variable renewable output, based on the heuristic " + "described in the 2010 Western Wind and Solar Integration Study. " + "Specify 'none' if applying your own rules instead. " + ), ) # TODO: define these inputs in data files group.add_argument( - '--contingency-reserve-type', dest='contingency_reserve_type', - default='spinning', - help= - "Type of reserves to use to meet the contingency reserve requirements " - "defined for generation projects and sometimes for loss-of-load events " - "(e.g., 'contingency' or 'spinning'); default is 'spinning'." + "--contingency-reserve-type", + dest="contingency_reserve_type", + default="spinning", + help="Type of reserves to use to meet the contingency reserve requirements " + "defined for generation projects and sometimes for loss-of-load events " + "(e.g., 'contingency' or 'spinning'); default is 'spinning'.", ) group.add_argument( - '--regulating-reserve-type', dest='regulating_reserve_type', - default='spinning', - help= - "Type of reserves to use to meet the regulating reserve requirements " - "defined by the spinning requirements rule (e.g., 'spinning' or " - "'regulation'); default is 'spinning'." + "--regulating-reserve-type", + dest="regulating_reserve_type", + default="spinning", + help="Type of reserves to use to meet the regulating reserve requirements " + "defined by the spinning requirements rule (e.g., 'spinning' or " + "'regulation'); default is 'spinning'.", ) - - def define_dynamic_lists(m): """ Spinning_Reserve_Requirements and Spinning_Reserve_Provisions are @@ -121,9 +139,11 @@ def gen_fixed_contingency(m): """ m.GenFixedContingency = Param( m.BALANCING_AREA_TIMEPOINTS, - initialize=lambda m: m.options.fixed_contingency + initialize=lambda m: m.options.fixed_contingency, + within=NonNegativeReals, ) - m.Spinning_Reserve_Up_Contingencies.append('GenFixedContingency') + m.Spinning_Reserve_Up_Contingencies.append("GenFixedContingency") + def gen_unit_contingency(m): """ @@ -161,40 +181,50 @@ def gen_unit_contingency(m): # should be a prerequisite for this functionality. m.UNIT_CONTINGENCY_DISPATCH_POINTS = Set( dimen=2, - initialize=lambda m: - [(g, t) for g in m.DISCRETELY_SIZED_GENS for t in m.TPS_FOR_GEN[g]] + initialize=lambda m: [ + (g, t) for g in m.DISCRETELY_SIZED_GENS for t in m.TPS_FOR_GEN[g] + ], ) m.GenIsCommitted = Var( m.UNIT_CONTINGENCY_DISPATCH_POINTS, within=Binary, - doc="Stores the status of unit committment as a binary variable." + doc="Stores the status of unit committment as a binary variable.", ) m.Enforce_GenIsCommitted = Constraint( m.UNIT_CONTINGENCY_DISPATCH_POINTS, - rule=lambda m, g, tp: - m.CommitGen[g, tp] <= m.GenIsCommitted[g, tp] * ( - m._gen_max_cap_for_binary_constraints - if g not in m.CAPACITY_LIMITED_GENS - else m.gen_capacity_limit_mw[g] - ) + rule=lambda m, g, tp: m.CommitGen[g, tp] + <= m.GenIsCommitted[g, tp] + * ( + m._gen_max_cap_for_binary_constraints + if g not in m.CAPACITY_LIMITED_GENS + else m.gen_capacity_limit_mw[g] + ), ) # TODO: would it be faster to add all generator contingencies directly # to Spinning_Reserve_Contingencies instead of introducing this intermediate # variable and constraint? 
m.GenUnitLargestContingency = Var( - m.BALANCING_AREA_TIMEPOINTS, within=NonNegativeReals, - doc="Largest generating unit that could drop offline.") + m.BALANCING_AREA_TIMEPOINTS, + within=NonNegativeReals, + doc="Largest generating unit that could drop offline.", + ) + def Enforce_GenUnitLargestContingency_rule(m, g, t): b = m.zone_balancing_area[m.gen_load_zone[g]] - return (m.GenUnitLargestContingency[b,t] >= - m.GenIsCommitted[g, t] * m.gen_unit_size[g]) + return ( + m.GenUnitLargestContingency[b, t] + >= m.GenIsCommitted[g, t] * m.gen_unit_size[g] + ) + m.Enforce_GenUnitLargestContingency = Constraint( m.UNIT_CONTINGENCY_DISPATCH_POINTS, rule=Enforce_GenUnitLargestContingency_rule, - doc=("Force GenUnitLargestContingency to be at least as big as the " - "maximum unit contingency.") + doc=( + "Force GenUnitLargestContingency to be at least as big as the " + "maximum unit contingency." + ), ) - m.Spinning_Reserve_Up_Contingencies.append('GenUnitLargestContingency') + m.Spinning_Reserve_Up_Contingencies.append("GenUnitLargestContingency") def gen_project_contingency(m): @@ -220,24 +250,33 @@ def gen_project_contingency(m): """ m.GenProjectLargestContingency = Var( m.BALANCING_AREA_TIMEPOINTS, - doc="Largest generating project that could drop offline.") + doc="Largest generating project that could drop offline.", + ) + def Enforce_GenProjectLargestContingency_rule(m, g, t): b = m.zone_balancing_area[m.gen_load_zone[g]] if g in m.SPINNING_RESERVE_CAPABLE_GENS: total_up_reserves = sum( m.CommitGenSpinningReservesUp[rt, g, t] - for rt in m.SPINNING_RESERVE_TYPES_FOR_GEN[g]) - return m.GenProjectLargestContingency[b, t] >= \ - m.DispatchGen[g, t] + total_up_reserves + for rt in m.SPINNING_RESERVE_TYPES_FOR_GEN[g] + ) + return ( + m.GenProjectLargestContingency[b, t] + >= m.DispatchGen[g, t] + total_up_reserves + ) else: return m.GenProjectLargestContingency[b, t] >= m.DispatchGen[g, t] + m.Enforce_GenProjectLargestContingency = Constraint( m.GEN_TPS, rule=Enforce_GenProjectLargestContingency_rule, - doc=("Force GenProjectLargestContingency to be at least as big as the " - "maximum generation project contingency.") + doc=( + "Force GenProjectLargestContingency to be at least as big as the " + "maximum generation project contingency." + ), ) - m.Spinning_Reserve_Up_Contingencies.append('GenProjectLargestContingency') + m.Spinning_Reserve_Up_Contingencies.append("GenProjectLargestContingency") + def hawaii_spinning_reserve_requirements(m): # These parameters were found by regressing the reserve requirements from @@ -253,23 +292,32 @@ def hawaii_spinning_reserve_requirements(m): # (could eventually use some linearized quadratic formulation based # on load, magnitude of renewables and geographic dispersion of renewables) m.var_gen_power_reserve = Param( - m.VARIABLE_GENS, default=1.0, - doc=("Spinning reserves required to back up variable renewable " - "generators, as fraction of potential output.") + m.VARIABLE_GENS, + default=1.0, + doc=( + "Spinning reserves required to back up variable renewable " + "generators, as fraction of potential output." 
+ ), + within=NonNegativeReals, ) + def var_gen_cap_reserve_limit_default(m, g): - if m.gen_energy_source[g] == 'SUN': + if m.gen_energy_source[g] == "SUN": return 0.21288916 - elif m.gen_energy_source[g] == 'WND': + elif m.gen_energy_source[g] == "WND": return 0.21624407 else: raise ValueError( - "Unable to calculate reserve requirement for energy source {}".format(m.gen_energy_source[g]) + "Unable to calculate reserve requirement for energy source {}".format( + m.gen_energy_source[g] + ) ) + m.var_gen_cap_reserve_limit = Param( m.VARIABLE_GENS, default=var_gen_cap_reserve_limit_default, - doc="Maximum spinning reserves required, as fraction of installed capacity" + within=NonNegativeReals, + doc="Maximum spinning reserves required, as fraction of installed capacity", ) m.HawaiiVarGenUpSpinningReserveRequirement = Expression( [m.options.regulating_reserve_type], @@ -278,17 +326,20 @@ def var_gen_cap_reserve_limit_default(m, g): m.GenCapacityInTP[g, t] * min( m.var_gen_power_reserve[g] * m.gen_max_capacity_factor[g, t], - m.var_gen_cap_reserve_limit[g] + m.var_gen_cap_reserve_limit[g], ) for z in m.ZONES_IN_BALANCING_AREA[b] for g in m.VARIABLE_GENS_IN_ZONE[z] - if (g, t) in m.VARIABLE_GEN_TPS), - doc="The spinning reserves for backing up variable generation with Hawaii rules." + if (g, t) in m.VARIABLE_GEN_TPS + ), + doc="The spinning reserves for backing up variable generation with Hawaii rules.", + ) + m.Spinning_Reserve_Up_Requirements.append( + "HawaiiVarGenUpSpinningReserveRequirement" ) - m.Spinning_Reserve_Up_Requirements.append('HawaiiVarGenUpSpinningReserveRequirement') # Calculate and register loss-of-load (down) contingencies - if hasattr(m, 'WithdrawFromCentralGrid'): + if hasattr(m, "WithdrawFromCentralGrid"): rule = lambda m, ba, tp: 0.10 * sum( m.WithdrawFromCentralGrid[z, tp] for z in m.ZONES_IN_BALANCING_AREA[ba] ) @@ -297,10 +348,8 @@ def var_gen_cap_reserve_limit_default(m, g): rule = lambda m, ba, tp: 0.10 * sum( m.zone_demand_mw[z, tp] for z in m.ZONES_IN_BALANCING_AREA[ba] ) - m.HawaiiLoadDownContingency = Expression( - m.BALANCING_AREA_TIMEPOINTS, rule=rule - ) - m.Spinning_Reserve_Down_Contingencies.append('HawaiiLoadDownContingency') + m.HawaiiLoadDownContingency = Expression(m.BALANCING_AREA_TIMEPOINTS, rule=rule) + m.Spinning_Reserve_Down_Contingencies.append("HawaiiLoadDownContingency") def nrel_3_5_spinning_reserve_requirements(m): @@ -314,26 +363,30 @@ def nrel_3_5_spinning_reserve_requirements(m): be set to WithdrawFromCentralGrid. Otherwise load will be set to zone_demand_mw. 
""" + def NREL35VarGenSpinningReserveRequirement_rule(m, rt, b, t): try: load = m.WithdrawFromCentralGrid except AttributeError: load = m.zone_demand_mw - return ( - 0.03 * sum(load[z, t] - for z in m.LOAD_ZONES - if b == m.zone_balancing_area[z]) - + 0.05 * sum(m.DispatchGen[g, t] - for g in m.VARIABLE_GENS - if (g, t) in m.VARIABLE_GEN_TPS and - b == m.zone_balancing_area[m.gen_load_zone[g]])) + return 0.03 * sum( + load[z, t] for z in m.LOAD_ZONES if b == m.zone_balancing_area[z] + ) + 0.05 * sum( + m.DispatchGen[g, t] + for g in m.VARIABLE_GENS + if (g, t) in m.VARIABLE_GEN_TPS + and b == m.zone_balancing_area[m.gen_load_zone[g]] + ) + m.NREL35VarGenSpinningReserveRequirement = Expression( [m.options.regulating_reserve_type], m.BALANCING_AREA_TIMEPOINTS, - rule=NREL35VarGenSpinningReserveRequirement_rule + rule=NREL35VarGenSpinningReserveRequirement_rule, + ) + m.Spinning_Reserve_Up_Requirements.append("NREL35VarGenSpinningReserveRequirement") + m.Spinning_Reserve_Down_Requirements.append( + "NREL35VarGenSpinningReserveRequirement" ) - m.Spinning_Reserve_Up_Requirements.append('NREL35VarGenSpinningReserveRequirement') - m.Spinning_Reserve_Down_Requirements.append('NREL35VarGenSpinningReserveRequirement') def define_components(m): @@ -373,26 +426,33 @@ def define_components(m): project_contingency and spinning_requirement_rule, other components may be added by other functions which are documented above. """ - m.contingency_safety_factor = Param(default=1.0, - doc=("The spinning reserve requiremet will be set to this value " - "times the maximum contingency. This defaults to 1 to provide " - "n-1 security for the largest committed generator. ")) + m.contingency_safety_factor = Param( + default=1.0, + within=NonNegativeReals, + doc=( + "The spinning reserve requirement will be set to this value " + "times the maximum contingency. This defaults to 1 to provide " + "n-1 security for the largest committed generator. " + ), + ) m.GEN_SPINNING_RESERVE_TYPES = Set(dimen=2) m.gen_reserve_type_max_share = Param( - m.GEN_SPINNING_RESERVE_TYPES, - within=PercentFraction, - default=1.0 + m.GEN_SPINNING_RESERVE_TYPES, within=PercentFraction, default=1.0 ) # reserve types that are supplied by generation projects # and generation projects that can provide reserves # note: these are also the indexing sets of the above set arrays; maybe that could be used? 
m.SPINNING_RESERVE_TYPES_FROM_GENS = Set( - initialize=lambda m: set(rt for (g, rt) in m.GEN_SPINNING_RESERVE_TYPES) + dimen=1, + initialize=lambda m: unique_list( + rt for (g, rt) in m.GEN_SPINNING_RESERVE_TYPES + ), ) m.SPINNING_RESERVE_CAPABLE_GENS = Set( - initialize=lambda m: set(g for (g, rt) in m.GEN_SPINNING_RESERVE_TYPES) + dimen=1, + initialize=lambda m: unique_list(g for (g, rt) in m.GEN_SPINNING_RESERVE_TYPES), ) # slice GEN_SPINNING_RESERVE_TYPES both ways for later use @@ -402,56 +462,81 @@ def rule(m): for g, rt in m.GEN_SPINNING_RESERVE_TYPES: m.SPINNING_RESERVE_TYPES_FOR_GEN_dict[g].append(rt) m.GENS_FOR_SPINNING_RESERVE_TYPE_dict[rt].append(g) + m.build_spinning_reserve_indexed_sets = BuildAction(rule=rule) m.SPINNING_RESERVE_TYPES_FOR_GEN = Set( m.SPINNING_RESERVE_CAPABLE_GENS, - rule=lambda m, g: m.SPINNING_RESERVE_TYPES_FOR_GEN_dict.pop(g) + dimen=1, + initialize=lambda m, g: m.SPINNING_RESERVE_TYPES_FOR_GEN_dict.pop(g), ) m.GENS_FOR_SPINNING_RESERVE_TYPE = Set( m.SPINNING_RESERVE_TYPES_FROM_GENS, - rule=lambda m, rt: m.GENS_FOR_SPINNING_RESERVE_TYPE_dict.pop(rt) + dimen=1, + initialize=lambda m, rt: m.GENS_FOR_SPINNING_RESERVE_TYPE_dict.pop(rt), ) # types, generators and timepoints when reserves could be supplied - m.SPINNING_RESERVE_TYPE_GEN_TPS = Set(dimen=3, initialize=lambda m: ( - (rt, g, tp) - for g, rt in m.GEN_SPINNING_RESERVE_TYPES - for tp in m.TPS_FOR_GEN[g] - )) + m.SPINNING_RESERVE_TYPE_GEN_TPS = Set( + dimen=3, + initialize=lambda m: ( + (rt, g, tp) + for g, rt in m.GEN_SPINNING_RESERVE_TYPES + for tp in m.TPS_FOR_GEN[g] + ), + ) # generators and timepoints when reserves could be supplied - m.SPINNING_RESERVE_CAPABLE_GEN_TPS = Set(dimen=2, initialize=lambda m: ( - (g, tp) - for g in m.SPINNING_RESERVE_CAPABLE_GENS - for tp in m.TPS_FOR_GEN[g] - )) + m.SPINNING_RESERVE_CAPABLE_GEN_TPS = Set( + dimen=2, + initialize=lambda m: ( + (g, tp) for g in m.SPINNING_RESERVE_CAPABLE_GENS for tp in m.TPS_FOR_GEN[g] + ), + ) # decide how much of each type of reserves to produce from each generator # during each timepoint - m.CommitGenSpinningReservesUp = Var(m.SPINNING_RESERVE_TYPE_GEN_TPS, within=NonNegativeReals) - m.CommitGenSpinningReservesDown = Var(m.SPINNING_RESERVE_TYPE_GEN_TPS, within=NonNegativeReals) + m.CommitGenSpinningReservesUp = Var( + m.SPINNING_RESERVE_TYPE_GEN_TPS, within=NonNegativeReals + ) + m.CommitGenSpinningReservesDown = Var( + m.SPINNING_RESERVE_TYPE_GEN_TPS, within=NonNegativeReals + ) + m.CommitGenSpinningReservesSlackUp = Var( + m.SPINNING_RESERVE_CAPABLE_GEN_TPS, + within=NonNegativeReals, + doc="Denotes the upward slack in spinning reserves that could be used " + "for quickstart reserves, or possibly other reserve products.", + ) # constrain reserve provision appropriately m.CommitGenSpinningReservesUp_Limit = Constraint( m.SPINNING_RESERVE_CAPABLE_GEN_TPS, - rule=lambda m, g, tp: - sum(m.CommitGenSpinningReservesUp[rt, g, tp] for rt in m.SPINNING_RESERVE_TYPES_FOR_GEN[g]) - <= - m.DispatchSlackUp[g, tp] + rule=lambda m, g, tp: ( + sum( + m.CommitGenSpinningReservesUp[rt, g, tp] + for rt in m.SPINNING_RESERVE_TYPES_FOR_GEN[g] + ) + + m.CommitGenSpinningReservesSlackUp[g, tp] + == m.DispatchSlackUp[g, tp] # storage can give more up response by stopping charging - + (m.ChargeStorage[g, tp] if g in getattr(m, 'STORAGE_GENS', []) else 0.0) + + (m.ChargeStorage[g, tp] if g in getattr(m, "STORAGE_GENS", []) else 0.0) + ), ) m.CommitGenSpinningReservesDown_Limit = Constraint( m.SPINNING_RESERVE_CAPABLE_GEN_TPS, - rule=lambda m, g, tp: - 
sum(m.CommitGenSpinningReservesDown[rt, g, tp] for rt in m.SPINNING_RESERVE_TYPES_FOR_GEN[g]) - <= - m.DispatchSlackDown[g, tp] - + ( # storage could give more down response by raising ChargeStorage to the maximum rate - (m.DispatchUpperLimit[g, tp] * m.gen_store_to_release_ratio[g] - m.ChargeStorage[g, tp]) - if g in getattr(m, 'STORAGE_GENS', []) - else 0.0 + rule=lambda m, g, tp: sum( + m.CommitGenSpinningReservesDown[rt, g, tp] + for rt in m.SPINNING_RESERVE_TYPES_FOR_GEN[g] + ) + <= m.DispatchSlackDown[g, tp] + + ( # storage could give more down response by raising ChargeStorage to the maximum rate + ( + m.DispatchUpperLimit[g, tp] * m.gen_store_to_release_ratio[g] + - m.ChargeStorage[g, tp] ) + if g in getattr(m, "STORAGE_GENS", []) + else 0.0 + ), ) # Calculate total spinning reserves from generation projects, @@ -467,13 +552,16 @@ def rule(m): up[rt, ba, tp] += m.CommitGenSpinningReservesUp[rt, g, tp] down[rt, ba, tp] += m.CommitGenSpinningReservesDown[rt, g, tp] m.TotalGenSpinningReservesUp = Expression(list(up.keys()), initialize=dict(up)) - m.TotalGenSpinningReservesDown = Expression(list(down.keys()), initialize=dict(down)) + m.TotalGenSpinningReservesDown = Expression( + list(down.keys()), initialize=dict(down) + ) # construct these, so they can be used immediately for c in [m.TotalGenSpinningReservesUp, m.TotalGenSpinningReservesDown]: c.index_set().construct() c.construct() - m.Spinning_Reserve_Up_Provisions.append('TotalGenSpinningReservesUp') - m.Spinning_Reserve_Down_Provisions.append('TotalGenSpinningReservesDown') + m.Spinning_Reserve_Up_Provisions.append("TotalGenSpinningReservesUp") + m.Spinning_Reserve_Down_Provisions.append("TotalGenSpinningReservesDown") + m.TotalGenSpinningReserves_aggregate = BuildAction(rule=rule) # define reserve requirements @@ -483,14 +571,16 @@ def rule(m): gen_unit_contingency(m) if m.options.project_contingency: gen_project_contingency(m) - if m.options.spinning_requirement_rule == 'Hawaii': + if m.options.spinning_requirement_rule == "Hawaii": hawaii_spinning_reserve_requirements(m) - elif m.options.spinning_requirement_rule == '3+5': + elif m.options.spinning_requirement_rule == "3+5": nrel_3_5_spinning_reserve_requirements(m) - elif m.options.spinning_requirement_rule == 'none': - pass # users can turn off the rules and use their own instead + elif m.options.spinning_requirement_rule == "none": + pass # users can turn off the rules and use their own instead else: - raise ValueError('No --spinning-requirement-rule specified on command line; unable to allocate reserves.') + raise ValueError( + "No --spinning-requirement-rule specified on command line; unable to allocate reserves." + ) def define_dynamic_components(m): @@ -522,26 +612,32 @@ def define_dynamic_components(m): # define largest contingencies m.MaximumContingencyUp = Var( - m.BALANCING_AREA_TIMEPOINTS, within=NonNegativeReals, - doc=("Maximum of the registered Spinning_Reserve_Up_Contingencies, after " - "multiplying by contingency_safety_factor.") + m.BALANCING_AREA_TIMEPOINTS, + within=NonNegativeReals, + doc=( + "Maximum of the registered Spinning_Reserve_Up_Contingencies, after " + "multiplying by contingency_safety_factor." 
+ ), ) m.MaximumContingencyDown = Var( - m.BALANCING_AREA_TIMEPOINTS, within=NonNegativeReals, - doc=("Maximum of the registered Spinning_Reserve_Down_Contingencies, after " - "multiplying by contingency_safety_factor.") + m.BALANCING_AREA_TIMEPOINTS, + within=NonNegativeReals, + doc=( + "Maximum of the registered Spinning_Reserve_Down_Contingencies, after " + "multiplying by contingency_safety_factor." + ), ) m.Calculate_MaximumContingencyUp = Constraint( m.BALANCING_AREA_TIMEPOINTS, - m.Spinning_Reserve_Up_Contingencies, # list of contingency events - rule=lambda m, b, t, contingency: - m.MaximumContingencyUp[b, t] >= m.contingency_safety_factor * getattr(m, contingency)[b, t] + m.Spinning_Reserve_Up_Contingencies, # list of contingency events + rule=lambda m, b, t, contingency: m.MaximumContingencyUp[b, t] + >= m.contingency_safety_factor * getattr(m, contingency)[b, t], ) m.Calculate_MaximumContingencyDown = Constraint( m.BALANCING_AREA_TIMEPOINTS, - m.Spinning_Reserve_Down_Contingencies, # list of contingency events - rule=lambda m, b, t, contingency: - m.MaximumContingencyDown[b, t] >= m.contingency_safety_factor * getattr(m, contingency)[b, t] + m.Spinning_Reserve_Down_Contingencies, # list of contingency events + rule=lambda m, b, t, contingency: m.MaximumContingencyDown[b, t] + >= m.contingency_safety_factor * getattr(m, contingency)[b, t], ) # create reserve requirements equal to the largest contingencies @@ -549,16 +645,16 @@ def define_dynamic_components(m): m.MaximumContingencyUpRequirement = Expression( [m.options.contingency_reserve_type], m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, rt, ba, tp: m.MaximumContingencyUp[ba, tp] + rule=lambda m, rt, ba, tp: m.MaximumContingencyUp[ba, tp], ) m.MaximumContingencyDownRequirement = Expression( [m.options.contingency_reserve_type], m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, rt, ba, tp: m.MaximumContingencyDown[ba, tp] + rule=lambda m, rt, ba, tp: m.MaximumContingencyDown[ba, tp], ) - m.Spinning_Reserve_Up_Requirements.append('MaximumContingencyUpRequirement') - m.Spinning_Reserve_Down_Requirements.append('MaximumContingencyDownRequirement') + m.Spinning_Reserve_Up_Requirements.append("MaximumContingencyUpRequirement") + m.Spinning_Reserve_Down_Requirements.append("MaximumContingencyDownRequirement") # aggregate the requirements for each type of reserves during each timepoint def rule(m): @@ -566,38 +662,40 @@ def makedict(m, lst): # lst is the name of a dynamic list from which to aggregate components d = defaultdict(float) for comp in getattr(m, lst): - for key, val in iteritems(getattr(m, comp)): + for key, val in getattr(m, comp).items(): d[key] += val - setattr(m, lst + '_dict', d) - makedict(m, 'Spinning_Reserve_Up_Requirements') - makedict(m, 'Spinning_Reserve_Down_Requirements') - makedict(m, 'Spinning_Reserve_Up_Provisions') - makedict(m, 'Spinning_Reserve_Down_Provisions') + setattr(m, lst + "_dict", d) + + makedict(m, "Spinning_Reserve_Up_Requirements") + makedict(m, "Spinning_Reserve_Down_Requirements") + makedict(m, "Spinning_Reserve_Up_Provisions") + makedict(m, "Spinning_Reserve_Down_Provisions") + m.Aggregate_Spinning_Reserve_Details = BuildAction(rule=rule) m.SPINNING_RESERVE_REQUIREMENT_UP_BALANCING_AREA_TIMEPOINTS = Set( dimen=3, - rule=lambda m: list(m.Spinning_Reserve_Up_Requirements_dict.keys()) + initialize=lambda m: list(m.Spinning_Reserve_Up_Requirements_dict.keys()), ) m.SPINNING_RESERVE_REQUIREMENT_DOWN_BALANCING_AREA_TIMEPOINTS = Set( dimen=3, - rule=lambda m: 
list(m.Spinning_Reserve_Down_Requirements_dict.keys()) + initialize=lambda m: list(m.Spinning_Reserve_Down_Requirements_dict.keys()), ) # satisfy all spinning reserve requirements m.Satisfy_Spinning_Reserve_Up_Requirement = Constraint( m.SPINNING_RESERVE_REQUIREMENT_UP_BALANCING_AREA_TIMEPOINTS, - rule=lambda m, rt, ba, tp: - m.Spinning_Reserve_Up_Provisions_dict.pop((rt, ba, tp), 0.0) - >= - m.Spinning_Reserve_Up_Requirements_dict.pop((rt, ba, tp)) + rule=lambda m, rt, ba, tp: m.Spinning_Reserve_Up_Provisions_dict.pop( + (rt, ba, tp), 0.0 + ) + >= m.Spinning_Reserve_Up_Requirements_dict.pop((rt, ba, tp)), ) m.Satisfy_Spinning_Reserve_Down_Requirement = Constraint( m.SPINNING_RESERVE_REQUIREMENT_DOWN_BALANCING_AREA_TIMEPOINTS, - rule=lambda m, rt, ba, tp: - m.Spinning_Reserve_Down_Provisions_dict.pop((rt, ba, tp), 0.0) - >= - m.Spinning_Reserve_Down_Requirements_dict.pop((rt, ba, tp)) + rule=lambda m, rt, ba, tp: m.Spinning_Reserve_Down_Provisions_dict.pop( + (rt, ba, tp), 0.0 + ) + >= m.Spinning_Reserve_Down_Requirements_dict.pop((rt, ba, tp)), ) @@ -612,23 +710,23 @@ def load_inputs(m, switch_data, inputs_dir): """ contingency_safety_factor. Note that this only contains one header row and one data row. """ - path=os.path.join(inputs_dir, 'generation_projects_reserve_capability.csv') + path = os.path.join(inputs_dir, "generation_projects_reserve_capability.csv") switch_data.load_aug( filename=path, optional=True, - auto_select=True, - optional_params=['gen_reserve_type_max_share]'], + optional_params=["gen_reserve_type_max_share"], index=m.GEN_SPINNING_RESERVE_TYPES, - param=(m.gen_reserve_type_max_share) + param=(m.gen_reserve_type_max_share), ) if not os.path.isfile(path): - gen_projects = switch_data.data()['GENERATION_PROJECTS'][None] - switch_data.data()['GEN_SPINNING_RESERVE_TYPES'] = {} - switch_data.data()['GEN_SPINNING_RESERVE_TYPES'][None] = \ - [(g, "spinning") for g in gen_projects] + gen_projects = switch_data.data()["GENERATION_PROJECTS"][None] + switch_data.data()["GEN_SPINNING_RESERVE_TYPES"] = {} + switch_data.data()["GEN_SPINNING_RESERVE_TYPES"][None] = [ + (g, "spinning") for g in gen_projects + ] switch_data.load_aug( - filename=os.path.join(inputs_dir, 'spinning_reserve_params.csv'), - optional=True, auto_select=True, - param=(m.contingency_safety_factor,) + filename=os.path.join(inputs_dir, "spinning_reserve_params.csv"), + optional=True, + param=(m.contingency_safety_factor,), ) diff --git a/switch_model/balancing/planning_reserves.py b/switch_model/balancing/planning_reserves.py index 94f6a440d..5c9d2b5df 100644 --- a/switch_model/balancing/planning_reserves.py +++ b/switch_model/balancing/planning_reserves.py @@ -1,4 +1,4 @@ -# Copyright (c) 2015-2019 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file.
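The two Satisfy constraints above read requirements and provisions out of plain dicts, popping each key so its memory is released once the constraint is built, and using a 0.0 default so a requirement type with no registered provision is still constrained (to zero) rather than raising a KeyError. The pattern, sketched without Pyomo (hypothetical balancing area and numbers):

    requirements = {("spinning", "Oahu", 1): 225.0}  # hypothetical aggregates
    provisions = {("spinning", "Oahu", 1): 250.0}
    for key in list(requirements):
        # mirrors the constraint rules: missing provisions default to 0.0
        assert provisions.pop(key, 0.0) >= requirements.pop(key)
    assert not requirements and not provisions  # each entry consumed exactly once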
""" This module defines planning reserves margins to support resource adequacy @@ -50,20 +50,21 @@ from pyomo.environ import * dependencies = ( - 'switch_model.timescales', - 'switch_model.financials', - 'switch_model.balancing.load_zones', - 'switch_model.energy_sources.properties', - 'switch_model.generators.core.build', - 'switch_model.generators.core.dispatch', + "switch_model.timescales", + "switch_model.financials", + "switch_model.balancing.load_zones", + "switch_model.energy_sources.properties", + "switch_model.generators.core.build", + "switch_model.generators.core.dispatch", ) optional_prerequisites = ( - 'switch_model.generators.storage', - 'switch_model.transmission.local_td', - 'switch_model.transmission.transport.build', - 'switch_model.transmission.transport.dispatch', + "switch_model.generators.storage", + "switch_model.transmission.local_td", + "switch_model.transmission.transport.build", + "switch_model.transmission.transport.dispatch", ) + def define_dynamic_lists(model): """ CAPACITY_FOR_RESERVES is a list of model components than can contribute @@ -127,26 +128,29 @@ def define_components(model): will not reflect any DER activities. """ model.PLANNING_RESERVE_REQUIREMENTS = Set( - doc="Areas and times where planning reserve margins are specified." + dimen=1, doc="Areas and times where planning reserve margins are specified." ) model.PRR_ZONES = Set( dimen=2, - doc=("A set of (prr, z) that describes which zones contribute to each " - "Planning Reserve Requirement.") + doc=( + "A set of (prr, z) that describes which zones contribute to each " + "Planning Reserve Requirement." + ), ) model.prr_cap_reserve_margin = Param( - model.PLANNING_RESERVE_REQUIREMENTS, - within=PercentFraction, - default=0.15 + model.PLANNING_RESERVE_REQUIREMENTS, within=PercentFraction, default=0.15 ) model.prr_enforcement_timescale = Param( model.PLANNING_RESERVE_REQUIREMENTS, - default='period_peak_load', - validate=lambda m, value, prr: - value in ('all_timepoints', 'peak_load'), - doc=("Determines whether planning reserve requirements are enforced in " - "each timepoint, or just timepoints with peak load (zone_demand_mw).") + default="peak_load", + within=Any, + validate=lambda m, value, prr: value in {"all_timepoints", "peak_load"}, + doc=( + "Determines whether planning reserve requirements are enforced in " + "each timepoint, or just timepoints with peak load (zone_demand_mw)." + ), ) + def get_peak_timepoints(m, prr): """ Return the set of timepoints with peak load within a planning reserve @@ -154,57 +158,67 @@ def get_peak_timepoints(m, prr): statically (zone_demand_mw), ignoring the impact of all distributed energy resources. 
""" - peak_timepoint_list = set() + peak_timepoint_list = [] ZONES = [z for (_prr, z) in m.PRR_ZONES if _prr == prr] for p in m.PERIODS: peak_load = 0.0 for t in m.TPS_IN_PERIOD[p]: load = sum(m.zone_demand_mw[z, t] for z in ZONES) - if load > peak_load: + if load >= peak_load: peak_timepoint = t peak_load = load - peak_timepoint_list.add(peak_timepoint) + peak_timepoint_list.append(peak_timepoint) return peak_timepoint_list + def PRR_TIMEPOINTS_init(m): PRR_TIMEPOINTS = [] for prr in m.PLANNING_RESERVE_REQUIREMENTS: - if m.prr_enforcement_timescale[prr] == 'all_timepoints': + if m.prr_enforcement_timescale[prr] == "all_timepoints": PRR_TIMEPOINTS.extend([(prr, t) for t in m.TIMEPOINTS]) - elif m.prr_enforcement_timescale[prr] == 'peak_load': + elif m.prr_enforcement_timescale[prr] == "peak_load": PRR_TIMEPOINTS.extend([(prr, t) for t in get_peak_timepoints(m, prr)]) else: - raise ValueError("prr_enforcement_timescale not recognized: '{}'".format( - m.prr_enforcement_timescale[prr])) + raise ValueError( + "prr_enforcement_timescale not recognized: '{}'".format( + m.prr_enforcement_timescale[prr] + ) + ) return PRR_TIMEPOINTS + model.PRR_TIMEPOINTS = Set( dimen=2, within=model.PLANNING_RESERVE_REQUIREMENTS * model.TIMEPOINTS, initialize=PRR_TIMEPOINTS_init, - doc=("The sparse set of (prr, t) for which planning reserve " - "requirements are enforced.") + doc=( + "The sparse set of (prr, t) for which planning reserve " + "requirements are enforced." + ), ) model.gen_can_provide_cap_reserves = Param( model.GENERATION_PROJECTS, within=Boolean, default=True, - doc="Indicates whether a generator can provide capacity reserves." + doc="Indicates whether a generator can provide capacity reserves.", ) + def gen_capacity_value_default(m, g, t): if not m.gen_can_provide_cap_reserves[g]: return 0.0 elif g in m.VARIABLE_GENS: - return m.gen_max_capacity_factor[g, t] + # This can be > 1 (Ex solar on partly cloudy days). Take a + # conservative approach of capping at 100% of nameplate capacity. + return min(1.0, m.gen_max_capacity_factor[g, t]) else: return 1.0 + model.gen_capacity_value = Param( model.GEN_TPS, within=PercentFraction, default=gen_capacity_value_default, validate=lambda m, value, g, t: ( - value == 0.0 - if not m.gen_can_provide_cap_reserves[g] - else True) + value == 0.0 if not m.gen_can_provide_cap_reserves[g] else True + ), ) def zones_for_prr(m, prr): @@ -219,94 +233,104 @@ def AvailableReserveCapacity_rule(m, prr, t): for g in m.GENS_IN_ZONE[z] if (g, t) in m.GEN_TPS and m.gen_can_provide_cap_reserves[g] ] + STORAGE_GENS = getattr(m, "STORAGE_GENS", set()) for g in GENS: # Storage is only credited with its expected output - # Note: this code appears to have no users, since it references - # DispatchGen, which doesn't exist (should be m.DispatchGen). - if g in getattr(m, 'STORAGE_GENS', set()): - reserve_cap += DispatchGen[g, t] - m.ChargeStorage[g, t] + if g in STORAGE_GENS: + reserve_cap += m.DispatchGen[g, t] - m.ChargeStorage[g, t] # If local_td is included with DER modeling, avoid allocating # distributed generation to central grid capacity because it will # be credited with adjusting load at the distribution node. 
- elif hasattr(m, 'Distributed_Power_Injections') and m.gen_is_distributed[g]: + elif hasattr(m, "Distributed_Power_Injections") and m.gen_is_distributed[g]: pass else: reserve_cap += m.gen_capacity_value[g, t] * m.GenCapacityInTP[g, t] return reserve_cap + model.AvailableReserveCapacity = Expression( model.PRR_TIMEPOINTS, rule=AvailableReserveCapacity_rule ) - model.CAPACITY_FOR_RESERVES.append('AvailableReserveCapacity') + model.CAPACITY_FOR_RESERVES.append("AvailableReserveCapacity") - if 'TXPowerNet' in model: - model.CAPACITY_FOR_RESERVES.append('TXPowerNet') + if "TXPowerNet" in model: + model.CAPACITY_FOR_RESERVES.append("TXPowerNet") def CapacityRequirements_rule(m, prr, t): ZONES = zones_for_prr(m, prr) - if hasattr(m, 'WithdrawFromCentralGrid'): + if hasattr(m, "WithdrawFromCentralGrid"): return sum( - (1 + m.prr_cap_reserve_margin[prr]) * m.WithdrawFromCentralGrid[z,t] + (1 + m.prr_cap_reserve_margin[prr]) * m.WithdrawFromCentralGrid[z, t] for z in ZONES ) else: return sum( - (1 + m.prr_cap_reserve_margin[prr]) * m.zone_demand_mw[z,t] + (1 + m.prr_cap_reserve_margin[prr]) * m.zone_demand_mw[z, t] for z in ZONES ) + model.CapacityRequirements = Expression( - model.PRR_TIMEPOINTS, - rule=CapacityRequirements_rule + model.PRR_TIMEPOINTS, rule=CapacityRequirements_rule ) - model.REQUIREMENTS_FOR_CAPACITY_RESERVES.append('CapacityRequirements') + model.REQUIREMENTS_FOR_CAPACITY_RESERVES.append("CapacityRequirements") def define_dynamic_components(model): - """ - """ + """ """ model.Enforce_Planning_Reserve_Margin = Constraint( - model.PRR_TIMEPOINTS, rule=lambda m, prr, t: ( - sum(getattr(m, reserve_cap)[prr,t] + model.PRR_TIMEPOINTS, + rule=lambda m, prr, t: ( + sum( + getattr(m, reserve_cap)[prr, t] for reserve_cap in m.CAPACITY_FOR_RESERVES - ) >= sum(getattr(m, cap_requirement)[prr,t] - for cap_requirement in m.REQUIREMENTS_FOR_CAPACITY_RESERVES)), - doc=("Ensures that the sum of CAPACITY_FOR_RESERVES satisfies the sum " - "of REQUIREMENTS_FOR_CAPACITY_RESERVES for each of PRR_TIMEPOINTS.")) + ) + >= sum( + getattr(m, cap_requirement)[prr, t] + for cap_requirement in m.REQUIREMENTS_FOR_CAPACITY_RESERVES + ) + ), + doc=( + "Ensures that the sum of CAPACITY_FOR_RESERVES satisfies the sum " + "of REQUIREMENTS_FOR_CAPACITY_RESERVES for each of PRR_TIMEPOINTS." + ), + ) def load_inputs(model, switch_data, inputs_dir): """ - reserve_capacity_value.csv + Files or columns marked with * are optional. See notes above on default + values. 
+ + reserve_capacity_value.csv* GEN, TIMEPOINT, gen_capacity_value - planning_reserve_requirement_zones.csv - PLANNING_RESERVE_REQUIREMENTS, prr_cap_reserve_margin, prr_enforcement_timescale + planning_reserve_requirements.csv* + PLANNING_RESERVE_REQUIREMENTS, prr_cap_reserve_margin*, prr_enforcement_timescale* - generation_projects_info.csv - ..., gen_can_provide_cap_reserves + gen_info.csv + ..., gen_can_provide_cap_reserves* planning_reserve_requirement_zones.csv PRR, ZONE """ switch_data.load_aug( - filename=os.path.join(inputs_dir, 'reserve_capacity_value.csv'), + filename=os.path.join(inputs_dir, "reserve_capacity_value.csv"), optional=True, - auto_select=True, - param=(model.gen_capacity_value) + param=(model.gen_capacity_value), ) switch_data.load_aug( - filename=os.path.join(inputs_dir, 'planning_reserve_requirements.csv'), - auto_select=True, + filename=os.path.join(inputs_dir, "planning_reserve_requirements.csv"), + optional=True, index=model.PLANNING_RESERVE_REQUIREMENTS, - param=(model.prr_cap_reserve_margin, model.prr_enforcement_timescale) + optional_params=["prr_cap_reserve_margin", "prr_enforcement_timescale"], + param=(model.prr_cap_reserve_margin, model.prr_enforcement_timescale), ) switch_data.load_aug( - filename=os.path.join(inputs_dir, 'generation_projects_info.csv'), - auto_select=True, - optional_params=['gen_can_provide_cap_reserves'], - param=(model.gen_can_provide_cap_reserves) + filename=os.path.join(inputs_dir, "gen_info.csv"), + optional_params=["gen_can_provide_cap_reserves"], + param=(model.gen_can_provide_cap_reserves), ) switch_data.load_aug( - filename=os.path.join(inputs_dir, 'planning_reserve_requirement_zones.csv'), - set=model.PRR_ZONES + filename=os.path.join(inputs_dir, "planning_reserve_requirement_zones.csv"), + set=model.PRR_ZONES, ) diff --git a/switch_model/balancing/unserved_load.py b/switch_model/balancing/unserved_load.py index 723bcbb7e..d06156b54 100644 --- a/switch_model/balancing/unserved_load.py +++ b/switch_model/balancing/unserved_load.py @@ -1,4 +1,4 @@ -# Copyright (c) 2017 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. """ @@ -11,8 +11,12 @@ import os from pyomo.environ import * -dependencies = 'switch_model.timescales',\ - 'switch_model.balancing.load_areas', 'switch_model.financials' +dependencies = ( + "switch_model.timescales", + "switch_model.balancing.load_areas", + "switch_model.financials", +) + def define_components(mod): """ @@ -24,26 +28,30 @@ def define_components(mod): load in any load zone. UnservedLoad[z, tp] is a decision variable that describes how much - load in MWh is not supplied in a given load zone, at a given timepoint. + load (MW) is not supplied in a given load zone, at a given timepoint. This + is applied at distribution nodes if available, otherwise at zone-center + nodes. UnservedLoadPenalty[tp] is an expression that summarizes the cost penalties of the load that is left unserved in all load zones at a given timepoint.
""" - mod.unserved_load_penalty = Param( - within=NonNegativeReals, - default=500) - mod.UnservedLoad = Var( - mod.LOAD_ZONES, mod.TIMEPOINTS, - within=NonNegativeReals) - mod.Zone_Power_Injections.append('UnservedLoad') + mod.unserved_load_penalty = Param(within=NonNegativeReals, default=500) + mod.UnservedLoad = Var(mod.LOAD_ZONES, mod.TIMEPOINTS, within=NonNegativeReals) + try: + mod.Distributed_Power_Injections.append("UnservedLoad") + except AttributeError: + mod.Zone_Power_Injections.append("UnservedLoad") mod.UnservedLoadPenalty = Expression( mod.TIMEPOINTS, - rule=lambda m, tp: sum(m.UnservedLoad[z, tp] * - m.unserved_load_penalty for z in m.LOAD_ZONES)) - mod.Cost_Components_Per_TP.append('UnservedLoadPenalty') + rule=lambda m, tp: sum( + m.UnservedLoad[z, tp] * m.unserved_load_penalty for z in m.LOAD_ZONES + ), + ) + mod.Cost_Components_Per_TP.append("UnservedLoadPenalty") + def load_inputs(mod, switch_data, inputs_dir): """ @@ -58,7 +66,7 @@ def load_inputs(mod, switch_data, inputs_dir): """ switch_data.load_aug( - filename=os.path.join(inputs_dir, 'lost_load_cost.csv'), - optional=True, auto_select=True, - param=(mod.unserved_load_penalty,) + filename=os.path.join(inputs_dir, "lost_load_cost.csv"), + optional=True, + param=(mod.unserved_load_penalty,), ) diff --git a/switch_model/energy_sources/fuel_costs/markets.py b/switch_model/energy_sources/fuel_costs/markets.py index bd0f02e13..0b7f68841 100644 --- a/switch_model/energy_sources/fuel_costs/markets.py +++ b/switch_model/energy_sources/fuel_costs/markets.py @@ -1,4 +1,4 @@ -# Copyright (c) 2015-2019 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. """ @@ -11,10 +11,16 @@ import os import csv from pyomo.environ import * +from switch_model.utilities import unique_list + +dependencies = ( + "switch_model.timescales", + "switch_model.balancing.load_zones", + "switch_model.energy_sources.properties.properties", + "switch_model.generators.core.build", + "switch_model.generators.core.dispatch", +) -dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones',\ - 'switch_model.energy_sources.properties.properties',\ - 'switch_model.generators.core.build', 'switch_model.generators.core.dispatch' def define_components(mod): """ @@ -42,7 +48,7 @@ def define_components(mod): ZONE_FUELS is the set of fuels available in load zones. It is specified as set of 2-member tuples of (load_zone, fuel). - zone_rfm[z, f] is the regional fuel market that supplies a a given load + zone_fuel_rfm[z, f] is the regional fuel market that supplies a a given load zone. Regional fuel markets may be referred to as fuel regions for brevity. A regional fuel market could be as small as a single load zone or as large as the entire study region. In general, each fuel @@ -52,7 +58,7 @@ def define_components(mod): that define different regional markets. ZONE_RFMS is the set of all load-zone regional fuel market combinations. - It is the input data from which zone_rfm[z,f] is derived. + It is the input data from which zone_fuel_rfm[z,f] is derived. ZONES_IN_RFM[rfm] is an indexed set that lists the load zones within each regional fuel market. 
@@ -204,51 +210,67 @@ def define_components(mod): """ - mod.REGIONAL_FUEL_MARKETS = Set() + mod.REGIONAL_FUEL_MARKETS = Set(dimen=1) mod.rfm_fuel = Param(mod.REGIONAL_FUEL_MARKETS, within=mod.FUELS) mod.ZONE_RFMS = Set( - dimen=2, validate=lambda m, z, rfm: ( - rfm in m.REGIONAL_FUEL_MARKETS and z in m.LOAD_ZONES)) + dimen=2, + validate=lambda m, z, rfm: ( + rfm in m.REGIONAL_FUEL_MARKETS and z in m.LOAD_ZONES + ), + ) mod.ZONE_FUELS = Set( - dimen=2, initialize=lambda m: set( - (z, m.rfm_fuel[rfm]) for (z, rfm) in m.ZONE_RFMS)) + dimen=2, initialize=lambda m: [(z, m.rfm_fuel[rfm]) for (z, rfm) in m.ZONE_RFMS] + ) - def zone_rfm_init(m, load_zone, fuel): + def zone_fuel_rfm_init(m, load_zone, fuel): + # find first (only) matching rfm for (z, rfm) in m.ZONE_RFMS: - if(z == load_zone and fuel == m.rfm_fuel[rfm]): + if z == load_zone and fuel == m.rfm_fuel[rfm]: return rfm - mod.zone_rfm = Param( - mod.ZONE_FUELS, within=mod.REGIONAL_FUEL_MARKETS, - initialize=zone_rfm_init) - mod.min_data_check('REGIONAL_FUEL_MARKETS', 'rfm_fuel', 'zone_rfm') + + mod.zone_fuel_rfm = Param( + mod.ZONE_FUELS, within=mod.REGIONAL_FUEL_MARKETS, initialize=zone_fuel_rfm_init + ) + mod.min_data_check("REGIONAL_FUEL_MARKETS", "rfm_fuel", "zone_fuel_rfm") mod.ZONES_IN_RFM = Set( mod.REGIONAL_FUEL_MARKETS, - initialize=lambda m, rfm: set( - z for (z, r) in m.ZONE_RFMS if r == rfm)) + dimen=1, + initialize=lambda m, rfm: unique_list(z for (z, r) in m.ZONE_RFMS if r == rfm), + ) # RFM_SUPPLY_TIERS = [(regional_fuel_market, period, supply_tier_index)...] mod.RFM_SUPPLY_TIERS = Set( - dimen=3, validate=lambda m, r, p, st: ( - r in m.REGIONAL_FUEL_MARKETS and p in m.PERIODS)) - mod.rfm_supply_tier_cost = Param( - mod.RFM_SUPPLY_TIERS, within=Reals) + dimen=3, + validate=lambda m, r, p, st: (r in m.REGIONAL_FUEL_MARKETS and p in m.PERIODS), + ) + mod.rfm_supply_tier_cost = Param(mod.RFM_SUPPLY_TIERS, within=Reals) mod.rfm_supply_tier_limit = Param( - mod.RFM_SUPPLY_TIERS, within=NonNegativeReals, default=float('inf')) + mod.RFM_SUPPLY_TIERS, within=NonNegativeReals, default=float("inf") + ) mod.min_data_check( - 'RFM_SUPPLY_TIERS', 'rfm_supply_tier_cost', 'rfm_supply_tier_limit') + "RFM_SUPPLY_TIERS", "rfm_supply_tier_cost", "rfm_supply_tier_limit" + ) mod.SUPPLY_TIERS_FOR_RFM_PERIOD = Set( - mod.REGIONAL_FUEL_MARKETS, mod.PERIODS, dimen=3, - initialize=lambda m, rfm, ip: set( - (r, p, st) for (r, p, st) in m.RFM_SUPPLY_TIERS - if r == rfm and p == ip)) + mod.REGIONAL_FUEL_MARKETS, + mod.PERIODS, + dimen=3, + initialize=lambda m, rfm, ip: [ + (r, p, st) for (r, p, st) in m.RFM_SUPPLY_TIERS if r == rfm and p == ip + ], + ) mod.ConsumeFuelTier = Var( mod.RFM_SUPPLY_TIERS, domain=NonNegativeReals, bounds=lambda m, rfm, p, st: ( - 0, (m.rfm_supply_tier_limit[rfm, p, st] - if value(m.rfm_supply_tier_limit[rfm, p, st]) != float('inf') - else None))) + 0, + ( + m.rfm_supply_tier_limit[rfm, p, st] + if value(m.rfm_supply_tier_limit[rfm, p, st]) != float("inf") + else None + ), + ), + ) # The if statement in the upper bound of ConsumeFuelTier is a # work-around for a Pyomo bug in writing a cpxlp problem file for # glpk. 
Lines 771-774 of pyomo/repn/plugins/cpxlp.py print '<= inf'
@@ -259,90 +281,127 @@ def zone_rfm_init(m, load_zone, fuel):
    # solvers is: 0, m.rfm_supply_tier_limit[rfm, p, st]))

    mod.FuelConsumptionInMarket = Expression(
-        mod.REGIONAL_FUEL_MARKETS, mod.PERIODS,
+        mod.REGIONAL_FUEL_MARKETS,
+        mod.PERIODS,
        rule=lambda m, rfm, p: sum(
            m.ConsumeFuelTier[rfm_supply_tier]
-            for rfm_supply_tier in m.SUPPLY_TIERS_FOR_RFM_PERIOD[rfm, p]))
+            for rfm_supply_tier in m.SUPPLY_TIERS_FOR_RFM_PERIOD[rfm, p]
+        ),
+    )

    # Ensure that adjusted fuel costs of unbounded supply tiers are not
    # negative because that would create an unbounded optimization
    # problem.
    def zone_fuel_cost_adder_validate(model, val, z, fuel, p):
-        rfm = model.zone_rfm[z, fuel]
+        rfm = model.zone_fuel_rfm[z, fuel]
        for rfm_supply_tier in model.SUPPLY_TIERS_FOR_RFM_PERIOD[rfm, p]:
-            if(val + model.rfm_supply_tier_cost[rfm_supply_tier] < 0 and
-               model.rfm_supply_tier_limit[rfm_supply_tier] == float('inf')):
+            if val + model.rfm_supply_tier_cost[
+                rfm_supply_tier
+            ] < 0 and model.rfm_supply_tier_limit[rfm_supply_tier] == float("inf"):
                return False
        return True
+
    mod.zone_fuel_cost_adder = Param(
-        mod.ZONE_FUELS, mod.PERIODS,
-        within=Reals, default=0, validate=zone_fuel_cost_adder_validate)
+        mod.ZONE_FUELS,
+        mod.PERIODS,
+        within=Reals,
+        default=0,
+        validate=zone_fuel_cost_adder_validate,
+    )

    # Summarize annual fuel costs for the objective function
    def rfm_annual_costs(m, rfm, p):
        return sum(
            m.ConsumeFuelTier[rfm_st] * m.rfm_supply_tier_cost[rfm_st]
-            for rfm_st in m.SUPPLY_TIERS_FOR_RFM_PERIOD[rfm, p])
+            for rfm_st in m.SUPPLY_TIERS_FOR_RFM_PERIOD[rfm, p]
+        )
+
    mod.FuelCostsPerPeriod = Expression(
        mod.PERIODS,
        rule=lambda m, p: sum(
-            rfm_annual_costs(m, rfm, p)
-            for rfm in m.REGIONAL_FUEL_MARKETS))
-    mod.Cost_Components_Per_Period.append('FuelCostsPerPeriod')
+            rfm_annual_costs(m, rfm, p) for rfm in m.REGIONAL_FUEL_MARKETS
+        ),
+    )
+    mod.Cost_Components_Per_Period.append("FuelCostsPerPeriod")

    # Components to link aggregate fuel consumption from project
    # dispatch into market framework
    def GENS_FOR_RFM_PERIOD_rule(m, rfm, p):
-        # Construct and cache a set of gens for each zone/fuel/period, then
-        # return lists of gens for each rfm/period as needed
+        # Construct and cache a list of gens for each rfm/period, then return
+        # them as needed
        try:
            d = m.GENS_FOR_RFM_PERIOD_dict
        except AttributeError:
            d = m.GENS_FOR_RFM_PERIOD_dict = dict()
-            # d uses (zone, fuel, period) as key; could use (rfm, period) as key
-            # if m.zone_fuel_rfm (back-lookup) existed
            for g in m.FUEL_BASED_GENS:
                for f in m.FUELS_FOR_GEN[g]:
-                    for p_ in m.PERIODS_FOR_GEN[g]:
-                        d.setdefault((m.gen_load_zone[g], f, p_), []).append(g)
-        relevant_gens = [
-            g
-            for z in m.ZONES_IN_RFM[rfm]
-            for g in d.pop((z, m.rfm_fuel[rfm], p), [])  # pop releases memory
-        ]
-        return relevant_gens
+                    try:
+                        _rfm = m.zone_fuel_rfm[m.gen_load_zone[g], f]
+                    except KeyError:  # no rfm provides this fuel
+                        pass
+                    else:
+                        for _p in m.PERIODS_FOR_GEN[g]:
+                            d.setdefault((_rfm, _p), []).append(g)
+        return d.pop((rfm, p), [])  # pop releases memory
+
    mod.GENS_FOR_RFM_PERIOD = Set(
-        mod.REGIONAL_FUEL_MARKETS, mod.PERIODS,
-        initialize=GENS_FOR_RFM_PERIOD_rule
+        mod.REGIONAL_FUEL_MARKETS,
+        mod.PERIODS,
+        dimen=1,
+        initialize=GENS_FOR_RFM_PERIOD_rule,
    )
+
    def Enforce_Fuel_Consumption_rule(m, rfm, p):
        return m.FuelConsumptionInMarket[rfm, p] == sum(
            m.GenFuelUseRate[g, t, m.rfm_fuel[rfm]] * m.tp_weight_in_year[t]
            for g in m.GENS_FOR_RFM_PERIOD[rfm, p]
            for t in m.TPS_IN_PERIOD[p]
-        )
+        )
+
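    # Hypothetical illustration (not from the Switch source): if a single gas
    # plant in a market burns fuel at 100 MMBtu/hour during a timepoint that
    # represents 8,760 hours/year, the constraint below requires
    # FuelConsumptionInMarket for that market and period to cover
    # 100 * 8,760 MMBtu of annual use, which the supply tiers above then price.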
mod.Enforce_Fuel_Consumption = Constraint(
-        mod.REGIONAL_FUEL_MARKETS, mod.PERIODS,
-        rule=Enforce_Fuel_Consumption_rule)
+        mod.REGIONAL_FUEL_MARKETS, mod.PERIODS, rule=Enforce_Fuel_Consumption_rule
+    )

    mod.GEN_TP_FUELS_UNAVAILABLE = Set(
+        dimen=3,
        initialize=mod.GEN_TP_FUELS,
-        filter=lambda m, g, t, f: \
-            (m.gen_load_zone[g], f) not in m.ZONE_FUELS)
+        filter=lambda m, g, t, f: (m.gen_load_zone[g], f) not in m.ZONE_FUELS,
+    )
    mod.Enforce_Fuel_Unavailability = Constraint(
        mod.GEN_TP_FUELS_UNAVAILABLE,
-        rule=lambda m, g, t, f: m.GenFuelUseRate[g, t, f] == 0)
-
+        rule=lambda m, g, t, f: m.GenFuelUseRate[g, t, f] == 0,
+    )

-    # Calculate average fuel costs to allow post-optimization inspection
-    # and cost allocation.
    mod.AverageFuelCosts = Expression(
-        mod.REGIONAL_FUEL_MARKETS, mod.PERIODS,
+        mod.REGIONAL_FUEL_MARKETS,
+        mod.PERIODS,
+        doc="Average fuel costs to allow post-optimization inspection "
+        "and cost allocation.",
        rule=lambda m, rfm, p: (
-            rfm_annual_costs(m, rfm, p) /
-            sum(m.ConsumeFuelTier[rfm_st]
-                for rfm_st in m.SUPPLY_TIERS_FOR_RFM_PERIOD[rfm, p])))
+            rfm_annual_costs(m, rfm, p)
+            / (
+                # Avoid divide-by-zero errors if no fuel is consumed.
+                m.FuelConsumptionInMarket[rfm, p]
+                + 0.0001
+            )
+        ),
+    )
+
+    def GenFuelCosts_rule(m, g, t, f):
+        try:
+            rfm = m.zone_fuel_rfm[m.gen_load_zone[g], f]
+        except KeyError:  # Fuel is unavailable
+            return 0.0
+        p = m.tp_period[t]
+        return m.GenFuelUseRate[g, t, f] * m.AverageFuelCosts[rfm, p]
+
+    mod.GenFuelCosts = Expression(
+        mod.GEN_TP_FUELS,
+        doc="Average cost of fuel consumption, $/hr for post-optimization "
+        "reporting.",
+        rule=GenFuelCosts_rule,
+    )

def load_inputs(mod, switch_data, inputs_dir):
@@ -383,91 +442,120 @@ def load_inputs(mod, switch_data, inputs_dir):
    # message if some columns are not found.
    switch_data.load_aug(
-        filename=os.path.join(inputs_dir, 'regional_fuel_markets.csv'),
-        select=('regional_fuel_market', 'fuel'),
+        filename=os.path.join(inputs_dir, "regional_fuel_markets.csv"),
+        select=("regional_fuel_market", "fuel"),
        index=mod.REGIONAL_FUEL_MARKETS,
-        param=(mod.rfm_fuel))
+        param=(mod.rfm_fuel),
+    )
    switch_data.load_aug(
-        filename=os.path.join(inputs_dir, 'fuel_supply_curves.csv'),
-        select=('regional_fuel_market', 'period', 'tier', 'unit_cost',
-                'max_avail_at_cost'),
+        filename=os.path.join(inputs_dir, "fuel_supply_curves.csv"),
+        select=(
+            "regional_fuel_market",
+            "period",
+            "tier",
+            "unit_cost",
+            "max_avail_at_cost",
+        ),
        index=mod.RFM_SUPPLY_TIERS,
-        param=(mod.rfm_supply_tier_cost, mod.rfm_supply_tier_limit))
+        param=(mod.rfm_supply_tier_cost, mod.rfm_supply_tier_limit),
+    )
    switch_data.load_aug(
-        filename=os.path.join(inputs_dir, 'zone_to_regional_fuel_market.csv'),
-        set=mod.ZONE_RFMS)
+        filename=os.path.join(inputs_dir, "zone_to_regional_fuel_market.csv"),
+        set=mod.ZONE_RFMS,
+    )
    switch_data.load_aug(
-        filename=os.path.join(inputs_dir, 'zone_fuel_cost_diff.csv'),
+        filename=os.path.join(inputs_dir, "zone_fuel_cost_diff.csv"),
        optional=True,
-        select=('load_zone', 'fuel', 'period', 'fuel_cost_adder'),
-        param=(mod.zone_fuel_cost_adder))
+        select=("load_zone", "fuel", "period", "fuel_cost_adder"),
+        param=(mod.zone_fuel_cost_adder),
+    )

    # Load a simple specification of costs if the file exists. The
    # actual loading, error checking, and casting into a supply curve is
    # slightly complicated, so I moved that logic to a separate function.
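    # For example (hypothetical values), a fuel_cost.csv row of
    #     North,NaturalGas,2030,5.25
    # (load_zone, fuel, period, fuel_cost) is converted below into a
    # single-zone regional fuel market named "North_NaturalGas" with one
    # supply tier costing 5.25 per MMBtu and no upper limit on supply.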
-    path = os.path.join(inputs_dir, 'fuel_cost.csv')
+    path = os.path.join(inputs_dir, "fuel_cost.csv")
    if os.path.isfile(path):
        _load_simple_cost_data(mod, switch_data, path)


def _load_simple_cost_data(mod, switch_data, path):
-    with open(path, 'r') as simple_cost_file:
-        simple_cost_dat = list(csv.DictReader(simple_cost_file, delimiter=','))
+    with open(path, "r") as simple_cost_file:
+        simple_cost_dat = list(csv.DictReader(simple_cost_file, delimiter=","))
    # Scan once for error checking
    for row in simple_cost_dat:
-        z = row['load_zone']
-        f = row['fuel']
-        p = int(row['period'])
-        f_cost = float(row['fuel_cost'])
+        z = row["load_zone"]
+        f = row["fuel"]
+        p = int(row["period"])
+        f_cost = float(row["fuel_cost"])
        # Basic data validity checks
-        if z not in switch_data.data(name='LOAD_ZONES'):
+        if z not in switch_data.data(name="LOAD_ZONES"):
            raise ValueError(
-                "Load zone " + z + " in zone_simple_fuel_cost.csv is not " +
-                "a known load zone from load_zones.csv.")
-        if f not in switch_data.data(name='FUELS'):
+                "Load zone "
+                + z
+                + " in fuel_cost.csv is not "
+                + "a known load zone from load_zones.csv."
+            )
+        if f not in switch_data.data(name="FUELS"):
            raise ValueError(
-                "Fuel " + f + " in zone_simple_fuel_cost.csv is not " +
-                "a known fuel from fuels.csv.")
-        if p not in switch_data.data(name='PERIODS'):
+                "Fuel "
+                + f
+                + " in fuel_cost.csv is not "
+                + "a known fuel from fuels.csv."
+            )
+        if p not in switch_data.data(name="PERIODS"):
            raise ValueError(
-                "Period " + p + " in zone_simple_fuel_cost.csv is not " +
-                "a known investment period.")
+                "Period "
+                + str(p)  # p was cast to int above; str + int raises TypeError
+                + " in fuel_cost.csv is not "
+                + "a known investment period."
+            )
        # Make sure they aren't overriding a supply curve or
        # regional fuel market defined in previous files.
-        for (z, rfm) in switch_data.data(name='ZONE_RFMS'):
-            if(z == z and
-               switch_data.data(name='rfm_fuel')[rfm] == f):
+        # note: the loop variable is renamed so the test actually compares
+        # each registered zone to this row's zone ("z == z" was always true)
+        for (zone, rfm) in switch_data.data(name="ZONE_RFMS"):
+            if zone == z and switch_data.data(name="rfm_fuel")[rfm] == f:
                raise ValueError(
-                    "The supply for fuel '" + f + "' for load_zone '" + z +
-                    "' was already registered with the regional fuel " +
-                    "market '" + rfm + "', so you cannot " +
-                    "specify a simple fuel cost for it in " +
-                    "zone_simple_fuel_cost.csv. You either need to delete " +
-                    "that entry from zone_to_regional_fuel_market.csv, or " +
-                    "remove those entries in zone_simple_fuel_cost.csv.")
+                    "The supply for fuel '"
+                    + f
+                    + "' for load_zone '"
+                    + z
+                    + "' was already registered with the regional fuel "
+                    + "market '"
+                    + rfm
+                    + "', so you cannot "
+                    + "specify a simple fuel cost for it in "
+                    + "fuel_cost.csv. You either need to delete "
+                    + "that entry from zone_to_regional_fuel_market.csv, or "
+                    + "remove those entries in fuel_cost.csv."
+                )
        # Make a new single-load zone regional fuel market.
        rfm = z + "_" + f
-        if rfm in switch_data.data(name='REGIONAL_FUEL_MARKETS'):
+        if rfm in switch_data.data(name="REGIONAL_FUEL_MARKETS"):
            raise ValueError(
-                "Trying to construct a simple Regional Fuel Market " +
-                "called " + rfm + " from data in zone_simple_fuel_cost.csv" +
-                ", but an RFM of that name already exists. Bailing out!")
+                "Trying to construct a simple Regional Fuel Market "
+                + "called "
+                + rfm
+                + " from data in fuel_cost.csv"
+                + ", but an RFM of that name already exists. Bailing out!"
+ ) # Scan again and actually import the data for row in simple_cost_dat: - z = row['load_zone'] - f = row['fuel'] - p = int(row['period']) - f_cost = float(row['fuel_cost']) + z = row["load_zone"] + f = row["fuel"] + p = int(row["period"]) + f_cost = float(row["fuel_cost"]) # Make a new single-load zone regional fuel market unless we # already defined one in this loop for a different period. rfm = z + "_" + f - if(rfm not in switch_data.data(name='REGIONAL_FUEL_MARKETS')): - switch_data.data(name='REGIONAL_FUEL_MARKETS').append(rfm) - switch_data.data(name='rfm_fuel')[rfm] = f - switch_data.data(name='ZONE_RFMS').append((z, rfm)) + if rfm not in switch_data.data(name="REGIONAL_FUEL_MARKETS"): + switch_data.data(name="REGIONAL_FUEL_MARKETS").append(rfm) + switch_data.data(name="rfm_fuel")[rfm] = f + switch_data.data(name="ZONE_RFMS").append((z, rfm)) # Make a single supply tier for this RFM and period st = 0 - switch_data.data(name='RFM_SUPPLY_TIERS').append((rfm, p, st)) - switch_data.data(name='rfm_supply_tier_cost')[rfm, p, st] = f_cost - switch_data.data(name='rfm_supply_tier_limit')[rfm, p, st] = \ - float('inf') + switch_data.data(name="RFM_SUPPLY_TIERS").append((rfm, p, st)) + switch_data.data(name="rfm_supply_tier_cost")[rfm, p, st] = f_cost + # No need to specify an upper limit, since default is infinity + # (and this creates inf values in the data portal that could get + # written out to .dat files for PySP, which would be unable to read + # those values in Pyomo 5.7+) + # switch_data.data(name="rfm_supply_tier_limit")[rfm, p, st] = float("inf") diff --git a/switch_model/energy_sources/fuel_costs/markets_expansion.py b/switch_model/energy_sources/fuel_costs/markets_expansion.py new file mode 100644 index 000000000..a039c08e0 --- /dev/null +++ b/switch_model/energy_sources/fuel_costs/markets_expansion.py @@ -0,0 +1,186 @@ +""" +Defines model components to allow capital investment to expand fuel markets. +""" + +# TODO: eventually this should be extended to use capital costs instead of fixed +# per-unit costs (probably as part of a generalized asset tracking system). + +# TODO: create indexing set for fuel markets expansion that only refers to +# limited-capacity tiers, then only define the variables and constraints over +# that set. This will simplify the code some -- no need to force activation of +# unlimited tiers. We could go further and only consider the tiers with prices. +# But covering all the limited tiers may make it easier to interpret the output +# files, and also allows users to add side constraints (e.g., some other side +# effect happens in the year a tier is built, even if it's a zero-cost tier). +# If users want costs and/or side-constraints for activation of unlimited tiers +# (e.g., to model fuel switching for a utility), they should supply a limit. If +# we didn't require them to provide a limit, then we would have to create an +# arbitrary limit here anyway for the big-M constraint that prevents usage if +# the tier is not activated. + +import os +from pyomo.environ import * + +infinity = float("inf") + + +def define_components(m): + """ + This makes it possible to invest capital to gain access to a fuel supply + tier, as defined in ./markets.py. Each fuel market tier is one + capacity-expansion choice, and it must be fully built and/or activated each + period. To do this, we add binary variables and confine additions and + activations to match them. Each tier has a fixed and variable cost and + duration (locked in if it is developed). 
Variable costs are
+    implemented in markets.py, and this module adds fixed costs. These are
+    defined as a cost per MMBtu of fuel supply made _available_ by that tier (not
+    necessarily used). In the future we may replace this with a more complete
+    capital cost system, similar to generation projects.
+
+    This module defines binary activation variables for all supply tiers, but
+    forces activation of tiers with unlimited capacity, because otherwise we
+    would need to introduce an arbitrary limit for them for the big-M
+    constraints below. This requirement doesn't affect costs, because unlimited
+    tiers must have zero cost in the current formulation. If there are side
+    effects of the build/activate decisions, then users should provide high
+    limits for these tiers (but not infinite limits, which are the default).
+
+    Unlimited tiers must also have zero cost to avoid infinite activation cost
+    in the current formulation (with per-unit fixed costs). We could instead use
+    lump-sum activation costs, but then it would be a bad idea to force
+    activation of unlimited tiers with nonzero costs. So we would instead need
+    to introduce an arbitrary limit for the big-M constraints.
+
+    This module defines the following components:
+
+    rfm_supply_tier_fixed_cost[RFM_SUPPLY_TIERS]: cost to activate each supply
+    tier, expressed per MMBtu of potential supply. Defaults to 0.0 (same as if
+    this module were not used). Should be specified as 'fixed_cost' in
+    fuel_supply_curves.csv.
+
+    rfm_supply_tier_max_age[RFM_SUPPLY_TIERS]: lifetime for each tier, once it is placed in
+    service. Default is one period. Should be specified as 'max_age' in
+    fuel_supply_curves.csv.
+
+    RFMBuildSupplyTier[RFM_SUPPLY_TIERS]: binary variable indicating whether
+    this tier is first deployed in the specified period
+
+    RFMSupplyTierActive[RFM_SUPPLY_TIERS]: binary expression indicating whether
+    this tier is active in the specified period (based on whether
+    RFMBuildSupplyTier was set within the previous rfm_supply_tier_max_age
+    years)
+
+    RFM_Fixed_Costs_Annual[PERIODS]: total fixed cost for supply tiers that have
+    been activated; included in model objective function.
+
+    Only_One_RFMSupplyTierActive: constraint that prevents activating a single
+    tier multiple times in the same year (e.g., by building once, then building
+    again before retirement)
+
+    Force_Activate_Unlimited_RFM_Supply_Tier: constraint that forces all
+    unlimited tiers to be activated; avoids applying the big-M constraint
+    with an infinite upper limit.
+
+    Enforce_RFM_Supply_Tier_Activated: constraint that prevents delivery of fuel
+    from tiers that have not been activated
+    """
+
+    # fixed cost (per MMBtu/year of capacity) of having each tier in service
+    # during each period. Note: this must be zero if a tier has unlimited
+    # capacity, to avoid having infinite cost
+    m.rfm_supply_tier_fixed_cost = Param(
+        m.RFM_SUPPLY_TIERS,
+        default=0.0,
+        within=NonNegativeReals,
+        validate=lambda m, v, r, p, st: v == 0.0
+        or m.rfm_supply_tier_limit[r, p, st] < infinity,
+    )
+
+    # lifetime for each tier, once it is placed in service
+    # (default is one period)
+    m.rfm_supply_tier_max_age = Param(
+        m.RFM_SUPPLY_TIERS,
+        default=lambda m, r, p, st: m.period_length_years[p],
+        within=NonNegativeReals,
+    )
+
+    # Note: in large regions, a tier represents a block of expandable capacity,
+    # so this could be continuous. But to model that, you can just lump the
+    # fixed cost into the variable cost and not use this module.
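+    # Hypothetical illustration: with 10-year periods, building a tier in 2030
+    # (RFMBuildSupplyTier[r, 2030, st] = 1) with rfm_supply_tier_max_age of 20
+    # years makes RFMSupplyTierActive[r, p, st] equal 1 for the periods
+    # starting in 2030 and 2040, and 0 again from 2050 onward.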
+ m.RFMBuildSupplyTier = Var(m.RFM_SUPPLY_TIERS, within=Binary) + + # will the tier be active during each period? + m.RFMSupplyTierActive = Expression( + m.RFM_SUPPLY_TIERS, + rule=lambda m, r, p, st: sum( + m.RFMBuildSupplyTier[r, vintage, st] + for vintage in m.PERIODS + if ( + # starts before end of current period + vintage < m.period_start[p] + m.period_length_years[p] + # available to be built + and (r, vintage, st) in m.RFM_SUPPLY_TIERS + # ends after start of current period + and vintage + m.rfm_supply_tier_max_age[r, vintage, st] + > m.period_start[p] + ) + ), + ) + + # Don't double-activate any tier + m.Only_One_RFMSupplyTierActive = Constraint( + m.RFM_SUPPLY_TIERS, + rule=lambda m, r, p, st: m.RFMSupplyTierActive[r, p, st] <= 1, + ) + + # force all unlimited tiers to be activated (since they must have no cost, + # and to avoid a limit of 0.0 * infinity in the constraint below) + m.Force_Activate_Unlimited_RFM_Supply_Tier = Constraint( + m.RFM_SUPPLY_TIERS, + rule=lambda m, r, p, st: (m.RFMSupplyTierActive[r, p, st] == 1) + if (m.rfm_supply_tier_limit[r, p, st] == infinity) + else Constraint.Skip, + ) + + # only allow delivery from activated tiers + # (and skip unlimited tiers to avoid a complaint by glpk about these) + # note: this could be merged with the previous constraint, since they are + # complementary + m.Enforce_RFM_Supply_Tier_Activated = Constraint( + m.RFM_SUPPLY_TIERS, + rule=lambda m, r, p, st: ( + m.ConsumeFuelTier[r, p, st] + <= m.RFMSupplyTierActive[r, p, st] * m.rfm_supply_tier_limit[r, p, st] + ) + if m.rfm_supply_tier_limit[r, p, st] < infinity + else Constraint.Skip, + ) + + # total cost incurred for all the activated supply tiers + m.RFM_Fixed_Costs_Annual = Expression( + m.PERIODS, + rule=lambda m, p: sum( + ( + # note: we dance around projects with unlimited supply and 0.0 fixed cost + 0.0 + if m.rfm_supply_tier_fixed_cost[rfm_st] == 0.0 + else ( + m.rfm_supply_tier_fixed_cost[rfm_st] + * m.RFMSupplyTierActive[rfm_st] + * m.rfm_supply_tier_limit[rfm_st] + ) + ) + for r in m.REGIONAL_FUEL_MARKETS + for rfm_st in m.SUPPLY_TIERS_FOR_RFM_PERIOD[r, p] + ), + ) + m.Cost_Components_Per_Period.append("RFM_Fixed_Costs_Annual") + + +def load_inputs(m, switch_data, inputs_dir): + switch_data.load_aug( + optional=True, + filename=os.path.join(inputs_dir, "fuel_supply_curves.csv"), + select=("regional_fuel_market", "period", "tier", "fixed_cost", "max_age"), + param=(m.rfm_supply_tier_fixed_cost, m.rfm_supply_tier_max_age), + ) diff --git a/switch_model/energy_sources/fuel_costs/simple.py b/switch_model/energy_sources/fuel_costs/simple.py index 58d213d75..0b38cffe1 100644 --- a/switch_model/energy_sources/fuel_costs/simple.py +++ b/switch_model/energy_sources/fuel_costs/simple.py @@ -1,4 +1,4 @@ -# Copyright (c) 2015-2019 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. 
""" @@ -11,9 +11,14 @@ import os from pyomo.environ import * -dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones',\ - 'switch_model.energy_sources.properties.properties',\ - 'switch_model.generators.core.build', 'switch_model.generators.core.dispatch' +dependencies = ( + "switch_model.timescales", + "switch_model.balancing.load_zones", + "switch_model.energy_sources.properties.properties", + "switch_model.generators.core.build", + "switch_model.generators.core.dispatch", +) + def define_components(mod): """ @@ -23,8 +28,8 @@ def define_components(mod): parameter is mandatory. Unless otherwise specified, all dollar values are real dollars in BASE_YEAR. - ZONE_FUEL_PERIODS is a set that describes fuel availability. Each - element of the set is (load_zone, fuel, period). + ZONE_FUEL_PERIODS is a set of (load_zone, fuel, period) for which fuel_cost + has been provided. fuel_cost[(z, f, p) in ZONE_FUEL_PERIODS] describes flat fuel costs for each supply of fuel. Costs can vary by load zone and period. @@ -46,44 +51,45 @@ def define_components(mod): mod.ZONE_FUEL_PERIODS = Set( dimen=3, validate=lambda m, z, f, p: ( - z in m.LOAD_ZONES and - f in m.FUELS and - p in m.PERIODS)) - mod.fuel_cost = Param( - mod.ZONE_FUEL_PERIODS, - within=NonNegativeReals) - mod.min_data_check('ZONE_FUEL_PERIODS', 'fuel_cost') + z in m.LOAD_ZONES and f in m.FUELS and p in m.PERIODS + ), + ) + mod.fuel_cost = Param(mod.ZONE_FUEL_PERIODS, within=NonNegativeReals) + mod.min_data_check("ZONE_FUEL_PERIODS", "fuel_cost") mod.GEN_TP_FUELS_UNAVAILABLE = Set( + dimen=3, initialize=mod.GEN_TP_FUELS, - filter=lambda m, g, t, f: ( - (m.gen_load_zone[g], f, m.tp_period[t]) - not in m.ZONE_FUEL_PERIODS)) + filter=lambda m, g, t, f: (m.gen_load_zone[g], f, m.tp_period[t]) + not in m.ZONE_FUEL_PERIODS, + ) mod.Enforce_Fuel_Unavailability = Constraint( mod.GEN_TP_FUELS_UNAVAILABLE, - rule=lambda m, g, t, f: m.GenFuelUseRate[g, t, f] == 0) + rule=lambda m, g, t, f: m.GenFuelUseRate[g, t, f] == 0, + ) # Summarize total fuel costs in each timepoint for the objective function def FuelCostsPerTP_rule(m, t): - if not hasattr(m, 'FuelCostsPerTP_dict'): + if not hasattr(m, "FuelCostsPerTP_dict"): # cache all Fuel_Cost_TP values in a dictionary (created in one pass) m.FuelCostsPerTP_dict = {t2: 0.0 for t2 in m.TIMEPOINTS} for (g, t2, f) in m.GEN_TP_FUELS: - if (m.gen_load_zone[g], f, m.tp_period[t2]) in m.ZONE_FUEL_PERIODS: + if (g, t2, f) not in m.GEN_TP_FUELS_UNAVAILABLE: m.FuelCostsPerTP_dict[t2] += ( m.GenFuelUseRate[g, t2, f] - * m.fuel_cost[m.gen_load_zone[g], f, m.tp_period[t2]]) + * m.fuel_cost[m.gen_load_zone[g], f, m.tp_period[t2]] + ) # return a result from the dictionary and pop the element each time # to release memory return m.FuelCostsPerTP_dict.pop(t) + mod.FuelCostsPerTP = Expression(mod.TIMEPOINTS, rule=FuelCostsPerTP_rule) - mod.Cost_Components_Per_TP.append('FuelCostsPerTP') + mod.Cost_Components_Per_TP.append("FuelCostsPerTP") def load_inputs(mod, switch_data, inputs_dir): """ - - Import simple fuel cost data. The following files are expected in + Import simple fuel cost data. 
The following file is expected in
    the input directory:

    fuel_cost.csv
@@ -92,7 +98,7 @@
    """
    switch_data.load_aug(
-        filename=os.path.join(inputs_dir, 'fuel_cost.csv'),
-        select=('load_zone', 'fuel', 'period', 'fuel_cost'),
+        filename=os.path.join(inputs_dir, "fuel_cost.csv"),
        index=mod.ZONE_FUEL_PERIODS,
-        param=[mod.fuel_cost])
+        param=[mod.fuel_cost],
+    )
diff --git a/switch_model/energy_sources/fuel_costs/simple_per_timepoint.py b/switch_model/energy_sources/fuel_costs/simple_per_timepoint.py
new file mode 100644
index 000000000..5ccc325eb
--- /dev/null
+++ b/switch_model/energy_sources/fuel_costs/simple_per_timepoint.py
@@ -0,0 +1,110 @@
+# Copyright (c) 2015-2022 The Switch Authors. All rights reserved.
+# Licensed under the Apache License, Version 2.0, which is in the LICENSE file.
+
+"""
+
+A simple description of flat fuel costs for the Switch model that
+serves as an alternative to the more complex fuel_markets with tiered
+supply curves. This is mutually exclusive with the fuel_markets module.
+
+"""
+import os
+from pyomo.environ import *
+
+dependencies = (
+    "switch_model.timescales",
+    "switch_model.balancing.load_zones",
+    "switch_model.energy_sources.properties.properties",
+    "switch_model.generators.core.build",
+    "switch_model.generators.core.dispatch",
+)
+
+
+def define_components(mod):
+    """
+
+    Augments a Pyomo abstract model object with sets and parameters to
+    describe simple fuel costs. Unless otherwise stated, each set and
+    parameter is mandatory. Unless otherwise specified, all dollar
+    values are real dollars in BASE_YEAR.
+
+    ZONE_FUEL_TIMEPOINTS is a set of (load_zone, fuel, timepoint) for which
+    fuel_cost_per_timepoint has been specified.
+
+    fuel_cost_per_timepoint[(z, f, t) in ZONE_FUEL_TIMEPOINTS] describes flat
+    fuel costs for each supply of fuel. Costs can vary by load zone and
+    timepoint.
+
+    Note that fuels can only be used in the locations and times for which
+    fuel_cost_per_timepoint has been specified.
+
+    GEN_TP_FUELS_UNAVAILABLE is a subset of GEN_TP_FUELS that describes which
+    timepoints don't have fuel available.
+
+    Enforce_Fuel_Unavailability[(g, t, f) in GEN_TP_FUELS_UNAVAILABLE] is a
+    constraint that restricts GenFuelUseRate to 0 in load zones and
+    timepoints where the project's fuel is unavailable.
+
+    FuelCostsPerTP[t in TIMEPOINTS] is an expression that summarizes fuel costs
+    for the objective function.
+
+    """
+
+    # TODO: maybe rename fuel_cost_per_timepoint component and/or .csv file to fuel_cost
+    # (but that could cause confusion in the documentation?)
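+    # Hypothetical illustration: a fuel_cost_per_timepoint.csv row of
+    #     North,NaturalGas,tp1,6.10
+    # (where tp1 is some timepoint ID) lets gas-fired projects in zone North
+    # burn gas during timepoint tp1 at 6.10 per MMBtu; any (zone, fuel,
+    # timepoint) combination with no row gets no fuel, via
+    # Enforce_Fuel_Unavailability below.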
+    mod.ZONE_FUEL_TIMEPOINTS = Set(
+        dimen=3,
+        validate=lambda m, z, f, p: (
+            z in m.LOAD_ZONES and f in m.FUELS and p in m.TIMEPOINTS
+        ),
+    )
+    mod.fuel_cost_per_timepoint = Param(
+        mod.ZONE_FUEL_TIMEPOINTS, within=NonNegativeReals
+    )
+    mod.min_data_check("ZONE_FUEL_TIMEPOINTS", "fuel_cost_per_timepoint")
+
+    # don't allow use of a fuel when no cost has been specified
+    mod.GEN_TP_FUELS_UNAVAILABLE = Set(
+        dimen=3,
+        initialize=mod.GEN_TP_FUELS,
+        filter=lambda m, g, t, f: (m.gen_load_zone[g], f, t)
+        not in m.ZONE_FUEL_TIMEPOINTS,
+    )
+    mod.Enforce_Fuel_Unavailability = Constraint(
+        mod.GEN_TP_FUELS_UNAVAILABLE,
+        rule=lambda m, g, t, f: m.GenFuelUseRate[g, t, f] == 0,
+    )
+
+    # Summarize total fuel costs in each timepoint for the objective function
+    def FuelCostsPerTP_rule(m, t):
+        if not hasattr(m, "FuelCostsPerTP_dict"):
+            # cache all Fuel_Cost_TP values in a dictionary (created in one pass)
+            m.FuelCostsPerTP_dict = {t2: 0.0 for t2 in m.TIMEPOINTS}
+            for (g, t2, f) in m.GEN_TP_FUELS:
+                if (g, t2, f) not in m.GEN_TP_FUELS_UNAVAILABLE:
+                    m.FuelCostsPerTP_dict[t2] += (
+                        m.GenFuelUseRate[g, t2, f]
+                        * m.fuel_cost_per_timepoint[m.gen_load_zone[g], f, t2]
+                    )
+        # return a result from the dictionary and pop the element each time
+        # to release memory
+        return m.FuelCostsPerTP_dict.pop(t)
+
+    mod.FuelCostsPerTP = Expression(mod.TIMEPOINTS, rule=FuelCostsPerTP_rule)
+    mod.Cost_Components_Per_TP.append("FuelCostsPerTP")
+
+
+def load_inputs(mod, switch_data, inputs_dir):
+    """
+    Import simple fuel cost data. The following file is expected in
+    the input directory:
+
+    fuel_cost_per_timepoint.csv
+    load_zone, fuel, timepoint, fuel_cost_per_timepoint
+
+    """
+    switch_data.load_aug(
+        filename=os.path.join(inputs_dir, "fuel_cost_per_timepoint.csv"),
+        index=mod.ZONE_FUEL_TIMEPOINTS,
+        param=[mod.fuel_cost_per_timepoint],
+    )
diff --git a/switch_model/energy_sources/properties.py b/switch_model/energy_sources/properties.py
index a0fe8238f..c30e14ffc 100644
--- a/switch_model/energy_sources/properties.py
+++ b/switch_model/energy_sources/properties.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2015-2019 The Switch Authors. All rights reserved.
+# Copyright (c) 2015-2022 The Switch Authors. All rights reserved.
 # Licensed under the Apache License, Version 2.0, which is in the LICENSE file.

"""
@@ -10,7 +10,8 @@
import os
from pyomo.environ import *

-dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones'
+dependencies = "switch_model.timescales", "switch_model.balancing.load_zones"
+

def define_components(mod):
    """
@@ -90,21 +91,22 @@ def define_components(mod):
    """

-    mod.NON_FUEL_ENERGY_SOURCES = Set()
-    mod.FUELS = Set()
+    mod.NON_FUEL_ENERGY_SOURCES = Set(dimen=1)
+    mod.FUELS = Set(dimen=1)
    mod.f_co2_intensity = Param(mod.FUELS, within=NonNegativeReals)
-    mod.f_upstream_co2_intensity = Param(
-        mod.FUELS, within=Reals, default=0)
-    mod.min_data_check('f_co2_intensity')
+    mod.f_upstream_co2_intensity = Param(mod.FUELS, within=Reals, default=0)
+    mod.min_data_check("f_co2_intensity")
    # Ensure that fuel and non-fuel sets have no overlap.
    mod.e_source_is_fuel_or_not_check = BuildCheck(
-        rule=lambda m: len(m.FUELS & m.NON_FUEL_ENERGY_SOURCES) == 0)
+        rule=lambda m: len(m.FUELS & m.NON_FUEL_ENERGY_SOURCES) == 0
+    )
    # ENERGY_SOURCES is the union of fuel and non-fuel sets. Pipe | is
    # the union operator for Pyomo sets.
mod.ENERGY_SOURCES = Set( - initialize=mod.NON_FUEL_ENERGY_SOURCES | mod.FUELS) - mod.min_data_check('ENERGY_SOURCES') + dimen=1, initialize=mod.NON_FUEL_ENERGY_SOURCES | mod.FUELS + ) + mod.min_data_check("ENERGY_SOURCES") def load_inputs(mod, switch_data, inputs_dir): @@ -138,11 +140,13 @@ def load_inputs(mod, switch_data, inputs_dir): switch_data.load_aug( optional=True, - filename=os.path.join(inputs_dir, 'non_fuel_energy_sources.csv'), - set=('NON_FUEL_ENERGY_SOURCES')) + filename=os.path.join(inputs_dir, "non_fuel_energy_sources.csv"), + set=("NON_FUEL_ENERGY_SOURCES"), + ) switch_data.load_aug( optional=True, - filename=os.path.join(inputs_dir, 'fuels.csv'), - select=('fuel', 'co2_intensity', 'upstream_co2_intensity'), + filename=os.path.join(inputs_dir, "fuels.csv"), + select=("fuel", "co2_intensity", "upstream_co2_intensity"), index=mod.FUELS, - param=(mod.f_co2_intensity, mod.f_upstream_co2_intensity)) + param=(mod.f_co2_intensity, mod.f_upstream_co2_intensity), + ) diff --git a/switch_model/financials.py b/switch_model/financials.py index b79b736c9..b80273191 100644 --- a/switch_model/financials.py +++ b/switch_model/financials.py @@ -1,4 +1,4 @@ -# Copyright (c) 2015-2019 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. """ @@ -11,7 +11,8 @@ import os import pandas as pd -dependencies = 'switch_model.timescales' +dependencies = "switch_model.timescales" + def capital_recovery_factor(ir, t): """ @@ -35,7 +36,7 @@ def capital_recovery_factor(ir, t): rate, paid over 20 years is 0.09439. If the principal was $100, loan\ payments would be $9.44 """ - return 1/t if ir == 0 else ir/(1-(1+ir)**-t) + return 1 / t if ir == 0 else ir / (1 - (1 + ir) ** -t) def uniform_series_to_present_value(dr, t): @@ -60,7 +61,7 @@ def uniform_series_to_present_value(dr, t): round(1/capital_recovery_factor(.07,20),7) True """ - return t if dr == 0 else (1-(1+dr)**-t)/dr + return t if dr == 0 else (1 - (1 + dr) ** -t) / dr def future_to_present_value(dr, t): @@ -71,7 +72,7 @@ def future_to_present_value(dr, t): >>> round(future_to_present_value(.07,10),7) 0.5083493 """ - return (1+dr)**-t + return (1 + dr) ** -t def present_to_future_value(ir, t): @@ -87,7 +88,8 @@ def present_to_future_value(ir, t): future_to_present_value(.07,10),7) == 1 True """ - return (1+ir)**t + return (1 + ir) ** t + def define_dynamic_lists(mod): """ @@ -115,6 +117,7 @@ def define_dynamic_lists(mod): mod.Cost_Components_Per_TP = [] mod.Cost_Components_Per_Period = [] + def define_components(mod): """ @@ -221,23 +224,26 @@ def define_components(mod): mod.base_financial_year = Param(within=NonNegativeReals) mod.interest_rate = Param(within=NonNegativeReals) mod.discount_rate = Param( - within=NonNegativeReals, default=lambda m: value(m.interest_rate)) - mod.min_data_check('base_financial_year', 'interest_rate') + within=NonNegativeReals, default=lambda m: value(m.interest_rate) + ) + mod.min_data_check("base_financial_year", "interest_rate") mod.bring_annual_costs_to_base_year = Param( mod.PERIODS, within=NonNegativeReals, initialize=lambda m, p: ( - uniform_series_to_present_value( - m.discount_rate, m.period_length_years[p]) * - future_to_present_value( - m.discount_rate, - m.period_start[p] - m.base_financial_year))) + uniform_series_to_present_value(m.discount_rate, m.period_length_years[p]) + * future_to_present_value( + m.discount_rate, m.period_start[p] - m.base_financial_year + ) + ), 
+ ) mod.bring_timepoint_costs_to_base_year = Param( mod.TIMEPOINTS, within=NonNegativeReals, initialize=lambda m, t: ( - m.bring_annual_costs_to_base_year[m.tp_period[t]] * - m.tp_weight_in_year[t])) + m.bring_annual_costs_to_base_year[m.tp_period[t]] * m.tp_weight_in_year[t] + ), + ) def define_dynamic_components(mod): @@ -270,7 +276,8 @@ def define_dynamic_components(mod): def calc_tp_costs_in_period(m, t): return sum( getattr(m, tp_cost)[t] * m.tp_weight_in_year[t] - for tp_cost in m.Cost_Components_Per_TP) + for tp_cost in m.Cost_Components_Per_TP + ) # Note: multiply annual costs by a conversion factor if running this # model on an intentional subset of annual data whose weights do not @@ -278,31 +285,29 @@ def calc_tp_costs_in_period(m, t): # This would also require disabling the validate_time_weights check. def calc_annual_costs_in_period(m, p): return sum( - getattr(m, annual_cost)[p] - for annual_cost in m.Cost_Components_Per_Period) + getattr(m, annual_cost)[p] for annual_cost in m.Cost_Components_Per_Period + ) def calc_sys_costs_per_period(m, p): return ( # All annual payments in the period ( - calc_annual_costs_in_period(m, p) + - sum(calc_tp_costs_in_period(m, t) for t in m.TPS_IN_PERIOD[p]) - ) * + calc_annual_costs_in_period(m, p) + + sum(calc_tp_costs_in_period(m, t) for t in m.TPS_IN_PERIOD[p]) + ) + * # Conversion from annual costs to base year m.bring_annual_costs_to_base_year[p] ) - mod.SystemCostPerPeriod = Expression( - mod.PERIODS, - rule=calc_sys_costs_per_period) + mod.SystemCostPerPeriod = Expression(mod.PERIODS, rule=calc_sys_costs_per_period) # starting with Pyomo 4.2, it is impossible to call Objective.reconstruct() # or calculate terms like Objective / , # so it's best to define a separate expression and use that for these purposes. mod.SystemCost = Expression( - rule=lambda m: sum(m.SystemCostPerPeriod[p] for p in m.PERIODS)) - mod.Minimize_System_Cost = Objective( - rule=lambda m: m.SystemCost, - sense=minimize) + rule=lambda m: sum(m.SystemCostPerPeriod[p] for p in m.PERIODS) + ) + mod.Minimize_System_Cost = Objective(rule=lambda m: m.SystemCost, sense=minimize) def load_inputs(mod, switch_data, inputs_dir): @@ -314,59 +319,76 @@ def load_inputs(mod, switch_data, inputs_dir): the second. 
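    For example (hypothetical values), financials.csv could look like:

        base_financial_year,interest_rate,discount_rate
        2020,0.07,0.05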
""" switch_data.load_aug( - filename=os.path.join(inputs_dir, 'financials.csv'), - optional=False, auto_select=True, - param=(mod.base_financial_year, mod.interest_rate, mod.discount_rate) + filename=os.path.join(inputs_dir, "financials.csv"), + optional=False, + param=(mod.base_financial_year, mod.interest_rate, mod.discount_rate), ) + def post_solve(instance, outdir): m = instance - # Overall electricity costs - normalized_dat = [ - { - "PERIOD": p, - "SystemCostPerPeriod_NPV": value(m.SystemCostPerPeriod[p]), - "SystemCostPerPeriod_Real": value( - m.SystemCostPerPeriod[p] / m.bring_annual_costs_to_base_year[p] - ), - "EnergyCostReal_per_MWh": value( - m.SystemCostPerPeriod[p] / m.bring_annual_costs_to_base_year[p] / - sum(m.zone_total_demand_in_period_mwh[z,p] for z in m.LOAD_ZONES) - ), - "SystemDemand_MWh": value(sum( - m.zone_total_demand_in_period_mwh[z,p] for z in m.LOAD_ZONES - )) - } for p in m.PERIODS - ] - df = pd.DataFrame(normalized_dat) - df.set_index(["PERIOD"], inplace=True) - df.to_csv(os.path.join(outdir, "electricity_cost.csv")) + # Overall electricity costs, if appropriate (some models may be gas-only) + if hasattr(m, "zone_total_demand_in_period_mwh"): + normalized_dat = [ + { + "PERIOD": p, + "SystemCostPerPeriod_NPV": value(m.SystemCostPerPeriod[p]), + "SystemCostPerPeriod_Real": value( + m.SystemCostPerPeriod[p] / m.bring_annual_costs_to_base_year[p] + ), + "EnergyCostReal_per_MWh": value( + m.SystemCostPerPeriod[p] + / m.bring_annual_costs_to_base_year[p] + / sum(m.zone_total_demand_in_period_mwh[z, p] for z in m.LOAD_ZONES) + ), + "SystemDemand_MWh": value( + sum(m.zone_total_demand_in_period_mwh[z, p] for z in m.LOAD_ZONES) + ), + } + for p in m.PERIODS + ] + df = pd.DataFrame(normalized_dat) + df.set_index(["PERIOD"], inplace=True) + if instance.options.sorted_output: + df.sort_index(inplace=True) + df.to_csv(os.path.join(outdir, "electricity_cost.csv")) + # Itemized annual costs annualized_costs = [ { - "PERIOD": p, - "Component": annual_cost, - "Component_type": "annual", - "AnnualCost_NPV": value( - getattr(m, annual_cost)[p] * m.bring_annual_costs_to_base_year[p] - ), - "AnnualCost_Real": value(getattr(m, annual_cost)[p]) - } for p in m.PERIODS for annual_cost in m.Cost_Components_Per_Period + "PERIOD": p, + "Component": annual_cost, + "Component_type": "annual", + "AnnualCost_NPV": value( + getattr(m, annual_cost)[p] * m.bring_annual_costs_to_base_year[p] + ), + "AnnualCost_Real": value(getattr(m, annual_cost)[p]), + } + for p in m.PERIODS + for annual_cost in m.Cost_Components_Per_Period ] + [ { - "PERIOD": p, - "Component": tp_cost, - "Component_type": "timepoint", - "AnnualCost_NPV": value(sum( - getattr(m, tp_cost)[t] * m.tp_weight_in_year[t] - for t in m.TPS_IN_PERIOD[p] - ) * m.bring_annual_costs_to_base_year[p]), - "AnnualCost_Real": value(sum( - getattr(m, tp_cost)[t] * m.tp_weight_in_year[t] - for t in m.TPS_IN_PERIOD[p] - )) - } for p in m.PERIODS for tp_cost in m.Cost_Components_Per_TP + "PERIOD": p, + "Component": tp_cost, + "Component_type": "timepoint", + "AnnualCost_NPV": value( + sum( + getattr(m, tp_cost)[t] * m.tp_weight_in_year[t] + for t in m.TPS_IN_PERIOD[p] + ) + * m.bring_annual_costs_to_base_year[p] + ), + "AnnualCost_Real": value( + sum( + getattr(m, tp_cost)[t] * m.tp_weight_in_year[t] + for t in m.TPS_IN_PERIOD[p] + ) + ), + } + for p in m.PERIODS + for tp_cost in m.Cost_Components_Per_TP ] - df = pd.DataFrame(annualized_costs) - df.set_index(["PERIOD", "Component"], inplace=True) + df = 
pd.DataFrame(annualized_costs).set_index(["PERIOD", "Component"])
+    if instance.options.sorted_output:
+        df.sort_index(inplace=True)
    df.to_csv(os.path.join(outdir, "costs_itemized.csv"))
diff --git a/switch_model/generators/core/__init__.py b/switch_model/generators/core/__init__.py
index 5e1eebedb..fbdfbebad 100644
--- a/switch_model/generators/core/__init__.py
+++ b/switch_model/generators/core/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2017 The Switch Authors. All rights reserved.
+# Copyright (c) 2015-2022 The Switch Authors. All rights reserved.
 # Licensed under the Apache License, Version 2, which is in the LICENSE file.

"""
@@ -9,5 +9,6 @@
"""
core_modules = [
-    'switch_model.generators.core.build',
-    'switch_model.generators.core.dispatch']
+    "switch_model.generators.core.build",
+    "switch_model.generators.core.dispatch",
+]
diff --git a/switch_model/generators/core/build.py b/switch_model/generators/core/build.py
index 81d97c08d..d9136b4dd 100644
--- a/switch_model/generators/core/build.py
+++ b/switch_model/generators/core/build.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2015-2019 The Switch Authors. All rights reserved.
+# Copyright (c) 2015-2022 The Switch Authors. All rights reserved.
 # Licensed under the Apache License, Version 2.0, which is in the LICENSE file.
"""
Defines generation projects build-outs.
@@ -9,9 +9,15 @@
from pyomo.environ import *
from switch_model.financials import capital_recovery_factor as crf
from switch_model.reporting import write_table
+from switch_model.utilities import unique_list
+
+dependencies = (
+    "switch_model.timescales",
+    "switch_model.balancing.load_zones",
+    "switch_model.financials",
+    "switch_model.energy_sources.properties.properties",
+)

-dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones',\
-    'switch_model.financials', 'switch_model.energy_sources.properties.properties'

def define_components(mod):
    """
@@ -87,7 +93,7 @@ def define_components(mod):
    only includes existing or planned projects that are not subject to
    optimization.

-    gen_predetermined_cap[(g, build_year) in PREDETERMINED_GEN_BLD_YRS] is
+    build_gen_predetermined[(g, build_year) in PREDETERMINED_GEN_BLD_YRS] is
    a parameter that describes how much capacity was built in the past
    for existing projects, or is planned to be built for future projects.

@@ -117,7 +123,7 @@ def define_components(mod):
    force project build-outs to meet the minimum build requirements for
    generation technologies that have those requirements. They force
    BuildGen to be 0 when BuildMinGenCap is 0, and to be greater than
-    g_min_build_capacity when BuildMinGenCap is 1. In the latter case,
+    gen_min_build_capacity when BuildMinGenCap is 1. In the latter case,
    the upper constraint should be non-binding; the upper limit is set
    to 10 times the peak non-coincident demand of the entire system.

@@ -132,6 +138,9 @@ def define_components(mod):
    for the given project in the given period. For some project-period
    combinations, this will be an empty set.

+    PERIODS_FOR_GEN[g] is the set of all periods when generation project
+    g could potentially be operated.
+
    GEN_PERIODS describes periods in which generation projects could
    be operational. Unlike the related sets above, it is not indexed.
Instead it is specified as a set of (g, period) @@ -184,33 +193,48 @@ def define_components(mod): - Allow early capacity retirements with savings on fixed O&M """ - mod.GENERATION_PROJECTS = Set() - mod.gen_dbid = Param(mod.GENERATION_PROJECTS, default=lambda m, g: g) - mod.gen_tech = Param(mod.GENERATION_PROJECTS) - mod.GENERATION_TECHNOLOGIES = Set(initialize=lambda m: - {m.gen_tech[g] for g in m.GENERATION_PROJECTS} - ) - mod.gen_energy_source = Param(mod.GENERATION_PROJECTS, - validate=lambda m,val,g: val in m.ENERGY_SOURCES or val == "multiple") + mod.GENERATION_PROJECTS = Set(dimen=1) + mod.gen_dbid = Param(mod.GENERATION_PROJECTS, default=lambda m, g: g, within=Any) + mod.gen_tech = Param(mod.GENERATION_PROJECTS, within=Any) + mod.GENERATION_TECHNOLOGIES = Set( + dimen=1, + initialize=lambda m: unique_list(m.gen_tech[g] for g in m.GENERATION_PROJECTS), + ) + mod.gen_energy_source = Param( + mod.GENERATION_PROJECTS, + validate=lambda m, val, g: val in m.ENERGY_SOURCES or val == "multiple", + within=Any, + ) mod.gen_load_zone = Param(mod.GENERATION_PROJECTS, within=mod.LOAD_ZONES) mod.gen_max_age = Param(mod.GENERATION_PROJECTS, within=PositiveIntegers) mod.gen_is_variable = Param(mod.GENERATION_PROJECTS, within=Boolean) mod.gen_is_baseload = Param(mod.GENERATION_PROJECTS, within=Boolean, default=False) mod.gen_is_cogen = Param(mod.GENERATION_PROJECTS, within=Boolean, default=False) - mod.gen_is_distributed = Param(mod.GENERATION_PROJECTS, within=Boolean, default=False) - mod.gen_scheduled_outage_rate = Param(mod.GENERATION_PROJECTS, - within=PercentFraction, default=0) - mod.gen_forced_outage_rate = Param(mod.GENERATION_PROJECTS, - within=PercentFraction, default=0) - mod.min_data_check('GENERATION_PROJECTS', 'gen_tech', 'gen_energy_source', - 'gen_load_zone', 'gen_max_age', 'gen_is_variable') + mod.gen_is_distributed = Param( + mod.GENERATION_PROJECTS, within=Boolean, default=False + ) + mod.gen_scheduled_outage_rate = Param( + mod.GENERATION_PROJECTS, within=PercentFraction, default=0 + ) + mod.gen_forced_outage_rate = Param( + mod.GENERATION_PROJECTS, within=PercentFraction, default=0 + ) + mod.min_data_check( + "GENERATION_PROJECTS", + "gen_tech", + "gen_energy_source", + "gen_load_zone", + "gen_max_age", + "gen_is_variable", + ) """Construct GENS_* indexed sets efficiently with a 'construction dictionary' pattern: on the first call, make a single traversal through all generation projects to generate a complete index, use that for subsequent lookups, and clean up at the last call.""" + def GENS_IN_ZONE_init(m, z): - if not hasattr(m, 'GENS_IN_ZONE_dict'): + if not hasattr(m, "GENS_IN_ZONE_dict"): m.GENS_IN_ZONE_dict = {_z: [] for _z in m.LOAD_ZONES} for g in m.GENERATION_PROJECTS: m.GENS_IN_ZONE_dict[m.gen_load_zone[g]].append(g) @@ -218,22 +242,26 @@ def GENS_IN_ZONE_init(m, z): if not m.GENS_IN_ZONE_dict: del m.GENS_IN_ZONE_dict return result - mod.GENS_IN_ZONE = Set( - mod.LOAD_ZONES, - initialize=GENS_IN_ZONE_init - ) + + mod.GENS_IN_ZONE = Set(mod.LOAD_ZONES, dimen=1, initialize=GENS_IN_ZONE_init) mod.VARIABLE_GENS = Set( + dimen=1, initialize=mod.GENERATION_PROJECTS, - filter=lambda m, g: m.gen_is_variable[g]) + filter=lambda m, g: m.gen_is_variable[g], + ) mod.VARIABLE_GENS_IN_ZONE = Set( mod.LOAD_ZONES, - initialize=lambda m, z: [g for g in m.GENS_IN_ZONE[z] if m.gen_is_variable[g]]) + dimen=1, + initialize=lambda m, z: [g for g in m.GENS_IN_ZONE[z] if m.gen_is_variable[g]], + ) mod.BASELOAD_GENS = Set( + dimen=1, initialize=mod.GENERATION_PROJECTS, - filter=lambda m, g: 
m.gen_is_baseload[g]) + filter=lambda m, g: m.gen_is_baseload[g], + ) def GENS_BY_TECHNOLOGY_init(m, t): - if not hasattr(m, 'GENS_BY_TECH_dict'): + if not hasattr(m, "GENS_BY_TECH_dict"): m.GENS_BY_TECH_dict = {_t: [] for _t in m.GENERATION_TECHNOLOGIES} for g in m.GENERATION_PROJECTS: m.GENS_BY_TECH_dict[m.gen_tech[g]].append(g) @@ -241,50 +269,80 @@ def GENS_BY_TECHNOLOGY_init(m, t): if not m.GENS_BY_TECH_dict: del m.GENS_BY_TECH_dict return result + mod.GENS_BY_TECHNOLOGY = Set( - mod.GENERATION_TECHNOLOGIES, - initialize=GENS_BY_TECHNOLOGY_init + mod.GENERATION_TECHNOLOGIES, dimen=1, initialize=GENS_BY_TECHNOLOGY_init ) - mod.CAPACITY_LIMITED_GENS = Set(within=mod.GENERATION_PROJECTS) + mod.CAPACITY_LIMITED_GENS = Set(within=mod.GENERATION_PROJECTS, dimen=1) mod.gen_capacity_limit_mw = Param( - mod.CAPACITY_LIMITED_GENS, within=NonNegativeReals) - mod.DISCRETELY_SIZED_GENS = Set(within=mod.GENERATION_PROJECTS) - mod.gen_unit_size = Param( - mod.DISCRETELY_SIZED_GENS, within=PositiveReals) - mod.CCS_EQUIPPED_GENS = Set(within=mod.GENERATION_PROJECTS) + mod.CAPACITY_LIMITED_GENS, within=NonNegativeReals + ) + mod.DISCRETELY_SIZED_GENS = Set(dimen=1, within=mod.GENERATION_PROJECTS) + mod.gen_unit_size = Param(mod.DISCRETELY_SIZED_GENS, within=PositiveReals) + mod.CCS_EQUIPPED_GENS = Set(dimen=1, within=mod.GENERATION_PROJECTS) mod.gen_ccs_capture_efficiency = Param( - mod.CCS_EQUIPPED_GENS, within=PercentFraction) - mod.gen_ccs_energy_load = Param( - mod.CCS_EQUIPPED_GENS, within=PercentFraction) + mod.CCS_EQUIPPED_GENS, within=PercentFraction + ) + mod.gen_ccs_energy_load = Param(mod.CCS_EQUIPPED_GENS, within=PercentFraction) mod.gen_uses_fuel = Param( mod.GENERATION_PROJECTS, + within=Boolean, initialize=lambda m, g: ( - m.gen_energy_source[g] in m.FUELS - or m.gen_energy_source[g] == "multiple")) + m.gen_energy_source[g] in m.FUELS or m.gen_energy_source[g] == "multiple" + ), + ) mod.NON_FUEL_BASED_GENS = Set( + dimen=1, initialize=mod.GENERATION_PROJECTS, - filter=lambda m, g: not m.gen_uses_fuel[g]) + filter=lambda m, g: not m.gen_uses_fuel[g], + ) mod.FUEL_BASED_GENS = Set( + dimen=1, initialize=mod.GENERATION_PROJECTS, - filter=lambda m, g: m.gen_uses_fuel[g]) + filter=lambda m, g: m.gen_uses_fuel[g], + ) - mod.gen_full_load_heat_rate = Param( - mod.FUEL_BASED_GENS, - within=NonNegativeReals) + mod.gen_full_load_heat_rate = Param(mod.FUEL_BASED_GENS, within=NonNegativeReals) mod.MULTIFUEL_GENS = Set( + dimen=1, + within=Any, initialize=mod.GENERATION_PROJECTS, - filter=lambda m, g: m.gen_energy_source[g] == "multiple") - mod.FUELS_FOR_MULTIFUEL_GEN = Set(mod.MULTIFUEL_GENS, within=mod.FUELS) - mod.FUELS_FOR_GEN = Set(mod.FUEL_BASED_GENS, + filter=lambda m, g: m.gen_energy_source[g] == "multiple", + ) + mod.MULTI_FUEL_GEN_FUELS = Set( + dimen=2, validate=lambda m, g, f: g in m.MULTIFUEL_GENS and f in m.FUELS + ) + + def FUELS_FOR_MULTIFUEL_GEN_init(m, g): + if not hasattr(m, "FUELS_FOR_MULTIFUEL_GEN_dict"): + m.FUELS_FOR_MULTIFUEL_GEN_dict = {_g: [] for _g in m.MULTIFUEL_GENS} + for _g, f in m.MULTI_FUEL_GEN_FUELS: + m.FUELS_FOR_MULTIFUEL_GEN_dict[_g].append(f) + result = m.FUELS_FOR_MULTIFUEL_GEN_dict.pop(g) + if not m.FUELS_FOR_MULTIFUEL_GEN_dict: + del m.FUELS_FOR_MULTIFUEL_GEN_dict + return result + + mod.FUELS_FOR_MULTIFUEL_GEN = Set( + mod.MULTIFUEL_GENS, + dimen=1, + within=mod.FUELS, + initialize=FUELS_FOR_MULTIFUEL_GEN_init, + ) + mod.FUELS_FOR_GEN = Set( + mod.FUEL_BASED_GENS, + dimen=1, initialize=lambda m, g: ( m.FUELS_FOR_MULTIFUEL_GEN[g] if g in m.MULTIFUEL_GENS - 
else [m.gen_energy_source[g]])) + else [m.gen_energy_source[g]] + ), + ) def GENS_BY_ENERGY_SOURCE_init(m, e): - if not hasattr(m, 'GENS_BY_ENERGY_dict'): + if not hasattr(m, "GENS_BY_ENERGY_dict"): m.GENS_BY_ENERGY_dict = {_e: [] for _e in m.ENERGY_SOURCES} for g in m.GENERATION_PROJECTS: if g in m.FUEL_BASED_GENS: @@ -296,34 +354,34 @@ def GENS_BY_ENERGY_SOURCE_init(m, e): if not m.GENS_BY_ENERGY_dict: del m.GENS_BY_ENERGY_dict return result + mod.GENS_BY_ENERGY_SOURCE = Set( - mod.ENERGY_SOURCES, - initialize=GENS_BY_ENERGY_SOURCE_init + mod.ENERGY_SOURCES, dimen=1, initialize=GENS_BY_ENERGY_SOURCE_init ) mod.GENS_BY_NON_FUEL_ENERGY_SOURCE = Set( mod.NON_FUEL_ENERGY_SOURCES, - initialize=lambda m, s: m.GENS_BY_ENERGY_SOURCE[s] + dimen=1, + initialize=lambda m, s: m.GENS_BY_ENERGY_SOURCE[s], ) mod.GENS_BY_FUEL = Set( - mod.FUELS, - initialize=lambda m, f: m.GENS_BY_ENERGY_SOURCE[f] + mod.FUELS, dimen=1, initialize=lambda m, f: m.GENS_BY_ENERGY_SOURCE[f] ) - mod.PREDETERMINED_GEN_BLD_YRS = Set( - dimen=2) + mod.PREDETERMINED_GEN_BLD_YRS = Set(dimen=2) mod.GEN_BLD_YRS = Set( dimen=2, validate=lambda m, g, bld_yr: ( - (g, bld_yr) in m.PREDETERMINED_GEN_BLD_YRS or - (g, bld_yr) in m.GENERATION_PROJECTS * m.PERIODS)) + (g, bld_yr) in m.PREDETERMINED_GEN_BLD_YRS + or (g, bld_yr) in m.GENERATION_PROJECTS * m.PERIODS + ), + ) mod.NEW_GEN_BLD_YRS = Set( - dimen=2, - initialize=lambda m: m.GEN_BLD_YRS - m.PREDETERMINED_GEN_BLD_YRS) - mod.gen_predetermined_cap = Param( - mod.PREDETERMINED_GEN_BLD_YRS, - within=NonNegativeReals) - mod.min_data_check('gen_predetermined_cap') - + dimen=2, initialize=lambda m: m.GEN_BLD_YRS - m.PREDETERMINED_GEN_BLD_YRS + ) + mod.build_gen_predetermined = Param( + mod.PREDETERMINED_GEN_BLD_YRS, within=NonNegativeReals + ) + mod.min_data_check("build_gen_predetermined") def gen_build_can_operate_in_period(m, g, build_year, period): if build_year in m.PERIODS: @@ -331,9 +389,7 @@ def gen_build_can_operate_in_period(m, g, build_year, period): else: online = build_year retirement = online + m.gen_max_age[g] - return ( - online <= m.period_start[period] < retirement - ) + return online <= m.period_start[period] < retirement # This is probably more correct, but is a different behavior # mid_period = m.period_start[period] + 0.5 * m.period_length_years[period] # return online <= m.period_start[period] and mid_period <= retirement @@ -341,39 +397,50 @@ def gen_build_can_operate_in_period(m, g, build_year, period): # The set of periods when a project built in a certain year will be online mod.PERIODS_FOR_GEN_BLD_YR = Set( mod.GEN_BLD_YRS, + dimen=1, within=mod.PERIODS, ordered=True, - initialize=lambda m, g, bld_yr: set( - period for period in m.PERIODS - if gen_build_can_operate_in_period(m, g, bld_yr, period))) + initialize=lambda m, g, bld_yr: [ + period + for period in m.PERIODS + if gen_build_can_operate_in_period(m, g, bld_yr, period) + ], + ) # The set of build years that could be online in the given period # for the given project. 
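    # Hypothetical illustration: a project with gen_max_age = 30 and build
    # years 2000 and 2020 yields BLD_YRS_FOR_GEN_PERIOD[g, 2025] = [2000, 2020]
    # (both vintages still online), but BLD_YRS_FOR_GEN_PERIOD[g, 2035] =
    # [2020], because the 2000 vintage retires in 2030.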
mod.BLD_YRS_FOR_GEN_PERIOD = Set( - mod.GENERATION_PROJECTS, mod.PERIODS, - initialize=lambda m, g, period: set( - bld_yr for (gen, bld_yr) in m.GEN_BLD_YRS - if gen == g and - gen_build_can_operate_in_period(m, g, bld_yr, period))) + mod.GENERATION_PROJECTS, + mod.PERIODS, + dimen=1, + initialize=lambda m, g, period: unique_list( + bld_yr + for (gen, bld_yr) in m.GEN_BLD_YRS + if gen == g and gen_build_can_operate_in_period(m, g, bld_yr, period) + ), + ) # The set of periods when a generator is available to run mod.PERIODS_FOR_GEN = Set( mod.GENERATION_PROJECTS, - initialize=lambda m, g: [p for p in m.PERIODS if len(m.BLD_YRS_FOR_GEN_PERIOD[g, p]) > 0] + dimen=1, + initialize=lambda m, g: [ + p for p in m.PERIODS if len(m.BLD_YRS_FOR_GEN_PERIOD[g, p]) > 0 + ], ) def bounds_BuildGen(model, g, bld_yr): - if((g, bld_yr) in model.PREDETERMINED_GEN_BLD_YRS): - return (model.gen_predetermined_cap[g, bld_yr], - model.gen_predetermined_cap[g, bld_yr]) - elif(g in model.CAPACITY_LIMITED_GENS): + if (g, bld_yr) in model.PREDETERMINED_GEN_BLD_YRS: + return ( + model.build_gen_predetermined[g, bld_yr], + model.build_gen_predetermined[g, bld_yr], + ) + elif g in model.CAPACITY_LIMITED_GENS: # This does not replace Max_Build_Potential because # Max_Build_Potential applies across all build years. return (0, model.gen_capacity_limit_mw[g]) else: return (0, None) - mod.BuildGen = Var( - mod.GEN_BLD_YRS, - within=NonNegativeReals, - bounds=bounds_BuildGen) + + mod.BuildGen = Var(mod.GEN_BLD_YRS, within=NonNegativeReals, bounds=bounds_BuildGen) # Some projects are retired before the first study period, so they # don't appear in the objective function or any constraints. # In this case, pyomo may leave the variable value undefined even @@ -383,10 +450,11 @@ def bounds_BuildGen(model, g, bld_yr): # starting point we assign an appropriate value to all the existing # projects here. def BuildGen_assign_default_value(m, g, bld_yr): - m.BuildGen[g, bld_yr] = m.gen_predetermined_cap[g, bld_yr] + m.BuildGen[g, bld_yr] = m.build_gen_predetermined[g, bld_yr] + mod.BuildGen_assign_default_value = BuildAction( - mod.PREDETERMINED_GEN_BLD_YRS, - rule=BuildGen_assign_default_value) + mod.PREDETERMINED_GEN_BLD_YRS, rule=BuildGen_assign_default_value + ) # note: in pull request 78, commit e7f870d..., GEN_PERIODS # was mistakenly redefined as GENERATION_PROJECTS * PERIODS. @@ -399,36 +467,42 @@ def BuildGen_assign_default_value(m, g, bld_yr): # and 'C-Coal_ST' in m.GENS_IN_PERIOD[2020] and 'C-Coal_ST' not in m.GENS_IN_PERIOD[2030] mod.GEN_PERIODS = Set( dimen=2, - initialize=lambda m: - [(g, p) for g in m.GENERATION_PROJECTS for p in m.PERIODS_FOR_GEN[g]]) + initialize=lambda m: [ + (g, p) for g in m.GENERATION_PROJECTS for p in m.PERIODS_FOR_GEN[g] + ], + ) mod.GenCapacity = Expression( - mod.GENERATION_PROJECTS, mod.PERIODS, + mod.GENERATION_PROJECTS, + mod.PERIODS, rule=lambda m, g, period: sum( - m.BuildGen[g, bld_yr] - for bld_yr in m.BLD_YRS_FOR_GEN_PERIOD[g, period])) + m.BuildGen[g, bld_yr] for bld_yr in m.BLD_YRS_FOR_GEN_PERIOD[g, period] + ), + ) mod.Max_Build_Potential = Constraint( - mod.CAPACITY_LIMITED_GENS, mod.PERIODS, - rule=lambda m, g, p: ( - m.gen_capacity_limit_mw[g] >= m.GenCapacity[g, p])) + mod.CAPACITY_LIMITED_GENS, + mod.PERIODS, + rule=lambda m, g, p: (m.gen_capacity_limit_mw[g] >= m.GenCapacity[g, p]), + ) # The following components enforce minimum capacity build-outs. # Note that this adds binary variables to the model. 
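    # Hypothetical illustration: if gen_min_build_capacity[g] = 50 MW, the two
    # constraints below force BuildGen[g, p] to be either exactly 0 (when
    # BuildMinGenCap[g, p] = 0) or at least 50 MW (when BuildMinGenCap[g, p]
    # = 1); the 100 GW constant defined below keeps the upper bound
    # non-binding in the second case.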
- mod.gen_min_build_capacity = Param (mod.GENERATION_PROJECTS, - within=NonNegativeReals, default=0) + mod.gen_min_build_capacity = Param( + mod.GENERATION_PROJECTS, within=NonNegativeReals, default=0 + ) mod.NEW_GEN_WITH_MIN_BUILD_YEARS = Set( + dimen=2, initialize=mod.NEW_GEN_BLD_YRS, - filter=lambda m, g, p: ( - m.gen_min_build_capacity[g] > 0)) - mod.BuildMinGenCap = Var( - mod.NEW_GEN_WITH_MIN_BUILD_YEARS, - within=Binary) + filter=lambda m, g, p: (m.gen_min_build_capacity[g] > 0), + ) + mod.BuildMinGenCap = Var(mod.NEW_GEN_WITH_MIN_BUILD_YEARS, within=Binary) mod.Enforce_Min_Build_Lower = Constraint( mod.NEW_GEN_WITH_MIN_BUILD_YEARS, rule=lambda m, g, p: ( - m.BuildMinGenCap[g, p] * m.gen_min_build_capacity[g] - <= m.BuildGen[g, p])) + m.BuildMinGenCap[g, p] * m.gen_min_build_capacity[g] <= m.BuildGen[g, p] + ), + ) # Define a constant for enforcing binary constraints on project capacity # The value of 100 GW should be larger than any expected build size. For @@ -440,40 +514,48 @@ def BuildGen_assign_default_value(m, g, bld_yr): mod.Enforce_Min_Build_Upper = Constraint( mod.NEW_GEN_WITH_MIN_BUILD_YEARS, rule=lambda m, g, p: ( - m.BuildGen[g, p] <= m.BuildMinGenCap[g, p] * - mod._gen_max_cap_for_binary_constraints)) + m.BuildGen[g, p] + <= m.BuildMinGenCap[g, p] * mod._gen_max_cap_for_binary_constraints + ), + ) # Costs - mod.gen_variable_om = Param (mod.GENERATION_PROJECTS, within=NonNegativeReals) - mod.gen_connect_cost_per_mw = Param(mod.GENERATION_PROJECTS, within=NonNegativeReals) - mod.min_data_check('gen_variable_om', 'gen_connect_cost_per_mw') + mod.gen_variable_om = Param(mod.GENERATION_PROJECTS, within=NonNegativeReals) + mod.gen_connect_cost_per_mw = Param( + mod.GENERATION_PROJECTS, within=NonNegativeReals + ) + mod.min_data_check("gen_variable_om", "gen_connect_cost_per_mw") - mod.gen_overnight_cost = Param( - mod.GEN_BLD_YRS, - within=NonNegativeReals) - mod.gen_fixed_om = Param( - mod.GEN_BLD_YRS, - within=NonNegativeReals) - mod.min_data_check('gen_overnight_cost', 'gen_fixed_om') + mod.gen_overnight_cost = Param(mod.GEN_BLD_YRS, within=NonNegativeReals) + mod.gen_fixed_om = Param(mod.GEN_BLD_YRS, within=NonNegativeReals) + mod.min_data_check("gen_overnight_cost", "gen_fixed_om") # Derived annual costs mod.gen_capital_cost_annual = Param( mod.GEN_BLD_YRS, + within=NonNegativeReals, initialize=lambda m, g, bld_yr: ( - (m.gen_overnight_cost[g, bld_yr] + - m.gen_connect_cost_per_mw[g]) * - crf(m.interest_rate, m.gen_max_age[g]))) + (m.gen_overnight_cost[g, bld_yr] + m.gen_connect_cost_per_mw[g]) + * crf(m.interest_rate, m.gen_max_age[g]) + ), + ) mod.GenCapitalCosts = Expression( - mod.GENERATION_PROJECTS, mod.PERIODS, + mod.GENERATION_PROJECTS, + mod.PERIODS, rule=lambda m, g, p: sum( m.BuildGen[g, bld_yr] * m.gen_capital_cost_annual[g, bld_yr] - for bld_yr in m.BLD_YRS_FOR_GEN_PERIOD[g, p])) + for bld_yr in m.BLD_YRS_FOR_GEN_PERIOD[g, p] + ), + ) mod.GenFixedOMCosts = Expression( - mod.GENERATION_PROJECTS, mod.PERIODS, + mod.GENERATION_PROJECTS, + mod.PERIODS, rule=lambda m, g, p: sum( m.BuildGen[g, bld_yr] * m.gen_fixed_om[g, bld_yr] - for bld_yr in m.BLD_YRS_FOR_GEN_PERIOD[g, p])) + for bld_yr in m.BLD_YRS_FOR_GEN_PERIOD[g, p] + ), + ) # Summarize costs for the objective function. Units should be total # annual future costs in $base_year real dollars. 
The objective # function will convert these to base_year Net Present Value in @@ -482,8 +564,10 @@ def BuildGen_assign_default_value(m, g, bld_yr): mod.PERIODS, rule=lambda m, p: sum( m.GenCapitalCosts[g, p] + m.GenFixedOMCosts[g, p] - for g in m.GENERATION_PROJECTS)) - mod.Cost_Components_Per_Period.append('TotalGenFixedCosts') + for g in m.GENERATION_PROJECTS + ), + ) + mod.Cost_Components_Per_Period.append("TotalGenFixedCosts") def load_inputs(mod, switch_data, inputs_dir): @@ -492,7 +576,7 @@ def load_inputs(mod, switch_data, inputs_dir): Import data describing project builds. The following files are expected in the input directory. - generation_projects_info.csv has mandatory and optional columns. The + gen_info.csv has mandatory and optional columns. The operations.gen_dispatch module will also look for additional columns in this file. You may drop optional columns entirely or mark blank values with a dot '.' for select rows for which the column does not @@ -510,7 +594,7 @@ def load_inputs(mod, switch_data, inputs_dir): optional for simulations where there is no existing capacity: gen_build_predetermined.csv - GENERATION_PROJECT, build_year, gen_predetermined_cap + GENERATION_PROJECT, build_year, build_gen_predetermined The following file is mandatory, because it sets cost parameters for both existing and new project buildouts: @@ -519,64 +603,162 @@ def load_inputs(mod, switch_data, inputs_dir): GENERATION_PROJECT, build_year, gen_overnight_cost, gen_fixed_om """ + switch_data.load_aug( - filename=os.path.join(inputs_dir, 'generation_projects_info.csv'), - auto_select=True, - optional_params=['gen_dbid', 'gen_is_baseload', 'gen_scheduled_outage_rate', - 'gen_forced_outage_rate', 'gen_capacity_limit_mw', 'gen_unit_size', - 'gen_ccs_energy_load', 'gen_ccs_capture_efficiency', - 'gen_min_build_capacity', 'gen_is_cogen', 'gen_is_distributed'], + filename=os.path.join(inputs_dir, "gen_info.csv"), + optional_params=[ + "gen_dbid", + "gen_is_baseload", + "gen_scheduled_outage_rate", + "gen_forced_outage_rate", + "gen_capacity_limit_mw", + "gen_unit_size", + "gen_ccs_energy_load", + "gen_ccs_capture_efficiency", + "gen_min_build_capacity", + "gen_is_cogen", + "gen_is_distributed", + ], index=mod.GENERATION_PROJECTS, - param=(mod.gen_dbid, mod.gen_tech, mod.gen_energy_source, - mod.gen_load_zone, mod.gen_max_age, mod.gen_is_variable, - mod.gen_is_baseload, mod.gen_scheduled_outage_rate, - mod.gen_forced_outage_rate, mod.gen_capacity_limit_mw, - mod.gen_unit_size, mod.gen_ccs_energy_load, - mod.gen_ccs_capture_efficiency, mod.gen_full_load_heat_rate, - mod.gen_variable_om, mod.gen_min_build_capacity, - mod.gen_connect_cost_per_mw, mod.gen_is_cogen, - mod.gen_is_distributed)) + param=( + mod.gen_dbid, + mod.gen_tech, + mod.gen_energy_source, + mod.gen_load_zone, + mod.gen_max_age, + mod.gen_is_variable, + mod.gen_is_baseload, + mod.gen_scheduled_outage_rate, + mod.gen_forced_outage_rate, + mod.gen_capacity_limit_mw, + mod.gen_unit_size, + mod.gen_ccs_energy_load, + mod.gen_ccs_capture_efficiency, + mod.gen_full_load_heat_rate, + mod.gen_variable_om, + mod.gen_min_build_capacity, + mod.gen_connect_cost_per_mw, + mod.gen_is_cogen, + mod.gen_is_distributed, + ), + ) # Construct sets of capacity-limited, ccs-capable and unit-size-specified # projects. 
These sets include projects for which these parameters have # a value - if 'gen_capacity_limit_mw' in switch_data.data(): - switch_data.data()['CAPACITY_LIMITED_GENS'] = { - None: list(switch_data.data(name='gen_capacity_limit_mw').keys())} - if 'gen_unit_size' in switch_data.data(): - switch_data.data()['DISCRETELY_SIZED_GENS'] = { - None: list(switch_data.data(name='gen_unit_size').keys())} - if 'gen_ccs_capture_efficiency' in switch_data.data(): - switch_data.data()['CCS_EQUIPPED_GENS'] = { - None: list(switch_data.data(name='gen_ccs_capture_efficiency').keys())} + if "gen_capacity_limit_mw" in switch_data.data(): + switch_data.data()["CAPACITY_LIMITED_GENS"] = { + None: list(switch_data.data(name="gen_capacity_limit_mw").keys()) + } + if "gen_unit_size" in switch_data.data(): + switch_data.data()["DISCRETELY_SIZED_GENS"] = { + None: list(switch_data.data(name="gen_unit_size").keys()) + } + if "gen_ccs_capture_efficiency" in switch_data.data(): + switch_data.data()["CCS_EQUIPPED_GENS"] = { + None: list(switch_data.data(name="gen_ccs_capture_efficiency").keys()) + } switch_data.load_aug( optional=True, - filename=os.path.join(inputs_dir, 'gen_build_predetermined.csv'), - auto_select=True, + filename=os.path.join(inputs_dir, "gen_build_predetermined.csv"), index=mod.PREDETERMINED_GEN_BLD_YRS, - param=(mod.gen_predetermined_cap)) + param=(mod.build_gen_predetermined), + ) switch_data.load_aug( - filename=os.path.join(inputs_dir, 'gen_build_costs.csv'), - auto_select=True, + filename=os.path.join(inputs_dir, "gen_build_costs.csv"), index=mod.GEN_BLD_YRS, - param=(mod.gen_overnight_cost, mod.gen_fixed_om)) - # read FUELS_FOR_MULTIFUEL_GEN from gen_multiple_fuels.dat if available - multi_fuels_path = os.path.join(inputs_dir, 'gen_multiple_fuels.dat') - if os.path.isfile(multi_fuels_path): - switch_data.load(filename=multi_fuels_path) + param=(mod.gen_overnight_cost, mod.gen_fixed_om), + ) + switch_data.load_aug( + optional=True, + filename=os.path.join(inputs_dir, "gen_multiple_fuels.csv"), + index=mod.MULTI_FUEL_GEN_FUELS, + param=tuple(), + ) def post_solve(m, outdir): + # report generator and storage additions in each period and the total + # capital outlay for those (up-front capital outlay is not treated as a + # direct cost by Switch, but is often interesting to users) + write_table( + m, + m.GEN_PERIODS, + output_file=os.path.join(outdir, "gen_build.csv"), + headings=( + "GENERATION_PROJECT", + "PERIOD", + "gen_tech", + "gen_load_zone", + "gen_energy_source", + "BuildGen", + "BuildStorageEnergy", + "GenCapitalOutlay", + ), + values=lambda m, g, p: ( + g, + p, + m.gen_tech[g], + m.gen_load_zone[g], + m.gen_energy_source[g], + m.BuildGen[g, p] if (g, p) in m.BuildGen else 0.0, + ( + m.BuildStorageEnergy[g, p] + if hasattr(m, "BuildStorageEnergy") and (g, p) in m.BuildStorageEnergy + else 0.0 + ), + ( + (m.gen_overnight_cost[g, p] + m.gen_connect_cost_per_mw[g]) + * m.BuildGen[g, p] + if (g, p) in m.BuildGen + else 0.0 + ) + + ( + (m.BuildStorageEnergy[g, p] * m.gen_storage_energy_overnight_cost[g, p]) + if hasattr(m, "BuildStorageEnergy") and (g, p) in m.BuildStorageEnergy + else 0.0 + ), + ), + ) + + # report total generator and storage capacity in place for each generator in
Also show capital and fixed O&M recovery per year in that + # period (these are the costs Switch seeks to minimize) write_table( m, - sorted(m.GEN_PERIODS) if m.options.sorted_output else m.GEN_PERIODS, + m.GENERATION_PROJECTS, + m.PERIODS, output_file=os.path.join(outdir, "gen_cap.csv"), headings=( - "GENERATION_PROJECT", "PERIOD", - "gen_tech", "gen_load_zone", "gen_energy_source", - "GenCapacity", "GenCapitalCosts", "GenFixedOMCosts"), - # Indexes are provided as a tuple, so put (g,p) in parentheses to - # access the two components of the index individually. + "GENERATION_PROJECT", + "PERIOD", + "gen_tech", + "gen_load_zone", + "gen_energy_source", + "GenCapacity", + "GenStorageCapacity", + "GenCapitalRecovery", + "GenFixedOMCosts", + ), values=lambda m, g, p: ( - g, p, - m.gen_tech[g], m.gen_load_zone[g], m.gen_energy_source[g], - m.GenCapacity[g, p], m.GenCapitalCosts[g, p], m.GenFixedOMCosts[g, p])) + g, + p, + m.gen_tech[g], + m.gen_load_zone[g], + m.gen_energy_source[g], + m.GenCapacity[g, p], + ( + m.StorageEnergyCapacity[g, p] + if hasattr(m, "StorageEnergyCapacity") + and (g, p) in m.StorageEnergyCapacity + else 0.0 + ), + m.GenCapitalCosts[g, p] + + ( + m.StorageEnergyCapitalCost[g, p] + if hasattr(m, "StorageEnergyCapitalCost") + and (g, p) in m.StorageEnergyCapitalCost + else 0.0 + ), + m.GenFixedOMCosts[g, p], + ), + ) diff --git a/switch_model/generators/core/commit/__init__.py b/switch_model/generators/core/commit/__init__.py index c015e852b..dcf58634c 100644 --- a/switch_model/generators/core/commit/__init__.py +++ b/switch_model/generators/core/commit/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2015-2017 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2, which is in the LICENSE file. """ @@ -10,5 +10,6 @@ """ core_modules = [ - 'switch_model.generators.core.commit.operate', - 'switch_model.generators.core.commit.fuel_use'] + "switch_model.generators.core.commit.operate", + "switch_model.generators.core.commit.fuel_use", +] diff --git a/switch_model/generators/core/commit/discrete.py b/switch_model/generators/core/commit/discrete.py index c26491c6e..292fd757e 100644 --- a/switch_model/generators/core/commit/discrete.py +++ b/switch_model/generators/core/commit/discrete.py @@ -1,4 +1,4 @@ -# Copyright 2015 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2, which is in the LICENSE file. 
""" @@ -8,10 +8,16 @@ from pyomo.environ import * -dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones',\ - 'switch_model.financials', 'switch_model.energy_sources.properties',\ - 'switch_model.generators.core.build',\ - 'switch_model.generators.core.dispatch', 'switch_model.operations.unitcommit' +dependencies = ( + "switch_model.timescales", + "switch_model.balancing.load_zones", + "switch_model.financials", + "switch_model.energy_sources.properties", + "switch_model.generators.core.build", + "switch_model.generators.core.dispatch", + "switch_model.operations.unitcommit", +) + def define_components(mod): """ @@ -50,14 +56,15 @@ def define_components(mod): mod.DISCRETE_GEN_TPS = Set( dimen=2, - initialize=lambda m: - [(g, t) for g in m.DISCRETELY_SIZED_GENS for t in m.TPS_FOR_GEN[g]] + initialize=lambda m: [ + (g, t) for g in m.DISCRETELY_SIZED_GENS for t in m.TPS_FOR_GEN[g] + ], ) - mod.CommitGenUnits = Var( - mod.DISCRETE_GEN_TPS, - within=NonNegativeIntegers) + mod.CommitGenUnits = Var(mod.DISCRETE_GEN_TPS, within=NonNegativeIntegers) mod.Commit_Units_Consistency = Constraint( mod.DISCRETE_GEN_TPS, rule=lambda m, g, t: ( - m.CommitGen[g, t] == m.CommitGenUnits[g, t] * - m.gen_unit_size[g] * m.gen_availability[g])) + m.CommitGen[g, t] + == m.CommitGenUnits[g, t] * m.gen_unit_size[g] * m.gen_availability[g] + ), + ) diff --git a/switch_model/generators/core/commit/fuel_use.py b/switch_model/generators/core/commit/fuel_use.py index 64ba50183..a8f42ef75 100644 --- a/switch_model/generators/core/commit/fuel_use.py +++ b/switch_model/generators/core/commit/fuel_use.py @@ -1,4 +1,4 @@ -# Copyright 2015 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2, which is in the LICENSE file. """ @@ -67,10 +67,16 @@ import csv from switch_model.utilities import approx_equal -dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones',\ - 'switch_model.financials', 'switch_model.energy_sources.properties.properties', \ - 'switch_model.generators.core.build', 'switch_model.generators.core.dispatch',\ - 'switch_model.generators.core.commit.operate' +dependencies = ( + "switch_model.timescales", + "switch_model.balancing.load_zones", + "switch_model.financials", + "switch_model.energy_sources.properties.properties", + "switch_model.generators.core.build", + "switch_model.generators.core.dispatch", + "switch_model.generators.core.commit.operate", +) + def define_components(mod): """ @@ -101,18 +107,17 @@ def define_components(mod): """ - mod.FUEL_USE_SEGMENTS_FOR_GEN = Set( - mod.FUEL_BASED_GENS, - dimen=2) + mod.FUEL_USE_SEGMENTS_FOR_GEN = Set(mod.FUEL_BASED_GENS, dimen=2) # Use BuildAction to populate a set's default values. 
def FUEL_USE_SEGMENTS_FOR_GEN_default_rule(m, g): if g not in m.FUEL_USE_SEGMENTS_FOR_GEN: heat_rate = m.gen_full_load_heat_rate[g] m.FUEL_USE_SEGMENTS_FOR_GEN[g] = [(0, heat_rate)] + mod.FUEL_USE_SEGMENTS_FOR_GEN_default = BuildAction( - mod.FUEL_BASED_GENS, - rule=FUEL_USE_SEGMENTS_FOR_GEN_default_rule) + mod.FUEL_BASED_GENS, rule=FUEL_USE_SEGMENTS_FOR_GEN_default_rule + ) mod.GEN_TPS_FUEL_PIECEWISE_CONS_SET = Set( dimen=4, @@ -120,16 +125,20 @@ def FUEL_USE_SEGMENTS_FOR_GEN_default_rule(m, g): (g, t, intercept, slope) for (g, t) in m.FUEL_BASED_GEN_TPS for (intercept, slope) in m.FUEL_USE_SEGMENTS_FOR_GEN[g] - ] + ], ) mod.GenFuelUseRate_Calculate = Constraint( mod.GEN_TPS_FUEL_PIECEWISE_CONS_SET, rule=lambda m, g, t, intercept, incremental_heat_rate: ( - sum(m.GenFuelUseRate[g, t, f] for f in m.FUELS_FOR_GEN[g]) >= + sum(m.GenFuelUseRate[g, t, f] for f in m.FUELS_FOR_GEN[g]) + >= # Do the startup - m.StartupGenCapacity[g, t] * m.gen_startup_fuel[g] / m.tp_duration_hrs[t] + - intercept * m.CommitGen[g, t] + - incremental_heat_rate * m.DispatchGen[g, t])) + m.StartupGenCapacity[g, t] * m.gen_startup_fuel[g] / m.tp_duration_hrs[t] + + intercept * m.CommitGen[g, t] + + incremental_heat_rate * m.DispatchGen[g, t] + ), + ) + # TODO: switch to defining heat rates as a collection of (output_mw, fuel_mmbtu_per_h) points; # read those directly as normal sets, then derive the project heat rate curves from those @@ -137,6 +146,7 @@ def FUEL_USE_SEGMENTS_FOR_GEN_default_rule(m, g): # This will simplify data preparation (the current format is hard to produce from any # normalized database) and the import code and help the readability of this file. + def load_inputs(mod, switch_data, inputs_dir): """ @@ -181,43 +191,50 @@ def load_inputs(mod, switch_data, inputs_dir): """ - path = os.path.join(inputs_dir, 'gen_inc_heat_rates.csv') + path = os.path.join(inputs_dir, "gen_inc_heat_rates.csv") if os.path.isfile(path): (fuel_rate_segments, min_load, full_hr) = _parse_inc_heat_rate_file( - path, id_column="GENERATION_PROJECT") + path, id_column="GENERATION_PROJECT" + ) # Check implied minimum loading level for consistency with # gen_min_load_fraction if gen_min_load_fraction was provided. If # gen_min_load_fraction wasn't provided, set it to implied minimum # loading level. for g in min_load: - if 'gen_min_load_fraction' not in switch_data.data(): - switch_data.data()['gen_min_load_fraction'] = {} - dp_dict = switch_data.data(name='gen_min_load_fraction') + if "gen_min_load_fraction" not in switch_data.data(): + switch_data.data()["gen_min_load_fraction"] = {} + dp_dict = switch_data.data(name="gen_min_load_fraction") if g in dp_dict: min_load_dat = dp_dict[g] if not approx_equal(min_load[g], min_load_dat): - raise ValueError(( - "gen_min_load_fraction is inconsistant with " + - "incremental heat rate data for project " + - "{}.").format(g)) + raise ValueError( + ( + "gen_min_load_fraction is inconsistent with " + + "incremental heat rate data for project " + + "{}." + ).format(g) + ) else: dp_dict[g] = min_load[g] # Same thing, but for full load heat rate.
for g in full_hr: - if 'gen_full_load_heat_rate' not in switch_data.data(): - switch_data.data()['gen_full_load_heat_rate'] = {} - dp_dict = switch_data.data(name='gen_full_load_heat_rate') + if "gen_full_load_heat_rate" not in switch_data.data(): + switch_data.data()["gen_full_load_heat_rate"] = {} + dp_dict = switch_data.data(name="gen_full_load_heat_rate") if g in dp_dict: full_hr_dat = dp_dict[g] if abs((full_hr[g] - full_hr_dat) / full_hr_dat) > 0.01: - raise ValueError(( - "gen_full_load_heat_rate is inconsistant with " + - "incremental heat rate data for project " + - "{}.").format(g)) + raise ValueError( + ( + "gen_full_load_heat_rate is inconsistent with " + + "incremental heat rate data for project " + + "{}." + ).format(g) + ) else: dp_dict[g] = full_hr[g] # Copy parsed data into the data portal. - switch_data.data()['FUEL_USE_SEGMENTS_FOR_GEN'] = fuel_rate_segments + switch_data.data()["FUEL_USE_SEGMENTS_FOR_GEN"] = fuel_rate_segments def _parse_inc_heat_rate_file(path, id_column): @@ -237,45 +254,56 @@ def _parse_inc_heat_rate_file(path, id_column): full_load_hr = {} # Scan the file and stuff the data into dictionaries for easy access. # Parse the file and stuff data into dictionaries indexed by units. - with open(path, 'r') as hr_file: - dat = list(csv.DictReader(hr_file, delimiter=',')) + with open(path, "r") as hr_file: + dat = list(csv.DictReader(hr_file, delimiter=",")) for row in dat: u = row[id_column] - p1 = float(row['power_start_mw']) - p2 = row['power_end_mw'] - ihr = row['incremental_heat_rate_mbtu_per_mwhr'] - fr = row['fuel_use_rate_mmbtu_per_h'] + p1 = float(row["power_start_mw"]) + p2 = row["power_end_mw"] + ihr = row["incremental_heat_rate_mbtu_per_mwhr"] + fr = row["fuel_use_rate_mmbtu_per_h"] # Does this row give the first point? - if(p2 == '.' and ihr == '.'): + if p2 == "." and ihr == ".": fr = float(fr) - if(u in fuel_rate_points): + if u in fuel_rate_points: raise ValueError( - "Error processing incremental heat rates for " + - u + " in " + path + ". More than one row has " + - "a fuel use rate specified.") + "Error processing incremental heat rates for " + + u + + " in " + + path + + ". More than one row has " + + "a fuel use rate specified." + ) fuel_rate_points[u] = {p1: fr} # Does this row give a line segment? - elif(fr == '.'): + elif fr == ".": p2 = float(p2) ihr = float(ihr) - if(u not in ihr_dat): + if u not in ihr_dat: ihr_dat[u] = [] ihr_dat[u].append((p1, p2, ihr)) # Throw an error if the row's format is not recognized. else: raise ValueError( - "Error processing incremental heat rates for row " + - u + " in " + path + ". Row format not recognized for " + - "row " + str(row) + ". See documentation for acceptable " + - "formats.") + "Error processing incremental heat rates for row " + + u + + " in " + + path + + ". Row format not recognized for " + + "row " + + str(row) + + ". See documentation for acceptable " + + "formats." + ) # Make sure that each project that has incremental heat rates defined # also has a starting point defined. missing_starts = [k for k in ihr_dat if k not in fuel_rate_points] if missing_starts: raise ValueError( - 'No starting point(s) are defined for incremental heat rate curves ' - 'for the following technologies: {}'.format(','.join(missing_starts))) + "No starting point(s) are defined for incremental heat rate curves " + "for the following technologies: {}".format(",".join(missing_starts)) + ) # Construct a convex combination of lines describing a fuel use # curve for each representative unit "u".
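The convex-combination construction that follows computes a normalized y-intercept for each heat-rate segment. A small numeric sketch of that formula, with hypothetical data:

    # hypothetical unit: 100 MW capacity, 500 MMBtu/h of fuel use at its
    # 40 MW minimum load, and a 9 MMBtu/MWh incremental heat rate above that
    capacity = 100.0
    fr_points = {40.0: 500.0}   # power_mw -> fuel_use_mmbtu_per_h
    p_start, ihr = 40.0, 9.0

    # y-intercept of the segment, normalized by capacity so it can later
    # multiply committed capacity: (500 - 9 * 40) / 100 = 1.4
    intercept_norm = (fr_points[p_start] - ihr * p_start) / capacity
    assert abs(intercept_norm - 1.4) < 1e-9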
@@ -293,7 +321,7 @@ def _parse_inc_heat_rate_file(path, id_column): # Sort the line segments by their domains. ihr_dat[u].sort() # Assume that the maximum power output is the rated capacity. - (junk, capacity, junk) = ihr_dat[u][len(ihr_dat[u])-1] + (junk, capacity, junk) = ihr_dat[u][len(ihr_dat[u]) - 1] # Retrieve the first incremental heat rate for error checking. (min_power, junk, ihr_prev) = ihr_dat[u][0] min_cap_factor[u] = min_power / capacity @@ -302,20 +330,24 @@ def _parse_inc_heat_rate_file(path, id_column): # Error check: This incremental heat rate cannot be less than # the previous one. if ihr_prev > ihr: - raise ValueError(( - "Error processing incremental heat rates for " + - "{} in file {}. The incremental heat rate " + - "between power output levels {}-{} is less than " + - "that of the prior line segment.").format( - u, path, p_start, p_end)) + raise ValueError( + ( + "Error processing incremental heat rates for " + + "{} in file {}. The incremental heat rate " + + "between power output levels {}-{} is less than " + + "that of the prior line segment." + ).format(u, path, p_start, p_end) + ) # Error check: This segment needs to start at an existing point. if p_start not in fr_points: - raise ValueError(( - "Error processing incremental heat rates for " + - "{} in file {}. The incremental heat rate " + - "between power output levels {}-{} does not start at a " + - "previously defined point or line segment.").format( - u, path, p_start, p_end)) + raise ValueError( + ( + "Error processing incremental heat rates for " + + "{} in file {}. The incremental heat rate " + + "between power output levels {}-{} does not start at a " + + "previously defined point or line segment." + ).format(u, path, p_start, p_end) + ) # Calculate the y-intercept then normalize it by the capacity. intercept_norm = (fr_points[p_start] - ihr * p_start) / capacity # Save the line segment's definition. diff --git a/switch_model/generators/core/commit/operate.py b/switch_model/generators/core/commit/operate.py index eca5445d1..189f1eda4 100644 --- a/switch_model/generators/core/commit/operate.py +++ b/switch_model/generators/core/commit/operate.py @@ -1,4 +1,4 @@ -# Copyright 2015 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2, which is in the LICENSE file. """ @@ -15,11 +15,15 @@ from pyomo.environ import * dependencies = ( - 'switch_model.timescales', 'switch_model.balancing.load_zones', - 'switch_model.financials', 'switch_model.energy_sources.properties.properties', - 'switch_model.generators.core.build', 'switch_model.generators.core.dispatch' + "switch_model.timescales", + "switch_model.balancing.load_zones", + "switch_model.financials", + "switch_model.energy_sources.properties.properties", + "switch_model.generators.core.build", + "switch_model.generators.core.dispatch", ) + def define_components(mod): """ @@ -138,8 +142,8 @@ def define_components(mod): Enforce_Min_Uptime and Enforce_Min_Downtime constraints, and are probably not useful elsewhere. - Enforce_Min_Uptime[(g, tp) in UPTIME_CONSTRAINED_GEN_TPS] and - Enforce_Min_Downtime[(g, tp) in DOWNTIME_CONSTRAINED_GEN_TPS] + Enforce_Min_Uptime[(g, t) in UPTIME_CONSTRAINED_GEN_TPS] and + Enforce_Min_Downtime[(g, t) in DOWNTIME_CONSTRAINED_GEN_TPS] are constraints that ensure that unit commitment respects the minimum uptime and downtime for each project. 
These are enforced on an aggregate basis for each project rather than tracking individual @@ -171,7 +175,7 @@ def define_components(mod): relevant when considering unit commitment so it is defined here rather than in the gen_dispatch module. - gen_min_load_fraction_TP[g, tp] is the same as + gen_min_load_fraction_TP[g, t] is the same as gen_min_load_fraction, but has separate entries for each timepoint. This could be used, for example, for non-curtailable renewable energy projects. This defaults to the value of gen_min_load_fraction[g]. @@ -214,201 +218,209 @@ def define_components(mod): """ # Commitment decision, bounds and associated slack variables - mod.CommitGen = Var( - mod.GEN_TPS, - within=NonNegativeReals) + mod.CommitGen = Var(mod.GEN_TPS, within=NonNegativeReals) mod.gen_max_commit_fraction = Param( - mod.GEN_TPS, - within=PercentFraction, - default=lambda m, g, t: 1.0) + mod.GEN_TPS, within=PercentFraction, default=lambda m, g, t: 1.0 + ) mod.gen_min_commit_fraction = Param( mod.GEN_TPS, within=PercentFraction, default=lambda m, g, t: ( - m.gen_max_commit_fraction[g, t] - if g in m.BASELOAD_GENS - else 0.0)) + m.gen_max_commit_fraction[g, t] if g in m.BASELOAD_GENS else 0.0 + ), + ) mod.CommitLowerLimit = Expression( mod.GEN_TPS, rule=lambda m, g, t: ( - m.GenCapacityInTP[g, t] * m.gen_availability[g] * - m.gen_min_commit_fraction[g, t])) + m.GenCapacityInTP[g, t] + * m.gen_availability[g] + * m.gen_min_commit_fraction[g, t] + ), + ) mod.CommitUpperLimit = Expression( mod.GEN_TPS, rule=lambda m, g, t: ( - m.GenCapacityInTP[g, t] * m.gen_availability[g] * - m.gen_max_commit_fraction[g, t])) + m.GenCapacityInTP[g, t] + * m.gen_availability[g] + * m.gen_max_commit_fraction[g, t] + ), + ) mod.Enforce_Commit_Lower_Limit = Constraint( mod.GEN_TPS, - rule=lambda m, g, t: ( - m.CommitLowerLimit[g, t] <= m.CommitGen[g, t])) + rule=lambda m, g, t: (m.CommitLowerLimit[g, t] <= m.CommitGen[g, t]), + ) mod.Enforce_Commit_Upper_Limit = Constraint( mod.GEN_TPS, - rule=lambda m, g, t: ( - m.CommitGen[g, t] <= m.CommitUpperLimit[g, t])) + rule=lambda m, g, t: (m.CommitGen[g, t] <= m.CommitUpperLimit[g, t]), + ) mod.CommitSlackUp = Expression( - mod.GEN_TPS, - rule=lambda m, g, t: ( - m.CommitUpperLimit[g, t] - m.CommitGen[g, t])) + mod.GEN_TPS, rule=lambda m, g, t: (m.CommitUpperLimit[g, t] - m.CommitGen[g, t]) + ) mod.CommitSlackDown = Expression( - mod.GEN_TPS, - rule=lambda m, g, t: ( - m.CommitGen[g, t] - m.CommitLowerLimit[g, t])) + mod.GEN_TPS, rule=lambda m, g, t: (m.CommitGen[g, t] - m.CommitLowerLimit[g, t]) + ) # StartupGenCapacity & ShutdownGenCapacity (at start of each timepoint) - mod.StartupGenCapacity = Var( - mod.GEN_TPS, - within=NonNegativeReals) - mod.ShutdownGenCapacity = Var( - mod.GEN_TPS, - within=NonNegativeReals) + mod.StartupGenCapacity = Var(mod.GEN_TPS, within=NonNegativeReals) + mod.ShutdownGenCapacity = Var(mod.GEN_TPS, within=NonNegativeReals) mod.Commit_StartupGenCapacity_ShutdownGenCapacity_Consistency = Constraint( mod.GEN_TPS, - rule=lambda m, g, t: - m.CommitGen[g, m.tp_previous[t]] - + m.StartupGenCapacity[g, t] - m.ShutdownGenCapacity[g, t] - == m.CommitGen[g, t]) + rule=lambda m, g, t: m.CommitGen[g, m.tp_previous[t]] + + m.StartupGenCapacity[g, t] + - m.ShutdownGenCapacity[g, t] + == m.CommitGen[g, t], + ) # StartupGenCapacity costs - mod.gen_startup_fuel = Param(mod.FUEL_BASED_GENS, default=0.0) - mod.gen_startup_om = Param(mod.GENERATION_PROJECTS, default=0.0) + mod.gen_startup_fuel = Param( + mod.FUEL_BASED_GENS, default=0.0, within=NonNegativeReals + 
) + mod.gen_startup_om = Param( + mod.GENERATION_PROJECTS, default=0.0, within=NonNegativeReals + ) # Note: lump-sum startup O&M cost is divided by the duration of the # timepoint to give a cost-per-hour during this timepoint, as needed by # Cost_Components_Per_TP. mod.Total_StartupGenCapacity_OM_Costs = Expression( mod.TIMEPOINTS, rule=lambda m, t: sum( - m.gen_startup_om[g] * m.StartupGenCapacity[g, t] - / m.tp_duration_hrs[t] + m.gen_startup_om[g] * m.StartupGenCapacity[g, t] / m.tp_duration_hrs[t] for g in m.GENS_IN_PERIOD[m.tp_period[t]] - ) + ), ) - mod.Cost_Components_Per_TP.append('Total_StartupGenCapacity_OM_Costs') + mod.Cost_Components_Per_TP.append("Total_StartupGenCapacity_OM_Costs") mod.gen_min_uptime = Param( - mod.GENERATION_PROJECTS, - within=NonNegativeReals, - default=0.0) + mod.GENERATION_PROJECTS, within=NonNegativeReals, default=0.0 + ) mod.gen_min_downtime = Param( - mod.GENERATION_PROJECTS, - within=NonNegativeReals, - default=0.0) - mod.UPTIME_CONSTRAINED_GEN_TPS = Set(dimen=2, initialize=lambda m: [ - (g, tp) - for g in m.GENERATION_PROJECTS if m.gen_min_uptime[g] > 0.0 - for tp in m.TPS_FOR_GEN[g] - ]) - mod.DOWNTIME_CONSTRAINED_GEN_TPS = Set(dimen=2, initialize=lambda m: [ - (g, tp) - for g in m.GENERATION_PROJECTS if m.gen_min_downtime[g] > 0.0 - for tp in m.TPS_FOR_GEN[g] - ]) - - def tp_prev(m, tp, n=1): - # find nth previous timepoint, wrapping from start to end of day - return m.TPS_IN_TS[m.tp_ts[tp]].prevw(tp, n) - # min_time_projects = set() - def min_time_rule(m, g, tp, up): - """ This uses a simple rule: all capacity turned on in the last x - hours must still be on now (or all capacity recently turned off - must still be off).""" - - # how many timepoints must the project stay on/off once it's - # started/shutdown? - # note: StartupGenCapacity and ShutdownGenCapacity are assumed to - # occur at the start of the timepoint - n_tp = int(round( - (m.gen_min_uptime[g] if up else m.gen_min_downtime[g]) - / m.tp_duration_hrs[tp] - )) - if n_tp == 0: - # project can be shutdown and restarted in the same timepoint - rule = Constraint.Skip - else: - # note: this rule stops one short of n_tp steps back (normal - # behavior of range()), because the current timepoint is - # included in the duration when the capacity will be on/off. - if up: - rule = ( - # online capacity >= recent startups - # (all recent startups are still online) - m.CommitGen[g, tp] - >= - sum( - m.StartupGenCapacity[g, tp_prev(m, tp, i)] - for i in range(n_tp) - ) - ) - else: - # Find the largest fraction of capacity that could have - # been committed in the last x hours, including the - # current hour. We assume that everything above this band - # must remain turned off (e.g., on maintenance outage). - # Note: this band extends one step prior to the first - # relevant shutdown, since that capacity could have been - # online in the prior step. 
- committable_fraction = m.gen_availability[g] * max( - m.gen_max_commit_fraction[g, tp_prev(m, tp, i)] - for i in range(n_tp+1) - ) - rule = ( - # committable capacity - committed >= recent shutdowns - # (all recent shutdowns are still offline) - m.GenCapacityInTP[g, tp] * committable_fraction - - m.CommitGen[g, tp] - >= - sum( - m.ShutdownGenCapacity[g, tp_prev(m, tp, i)] - for i in range(n_tp) - ) - ) - return rule + mod.GENERATION_PROJECTS, within=NonNegativeReals, default=0.0 + ) + + def hrs_to_num_tps(m, hrs, t): + return int(round(hrs / m.ts_duration_of_tp[m.tp_ts[t]])) + + def time_window(m, t, hrs, add_one=False): + """Return the set of timepoints, starting at t and going + back the specified number of hours""" + n = hrs_to_num_tps(m, hrs, t) + if add_one: + n += 1 + window = [m.TPS_IN_TS[m.tp_ts[t]].prevw(t, i) for i in range(n)] + return window + + mod.UPTIME_CONSTRAINED_GEN_TPS = Set( + dimen=2, + initialize=lambda m: [ + (g, t) + for g in m.GENERATION_PROJECTS + if m.gen_min_uptime[g] > 0.0 + for t in m.TPS_FOR_GEN[g] + if hrs_to_num_tps(m, m.gen_min_uptime[g], t) > 0 + ], + ) + mod.DOWNTIME_CONSTRAINED_GEN_TPS = Set( + dimen=2, + initialize=lambda m: [ + (g, t) + for g in m.GENERATION_PROJECTS + if m.gen_min_downtime[g] > 0.0 + for t in m.TPS_FOR_GEN[g] + if hrs_to_num_tps(m, m.gen_min_downtime[g], t) > 0 + ], + ) mod.Enforce_Min_Uptime = Constraint( mod.UPTIME_CONSTRAINED_GEN_TPS, - rule=lambda *a: min_time_rule(*a, up=True) + doc="All capacity turned on in the last x hours must still be on now", + rule=lambda m, g, t: ( + m.CommitGen[g, t] + >= sum( + m.StartupGenCapacity[g, t_prior] + for t_prior in time_window(m, t, m.gen_min_uptime[g]) + ) + ), ) + # Matthias notes on Enforce_Min_Downtime: The max(...) term finds the + # largest fraction of capacity that could have been committed in the last + # x hours, including the current hour. We assume that everything above + # this band must remain turned off (e.g., on maintenance outage). Note: + # this band extends one step prior to the first relevant shutdown, since + # that capacity could have been online in the prior step. This attempts to + # implement a band of capacity that does not participate in the minimum + # downtime constraint. Without that term, the model can turn off some + # capacity, and then get around the min-downtime rule by turning on other + # capacity which is actually forced off by gen_max_commit_fraction, e.g., + # due to a maintenance outage. + # The max() term & documentation is confusing to Josiah. + # See https://github.com/switch-model/switch/issues/123 for discussion. mod.Enforce_Min_Downtime = Constraint( mod.DOWNTIME_CONSTRAINED_GEN_TPS, - rule=lambda *a: min_time_rule(*a, up=False) + doc=( + "All recently shutdown capacity remains offline: " + "committed <= committable capacity - recent shutdowns" + ), + rule=lambda m, g, t: ( + m.CommitGen[g, t] + <= + # We can't use max(CommitUpperLimit) and fit within LP guidelines, + # so rederive the CommitUpperLimit expression and apply max to a + # constant. + ( + m.GenCapacityInTP[g, t] + * m.gen_availability[g] + * max( + m.gen_max_commit_fraction[g, t_prior] + for t_prior in time_window( + m, t, m.gen_min_downtime[g], add_one=True + ) + ) + ) + - sum( + m.ShutdownGenCapacity[g, t_prior] + for t_prior in time_window(m, t, m.gen_min_downtime[g]) + ) + ), ) # Dispatch limits relative to committed capacity.
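A minimal sketch, with hypothetical 4-hour timepoints, of how hrs_to_num_tps and time_window above turn a minimum uptime in hours into the set of recent timepoints summed by Enforce_Min_Uptime:

    # six 4-hour timepoints in one timeseries, wrapping from start to end
    tps = [0, 1, 2, 3, 4, 5]
    tp_duration = 4.0

    def prevw(t, i):
        # circular "previous" lookup, like Pyomo's ordered-set prevw()
        return tps[(tps.index(t) - i) % len(tps)]

    def time_window(t, hrs, add_one=False):
        n = int(round(hrs / tp_duration)) + (1 if add_one else 0)
        return [prevw(t, i) for i in range(n)]

    # a 12-hour minimum uptime at timepoint 1 sums startups over three
    # timepoints, wrapping around to the end of the timeseries
    assert time_window(1, 12.0) == [1, 0, 5]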
mod.gen_min_load_fraction = Param( mod.GENERATION_PROJECTS, within=PercentFraction, - default=lambda m, g: 1.0 if m.gen_is_baseload[g] else 0.0) + default=lambda m, g: 1.0 if m.gen_is_baseload[g] else 0.0, + ) mod.gen_min_load_fraction_TP = Param( mod.GEN_TPS, - default=lambda m, g, t: m.gen_min_load_fraction[g]) + default=lambda m, g, t: m.gen_min_load_fraction[g], + within=NonNegativeReals, + ) mod.DispatchLowerLimit = Expression( mod.GEN_TPS, - rule=lambda m, g, t: ( - m.CommitGen[g, t] * m.gen_min_load_fraction_TP[g, t])) + rule=lambda m, g, t: (m.CommitGen[g, t] * m.gen_min_load_fraction_TP[g, t]), + ) def DispatchUpperLimit_expr(m, g, t): if g in m.VARIABLE_GENS: - return m.CommitGen[g, t]*m.gen_max_capacity_factor[g, t] + return m.CommitGen[g, t] * m.gen_max_capacity_factor[g, t] else: return m.CommitGen[g, t] - mod.DispatchUpperLimit = Expression( - mod.GEN_TPS, - rule=DispatchUpperLimit_expr) + + mod.DispatchUpperLimit = Expression(mod.GEN_TPS, rule=DispatchUpperLimit_expr) mod.Enforce_Dispatch_Lower_Limit = Constraint( mod.GEN_TPS, - rule=lambda m, g, t: ( - m.DispatchLowerLimit[g, t] <= m.DispatchGen[g, t])) + rule=lambda m, g, t: (m.DispatchLowerLimit[g, t] <= m.DispatchGen[g, t]), + ) mod.Enforce_Dispatch_Upper_Limit = Constraint( mod.GEN_TPS, - rule=lambda m, g, t: ( - m.DispatchGen[g, t] <= m.DispatchUpperLimit[g, t])) + rule=lambda m, g, t: (m.DispatchGen[g, t] <= m.DispatchUpperLimit[g, t]), + ) mod.DispatchSlackUp = Expression( mod.GEN_TPS, - rule=lambda m, g, t: ( - m.DispatchUpperLimit[g, t] - m.DispatchGen[g, t])) + rule=lambda m, g, t: (m.DispatchUpperLimit[g, t] - m.DispatchGen[g, t]), + ) mod.DispatchSlackDown = Expression( mod.GEN_TPS, - rule=lambda m, g, t: ( - m.DispatchGen[g, t] - m.DispatchLowerLimit[g, t])) + rule=lambda m, g, t: (m.DispatchGen[g, t] - m.DispatchLowerLimit[g, t]), + ) def load_inputs(mod, switch_data, inputs_dir): @@ -419,7 +431,7 @@ def load_inputs(mod, switch_data, inputs_dir): If you only want to override default values for certain columns in a row, insert a dot . into the other columns. - generation_projects_info.csv + gen_info.csv GENERATION_PROJECT, gen_min_load_fraction, gen_startup_fuel, gen_startup_om @@ -434,13 +446,21 @@ def load_inputs(mod, switch_data, inputs_dir): """ switch_data.load_aug( optional=True, - filename=os.path.join(inputs_dir, 'generation_projects_info.csv'), - auto_select=True, - param=(mod.gen_min_load_fraction, mod.gen_startup_fuel, - mod.gen_startup_om, mod.gen_min_uptime, mod.gen_min_downtime)) + filename=os.path.join(inputs_dir, "gen_info.csv"), + param=( + mod.gen_min_load_fraction, + mod.gen_startup_fuel, + mod.gen_startup_om, + mod.gen_min_uptime, + mod.gen_min_downtime, + ), + ) switch_data.load_aug( optional=True, - filename=os.path.join(inputs_dir, 'gen_timepoint_commit_bounds.csv'), - auto_select=True, - param=(mod.gen_min_commit_fraction, - mod.gen_max_commit_fraction, mod.gen_min_load_fraction_TP)) + filename=os.path.join(inputs_dir, "gen_timepoint_commit_bounds.csv"), + param=( + mod.gen_min_commit_fraction, + mod.gen_max_commit_fraction, + mod.gen_min_load_fraction_TP, + ), + ) diff --git a/switch_model/generators/core/dispatch.py b/switch_model/generators/core/dispatch.py index 06d0fda6c..5a0c41281 100644 --- a/switch_model/generators/core/dispatch.py +++ b/switch_model/generators/core/dispatch.py @@ -1,4 +1,4 @@ -# Copyright (c) 2015-2019 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. 
# Licensed under the Apache License, Version 2.0, which is in the LICENSE file. """ @@ -10,20 +10,24 @@ """ from __future__ import division +import logging import os, collections + +import pandas as pd from pyomo.environ import * + from switch_model.reporting import write_table -import pandas as pd -try: - from ggplot import * - can_plot = True -except: - can_plot = False +from switch_model.utilities import unwrap + +dependencies = ( + "switch_model.timescales", + "switch_model.balancing.load_zones", + "switch_model.financials", + "switch_model.energy_sources.properties", + "switch_model.generators.core.build", +) +optional_dependencies = "switch_model.transmission.local_td" -dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones',\ - 'switch_model.financials', 'switch_model.energy_sources.properties', \ - 'switch_model.generators.core.build' -optional_dependencies = 'switch_model.transmission.local_td' def define_components(mod): """ @@ -151,23 +155,29 @@ def define_components(mod): """ def period_active_gen_rule(m, period): - if not hasattr(m, 'period_active_gen_dict'): - m.period_active_gen_dict = collections.defaultdict(set) + if not hasattr(m, "period_active_gen_dict"): + m.period_active_gen_dict = dict() for (_g, _period) in m.GEN_PERIODS: - m.period_active_gen_dict[_period].add(_g) + m.period_active_gen_dict.setdefault(_period, []).append(_g) result = m.period_active_gen_dict.pop(period) if len(m.period_active_gen_dict) == 0: - delattr(m, 'period_active_gen_dict') + delattr(m, "period_active_gen_dict") return result - mod.GENS_IN_PERIOD = Set(mod.PERIODS, initialize=period_active_gen_rule, - doc="The set of projects active in a given period.") + + mod.GENS_IN_PERIOD = Set( + mod.PERIODS, + dimen=1, + initialize=period_active_gen_rule, + doc="The set of projects active in a given period.", + ) mod.TPS_FOR_GEN = Set( mod.GENERATION_PROJECTS, + dimen=1, within=mod.TIMEPOINTS, initialize=lambda m, g: ( tp for p in m.PERIODS_FOR_GEN[g] for tp in m.TPS_IN_PERIOD[p] - ) + ), ) def init(m, gen, period): @@ -177,137 +187,199 @@ def init(m, gen, period): d = m._TPS_FOR_GEN_IN_PERIOD_dict = dict() for _gen in m.GENERATION_PROJECTS: for t in m.TPS_FOR_GEN[_gen]: - d.setdefault((_gen, m.tp_period[t]), set()).add(t) - result = d.pop((gen, period), set()) + d.setdefault((_gen, m.tp_period[t]), []).append(t) + result = d.pop((gen, period), []) if not d: # all gone, delete the attribute del m._TPS_FOR_GEN_IN_PERIOD_dict return result + mod.TPS_FOR_GEN_IN_PERIOD = Set( - mod.GENERATION_PROJECTS, mod.PERIODS, - within=mod.TIMEPOINTS, initialize=init) + mod.GENERATION_PROJECTS, + mod.PERIODS, + dimen=1, + within=mod.TIMEPOINTS, + initialize=init, + ) mod.GEN_TPS = Set( dimen=2, initialize=lambda m: ( - (g, tp) - for g in m.GENERATION_PROJECTS - for tp in m.TPS_FOR_GEN[g])) + (g, tp) for g in m.GENERATION_PROJECTS for tp in m.TPS_FOR_GEN[g] + ), + ) mod.VARIABLE_GEN_TPS = Set( dimen=2, initialize=lambda m: ( - (g, tp) - for g in m.VARIABLE_GENS - for tp in m.TPS_FOR_GEN[g])) + (g, tp) for g in m.VARIABLE_GENS for tp in m.TPS_FOR_GEN[g] + ), + ) mod.FUEL_BASED_GEN_TPS = Set( dimen=2, initialize=lambda m: ( - (g, tp) - for g in m.FUEL_BASED_GENS - for tp in m.TPS_FOR_GEN[g])) + (g, tp) for g in m.FUEL_BASED_GENS for tp in m.TPS_FOR_GEN[g] + ), + ) mod.GEN_TP_FUELS = Set( dimen=3, initialize=lambda m: ( - (g, t, f) - for (g, t) in m.FUEL_BASED_GEN_TPS - for f in m.FUELS_FOR_GEN[g])) + (g, t, f) for (g, t) in m.FUEL_BASED_GEN_TPS for f in m.FUELS_FOR_GEN[g] + ), + ) mod.GenCapacityInTP 
= Expression( - mod.GEN_TPS, - rule=lambda m, g, t: m.GenCapacity[g, m.tp_period[t]]) - mod.DispatchGen = Var( - mod.GEN_TPS, - within=NonNegativeReals) + mod.GEN_TPS, rule=lambda m, g, t: m.GenCapacity[g, m.tp_period[t]] + ) + mod.DispatchGen = Var(mod.GEN_TPS, within=NonNegativeReals) mod.ZoneTotalCentralDispatch = Expression( - mod.LOAD_ZONES, mod.TIMEPOINTS, - rule=lambda m, z, t: \ - sum(m.DispatchGen[p, t] - for p in m.GENS_IN_ZONE[z] - if (p, t) in m.GEN_TPS and not m.gen_is_distributed[p]) - - sum(m.DispatchGen[p, t] * m.gen_ccs_energy_load[p] - for p in m.GENS_IN_ZONE[z] - if (p, t) in m.GEN_TPS and p in m.CCS_EQUIPPED_GENS), - doc="Net power from grid-tied generation projects.") - mod.Zone_Power_Injections.append('ZoneTotalCentralDispatch') + mod.LOAD_ZONES, + mod.TIMEPOINTS, + rule=lambda m, z, t: sum( + m.DispatchGen[p, t] + for p in m.GENS_IN_ZONE[z] + if (p, t) in m.GEN_TPS and not m.gen_is_distributed[p] + ) + - sum( + m.DispatchGen[p, t] * m.gen_ccs_energy_load[p] + for p in m.GENS_IN_ZONE[z] + if (p, t) in m.GEN_TPS and p in m.CCS_EQUIPPED_GENS + ), + doc="Net power from grid-tied generation projects.", + ) + mod.Zone_Power_Injections.append("ZoneTotalCentralDispatch") # Divide distributed generation into a separate expression so that we can # put it in the distributed node's power balance equations if local_td is # included. mod.ZoneTotalDistributedDispatch = Expression( - mod.LOAD_ZONES, mod.TIMEPOINTS, - rule=lambda m, z, t: \ - sum(m.DispatchGen[g, t] - for g in m.GENS_IN_ZONE[z] - if (g, t) in m.GEN_TPS and m.gen_is_distributed[g]), - doc="Total power from distributed generation projects." + mod.LOAD_ZONES, + mod.TIMEPOINTS, + rule=lambda m, z, t: sum( + m.DispatchGen[g, t] + for g in m.GENS_IN_ZONE[z] + if (g, t) in m.GEN_TPS and m.gen_is_distributed[g] + ), + doc="Total power from distributed generation projects.", ) try: - mod.Distributed_Power_Injections.append('ZoneTotalDistributedDispatch') + mod.Distributed_Power_Injections.append("ZoneTotalDistributedDispatch") except AttributeError: - mod.Zone_Power_Injections.append('ZoneTotalDistributedDispatch') + mod.Zone_Power_Injections.append("ZoneTotalDistributedDispatch") def init_gen_availability(m, g): if m.gen_is_baseload[g]: - return ( - (1 - m.gen_forced_outage_rate[g]) * - (1 - m.gen_scheduled_outage_rate[g])) + return (1 - m.gen_forced_outage_rate[g]) * ( + 1 - m.gen_scheduled_outage_rate[g] + ) else: - return (1 - m.gen_forced_outage_rate[g]) + return 1 - m.gen_forced_outage_rate[g] + mod.gen_availability = Param( mod.GENERATION_PROJECTS, within=NonNegativeReals, - initialize=init_gen_availability) - - mod.VARIABLE_GEN_TPS_RAW = Set( - dimen=2, - within=mod.VARIABLE_GENS * mod.TIMEPOINTS, + initialize=init_gen_availability, ) + + mod.VARIABLE_GEN_TPS_RAW = Set(dimen=2, within=mod.VARIABLE_GENS * mod.TIMEPOINTS) mod.gen_max_capacity_factor = Param( mod.VARIABLE_GEN_TPS_RAW, within=Reals, - validate=lambda m, val, g, t: -1 < val < 2) + validate=lambda m, val, g, t: -1 < val < 2, + ) # Validate that a gen_max_capacity_factor has been defined for every # variable gen / timepoint that we need. Extra cap factors (like beyond an # existing plant's lifetime) shouldn't cause any problems. # This replaces: mod.min_data_check('gen_max_capacity_factor') from when # gen_max_capacity_factor was indexed by VARIABLE_GEN_TPS. 
mod.have_minimal_gen_max_capacity_factors = BuildCheck( - mod.VARIABLE_GEN_TPS, - rule=lambda m, g, t: (g,t) in m.VARIABLE_GEN_TPS_RAW) + mod.VARIABLE_GEN_TPS, rule=lambda m, g, t: (g, t) in m.VARIABLE_GEN_TPS_RAW + ) + + if mod.logger.isEnabledFor(logging.INFO): + # Tell user if the input files specify timeseries for renewable plant + # capacity factors that extend beyond the lifetime of the plant. + def rule(m): + extra_indexes = m.VARIABLE_GEN_TPS_RAW - m.VARIABLE_GEN_TPS + if extra_indexes: + num_impacted_generators = len(set(g for g, t in extra_indexes)) + extraneous = {g: [] for (g, t) in extra_indexes} + for (g, t) in extra_indexes: + extraneous[g].append(t) + pprint = "\n".join( + "* {}: {} to {}".format(g, min(tps), max(tps)) + for g, tps in extraneous.items() + ) + # basic message for everyone at info level + msg = unwrap( + """ + {} generation project[s] have data in + variable_capacity_factors.csv for timepoints when they are + not operable, either before construction is possible or + after retirement. + """.format( + num_impacted_generators + ) + ) + if m.logger.isEnabledFor(logging.DEBUG): + # more detailed message + msg += unwrap( + """ + You can avoid this message by only placing data in + variable_capacity_factors.csv for active periods for + each project. If you expect these project[s] to be + operable during all the timepoints currently in + variable_capacity_factors.csv, then they need to either + come online earlier, have longer lifetimes, or have + options to build new capacity when the old capacity + reaches its maximum age. + """ + ) + msg += " Plants with extra timepoints:\n{}".format(pprint) + else: + msg += " Use --log-level debug for more details." + m.logger.info(msg + "\n") + + mod.notify_on_extra_VARIABLE_GEN_TPS = BuildAction(rule=rule) mod.GenFuelUseRate = Var( mod.GEN_TP_FUELS, within=NonNegativeReals, - doc=("Other modules constraint this variable based on DispatchGen and " - "module-specific formulations of unit commitment and heat rates.")) + doc=( + "Other modules constrain this variable based on DispatchGen and " + "module-specific formulations of unit commitment and heat rates."
+ ), + ) def DispatchEmissions_rule(m, g, t, f): if g not in m.CCS_EQUIPPED_GENS: - return ( - m.GenFuelUseRate[g, t, f] * - (m.f_co2_intensity[f] + m.f_upstream_co2_intensity[f])) + return m.GenFuelUseRate[g, t, f] * ( + m.f_co2_intensity[f] + m.f_upstream_co2_intensity[f] + ) else: ccs_emission_frac = 1 - m.gen_ccs_capture_efficiency[g] - return ( - m.GenFuelUseRate[g, t, f] * - (m.f_co2_intensity[f] * ccs_emission_frac + - m.f_upstream_co2_intensity[f])) - mod.DispatchEmissions = Expression( - mod.GEN_TP_FUELS, - rule=DispatchEmissions_rule) - mod.AnnualEmissions = Expression(mod.PERIODS, + return m.GenFuelUseRate[g, t, f] * ( + m.f_co2_intensity[f] * ccs_emission_frac + m.f_upstream_co2_intensity[f] + ) + + mod.DispatchEmissions = Expression(mod.GEN_TP_FUELS, rule=DispatchEmissions_rule) + mod.AnnualEmissions = Expression( + mod.PERIODS, rule=lambda m, period: sum( m.DispatchEmissions[g, t, f] * m.tp_weight_in_year[t] for (g, t, f) in m.GEN_TP_FUELS - if m.tp_period[t] == period), - doc="The system's annual emissions, in metric tonnes of CO2 per year.") + if m.tp_period[t] == period + ), + doc="The system's annual emissions, in metric tonnes of CO2 per year.", + ) mod.GenVariableOMCostsInTP = Expression( mod.TIMEPOINTS, rule=lambda m, t: sum( m.DispatchGen[g, t] * m.gen_variable_om[g] - for g in m.GENS_IN_PERIOD[m.tp_period[t]]), - doc="Summarize costs for the objective function") - mod.Cost_Components_Per_TP.append('GenVariableOMCostsInTP') + for g in m.GENS_IN_PERIOD[m.tp_period[t]] + ), + doc="Summarize costs for the objective function", + ) + mod.Cost_Components_Per_TP.append("GenVariableOMCostsInTP") def load_inputs(mod, switch_data, inputs_dir): @@ -325,17 +397,17 @@ def load_inputs(mod, switch_data, inputs_dir): switch_data.load_aug( optional=True, - filename=os.path.join(inputs_dir, 'variable_capacity_factors.csv'), - autoselect=True, + filename=os.path.join(inputs_dir, "variable_capacity_factors.csv"), index=mod.VARIABLE_GEN_TPS_RAW, - param=(mod.gen_max_capacity_factor,)) + param=(mod.gen_max_capacity_factor,), + ) def post_solve(instance, outdir): """ Exported files: - dispatch-wide.csv - Dispatch results timepoints in "wide" format with + dispatch_wide.csv - Dispatch results in "wide" format, with timepoints as rows, generation projects as columns, and dispatch level as values @@ -351,64 +423,190 @@ def post_solve(instance, outdir): dispatch_annual_summary.pdf - A figure of annual summary data. Only written if the ggplot python library is installed.
""" + gen_proj = list(instance.GENERATION_PROJECTS) # native order + if instance.options.sorted_output: + gen_proj.sort() + write_table( - instance, instance.TIMEPOINTS, - output_file=os.path.join(outdir, "dispatch-wide.csv"), - headings=("timestamp",)+tuple(sorted(instance.GENERATION_PROJECTS)), - values=lambda m, t: (m.tp_timestamp[t],) + tuple( - m.DispatchGen[p, t] if (p, t) in m.GEN_TPS - else 0.0 - for p in sorted(m.GENERATION_PROJECTS) - ) + instance, + instance.TIMEPOINTS, + output_file=os.path.join(outdir, "dispatch_wide.csv"), + headings=("timestamp",) + tuple(gen_proj), + values=lambda m, t: (m.tp_timestamp[t],) + + tuple(m.DispatchGen[p, t] if (p, t) in m.GEN_TPS else 0.0 for p in gen_proj), ) - - dispatch_normalized_dat = [{ - "generation_project": g, - "gen_dbid": instance.gen_dbid[g], - "gen_tech": instance.gen_tech[g], - "gen_load_zone": instance.gen_load_zone[g], - "gen_energy_source": instance.gen_energy_source[g], - "timestamp": instance.tp_timestamp[t], - "tp_weight_in_year_hrs": instance.tp_weight_in_year[t], - "period": instance.tp_period[t], - "DispatchGen_MW": value(instance.DispatchGen[g, t]), - "Energy_GWh_typical_yr": value( - instance.DispatchGen[g, t] * instance.tp_weight_in_year[t] / 1000), - "VariableCost_per_yr": value( - instance.DispatchGen[g, t] * instance.gen_variable_om[g] * - instance.tp_weight_in_year[t]), - "DispatchEmissions_tCO2_per_typical_yr": value(sum( - instance.DispatchEmissions[g, t, f] * instance.tp_weight_in_year[t] - for f in instance.FUELS_FOR_GEN[g] - )) if instance.gen_uses_fuel[g] else 0 - } for g, t in instance.GEN_TPS ] + dispatch_normalized_dat = [] + for g, t in instance.GEN_TPS: + p = instance.tp_period[t] + record = { + "generation_project": g, + "gen_dbid": instance.gen_dbid[g], + "gen_tech": instance.gen_tech[g], + "gen_load_zone": instance.gen_load_zone[g], + "gen_energy_source": instance.gen_energy_source[g], + "timestamp": instance.tp_timestamp[t], + "tp_weight_in_year_hrs": instance.tp_weight_in_year[t], + "period": instance.tp_period[t], + "DispatchGen_MW": value(instance.DispatchGen[g, t]), + "Energy_GWh_typical_yr": value( + instance.DispatchGen[g, t] * instance.tp_weight_in_year[t] / 1000 + ), + "VariableCost_per_yr": value( + instance.DispatchGen[g, t] + * instance.gen_variable_om[g] + * instance.tp_weight_in_year[t] + ), + "DispatchEmissions_tCO2_per_typical_yr": value( + sum( + instance.DispatchEmissions[g, t, f] * instance.tp_weight_in_year[t] + for f in instance.FUELS_FOR_GEN[g] + ) + ) + if instance.gen_uses_fuel[g] + else 0, + "GenCapacity_MW": value(instance.GenCapacity[g, p]), + "GenCapitalCosts": value(instance.GenCapitalCosts[g, p]), + "GenFixedOMCosts": value(instance.GenFixedOMCosts[g, p]), + } + try: + try: + record["ChargeStorage_MW"] = -1.0 * value(instance.ChargeStorage[g, t]) + record["Store_GWh_typical_yr"] = value( + instance.ChargeStorage[g, t] * instance.tp_weight_in_year[t] / 1000 + ) + record["Discharge_GWh_typical_yr"] = record["Energy_GWh_typical_yr"] + record["Energy_GWh_typical_yr"] -= record["Store_GWh_typical_yr"] + record["is_storage"] = True + except KeyError: + record["ChargeStorage_MW"] = float("NaN") + record["Store_GWh_typical_yr"] = float("NaN") + record["Discharge_GWh_typical_yr"] = float("NaN") + record["is_storage"] = False + except AttributeError: + pass + dispatch_normalized_dat.append(record) dispatch_full_df = pd.DataFrame(dispatch_normalized_dat) dispatch_full_df.set_index(["generation_project", "timestamp"], inplace=True) + if instance.options.sorted_output: + 
dispatch_full_df.sort_index(inplace=True) dispatch_full_df.to_csv(os.path.join(outdir, "dispatch.csv")) + summary_columns = [ + "Energy_GWh_typical_yr", + "VariableCost_per_yr", + "DispatchEmissions_tCO2_per_typical_yr", + "GenCapacity_MW", + "GenCapitalCosts", + "GenFixedOMCosts", + "LCOE_dollar_per_MWh", + "capacity_factor", + ] + if "ChargeStorage" in dir(instance): + summary_columns.extend(["Store_GWh_typical_yr", "Discharge_GWh_typical_yr"]) + + # Annual summary of each generator + gen_sum = dispatch_full_df.groupby( + [ + "generation_project", + "gen_dbid", + "gen_tech", + "gen_load_zone", + "gen_energy_source", + "period", + "GenCapacity_MW", + "GenCapitalCosts", + "GenFixedOMCosts", + ] + ).agg( + lambda x: x.sum(min_count=1, skipna=False) + ) # why these arguments? + gen_sum.reset_index(inplace=True) + gen_sum.set_index( + inplace=True, + keys=[ + "generation_project", + "gen_dbid", + "gen_tech", + "gen_load_zone", + "gen_energy_source", + "period", + ], + ) + gen_sum["Energy_out_avg_MW"] = ( + gen_sum["Energy_GWh_typical_yr"] * 1000 / gen_sum["tp_weight_in_year_hrs"] + ) + hrs_per_yr = gen_sum.iloc[0]["tp_weight_in_year_hrs"] + try: + idx = gen_sum["is_storage"].astype(bool) + gen_sum.loc[idx, "Energy_out_avg_MW"] = ( + gen_sum.loc[idx, "Discharge_GWh_typical_yr"] + * 1000 + / gen_sum.loc[idx, "tp_weight_in_year_hrs"] + ) + except KeyError: + pass + + def add_cap_factor_and_lcoe(df): + df["capacity_factor"] = df["Energy_out_avg_MW"] / df["GenCapacity_MW"] + no_cap = df["GenCapacity_MW"] == 0 + df.loc[no_cap, "capacity_factor"] = 0 - annual_summary = dispatch_full_df.groupby(['gen_tech', "gen_energy_source", "period"]).sum() - annual_summary.to_csv( - os.path.join(outdir, "dispatch_annual_summary.csv"), - columns=["Energy_GWh_typical_yr", "VariableCost_per_yr", - "DispatchEmissions_tCO2_per_typical_yr"]) + df["LCOE_dollar_per_MWh"] = ( + df["GenCapitalCosts"] + df["GenFixedOMCosts"] + df["VariableCost_per_yr"] + ) / (df["Energy_out_avg_MW"] * hrs_per_yr) + no_energy = df["Energy_out_avg_MW"] == 0 + df.loc[no_energy, "LCOE_dollar_per_MWh"] = 0 + + return df + gen_sum = add_cap_factor_and_lcoe(gen_sum) + gen_sum.to_csv( + os.path.join(outdir, "gen_project_annual_summary.csv"), columns=summary_columns + ) - zonal_annual_summary = dispatch_full_df.groupby( - ['gen_tech', "gen_load_zone", "gen_energy_source", "period"] + zone_sum = gen_sum.groupby( + ["gen_tech", "gen_load_zone", "gen_energy_source", "period"] ).sum() - zonal_annual_summary.to_csv( + zone_sum = add_cap_factor_and_lcoe(zone_sum) + zone_sum.to_csv( os.path.join(outdir, "dispatch_zonal_annual_summary.csv"), - columns=["Energy_GWh_typical_yr", "VariableCost_per_yr", - "DispatchEmissions_tCO2_per_typical_yr"] + columns=summary_columns, ) - if can_plot: - annual_summary_plot = ggplot( - annual_summary.reset_index(), - aes(x='period', weight="Energy_GWh_typical_yr", fill="factor(gen_tech)") - ) + \ - geom_bar(position="stack") + \ - scale_y_continuous(name='Energy (GWh/yr)') + theme_bw() - annual_summary_plot.save(filename=os.path.join(outdir, "dispatch_annual_summary.pdf")) + annual_summary = zone_sum.groupby(["gen_tech", "gen_energy_source", "period"]).sum() + annual_summary = add_cap_factor_and_lcoe(annual_summary) + annual_summary.to_csv( + os.path.join(outdir, "dispatch_annual_summary.csv"), columns=summary_columns + ) + + import warnings + + with warnings.catch_warnings(): + # suppress warnings during import and use of plotnine + warnings.simplefilter("ignore") + try: + import plotnine as p9 + except ImportError: + pass 
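A quick numeric check of the LCOE formula in add_cap_factor_and_lcoe above, using hypothetical values:

    # annualized capital recovery, fixed O&M and variable cost, $/yr
    capital, fixed_om, variable = 9.44e6, 2.0e6, 3.0e6
    energy_out_avg_mw = 60.0
    hrs_per_yr = 8760.0

    # LCOE = total annual cost / annual energy output
    lcoe = (capital + fixed_om + variable) / (energy_out_avg_mw * hrs_per_yr)
    print(round(lcoe, 2))   # ~27.47 $/MWh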
+ else: + # plotnine was imported successfully + plots = [ + ("gen_energy_source", "dispatch_annual_summary_fuel.pdf"), + ("gen_tech", "dispatch_annual_summary_tech.pdf"), + ] + for y, outfile in plots: + annual_summary_plot = ( + p9.ggplot( + annual_summary.reset_index(), + p9.aes( + x="period", + weight="Energy_GWh_typical_yr", + fill="factor({})".format(y), + ), + ) + + p9.geom_bar(position="stack") + + p9.scale_y_continuous(name="Energy (GWh/yr)") + + p9.theme_bw() + ) + annual_summary_plot.save(filename=os.path.join(outdir, outfile)) diff --git a/switch_model/generators/core/gen_discrete_build.py b/switch_model/generators/core/gen_discrete_build.py index d2b502635..fcdd1243b 100644 --- a/switch_model/generators/core/gen_discrete_build.py +++ b/switch_model/generators/core/gen_discrete_build.py @@ -1,4 +1,4 @@ -# Copyright (c) 2015-2019 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. """ @@ -8,9 +8,14 @@ from pyomo.environ import * -dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones',\ - 'switch_model.financials', 'switch_model.energy_sources.properties', \ - 'switch_model.generators.core.build' +dependencies = ( + "switch_model.timescales", + "switch_model.balancing.load_zones", + "switch_model.financials", + "switch_model.energy_sources.properties", + "switch_model.generators.core.build", +) + def define_components(mod): """ @@ -33,13 +38,14 @@ def define_components(mod): """ mod.DISCRETE_GEN_BLD_YRS = Set( + dimen=2, initialize=mod.GEN_BLD_YRS, - filter=lambda m, g, bld_yr: g in m.DISCRETELY_SIZED_GENS) - mod.BuildUnits = Var( - mod.DISCRETE_GEN_BLD_YRS, - within=NonNegativeIntegers) + filter=lambda m, g, bld_yr: g in m.DISCRETELY_SIZED_GENS, + ) + mod.BuildUnits = Var(mod.DISCRETE_GEN_BLD_YRS, within=NonNegativeIntegers) mod.Build_Units_Consistency = Constraint( mod.DISCRETE_GEN_BLD_YRS, rule=lambda m, g, bld_yr: ( - m.BuildGen[g, bld_yr] == - m.BuildUnits[g, bld_yr] * m.gen_unit_size[g])) + m.BuildGen[g, bld_yr] == m.BuildUnits[g, bld_yr] * m.gen_unit_size[g] + ), + ) diff --git a/switch_model/generators/core/no_commit.py b/switch_model/generators/core/no_commit.py index 5798c56c6..9af7a7ad8 100644 --- a/switch_model/generators/core/no_commit.py +++ b/switch_model/generators/core/no_commit.py @@ -1,4 +1,4 @@ -# Copyright (c) 2015-2019 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. """ @@ -9,9 +9,15 @@ from pyomo.environ import * -dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones',\ - 'switch_model.financials', 'switch_model.energy_sources.properties.properties', \ - 'switch_model.generators.core.build', 'switch_model.generators.core.dispatch' +dependencies = ( + "switch_model.timescales", + "switch_model.balancing.load_zones", + "switch_model.financials", + "switch_model.energy_sources.properties.properties", + "switch_model.generators.core.build", + "switch_model.generators.core.dispatch", +) + def define_components(mod): """ @@ -80,37 +86,46 @@ def define_components(mod): # and dispatch. 
mod.BASELOAD_GEN_PERIODS = Set( dimen=2, - rule=lambda m: - [(g, p) for g in m.BASELOAD_GENS for p in m.PERIODS_FOR_GEN[g]]) + initialize=lambda m: [ + (g, p) for g in m.BASELOAD_GENS for p in m.PERIODS_FOR_GEN[g] + ], + ) mod.BASELOAD_GEN_TPS = Set( dimen=2, - rule=lambda m: - [(g, t) for g, p in m.BASELOAD_GEN_PERIODS for t in m.TPS_IN_PERIOD[p]]) + initialize=lambda m: [ + (g, t) for g, p in m.BASELOAD_GEN_PERIODS for t in m.TPS_IN_PERIOD[p] + ], + ) mod.DispatchBaseloadByPeriod = Var(mod.BASELOAD_GEN_PERIODS) def DispatchUpperLimit_expr(m, g, t): if g in m.VARIABLE_GENS: - return (m.GenCapacityInTP[g, t] * m.gen_availability[g] * - m.gen_max_capacity_factor[g, t]) + return ( + m.GenCapacityInTP[g, t] + * m.gen_availability[g] + * m.gen_max_capacity_factor[g, t] + ) else: return m.GenCapacityInTP[g, t] * m.gen_availability[g] - mod.DispatchUpperLimit = Expression( - mod.GEN_TPS, - rule=DispatchUpperLimit_expr) + + mod.DispatchUpperLimit = Expression(mod.GEN_TPS, rule=DispatchUpperLimit_expr) mod.Enforce_Dispatch_Baseload_Flat = Constraint( mod.BASELOAD_GEN_TPS, - rule=lambda m, g, t: - m.DispatchGen[g, t] == m.DispatchBaseloadByPeriod[g, m.tp_period[t]]) + rule=lambda m, g, t: m.DispatchGen[g, t] + == m.DispatchBaseloadByPeriod[g, m.tp_period[t]], + ) mod.Enforce_Dispatch_Upper_Limit = Constraint( mod.GEN_TPS, - rule=lambda m, g, t: ( - m.DispatchGen[g, t] <= m.DispatchUpperLimit[g, t])) + rule=lambda m, g, t: (m.DispatchGen[g, t] <= m.DispatchUpperLimit[g, t]), + ) mod.GenFuelUseRate_Calculate = Constraint( mod.FUEL_BASED_GEN_TPS, rule=lambda m, g, t: ( sum(m.GenFuelUseRate[g, t, f] for f in m.FUELS_FOR_GEN[g]) - == m.DispatchGen[g, t] * m.gen_full_load_heat_rate[g])) + == m.DispatchGen[g, t] * m.gen_full_load_heat_rate[g] + ), + ) diff --git a/switch_model/generators/extensions/hydro_simple.py b/switch_model/generators/extensions/hydro_simple.py index 23c2257e2..078257ca4 100644 --- a/switch_model/generators/extensions/hydro_simple.py +++ b/switch_model/generators/extensions/hydro_simple.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (c) 2016-2017 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. """ @@ -9,42 +9,51 @@ reasonable, and easy to work with. This supports dispatchable resevoir- based hydro plants. This module is not needed to support run-of-river plants; those may be specified as variable renewable plants with exogenous -energy availability. - -A more full-featured hydro model is available from the Operations, Control -and Markets laboratory at Pontificia Universidad Católica de Chile. -Where possible, I have used the same set and variable names to hopefully -make it easier to merge that into the codebase later. The Chilean branch -has a hydro model that includes water networks that connect dams via waterways -and ground infiltration. It should be possible to describe a simple system -using the advanced framework, but the advanced framework would take longer to -read and understand. To really take advantage of it, you'll also need more -data than we usually have available. +energy availability. This module is generally sufficient for day-to-day +operations where reservoir volume levels are not a binding constraint, but +does not track seasonal planning for fluctuations in reservoir volume. 
+ +A more full-featured hydro model is available in the hydro_system module from +the Operations, Control and Markets laboratory at Pontificia Universidad +Católica de Chile. Where possible, I have used the same set and variable names +to aid interoperability. The Chilean module has a hydro model that includes +water networks that connect dams via waterways and ground infiltration. It +should be possible to describe a simple system using the advanced framework, +but the advanced framework would take longer to read and understand. To really +take advantage of it, you'll also need more data than we usually have +available. """ -from __future__ import division # ToDo: Refactor this code to move the core components into a -# switch_model.hydro.core module, the simplist components into +# switch_model.hydro.core module, the simplest components into # switch_model.hydro.simple, and the advanced components into # switch_model.hydro.water_network. That should set a good example # for other people who want to do other custom handling of hydro. -from pyomo.environ import * +from __future__ import division + import os -dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones',\ - 'switch_model.financials', 'switch_model.energy_sources.properties.properties', \ - 'switch_model.generators.core.build', 'switch_model.generators.core.dispatch' +from pyomo.environ import * +from switch_model.utilities import unique_list + +dependencies = ( + "switch_model.timescales", + "switch_model.balancing.load_zones", + "switch_model.financials", + "switch_model.energy_sources.properties.properties", + "switch_model.generators.core.build", + "switch_model.generators.core.dispatch", +) + def define_components(mod): """ - - HYDRO_GENS is the set of dispatchable hydro projects. This is a subet - of GENERATION_PROJECTS, and is determined by the inputs file hydro_timeseries.csv. - Members of this set can be called either g, or hydro_g. + HYDRO_GENS is the set of dispatchable hydro projects. This is a subset of + GENERATION_PROJECTS, and is determined by the inputs file + hydro_timeseries.csv. HYDRO_GEN_TS is the set of Hydro projects and timeseries for which - minimum and average flow are specified. Members of this set can be - abbreviated as (project, timeseries) or (g, ts). + minimum and average flow are specified. HYDRO_GEN_TPS is the set of Hydro projects and available dispatch points. This is a filtered version of GEN_TPS that @@ -59,64 +68,104 @@ def define_components(mod): Enforce_Hydro_Min_Flow[(g, t) in HYDRO_GEN_TPS] is a constraint that enforces minimum flow levels for each timepoint. - Enforce_Hydro_Avg_Flow[(g, ts) in HYDRO_GEN_TS] is a constraint - that enforces average flow levels across each timeseries. + SpillHydro[(g, t) in HYDRO_GEN_TPS] is a variable describing the amount + of potential hydro power sent over the spillway (as opposed to being used + to generate electricity). In high-flow or flood conditions, this can + mitigate infeasibilities where the incoming flow is greater than the + generator's capacity. It is specified in units of MW. + Enforce_Hydro_Avg_Flow[(g, ts) in HYDRO_GEN_TS] is a constraint + that enforces average flow levels across each timeseries. It requires that + the average of dispatched and spilled hydro over the course of a timeseries + equal the corresponding hydro_avg_flow_mw parameter.
""" mod.HYDRO_GEN_TS_RAW = Set( dimen=2, - validate=lambda m, g, ts: (g in m.GENERATION_PROJECTS) & (ts in m.TIMESERIES)) - + validate=lambda m, g, ts: ((g in m.GENERATION_PROJECTS) & (ts in m.TIMESERIES)), + ) mod.HYDRO_GENS = Set( - initialize=lambda m: set(g for (g, ts) in m.HYDRO_GEN_TS_RAW), - doc="Dispatchable hydro projects") + dimen=1, + initialize=lambda m: unique_list(g for (g, ts) in m.HYDRO_GEN_TS_RAW), + doc="Dispatchable hydro projects", + ) mod.HYDRO_GEN_TS = Set( dimen=2, - initialize=lambda m: set( - (g, m.tp_ts[tp]) - for g in m.HYDRO_GENS - for tp in m.TPS_FOR_GEN[g])) + initialize=lambda m: unique_list( + (g, m.tp_ts[tp]) for g in m.HYDRO_GENS for tp in m.TPS_FOR_GEN[g] + ), + ) mod.HYDRO_GEN_TPS = Set( - initialize=mod.GEN_TPS, - filter=lambda m, g, t: g in m.HYDRO_GENS) + dimen=2, initialize=mod.GEN_TPS, filter=lambda m, g, t: g in m.HYDRO_GENS + ) # Validate that a timeseries data is specified for every hydro generator / # timeseries that we need. Extra data points (ex: outside of planning # horizon or beyond a plant's lifetime) can safely be ignored to make it # easier to create input files. mod.have_minimal_hydro_params = BuildCheck( - mod.HYDRO_GEN_TS, - rule=lambda m, g, ts: (g,ts) in m.HYDRO_GEN_TS_RAW) - - # To do: Add validation check that timeseries data are specified for every - # valid timepoint. + mod.HYDRO_GEN_TS, rule=lambda m, g, ts: (g, ts) in m.HYDRO_GEN_TS_RAW + ) + # Generate a warning if the input files specify timeseries for renewable + # plant capacity factors that extend beyond the expected lifetime of the + # plant. This could be caused by simple logic to build input files, or + # could indicate that the user expects those plants to operate longer + # than indicated. + def _warn_on_extra_HYDRO_GEN_TS(m): + extra_indexes = m.HYDRO_GEN_TS_RAW - m.HYDRO_GEN_TS + extraneous = {g: [] for (g, t) in extra_indexes} + for (g, t) in extra_indexes: + extraneous[g].append(t) + pprint = "\n".join( + "* {}: {} to {}".format(g, min(tps), max(tps)) + for g, tps in extraneous.items() + ) + warning_msg = ( + "{} hydro plants with predetermined builds have timeseries data " + "in periods when they are not operating (either after retirement, " + "or before construction is complete). This " + "could indicate a benign issue where the process that built " + "the dataset used simplified logic and/or didn't know the " + "scheduled operating dates. If you expect those datapoints to " + "be useful, then those plants need to either come online earlier, " + "have longer lifetimes, or have options to build new capacity " + "when the old capacity reaches the provided end-of-life date." 
+ "\n".format(len(extraneous)) + ) + if extra_indexes: + m.logger.warning(warning_msg) + m.logger.info("Plants with extra timepoints:\n{}".format(pprint)) + return True + + mod.warn_on_extra_HYDRO_GEN_TS = BuildCheck(rule=_warn_on_extra_HYDRO_GEN_TS) mod.hydro_min_flow_mw = Param( - mod.HYDRO_GEN_TS_RAW, - within=NonNegativeReals, - default=0.0) + mod.HYDRO_GEN_TS_RAW, within=NonNegativeReals, default=0.0 + ) mod.Enforce_Hydro_Min_Flow = Constraint( mod.HYDRO_GEN_TPS, rule=lambda m, g, t: ( - m.DispatchGen[g, t] >= m.hydro_min_flow_mw[g, m.tp_ts[t]])) + m.DispatchGen[g, t] >= m.hydro_min_flow_mw[g, m.tp_ts[t]] + ), + ) mod.hydro_avg_flow_mw = Param( - mod.HYDRO_GEN_TS_RAW, - within=NonNegativeReals, - default=0.0) + mod.HYDRO_GEN_TS_RAW, within=NonNegativeReals, default=0.0 + ) + mod.SpillHydro = Var(mod.HYDRO_GEN_TPS, within=NonNegativeReals) mod.Enforce_Hydro_Avg_Flow = Constraint( mod.HYDRO_GEN_TS, rule=lambda m, g, ts: ( - sum(m.DispatchGen[g, t] for t in m.TPS_IN_TS[ts]) / m.ts_num_tps[ts] - == m.hydro_avg_flow_mw[g, ts])) + sum(m.DispatchGen[g, t] + m.SpillHydro[g, t] for t in m.TPS_IN_TS[ts]) + == m.hydro_avg_flow_mw[g, ts] * m.ts_num_tps[ts] + ), + ) - mod.min_data_check('hydro_min_flow_mw', 'hydro_avg_flow_mw') + mod.min_data_check("hydro_min_flow_mw", "hydro_avg_flow_mw") def load_inputs(mod, switch_data, inputs_dir): """ - Import hydro data. The single file hydro_timeseries.csv needs to contain entries for each dispatchable hydro project. The set of hydro projects is derived from this file, and this file should cover all time periods @@ -133,8 +182,7 @@ def load_inputs(mod, switch_data, inputs_dir): """ switch_data.load_aug( optional=True, - filename=os.path.join(inputs_dir, 'hydro_timeseries.csv'), - autoselect=True, + filename=os.path.join(inputs_dir, "hydro_timeseries.csv"), index=mod.HYDRO_GEN_TS_RAW, - param=(mod.hydro_min_flow_mw, mod.hydro_avg_flow_mw) + param=(mod.hydro_min_flow_mw, mod.hydro_avg_flow_mw), ) diff --git a/switch_model/generators/extensions/hydro_system.py b/switch_model/generators/extensions/hydro_system.py index 33f02f1fc..22be10055 100644 --- a/switch_model/generators/extensions/hydro_system.py +++ b/switch_model/generators/extensions/hydro_system.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016-2017 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. 
""" @@ -32,9 +32,15 @@ import os from pyomo.environ import * -dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones',\ - 'switch_model.financials', 'switch_model.energy_sources.properties.properties', \ - 'switch_model.generators.core.build', 'switch_model.generators.core.dispatch' +dependencies = ( + "switch_model.timescales", + "switch_model.balancing.load_zones", + "switch_model.financials", + "switch_model.energy_sources.properties.properties", + "switch_model.generators.core.build", + "switch_model.generators.core.dispatch", +) + def define_components(mod): """ @@ -254,127 +260,118 @@ def define_components(mod): """ ################# # Nodes of the water network - mod.WATER_NODES = Set() - mod.WNODE_TPS = Set( - dimen=2, - initialize=lambda m: m.WATER_NODES * m.TIMEPOINTS) + mod.WATER_NODES = Set(dimen=1) + mod.WNODE_TPS = Set(dimen=2, initialize=lambda m: m.WATER_NODES * m.TIMEPOINTS) mod.wnode_constant_inflow = Param( - mod.WATER_NODES, - within=NonNegativeReals, - default=0.0) + mod.WATER_NODES, within=NonNegativeReals, default=0.0 + ) mod.wnode_constant_consumption = Param( - mod.WATER_NODES, - within=NonNegativeReals, - default=0.0) + mod.WATER_NODES, within=NonNegativeReals, default=0.0 + ) mod.wnode_tp_inflow = Param( mod.WNODE_TPS, within=NonNegativeReals, - default=lambda m, wn, t: m.wnode_constant_inflow[wn]) + default=lambda m, wn, t: m.wnode_constant_inflow[wn], + ) mod.wnode_tp_consumption = Param( mod.WNODE_TPS, within=NonNegativeReals, - default=lambda m, wn, t: m.wnode_constant_consumption[wn]) - mod.wn_is_sink = Param( - mod.WATER_NODES, - within=Boolean) - mod.min_data_check('wn_is_sink') - mod.spillage_penalty = Param( - within=NonNegativeReals, - default=100) - mod.SpillWaterAtNode = Var( - mod.WNODE_TPS, - within=NonNegativeReals) + default=lambda m, wn, t: m.wnode_constant_consumption[wn], + ) + mod.wn_is_sink = Param(mod.WATER_NODES, within=Boolean) + mod.min_data_check("wn_is_sink") + mod.spillage_penalty = Param(within=NonNegativeReals, default=100) + mod.SpillWaterAtNode = Var(mod.WNODE_TPS, within=NonNegativeReals) ################# # Reservoir nodes - mod.RESERVOIRS = Set( - within=mod.WATER_NODES) - mod.RESERVOIR_TPS = Set( - dimen=2, - initialize=lambda m: m.RESERVOIRS * m.TIMEPOINTS) - mod.res_min_vol = Param( - mod.RESERVOIRS, - within=NonNegativeReals) + mod.RESERVOIRS = Set(within=mod.WATER_NODES, dimen=1) + mod.RESERVOIR_TPS = Set(dimen=2, initialize=lambda m: m.RESERVOIRS * m.TIMEPOINTS) + mod.res_min_vol = Param(mod.RESERVOIRS, within=NonNegativeReals) mod.res_max_vol = Param( mod.RESERVOIRS, within=NonNegativeReals, - validate=lambda m, val, r: val >= m.res_min_vol[r]) + validate=lambda m, val, r: val >= m.res_min_vol[r], + ) mod.res_min_vol_tp = Param( mod.RESERVOIR_TPS, within=NonNegativeReals, - default=lambda m, r, t: m.res_min_vol[r]) + default=lambda m, r, t: m.res_min_vol[r], + ) mod.res_max_vol_tp = Param( mod.RESERVOIR_TPS, within=NonNegativeReals, - default=lambda m, r, t: m.res_max_vol[r]) + default=lambda m, r, t: m.res_max_vol[r], + ) mod.initial_res_vol = Param( mod.RESERVOIRS, within=NonNegativeReals, - validate=lambda m, val, r: ( - m.res_min_vol[r] <= val <= m.res_max_vol[r])) + validate=lambda m, val, r: (m.res_min_vol[r] <= val <= m.res_max_vol[r]), + ) mod.final_res_vol = Param( mod.RESERVOIRS, within=NonNegativeReals, - validate=lambda m, val, r: ( - m.res_min_vol[r] <= val <= m.res_max_vol[r])) - mod.min_data_check('res_min_vol', 'res_max_vol', 'initial_res_vol', - 'final_res_vol') + validate=lambda m, 
val, r: (m.res_min_vol[r] <= val <= m.res_max_vol[r]), + ) + mod.min_data_check("res_min_vol", "res_max_vol", "initial_res_vol", "final_res_vol") + def ReservoirVol_bounds(m, r, t): # In the first timepoint of each period, this is externally defined if t == m.TPS_IN_PERIOD[m.tp_period[t]].first(): - return(m.initial_res_vol[r], m.initial_res_vol[r]) + return (m.initial_res_vol[r], m.initial_res_vol[r]) # In all other timepoints, this is constrained by min & max params else: - return(m.res_min_vol[r], m.res_max_vol[r]) + return (m.res_min_vol[r], m.res_max_vol[r]) + mod.ReservoirVol = Var( - mod.RESERVOIR_TPS, - within=NonNegativeReals, - bounds=ReservoirVol_bounds) + mod.RESERVOIR_TPS, within=NonNegativeReals, bounds=ReservoirVol_bounds + ) mod.ReservoirFinalVol = Var( - mod.RESERVOIRS, mod.PERIODS, + mod.RESERVOIRS, + mod.PERIODS, within=NonNegativeReals, - bounds=lambda m, r, p: (m.final_res_vol[r], m.res_max_vol[r])) + bounds=lambda m, r, p: (m.final_res_vol[r], m.res_max_vol[r]), + ) ################ # Edges of the water network - mod.WATER_CONNECTIONS = Set() - mod.WCON_TPS = Set( - dimen=2, - initialize=lambda m: m.WATER_CONNECTIONS * m.TIMEPOINTS) - mod.water_node_from = Param( - mod.WATER_CONNECTIONS, - within=mod.WATER_NODES) - mod.water_node_to = Param( - mod.WATER_CONNECTIONS, - within=mod.WATER_NODES) + mod.WATER_CONNECTIONS = Set(dimen=1) + mod.WCON_TPS = Set(dimen=2, initialize=lambda m: m.WATER_CONNECTIONS * m.TIMEPOINTS) + mod.water_node_from = Param(mod.WATER_CONNECTIONS, within=mod.WATER_NODES) + mod.water_node_to = Param(mod.WATER_CONNECTIONS, within=mod.WATER_NODES) mod.wc_capacity = Param( - mod.WATER_CONNECTIONS, - within=NonNegativeReals, - default=float('inf')) - mod.min_eco_flow = Param( - mod.WCON_TPS, - within=NonNegativeReals, - default=0.0) - mod.min_data_check('water_node_from', 'water_node_to') + mod.WATER_CONNECTIONS, within=NonNegativeReals, default=float("inf") + ) + mod.min_eco_flow = Param(mod.WCON_TPS, within=NonNegativeReals, default=0.0) + mod.min_data_check("water_node_from", "water_node_to") mod.INWARD_WCONS_TO_WNODE = Set( mod.WATER_NODES, - initialize=lambda m, wn: set(wc for wc in m.WATER_CONNECTIONS - if m.water_node_to[wc] == wn)) + dimen=1, + initialize=lambda m, wn: [ + wc for wc in m.WATER_CONNECTIONS if m.water_node_to[wc] == wn + ], + ) mod.OUTWARD_WCONS_FROM_WNODE = Set( mod.WATER_NODES, - initialize=lambda m, wn: set(wc for wc in m.WATER_CONNECTIONS - if m.water_node_from[wc] == wn)) + dimen=1, + initialize=lambda m, wn: [ + wc for wc in m.WATER_CONNECTIONS if m.water_node_from[wc] == wn + ], + ) mod.DispatchWater = Var( mod.WCON_TPS, within=NonNegativeReals, - bounds=lambda m, wc, t: (m.min_eco_flow[wc, t], m.wc_capacity[wc])) + bounds=lambda m, wc, t: (m.min_eco_flow[wc, t], m.wc_capacity[wc]), + ) def Enforce_Wnode_Balance_rule(m, wn, t): # Sum inflows and outflows from and to other nodes - dispatch_inflow = sum(m.DispatchWater[wc, t] - for wc in m.INWARD_WCONS_TO_WNODE[wn]) - dispatch_outflow = sum(m.DispatchWater[wc, t] - for wc in m.OUTWARD_WCONS_FROM_WNODE[wn]) + dispatch_inflow = sum( + m.DispatchWater[wc, t] for wc in m.INWARD_WCONS_TO_WNODE[wn] + ) + dispatch_outflow = sum( + m.DispatchWater[wc, t] for wc in m.OUTWARD_WCONS_FROM_WNODE[wn] + ) # net change in reservoir volume (m3/s): 0 for non-reservoirs reservoir_fill_rate = 0.0 if wn in m.RESERVOIRS: @@ -384,62 +381,60 @@ def Enforce_Wnode_Balance_rule(m, wn, t): else: end_of_tp_volume = m.ReservoirVol[wn, m.TPS_IN_PERIOD[p].next(t)] reservoir_fill_rate = ( - (end_of_tp_volume 
- m.ReservoirVol[wn, t]) * 1000000.0 / - (m.tp_duration_hrs[t] * 3600)) + (end_of_tp_volume - m.ReservoirVol[wn, t]) + * 1000000.0 + / (m.tp_duration_hrs[t] * 3600) + ) # Conservation of mass flow return ( # inflows (m3/s) m.wnode_tp_inflow[wn, t] + dispatch_inflow # less outflows (m3/s) - - m.wnode_tp_consumption[wn, t] - dispatch_outflow + - m.wnode_tp_consumption[wn, t] + - dispatch_outflow - m.SpillWaterAtNode[wn, t] # net change in volume (m3/s) == reservoir_fill_rate ) + mod.Enforce_Wnode_Balance = Constraint( - mod.WNODE_TPS, - rule=Enforce_Wnode_Balance_rule) + mod.WNODE_TPS, rule=Enforce_Wnode_Balance_rule + ) mod.NodeSpillageCosts = Expression( mod.TIMEPOINTS, rule=lambda m, t: sum( # prior to Switch 2.0.3, this did not account for tp_duration_hrs - m.SpillWaterAtNode[wn,t] * 3600 * m.tp_duration_hrs[t] * - m.spillage_penalty + m.SpillWaterAtNode[wn, t] * 3600 * m.tp_duration_hrs[t] * m.spillage_penalty for wn in m.WATER_NODES if not m.wn_is_sink[wn] - ) + ), ) - mod.Cost_Components_Per_TP.append('NodeSpillageCosts') + mod.Cost_Components_Per_TP.append("NodeSpillageCosts") ################ # Hydro projects - mod.HYDRO_GENS = Set( - validate=lambda m, val: val in m.GENERATION_PROJECTS) + mod.HYDRO_GENS = Set(dimen=1, within=mod.GENERATION_PROJECTS) mod.HYDRO_GEN_TPS = Set( - initialize=mod.GEN_TPS, - filter=lambda m, g, t: g in m.HYDRO_GENS) - mod.hydro_efficiency = Param( - mod.HYDRO_GENS, - within=NonNegativeReals) - mod.hydraulic_location = Param( - mod.HYDRO_GENS, - validate=lambda m, val, g: val in m.WATER_CONNECTIONS) - mod.TurbinateFlow = Var( - mod.HYDRO_GEN_TPS, - within=NonNegativeReals) - mod.SpillFlow = Var( - mod.HYDRO_GEN_TPS, - within=NonNegativeReals) + dimen=2, initialize=mod.GEN_TPS, filter=lambda m, g, t: g in m.HYDRO_GENS + ) + mod.hydro_efficiency = Param(mod.HYDRO_GENS, within=NonNegativeReals) + mod.hydraulic_location = Param(mod.HYDRO_GENS, within=mod.WATER_CONNECTIONS) + mod.TurbinateFlow = Var(mod.HYDRO_GEN_TPS, within=NonNegativeReals) + mod.SpillFlow = Var(mod.HYDRO_GEN_TPS, within=NonNegativeReals) mod.Enforce_Hydro_Generation = Constraint( mod.HYDRO_GEN_TPS, - rule=lambda m, g, t: (m.DispatchGen[g, t] == - m.hydro_efficiency[g] * m.TurbinateFlow[g, t])) + rule=lambda m, g, t: ( + m.DispatchGen[g, t] == m.hydro_efficiency[g] * m.TurbinateFlow[g, t] + ), + ) mod.Enforce_Hydro_Extraction = Constraint( mod.HYDRO_GEN_TPS, - rule=lambda m, g, t: (m.TurbinateFlow[g, t] + - m.SpillFlow[g, t] == - m.DispatchWater[m.hydraulic_location[g], t])) + rule=lambda m, g, t: ( + m.TurbinateFlow[g, t] + m.SpillFlow[g, t] + == m.DispatchWater[m.hydraulic_location[g], t] + ), + ) def load_inputs(mod, switch_data, inputs_dir): @@ -463,53 +458,59 @@ def load_inputs(mod, switch_data, inputs_dir): """ switch_data.load_aug( - filename=os.path.join(inputs_dir, 'water_nodes.csv'), - auto_select=True, + filename=os.path.join(inputs_dir, "water_nodes.csv"), index=mod.WATER_NODES, - optional_params=['mod.wnode_constant_inflow', - 'mod.wnode_constant_consumption'], - param=(mod.wn_is_sink, mod.wnode_constant_inflow, - mod.wnode_constant_consumption)) + optional_params=["mod.wnode_constant_inflow", "mod.wnode_constant_consumption"], + param=( + mod.wn_is_sink, + mod.wnode_constant_inflow, + mod.wnode_constant_consumption, + ), + ) switch_data.load_aug( optional=True, - filename=os.path.join(inputs_dir, 'water_node_tp_flows.csv'), - auto_select=True, - optional_params=['mod.wnode_tp_inflow', 'mod.wnode_tp_consumption'], - param=(mod.wnode_tp_inflow, mod.wnode_tp_consumption)) + 
filename=os.path.join(inputs_dir, "water_node_tp_flows.csv"), + optional_params=["mod.wnode_tp_inflow", "mod.wnode_tp_consumption"], + param=(mod.wnode_tp_inflow, mod.wnode_tp_consumption), + ) switch_data.load_aug( - filename=os.path.join(inputs_dir, 'reservoirs.csv'), - auto_select=True, + filename=os.path.join(inputs_dir, "reservoirs.csv"), index=mod.RESERVOIRS, - param=(mod.res_min_vol, mod.res_max_vol, - mod.initial_res_vol, mod.final_res_vol)) - if os.path.exists(os.path.join(inputs_dir, 'reservoir_tp_data.csv')): + param=( + mod.res_min_vol, + mod.res_max_vol, + mod.initial_res_vol, + mod.final_res_vol, + ), + ) + if os.path.exists(os.path.join(inputs_dir, "reservoir_tp_data.csv")): raise NotImplementedError( "Code needs to be added to hydro_system module to enforce " "reservoir volume limits per timepoint." ) switch_data.load_aug( - filename=os.path.join(inputs_dir, 'reservoir_tp_data.csv'), + filename=os.path.join(inputs_dir, "reservoir_tp_data.csv"), optional=True, - auto_select=True, - optional_params=['mod.res_max_vol_tp', 'mod.res_min_vol_tp'], - param=(mod.res_max_vol_tp, mod.res_min_vol_tp)) + optional_params=["mod.res_max_vol_tp", "mod.res_min_vol_tp"], + param=(mod.res_max_vol_tp, mod.res_min_vol_tp), + ) switch_data.load_aug( - filename=os.path.join(inputs_dir, 'water_connections.csv'), - auto_select=True, + filename=os.path.join(inputs_dir, "water_connections.csv"), index=mod.WATER_CONNECTIONS, - param=(mod.water_node_from, mod.water_node_to, mod.wc_capacity)) + param=(mod.water_node_from, mod.water_node_to, mod.wc_capacity), + ) switch_data.load_aug( optional=True, - filename=os.path.join(inputs_dir, 'min_eco_flows.csv'), - auto_select=True, - param=(mod.min_eco_flow)) + filename=os.path.join(inputs_dir, "min_eco_flows.csv"), + param=(mod.min_eco_flow), + ) switch_data.load_aug( - filename=os.path.join(inputs_dir, 'hydro_generation_projects.csv'), - auto_select=True, + filename=os.path.join(inputs_dir, "hydro_generation_projects.csv"), index=mod.HYDRO_GENS, - param=(mod.hydro_efficiency, mod.hydraulic_location)) + param=(mod.hydro_efficiency, mod.hydraulic_location), + ) switch_data.load_aug( - filename=os.path.join(inputs_dir, 'spillage_penalty.csv'), - optional=True, auto_select=True, - param=(mod.spillage_penalty,) + filename=os.path.join(inputs_dir, "spillage_penalty.csv"), + optional=True, + param=(mod.spillage_penalty,), ) diff --git a/switch_model/generators/extensions/storage.py b/switch_model/generators/extensions/storage.py index 6bc3d47aa..56607e03d 100644 --- a/switch_model/generators/extensions/storage.py +++ b/switch_model/generators/extensions/storage.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016-2017 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. 
""" @@ -11,9 +11,15 @@ import os, collections from switch_model.financials import capital_recovery_factor as crf -dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones',\ - 'switch_model.financials', 'switch_model.energy_sources.properties', \ - 'switch_model.generators.core.build', 'switch_model.generators.core.dispatch' +dependencies = ( + "switch_model.timescales", + "switch_model.balancing.load_zones", + "switch_model.financials", + "switch_model.energy_sources.properties", + "switch_model.generators.core.build", + "switch_model.generators.core.dispatch", +) + def define_components(mod): """ @@ -60,6 +66,12 @@ def define_components(mod): Note that this describes the energy component and the overnight_cost describes the power component. + build_gen_energy_predetermined[(g, bld_yr) in + PREDETERMINED_GEN_BLD_YRS] is the amount of storage that has either been + installed previously, or is slated for installation and is not a free + decision variable. This is analogous to build_gen_predetermined, but in + units of energy of storage capacity (MWh) rather than power (MW). + BuildStorageEnergy[(g, bld_yr) in STORAGE_GEN_BLD_YRS] is a decision of how much energy capacity to build onto a storage project. This is analogous to BuildGen, but for energy rather than power. @@ -97,60 +109,73 @@ def define_components(mod): """ - mod.STORAGE_GENS = Set(within=mod.GENERATION_PROJECTS) + mod.STORAGE_GENS = Set(within=mod.GENERATION_PROJECTS, dimen=1) mod.STORAGE_GEN_PERIODS = Set( + dimen=2, within=mod.GEN_PERIODS, - initialize=lambda m: [(g, p) for g in m.STORAGE_GENS for p in m.PERIODS_FOR_GEN[g]] + initialize=lambda m: [ + (g, p) for g in m.STORAGE_GENS for p in m.PERIODS_FOR_GEN[g] + ], ) - mod.gen_storage_efficiency = Param( - mod.STORAGE_GENS, - within=PercentFraction) + mod.gen_storage_efficiency = Param(mod.STORAGE_GENS, within=PercentFraction) # TODO: rename to gen_charge_to_discharge_ratio? 
mod.gen_store_to_release_ratio = Param( - mod.STORAGE_GENS, - within=NonNegativeReals, - default=1.0) + mod.STORAGE_GENS, within=NonNegativeReals, default=1.0 + ) mod.gen_storage_energy_to_power_ratio = Param( - mod.STORAGE_GENS, - within=NonNegativeReals, - default=float("inf")) # inf is a flag that no value is specified (nan and None don't work) + mod.STORAGE_GENS, within=NonNegativeReals, default=float("inf") + ) # inf is a flag that no value is specified (nan and None don't work) mod.gen_storage_max_cycles_per_year = Param( - mod.STORAGE_GENS, - within=NonNegativeReals, - default=float('inf')) + mod.STORAGE_GENS, within=NonNegativeReals, default=float("inf") + ) # TODO: build this set up instead of filtering down, to improve performance mod.STORAGE_GEN_BLD_YRS = Set( dimen=2, initialize=mod.GEN_BLD_YRS, - filter=lambda m, g, bld_yr: g in m.STORAGE_GENS) + filter=lambda m, g, bld_yr: g in m.STORAGE_GENS, + ) mod.gen_storage_energy_overnight_cost = Param( - mod.STORAGE_GEN_BLD_YRS, - within=NonNegativeReals) - mod.min_data_check('gen_storage_energy_overnight_cost') + mod.STORAGE_GEN_BLD_YRS, within=NonNegativeReals + ) + mod.min_data_check("gen_storage_energy_overnight_cost") + mod.build_gen_energy_predetermined = Param( + mod.PREDETERMINED_GEN_BLD_YRS, within=NonNegativeReals + ) + + def bounds_BuildStorageEnergy(m, g, bld_yr): + if (g, bld_yr) in m.build_gen_energy_predetermined: + return ( + m.build_gen_energy_predetermined[g, bld_yr], + m.build_gen_energy_predetermined[g, bld_yr], + ) + else: + return (0, None) + mod.BuildStorageEnergy = Var( mod.STORAGE_GEN_BLD_YRS, - within=NonNegativeReals) + within=NonNegativeReals, + bounds=bounds_BuildStorageEnergy, + ) # Summarize capital costs of energy storage for the objective function # Note: A bug in 2.0.0b3 - 2.0.5 assigned costs that were several times # too high mod.StorageEnergyCapitalCost = Expression( - mod.STORAGE_GENS, mod.PERIODS, + mod.STORAGE_GENS, + mod.PERIODS, rule=lambda m, g, p: sum( m.BuildStorageEnergy[g, bld_yr] * m.gen_storage_energy_overnight_cost[g, bld_yr] * crf(m.interest_rate, m.gen_max_age[g]) for bld_yr in m.BLD_YRS_FOR_GEN_PERIOD[g, p] - ) + ), ) mod.StorageEnergyFixedCost = Expression( mod.PERIODS, - rule=lambda m, p: sum( - m.StorageEnergyCapitalCost[g, p] for g in m.STORAGE_GENS - ) + rule=lambda m, p: sum(m.StorageEnergyCapitalCost[g, p] for g in m.STORAGE_GENS), ) - mod.Cost_Components_Per_Period.append('StorageEnergyFixedCost') + mod.Cost_Components_Per_Period.append("StorageEnergyFixedCost") # 2.0.0b3 code: # mod.StorageEnergyInstallCosts = Expression( @@ -161,30 +186,29 @@ def define_components(mod): # for (g, bld_yr) in m.STORAGE_GEN_BLD_YRS)) mod.StorageEnergyCapacity = Expression( - mod.STORAGE_GENS, mod.PERIODS, + mod.STORAGE_GENS, + mod.PERIODS, rule=lambda m, g, period: sum( m.BuildStorageEnergy[g, bld_yr] for bld_yr in m.BLD_YRS_FOR_GEN_PERIOD[g, period] - ) + ), ) mod.STORAGE_GEN_TPS = Set( dimen=2, initialize=lambda m: ( - (g, tp) - for g in m.STORAGE_GENS - for tp in m.TPS_FOR_GEN[g])) + (g, tp) for g in m.STORAGE_GENS for tp in m.TPS_FOR_GEN[g] + ), + ) - mod.ChargeStorage = Var( - mod.STORAGE_GEN_TPS, - within=NonNegativeReals) + mod.ChargeStorage = Var(mod.STORAGE_GEN_TPS, within=NonNegativeReals) # Summarize storage charging for the energy balance equations # TODO: rename this StorageTotalCharging or similar (to indicate it's a # sum for a zone, not a net quantity for a project) def rule(m, z, t): # Construct and cache a set for summation as needed - if not hasattr(m,
'Storage_Charge_Summation_dict'): + if not hasattr(m, "Storage_Charge_Summation_dict"): m.Storage_Charge_Summation_dict = collections.defaultdict(set) for g, t2 in m.STORAGE_GEN_TPS: z2 = m.gen_load_zone[g] @@ -192,58 +216,73 @@ def rule(m, z, t): # Use pop to free memory relevant_projects = m.Storage_Charge_Summation_dict.pop((z, t), {}) return sum(m.ChargeStorage[g, t] for g in relevant_projects) + mod.StorageNetCharge = Expression(mod.LOAD_ZONES, mod.TIMEPOINTS, rule=rule) # Register net charging with zonal energy balance. Discharging is already # covered by DispatchGen. - mod.Zone_Power_Withdrawals.append('StorageNetCharge') + mod.Zone_Power_Withdrawals.append("StorageNetCharge") # use fixed energy/power ratio (# hours of capacity) when specified mod.Enforce_Fixed_Energy_Storage_Ratio = Constraint( mod.STORAGE_GEN_BLD_YRS, - rule=lambda m, g, y: - Constraint.Skip if m.gen_storage_energy_to_power_ratio[g] == float("inf") # no value specified - else - (m.BuildStorageEnergy[g, y] == m.gen_storage_energy_to_power_ratio[g] * m.BuildGen[g, y]) + rule=lambda m, g, y: Constraint.Skip + if m.gen_storage_energy_to_power_ratio[g] == float("inf") # no value specified + else ( + m.BuildStorageEnergy[g, y] + == m.gen_storage_energy_to_power_ratio[g] * m.BuildGen[g, y] + ), ) def Charge_Storage_Upper_Limit_rule(m, g, t): - return m.ChargeStorage[g,t] <= \ - m.DispatchUpperLimit[g, t] * m.gen_store_to_release_ratio[g] + return ( + m.ChargeStorage[g, t] + <= m.DispatchUpperLimit[g, t] * m.gen_store_to_release_ratio[g] + ) + mod.Charge_Storage_Upper_Limit = Constraint( - mod.STORAGE_GEN_TPS, - rule=Charge_Storage_Upper_Limit_rule) + mod.STORAGE_GEN_TPS, rule=Charge_Storage_Upper_Limit_rule + ) - mod.StateOfCharge = Var( - mod.STORAGE_GEN_TPS, - within=NonNegativeReals) + mod.StateOfCharge = Var(mod.STORAGE_GEN_TPS, within=NonNegativeReals) def Track_State_Of_Charge_rule(m, g, t): - return m.StateOfCharge[g, t] == \ - m.StateOfCharge[g, m.tp_previous[t]] + \ - (m.ChargeStorage[g, t] * m.gen_storage_efficiency[g] - - m.DispatchGen[g, t]) * m.tp_duration_hrs[t] + return ( + m.StateOfCharge[g, t] + == m.StateOfCharge[g, m.tp_previous[t]] + + ( + m.ChargeStorage[g, t] * m.gen_storage_efficiency[g] + - m.DispatchGen[g, t] + ) + * m.tp_duration_hrs[t] + ) + mod.Track_State_Of_Charge = Constraint( - mod.STORAGE_GEN_TPS, - rule=Track_State_Of_Charge_rule) + mod.STORAGE_GEN_TPS, rule=Track_State_Of_Charge_rule + ) def State_Of_Charge_Upper_Limit_rule(m, g, t): - return m.StateOfCharge[g, t] <= \ - m.StorageEnergyCapacity[g, m.tp_period[t]] + return m.StateOfCharge[g, t] <= m.StorageEnergyCapacity[g, m.tp_period[t]] + mod.State_Of_Charge_Upper_Limit = Constraint( - mod.STORAGE_GEN_TPS, - rule=State_Of_Charge_Upper_Limit_rule) + mod.STORAGE_GEN_TPS, rule=State_Of_Charge_Upper_Limit_rule + ) # batteries can only complete the specified number of cycles per year, averaged over each period mod.Battery_Cycle_Limit = Constraint( mod.STORAGE_GEN_PERIODS, rule=lambda m, g, p: - # solvers sometimes perform badly with infinite constraint - Constraint.Skip if m.gen_storage_max_cycles_per_year[g] == float('inf') - else ( - sum(m.DispatchGen[g, tp] * m.tp_duration_hrs[tp] for tp in m.TPS_IN_PERIOD[p]) - <= - m.gen_storage_max_cycles_per_year[g] * m.StorageEnergyCapacity[g, p] * m.period_length_years[p] + # solvers sometimes perform badly with infinite constraint + Constraint.Skip + if m.gen_storage_max_cycles_per_year[g] == float("inf") + else ( + sum( + m.DispatchGen[g, tp] * m.tp_duration_hrs[tp] + for tp in 
m.TPS_IN_PERIOD[p] ) + <= m.gen_storage_max_cycles_per_year[g] + * m.StorageEnergyCapacity[g, p] + * m.period_length_years[p] + ), ) @@ -252,7 +291,7 @@ def load_inputs(mod, switch_data, inputs_dir): Import storage parameters. Optional columns are noted with a *. - generation_projects_info.csv + gen_info.csv GENERATION_PROJECT, ... gen_storage_efficiency, gen_store_to_release_ratio*, gen_storage_energy_to_power_ratio*, gen_storage_max_cycles_per_year* @@ -261,6 +300,10 @@ def load_inputs(mod, switch_data, inputs_dir): GENERATION_PROJECT, build_year, ... gen_storage_energy_overnight_cost + gen_build_predetermined.csv + GENERATION_PROJECT, build_year, ..., + build_gen_energy_predetermined* + """ # TODO: maybe move these columns to a storage_gen_info file to avoid the weird index @@ -270,44 +313,62 @@ def load_inputs(mod, switch_data, inputs_dir): # gen_storage_efficiency has been specified, then require valid settings for all # STORAGE_GENS. switch_data.load_aug( - filename=os.path.join(inputs_dir, 'generation_projects_info.csv'), - auto_select=True, - optional_params=['gen_store_to_release_ratio', 'gen_storage_energy_to_power_ratio', 'gen_storage_max_cycles_per_year'], - param=(mod.gen_storage_efficiency, mod.gen_store_to_release_ratio, mod.gen_storage_energy_to_power_ratio, mod.gen_storage_max_cycles_per_year)) + filename=os.path.join(inputs_dir, "gen_info.csv"), + optional_params=[ + "gen_store_to_release_ratio", + "gen_storage_energy_to_power_ratio", + "gen_storage_max_cycles_per_year", + ], + param=( + mod.gen_storage_efficiency, + mod.gen_store_to_release_ratio, + mod.gen_storage_energy_to_power_ratio, + mod.gen_storage_max_cycles_per_year, + ), + ) # Base the set of storage projects on storage efficiency being specified. # TODO: define this in a more normal way - switch_data.data()['STORAGE_GENS'] = { - None: list(switch_data.data(name='gen_storage_efficiency').keys())} + switch_data.data()["STORAGE_GENS"] = { + None: list(switch_data.data(name="gen_storage_efficiency").keys()) + } + switch_data.load_aug( + filename=os.path.join(inputs_dir, "gen_build_costs.csv"), + param=(mod.gen_storage_energy_overnight_cost), + ) switch_data.load_aug( - filename=os.path.join(inputs_dir, 'gen_build_costs.csv'), - auto_select=True, - param=(mod.gen_storage_energy_overnight_cost)) + optional=True, + filename=os.path.join(inputs_dir, "gen_build_predetermined.csv"), + param=(mod.build_gen_energy_predetermined,), + ) def post_solve(instance, outdir): """ - Export storage build information to storage_builds.csv, and storage - dispatch info to storage_dispatch.csv + Export storage dispatch info to storage_dispatch.csv + + Note that construction information is reported by the generators.core.build + module, so is not reported here. 
""" import switch_model.reporting as reporting + reporting.write_table( - instance, instance.STORAGE_GEN_BLD_YRS, - output_file=os.path.join(outdir, "storage_builds.csv"), - headings=("generation_project", "period", "load_zone", - "IncrementalPowerCapacityMW", "IncrementalEnergyCapacityMWh", - "OnlinePowerCapacityMW", "OnlineEnergyCapacityMWh" ), - values=lambda m, g, bld_yr: ( - g, bld_yr, m.gen_load_zone[g], - m.BuildGen[g, bld_yr], m.BuildStorageEnergy[g, bld_yr], - m.GenCapacity[g, bld_yr], m.StorageEnergyCapacity[g, bld_yr] - )) - reporting.write_table( - instance, instance.STORAGE_GEN_TPS, + instance, + instance.STORAGE_GEN_TPS, output_file=os.path.join(outdir, "storage_dispatch.csv"), - headings=("generation_project", "timepoint", "load_zone", - "ChargeMW", "DischargeMW", "StateOfCharge"), + headings=( + "generation_project", + "timepoint", + "load_zone", + "ChargeMW", + "DischargeMW", + "StateOfCharge", + ), values=lambda m, g, t: ( - g, m.tp_timestamp[t], m.gen_load_zone[g], - m.ChargeStorage[g, t], m.DispatchGen[g, t], - m.StateOfCharge[g, t] - )) + g, + m.tp_timestamp[t], + m.gen_load_zone[g], + m.ChargeStorage[g, t], + m.DispatchGen[g, t], + m.StateOfCharge[g, t], + ), + ) diff --git a/switch_model/hawaii/batteries.py b/switch_model/hawaii/batteries.py index b7cec7e61..f6a48c758 100644 --- a/switch_model/hawaii/batteries.py +++ b/switch_model/hawaii/batteries.py @@ -2,6 +2,7 @@ import os from pyomo.environ import * + def define_components(m): # It's not clear how best to model battery cell replacement @@ -23,31 +24,38 @@ def define_components(m): # use the battery, so on average you can always have a refurbished battery on hand. # battery capital cost - m.battery_capital_cost_per_mwh_capacity = Param() + m.battery_capital_cost_per_mwh_capacity = Param(within=NonNegativeReals) # number of full cycles the battery can do; we assume shallower cycles do proportionally less damage - m.battery_n_cycles = Param() + m.battery_n_cycles = Param(within=NonNegativeReals) # maximum depth of discharge - m.battery_max_discharge = Param() + m.battery_max_discharge = Param(within=PercentFraction) # round-trip efficiency - m.battery_efficiency = Param() + m.battery_efficiency = Param(within=PercentFraction) # fastest time that storage can be emptied (down to max_discharge) - m.battery_min_discharge_time = Param() + m.battery_min_discharge_time = Param(within=NonNegativeReals) # we treat storage as infinitely long-lived (so we pay just interest on the loan), # but charge a usage fee corresponding to the reduction in life during each cycle # (i.e., enough to restore it to like-new status, on average) - m.battery_cost_per_mwh_cycled = Param(initialize = lambda m: - m.battery_capital_cost_per_mwh_capacity / (m.battery_n_cycles * m.battery_max_discharge) + m.battery_cost_per_mwh_cycled = Param( + within=NonNegativeReals, + initialize=lambda m: m.battery_capital_cost_per_mwh_capacity + / (m.battery_n_cycles * m.battery_max_discharge), ) - m.battery_fixed_cost_per_year = Param(initialize = lambda m: - m.battery_capital_cost_per_mwh_capacity * m.interest_rate + m.battery_fixed_cost_per_year = Param( + within=NonNegativeReals, + initialize=lambda m: m.battery_capital_cost_per_mwh_capacity * m.interest_rate, ) # amount of battery capacity to build and use (in MWh) # TODO: integrate this with other project data, so it can contribute to reserves, etc. 
m.BuildBattery = Var(m.LOAD_ZONES, m.PERIODS, within=NonNegativeReals) - m.Battery_Capacity = Expression(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: - sum(m.BuildBattery[z, pp] for pp in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p]) + m.Battery_Capacity = Expression( + m.LOAD_ZONES, + m.PERIODS, + rule=lambda m, z, p: sum( + m.BuildBattery[z, pp] for pp in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p] + ), ) # rate of charging/discharging battery @@ -58,50 +66,69 @@ def define_components(m): m.BatteryLevel = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=NonNegativeReals) # add storage to the zonal energy balance - m.Zone_Power_Injections.append('DischargeBattery') - m.Zone_Power_Withdrawals.append('ChargeBattery') + m.Zone_Power_Injections.append("DischargeBattery") + m.Zone_Power_Withdrawals.append("ChargeBattery") # add the batteries to the objective function - m.Battery_Variable_Cost = Expression(m.TIMEPOINTS, rule=lambda m, t: - sum(m.battery_cost_per_mwh_cycled * m.DischargeBattery[z, t] for z in m.LOAD_ZONES) + m.Battery_Variable_Cost = Expression( + m.TIMEPOINTS, + rule=lambda m, t: sum( + m.battery_cost_per_mwh_cycled * m.DischargeBattery[z, t] + for z in m.LOAD_ZONES + ), ) - m.Battery_Fixed_Cost_Annual = Expression(m.PERIODS, rule=lambda m, p: - sum(m.battery_fixed_cost_per_year * m.Battery_Capacity[z, p] for z in m.LOAD_ZONES) + m.Battery_Fixed_Cost_Annual = Expression( + m.PERIODS, + rule=lambda m, p: sum( + m.battery_fixed_cost_per_year * m.Battery_Capacity[z, p] + for z in m.LOAD_ZONES + ), ) - m.Cost_Components_Per_TP.append('Battery_Variable_Cost') - m.Cost_Components_Per_Period.append('Battery_Fixed_Cost_Annual') + m.Cost_Components_Per_TP.append("Battery_Variable_Cost") + m.Cost_Components_Per_Period.append("Battery_Fixed_Cost_Annual") # Calculate the state of charge based on conservation of energy # NOTE: this is circular for each day # NOTE: the overall level for the day is free, but the levels each timepoint are chained. 
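A sketch of the circular chaining mentioned in the notes above (the Battery_Level_Calc constraint follows): tp_previous of the first timepoint in a day wraps around to the last one, so the levels form a closed loop and only their common offset is free. Illustrative numbers, with per-timepoint energy in MWh and the efficiency applied on charging as in this module:

    tp_previous = {0: 3, 1: 0, 2: 1, 3: 2}  # timepoint 0 wraps back to 3
    charge = [10.0, 0.0, 0.0, 0.0]
    discharge = [0.0, 4.0, 4.0, 0.0]
    eff = 0.8  # round-trip efficiency; charged energy arrives at 80%

    level = {0: 5.0}  # the anchor level is arbitrary; only changes are chained
    for t in (1, 2, 3):
        p = tp_previous[t]
        level[t] = level[p] + eff * charge[p] - discharge[p]

    # circularity: stepping forward from the last timepoint returns to the first
    p = tp_previous[0]
    assert level[0] == level[p] + eff * charge[p] - discharge[p]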
- m.Battery_Level_Calc = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.BatteryLevel[z, t] == - m.BatteryLevel[z, m.tp_previous[t]] - + m.battery_efficiency * m.ChargeBattery[z, m.tp_previous[t]] - - m.DischargeBattery[z, m.tp_previous[t]] + m.Battery_Level_Calc = Constraint( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.BatteryLevel[z, t] + == m.BatteryLevel[z, m.tp_previous[t]] + + m.battery_efficiency * m.ChargeBattery[z, m.tp_previous[t]] + - m.DischargeBattery[z, m.tp_previous[t]], ) # limits on storage level - m.Battery_Min_Level = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - (1.0 - m.battery_max_discharge) * m.Battery_Capacity[z, m.tp_period[t]] - <= - m.BatteryLevel[z, t] + m.Battery_Min_Level = Constraint( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: (1.0 - m.battery_max_discharge) + * m.Battery_Capacity[z, m.tp_period[t]] + <= m.BatteryLevel[z, t], ) - m.Battery_Max_Level = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.BatteryLevel[z, t] - <= - m.Battery_Capacity[z, m.tp_period[t]] + m.Battery_Max_Level = Constraint( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.BatteryLevel[z, t] + <= m.Battery_Capacity[z, m.tp_period[t]], ) - m.Battery_Max_Charge = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.ChargeBattery[z, t] - <= - m.Battery_Capacity[z, m.tp_period[t]] * m.battery_max_discharge / m.battery_min_discharge_time + m.Battery_Max_Charge = Constraint( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.ChargeBattery[z, t] + <= m.Battery_Capacity[z, m.tp_period[t]] + * m.battery_max_discharge + / m.battery_min_discharge_time, ) - m.Battery_Max_Disharge = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.DischargeBattery[z, t] - <= - m.Battery_Capacity[z, m.tp_period[t]] * m.battery_max_discharge / m.battery_min_discharge_time + m.Battery_Max_Disharge = Constraint( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.DischargeBattery[z, t] + <= m.Battery_Capacity[z, m.tp_period[t]] + * m.battery_max_discharge + / m.battery_min_discharge_time, ) @@ -110,4 +137,4 @@ def load_inputs(mod, switch_data, inputs_dir): Import battery data from a .dat file. TODO: change this to allow multiple storage technologies. """ - switch_data.load(filename=os.path.join(inputs_dir, 'batteries.dat')) + switch_data.load(filename=os.path.join(inputs_dir, "batteries.dat")) diff --git a/switch_model/hawaii/batteries_fixed_calendar_life.py b/switch_model/hawaii/batteries_fixed_calendar_life.py index 61bb65fde..132c6dc79 100644 --- a/switch_model/hawaii/batteries_fixed_calendar_life.py +++ b/switch_model/hawaii/batteries_fixed_calendar_life.py @@ -3,35 +3,42 @@ from pyomo.environ import * from switch_model.financials import capital_recovery_factor as crf + def define_components(m): # TODO: change this to allow multiple storage technologies. 
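For reference, the per-MWh usage fee constructed in batteries.py above is sized so that each cycle pays its share of eventual cell replacement. With illustrative numbers (not from any Switch dataset):

    battery_capital_cost_per_mwh_capacity = 300_000.0  # $/MWh of capacity (assumed)
    battery_n_cycles = 3_000.0                         # rated full cycles (assumed)
    battery_max_discharge = 0.8                        # usable depth of discharge

    # each cycle uses up 1/n_cycles of the battery, but only max_discharge of
    # the nameplate energy is actually cycled, so the fee per MWh cycled is
    fee = battery_capital_cost_per_mwh_capacity / (
        battery_n_cycles * battery_max_discharge
    )
    print(fee)  # 125.0 dollars per MWh cycled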
# battery capital cost # TODO: accept a single battery_capital_cost_per_mwh_capacity value or the annual values shown here - m.BATTERY_CAPITAL_COST_YEARS = Set() # list of all years for which capital costs are available - m.battery_capital_cost_per_mwh_capacity_by_year = Param(m.BATTERY_CAPITAL_COST_YEARS) + # years that have capital cost data available + m.BATTERY_CAPITAL_COST_YEARS = Set(dimen=1) + m.battery_capital_cost_per_mwh_capacity_by_year = Param( + m.BATTERY_CAPITAL_COST_YEARS, within=NonNegativeReals + ) # TODO: merge this code with batteries.py and auto-select between fixed calendar life and cycle life # based on whether battery_n_years or battery_n_cycles is provided. (Or find some hybrid that can # handle both well?) # number of years the battery can last; we assume there is no limit on cycle life within this period - m.battery_n_years = Param() + m.battery_n_years = Param(within=NonNegativeReals) # maximum depth of discharge - m.battery_max_discharge = Param() + m.battery_max_discharge = Param(within=NonNegativeReals) # round-trip efficiency - m.battery_efficiency = Param() + m.battery_efficiency = Param(within=NonNegativeReals) # fastest time that storage can be emptied (down to max_discharge) - m.battery_min_discharge_time = Param() + m.battery_min_discharge_time = Param(within=NonNegativeReals) # amount of battery capacity to build and use (in MWh) # TODO: integrate this with other project data, so it can contribute to reserves, etc. m.BuildBattery = Var(m.LOAD_ZONES, m.PERIODS, within=NonNegativeReals) - m.Battery_Capacity = Expression(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: - sum( + m.Battery_Capacity = Expression( + m.LOAD_ZONES, + m.PERIODS, + rule=lambda m, z, p: sum( m.BuildBattery[z, bld_yr] - for bld_yr in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p] if bld_yr + m.battery_n_years > p - ) + for bld_yr in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p] + if bld_yr + m.battery_n_years > p + ), ) # rate of charging/discharging battery @@ -42,8 +49,8 @@ def define_components(m): m.BatteryLevel = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=NonNegativeReals) # add storage dispatch to the zonal energy balance - m.Zone_Power_Injections.append('DischargeBattery') - m.Zone_Power_Withdrawals.append('ChargeBattery') + m.Zone_Power_Injections.append("DischargeBattery") + m.Zone_Power_Withdrawals.append("ChargeBattery") # add the batteries to the objective function @@ -54,96 +61,115 @@ def define_components(m): m.BuildBattery[z, bld_yr] * m.battery_capital_cost_per_mwh_capacity_by_year[bld_yr] * crf(m.interest_rate, m.battery_n_years) - for bld_yr in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p] if bld_yr + m.battery_n_years > p - for z in m.LOAD_ZONES - ) + for bld_yr in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p] + if bld_yr + m.battery_n_years > p + for z in m.LOAD_ZONES + ), ) - m.Cost_Components_Per_Period.append('BatteryAnnualCost') + m.Cost_Components_Per_Period.append("BatteryAnnualCost") # Calculate the state of charge based on conservation of energy # NOTE: this is circular for each day # NOTE: the overall level for the day is free, but the levels each timepoint are chained. 
- m.Battery_Level_Calc = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.BatteryLevel[z, t] == - m.BatteryLevel[z, m.tp_previous[t]] - + m.tp_duration_hrs[t] * ( - m.battery_efficiency * m.ChargeBattery[z, m.tp_previous[t]] - - m.DischargeBattery[z, m.tp_previous[t]] - ) + m.Battery_Level_Calc = Constraint( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.BatteryLevel[z, t] + == m.BatteryLevel[z, m.tp_previous[t]] + + m.tp_duration_hrs[t] + * ( + m.battery_efficiency * m.ChargeBattery[z, m.tp_previous[t]] + - m.DischargeBattery[z, m.tp_previous[t]] + ), ) # limits on storage level - m.Battery_Min_Level = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - (1.0 - m.battery_max_discharge) * m.Battery_Capacity[z, m.tp_period[t]] - <= - m.BatteryLevel[z, t] + m.Battery_Min_Level = Constraint( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: (1.0 - m.battery_max_discharge) + * m.Battery_Capacity[z, m.tp_period[t]] + <= m.BatteryLevel[z, t], ) - m.Battery_Max_Level = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.BatteryLevel[z, t] - <= - m.Battery_Capacity[z, m.tp_period[t]] + m.Battery_Max_Level = Constraint( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.BatteryLevel[z, t] + <= m.Battery_Capacity[z, m.tp_period[t]], ) - m.Battery_Max_Charge_Rate = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.ChargeBattery[z, t] - <= + m.Battery_Max_Charge_Rate = Constraint( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.ChargeBattery[z, t] <= # changed 2018-02-20 to allow full discharge in min_discharge_time, # (previously pegged to battery_max_discharge) - m.Battery_Capacity[z, m.tp_period[t]] / m.battery_min_discharge_time + m.Battery_Capacity[z, m.tp_period[t]] / m.battery_min_discharge_time, ) - m.Battery_Max_Discharge_Rate = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.DischargeBattery[z, t] - <= - m.Battery_Capacity[z, m.tp_period[t]] / m.battery_min_discharge_time + m.Battery_Max_Discharge_Rate = Constraint( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.DischargeBattery[z, t] + <= m.Battery_Capacity[z, m.tp_period[t]] / m.battery_min_discharge_time, ) # how much could output/input be increased on short notice (to provide reserves) - m.BatterySlackUp = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.Battery_Capacity[z, m.tp_period[t]] / m.battery_min_discharge_time + m.BatterySlackUp = Expression( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.Battery_Capacity[z, m.tp_period[t]] + / m.battery_min_discharge_time - m.DischargeBattery[z, t] - + m.ChargeBattery[z, t] + + m.ChargeBattery[z, t], ) - m.BatterySlackDown = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.Battery_Capacity[z, m.tp_period[t]] / m.battery_min_discharge_time + m.BatterySlackDown = Expression( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.Battery_Capacity[z, m.tp_period[t]] + / m.battery_min_discharge_time - m.ChargeBattery[z, t] - + m.DischargeBattery[z, t] + + m.DischargeBattery[z, t], ) # assume batteries can only complete one full cycle per day, averaged over each period # (this was pegged to battery_max_discharge before 2018-02-20) - m.Battery_Cycle_Limit = Constraint(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: - sum(m.DischargeBattery[z, tp] * m.tp_duration_hrs[tp] for tp in m.TPS_IN_PERIOD[p]) - <= - m.Battery_Capacity[z, p] * m.period_length_hours[p] + m.Battery_Cycle_Limit = Constraint( + m.LOAD_ZONES, + m.PERIODS, + rule=lambda m, z, p: sum( + 
m.DischargeBattery[z, tp] * m.tp_duration_hrs[tp] + for tp in m.TPS_IN_PERIOD[p] + ) + <= m.Battery_Capacity[z, p] * m.period_length_hours[p], ) # Register with spinning reserves if it is available - if hasattr(m, 'Spinning_Reserve_Up_Provisions'): + if hasattr(m, "Spinning_Reserve_Up_Provisions"): m.BatterySpinningReserveUp = Expression( m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, b, t: - sum(m.BatterySlackUp[z, t] - for z in m.ZONES_IN_BALANCING_AREA[b]) + rule=lambda m, b, t: sum( + m.BatterySlackUp[z, t] for z in m.ZONES_IN_BALANCING_AREA[b] + ), ) - m.Spinning_Reserve_Up_Provisions.append('BatterySpinningReserveUp') + m.Spinning_Reserve_Up_Provisions.append("BatterySpinningReserveUp") m.BatterySpinningReserveDown = Expression( m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, b, t: \ - sum(m.BatterySlackDown[z, t] - for z in m.ZONES_IN_BALANCING_AREA[b]) + rule=lambda m, b, t: sum( + m.BatterySlackDown[z, t] for z in m.ZONES_IN_BALANCING_AREA[b] + ), ) - m.Spinning_Reserve_Down_Provisions.append('BatterySpinningReserveDown') + m.Spinning_Reserve_Down_Provisions.append("BatterySpinningReserveDown") def load_inputs(m, switch_data, inputs_dir): """ Import battery data from .dat and .csv files. """ - switch_data.load(filename=os.path.join(inputs_dir, 'batteries.dat')) + switch_data.load(filename=os.path.join(inputs_dir, "batteries.dat")) switch_data.load_aug( optional=False, - filename=os.path.join(inputs_dir, 'battery_capital_cost.csv'), - autoselect=True, + filename=os.path.join(inputs_dir, "battery_capital_cost.csv"), index=m.BATTERY_CAPITAL_COST_YEARS, - param=(m.battery_capital_cost_per_mwh_capacity_by_year,)) + param=(m.battery_capital_cost_per_mwh_capacity_by_year,), + ) diff --git a/switch_model/hawaii/demand_response_no_reserves.py b/switch_model/hawaii/demand_response_no_reserves.py index b83ac68a8..bd2f4b130 100644 --- a/switch_model/hawaii/demand_response_no_reserves.py +++ b/switch_model/hawaii/demand_response_no_reserves.py @@ -18,21 +18,35 @@ from pprint import pprint from pyomo.environ import * import switch_model.utilities as utilities -demand_module = None # will be set via command-line options + +demand_module = None # will be set via command-line options from . import util from .util import get + def define_arguments(argparser): - argparser.add_argument("--dr-flat-pricing", action='store_true', default=False, - help="Charge a constant (average) price for electricity, rather than varying hour by hour") - argparser.add_argument("--dr-total-cost-pricing", action='store_true', default=False, - help="Include both marginal and non-marginal(fixed) costs when setting prices") - argparser.add_argument("--dr-demand-module", default=None, + argparser.add_argument( + "--dr-flat-pricing", + action="store_true", + default=False, + help="Charge a constant (average) price for electricity, rather than varying hour by hour", + ) + argparser.add_argument( + "--dr-total-cost-pricing", + action="store_true", + default=False, + help="Include both marginal and non-marginal(fixed) costs when setting prices", + ) + argparser.add_argument( + "--dr-demand-module", + default=None, help="Name of module to use for demand-response bids. This should also be " "specified in the modules list, and should provide calibrate() and bid() functions. " "Pre-written options include constant_elasticity_demand_system or r_demand_system. 
" - "Specify one of these in the modules list and use --help again to see module-specific options.") + "Specify one of these in the modules list and use --help again to see module-specific options.", + ) + def define_components(m): @@ -72,24 +86,30 @@ def define_components(m): ################## # cost per MWh for unserved load (high) - m.dr_unserved_load_penalty_per_mwh = Param(initialize=10000) + m.dr_unserved_load_penalty_per_mwh = Param( + within=NonNegativeReals, initialize=10000 + ) # amount of unserved load during each timepoint m.DRUnservedLoad = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=NonNegativeReals) # total cost for unserved load - m.DR_Unserved_Load_Penalty = Expression(m.TIMEPOINTS, rule=lambda m, tp: - sum(m.DRUnservedLoad[z, tp] * m.dr_unserved_load_penalty_per_mwh for z in m.LOAD_ZONES) + m.DR_Unserved_Load_Penalty = Expression( + m.TIMEPOINTS, + rule=lambda m, tp: sum( + m.DRUnservedLoad[z, tp] * m.dr_unserved_load_penalty_per_mwh + for z in m.LOAD_ZONES + ), ) # add unserved load to the zonal energy balance - m.Zone_Power_Injections.append('DRUnservedLoad') + m.Zone_Power_Injections.append("DRUnservedLoad") # add the unserved load penalty to the model's objective function - m.Cost_Components_Per_TP.append('DR_Unserved_Load_Penalty') + m.Cost_Components_Per_TP.append("DR_Unserved_Load_Penalty") ################### # Price Responsive Demand bids ################## # list of all bids that have been received from the demand system - m.DR_BID_LIST = Set(initialize = [], ordered=True) + m.DR_BID_LIST = Set(dimen=1, initialize=[], ordered=True) # we need an explicit indexing set for everything that depends on DR_BID_LIST # so we can reconstruct it (and them) each time we add an element to DR_BID_LIST # (not needed, and actually doesn't work -- reconstruct() fails for sets) @@ -99,16 +119,24 @@ def define_components(m): # data for the individual bids; each load_zone gets one bid for each timeseries, # and each bid covers all the timepoints in that timeseries. So we just record # the bid for each timepoint for each load_zone. 
- m.dr_bid = Param(m.DR_BID_LIST, m.LOAD_ZONES, m.TIMEPOINTS, mutable=True) + m.dr_bid = Param( + m.DR_BID_LIST, m.LOAD_ZONES, m.TIMEPOINTS, within=Reals, mutable=True + ) # price used to get this bid (only kept for reference) - m.dr_price = Param(m.DR_BID_LIST, m.LOAD_ZONES, m.TIMEPOINTS, mutable=True) + m.dr_price = Param( + m.DR_BID_LIST, m.LOAD_ZONES, m.TIMEPOINTS, within=Reals, mutable=True + ) # the private benefit of serving each bid - m.dr_bid_benefit = Param(m.DR_BID_LIST, m.LOAD_ZONES, m.TIMESERIES, mutable=True) + m.dr_bid_benefit = Param( + m.DR_BID_LIST, m.LOAD_ZONES, m.TIMESERIES, within=Reals, mutable=True + ) # weights to assign to the bids for each timeseries when constructing an optimal demand profile - m.DRBidWeight = Var(m.DR_BID_LIST, m.LOAD_ZONES, m.TIMESERIES, within=NonNegativeReals) + m.DRBidWeight = Var( + m.DR_BID_LIST, m.LOAD_ZONES, m.TIMESERIES, within=NonNegativeReals + ) # def DR_Convex_Bid_Weight_rule(m, z, ts): # if len(m.DR_BID_LIST) == 0: @@ -119,9 +147,12 @@ def define_components(m): # return (sum(m.DRBidWeight[b, z, ts] for b in m.DR_BID_LIST) == 1) # # choose a convex combination of bids for each zone and timeseries - m.DR_Convex_Bid_Weight = Constraint(m.LOAD_ZONES, m.TIMESERIES, rule=lambda m, z, ts: - Constraint.Skip if len(m.DR_BID_LIST) == 0 - else (sum(m.DRBidWeight[b, z, ts] for b in m.DR_BID_LIST) == 1) + m.DR_Convex_Bid_Weight = Constraint( + m.LOAD_ZONES, + m.TIMESERIES, + rule=lambda m, z, ts: Constraint.Skip + if len(m.DR_BID_LIST) == 0 + else (sum(m.DRBidWeight[b, z, ts] for b in m.DR_BID_LIST) == 1), ) # Since we don't have differentiated prices for each zone, we have to use the same @@ -130,8 +161,11 @@ def define_components(m): # Note: LOAD_ZONES is not an ordered set, so we have to use a trick to get a single # arbitrary one to refer to (next(iter(m.LOAD_ZONES)) would also work). m.DR_Load_Zone_Shared_Bid_Weight = Constraint( - m.DR_BID_LIST, m.LOAD_ZONES, m.TIMESERIES, rule=lambda m, b, z, ts: - m.DRBidWeight[b, z, ts] == m.DRBidWeight[b, list(m.LOAD_ZONES)[0], ts] + m.DR_BID_LIST, + m.LOAD_ZONES, + m.TIMESERIES, + rule=lambda m, b, z, ts: m.DRBidWeight[b, z, ts] + == m.DRBidWeight[b, list(m.LOAD_ZONES)[0], ts], ) # For flat-price models, we have to use the same weight for all timeseries within the @@ -139,16 +173,20 @@ def define_components(m): # induce different adjustments in individual timeseries. if m.options.dr_flat_pricing: m.DR_Flat_Bid_Weight = Constraint( - m.DR_BID_LIST, m.LOAD_ZONES, m.TIMESERIES, rule=lambda m, b, z, ts: - m.DRBidWeight[b, z, ts] - == m.DRBidWeight[b, z, m.tp_ts[m.TPS_IN_PERIOD[m.ts_period[ts]].first()]] + m.DR_BID_LIST, + m.LOAD_ZONES, + m.TIMESERIES, + rule=lambda m, b, z, ts: m.DRBidWeight[b, z, ts] + == m.DRBidWeight[b, z, m.tp_ts[m.TPS_IN_PERIOD[m.ts_period[ts]].first()]], ) - # Optimal level of demand, calculated from available bids (negative, indicating consumption) - m.FlexibleDemand = Expression(m.LOAD_ZONES, m.TIMEPOINTS, - rule=lambda m, z, tp: - sum(m.DRBidWeight[b, z, m.tp_ts[tp]] * m.dr_bid[b, z, tp] for b in m.DR_BID_LIST) + m.FlexibleDemand = Expression( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, tp: sum( + m.DRBidWeight[b, z, m.tp_ts[tp]] * m.dr_bid[b, z, tp] for b in m.DR_BID_LIST + ), ) # replace zone_demand_mw with FlexibleDemand in the energy balance constraint @@ -157,32 +195,38 @@ def define_components(m): # a certain ordering. 
# m.Zone_Power_Withdrawals.remove('zone_demand_mw') # m.Zone_Power_Withdrawals.append('FlexibleDemand') - idx = m.Zone_Power_Withdrawals.index('zone_demand_mw') - m.Zone_Power_Withdrawals[idx] = 'FlexibleDemand' + idx = m.Zone_Power_Withdrawals.index("zone_demand_mw") + m.Zone_Power_Withdrawals[idx] = "FlexibleDemand" # private benefit of the electricity consumption # (i.e., willingness to pay for the current electricity supply) # reported as negative cost, i.e., positive benefit # also divide by number of timepoints in the timeseries # to convert from a cost per timeseries to a cost per timepoint. - m.DR_Welfare_Cost = Expression(m.TIMEPOINTS, rule=lambda m, tp: - (-1.0) - * sum(m.DRBidWeight[b, z, m.tp_ts[tp]] * m.dr_bid_benefit[b, z, m.tp_ts[tp]] - for b in m.DR_BID_LIST for z in m.LOAD_ZONES) - * m.tp_duration_hrs[tp] / m.ts_num_tps[m.tp_ts[tp]] + m.DR_Welfare_Cost = Expression( + m.TIMEPOINTS, + rule=lambda m, tp: (-1.0) + * sum( + m.DRBidWeight[b, z, m.tp_ts[tp]] * m.dr_bid_benefit[b, z, m.tp_ts[tp]] + for b in m.DR_BID_LIST + for z in m.LOAD_ZONES + ) + * m.tp_duration_hrs[tp] + / m.ts_num_tps[m.tp_ts[tp]], ) # add the private benefit to the model's objective function - m.Cost_Components_Per_TP.append('DR_Welfare_Cost') + m.Cost_Components_Per_TP.append("DR_Welfare_Cost") # annual costs, recovered via baseline prices # but not included in switch's calculation of costs - m.other_costs = Param(m.PERIODS, mutable=True, default=0.0) - m.Cost_Components_Per_Period.append('other_costs') + m.other_costs = Param(m.PERIODS, within=Reals, mutable=True, default=0.0) + m.Cost_Components_Per_Period.append("other_costs") # variable to store the baseline data m.base_data = None + def pre_iterate(m): # could all prev values be stored in post_iterate? # then this func would just alter the model based on values calculated in post_iterate @@ -195,20 +239,28 @@ def pre_iterate(m): # store various properties from previous model solution for later reference m.prev_marginal_cost = ( - {(z, tp): None for z in m.LOAD_ZONES for tp in m.TIMEPOINTS} # model hasn't been solved yet - if m.iteration_number == 0 else - {(z, tp): electricity_marginal_cost(m, z, tp) for z in m.LOAD_ZONES for tp in m.TIMEPOINTS} + { + (z, tp): None for z in m.LOAD_ZONES for tp in m.TIMEPOINTS + } # model hasn't been solved yet + if m.iteration_number == 0 + else { + (z, tp): electricity_marginal_cost(m, z, tp) + for z in m.LOAD_ZONES + for tp in m.TIMEPOINTS + } ) m.prev_demand = ( - {(z, tp): None for z in m.LOAD_ZONES for tp in m.TIMEPOINTS} # model hasn't been solved yet - if m.iteration_number == 0 else - {(z, tp): electricity_demand(m, z, tp) for z in m.LOAD_ZONES for tp in m.TIMEPOINTS} - ) - m.prev_SystemCost = ( - None - if m.iteration_number == 0 else - value(m.SystemCost) + { + (z, tp): None for z in m.LOAD_ZONES for tp in m.TIMEPOINTS + } # model hasn't been solved yet + if m.iteration_number == 0 + else { + (z, tp): electricity_demand(m, z, tp) + for z in m.LOAD_ZONES + for tp in m.TIMEPOINTS + } ) + m.prev_SystemCost = None if m.iteration_number == 0 else value(m.SystemCost) if m.iteration_number > 0: # store cost of previous solution before it gets altered by update_demand() @@ -228,65 +280,81 @@ def pre_iterate(m): # different solution that would be much better than the one we have now. # This ignores other solutions far away, where an integer variable is flipped, # but that's OK. (?) 
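# Minimal sketch of the bookkeeping done below, with hypothetical numbers:
# pair last iteration's marginal cost with last iteration's demand, add the
# (negative) welfare term, and discount the total to the base year.
mc, demand = 95.0, 1000.0  # $/MWh and MWh from the previous solution
welfare_cost = -2000.0     # DR_Welfare_Cost for this timepoint
to_base_year = 0.5         # bring_timepoint_costs_to_base_year
prev_cost_tp = (mc * demand + welfare_cost) * to_base_year  # 46500.0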
- prev_cost = value(sum( - ( - sum( - m.prev_marginal_cost[z, tp] * m.prev_demand[z, tp] + prev_cost = value( + sum( + ( + sum( + m.prev_marginal_cost[z, tp] * m.prev_demand[z, tp] for z in m.LOAD_ZONES - ) + m.DR_Welfare_Cost[tp] - ) * m.bring_timepoint_costs_to_base_year[tp] + ) + + m.DR_Welfare_Cost[tp] + ) + * m.bring_timepoint_costs_to_base_year[tp] for ts in m.TIMESERIES - for tp in m.TPS_IN_TS[ts] - )) + for tp in m.TPS_IN_TS[ts] + ) + ) # get the next bid and attach it to the model update_demand(m) - b = m.DR_BID_LIST.last() # current bid number + b = m.DR_BID_LIST.last() # current bid number if m.iteration_number > 0: # get an estimate of best possible net cost of serving load # (if we could completely serve the last bid at the prices we quoted, # that would be an optimum; the actual cost may be higher but never lower) - best_cost = value(sum( + best_cost = value( sum( - m.prev_marginal_cost[z, tp] * m.dr_bid[b, z, tp] - - m.dr_bid_benefit[b, z, ts] * m.tp_duration_hrs[tp] / m.ts_num_tps[ts] - for z in m.LOAD_ZONES + sum( + m.prev_marginal_cost[z, tp] * m.dr_bid[b, z, tp] + - m.dr_bid_benefit[b, z, ts] + * m.tp_duration_hrs[tp] + / m.ts_num_tps[ts] + for z in m.LOAD_ZONES + ) + * m.bring_timepoint_costs_to_base_year[tp] + for ts in m.TIMESERIES + for tp in m.TPS_IN_TS[ts] ) - * m.bring_timepoint_costs_to_base_year[tp] - for ts in m.TIMESERIES - for tp in m.TPS_IN_TS[ts] - )) - print("lower bound={}, previous cost={}, ratio={}".format( - best_cost, prev_cost, prev_cost/best_cost)) + ) + print( + "lower bound={}, previous cost={}, ratio={}".format( + best_cost, prev_cost, prev_cost / best_cost + ) + ) # Check for convergence -- optimality gap is less than 0.1% of best possible cost # (which may be negative) # TODO: index this to the direct costs, rather than the direct costs minus benefits # as it stands, it converges with about $50,000,000 optimality gap, which is about # 3% of direct costs. 
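# Sketch of the relative-gap test computed below (values are made up):
prev_cost = 1.002e9  # cost of the most recent solution
best_cost = 1.000e9  # lower bound implied by pricing out the newest bid
gap = (prev_cost - best_cost) / abs(best_cost)  # 0.002
converged = gap <= 0.0001  # False here, so another iteration runs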
- converged = (m.iteration_number > 0 and (prev_cost - best_cost)/abs(best_cost) <= 0.0001) + converged = ( + m.iteration_number > 0 and (prev_cost - best_cost) / abs(best_cost) <= 0.0001 + ) return converged + def post_iterate(m): print("\n\n=======================================================") print("Solved model") print("=======================================================") print("Total cost: ${v:,.0f}".format(v=value(m.SystemCost))) - # TODO: # maybe calculate prices for the next round here and attach them to the # model, so they can be reported as final prices (currently we don't # report the final prices, only the prices prior to the final model run) - SystemCost = value(m.SystemCost) # calculate once to save time - print("prev_SystemCost={}, SystemCost={}, ratio={}".format( - m.prev_SystemCost, SystemCost, - None if m.prev_SystemCost is None else SystemCost/m.prev_SystemCost - )) + SystemCost = value(m.SystemCost) # calculate once to save time + print( + "prev_SystemCost={}, SystemCost={}, ratio={}".format( + m.prev_SystemCost, + SystemCost, + None if m.prev_SystemCost is None else SystemCost / m.prev_SystemCost, + ) + ) tag = m.options.scenario_name outputs_dir = m.options.outputs_dir @@ -296,12 +364,23 @@ def post_iterate(m): util.create_table( output_file=os.path.join(outputs_dir, "bid_{t}.csv".format(t=tag)), headings=( - "bid_num", "load_zone", "timeseries", "timepoint", "marginal_cost", "price", - "bid_load", "wtp", "base_price", "base_load" - ) + "bid_num", + "load_zone", + "timeseries", + "timepoint", + "marginal_cost", + "price", + "bid_load", + "wtp", + "base_price", + "base_load", + ), ) - b = m.DR_BID_LIST.last() # current bid - util.append_table(m, m.LOAD_ZONES, m.TIMEPOINTS, + b = m.DR_BID_LIST.last() # current bid + util.append_table( + m, + m.LOAD_ZONES, + m.TIMEPOINTS, output_file=os.path.join(outputs_dir, "bid_{t}.csv".format(t=tag)), values=lambda m, z, tp: ( b, @@ -314,18 +393,28 @@ def post_iterate(m): m.dr_bid_benefit[b, z, m.tp_ts[tp]], m.base_data_dict[z, tp][1], m.base_data_dict[z, tp][0], - ) + ), ) # store the current bid weights for future reference if m.iteration_number == 0: util.create_table( output_file=os.path.join(outputs_dir, "bid_weights_{t}.csv".format(t=tag)), - headings=("iteration", "load_zone", "timeseries", "bid_num", "weight") + headings=("iteration", "load_zone", "timeseries", "bid_num", "weight"), ) - util.append_table(m, m.LOAD_ZONES, m.TIMESERIES, m.DR_BID_LIST, + util.append_table( + m, + m.LOAD_ZONES, + m.TIMESERIES, + m.DR_BID_LIST, output_file=os.path.join(outputs_dir, "bid_weights_{t}.csv".format(t=tag)), - values=lambda m, z, ts, b: (len(m.DR_BID_LIST), z, ts, b, m.DRBidWeight[b, z, ts]) + values=lambda m, z, ts, b: ( + len(m.DR_BID_LIST), + z, + ts, + b, + m.DRBidWeight[b, z, ts], + ), ) # report the dual costs @@ -346,24 +435,28 @@ def update_demand(m): and marginal costs to calibrate the demand system, and then replaces the fixed demand with the flexible demand system. 
""" - first_run = (m.base_data is None) + first_run = m.base_data is None print("attaching new demand bid to model") if first_run: calibrate_model(m) - else: # not first run + else: # not first run # print "m.DRBidWeight (first day):" # print [(b, z, ts, value(m.DRBidWeight[b, z, ts])) # for b in m.DR_BID_LIST # for z in m.LOAD_ZONES # for ts in m.TIMESERIES] print("m.DRBidWeight:") - pprint([(z, ts, [(b, value(m.DRBidWeight[b, z, ts])) for b in m.DR_BID_LIST]) - for z in m.LOAD_ZONES - for ts in m.TIMESERIES]) - #print "DR_Convex_Bid_Weight:" - #m.DR_Convex_Bid_Weight.pprint() + pprint( + [ + (z, ts, [(b, value(m.DRBidWeight[b, z, ts])) for b in m.DR_BID_LIST]) + for z in m.LOAD_ZONES + for ts in m.TIMESERIES + ] + ) + # print "DR_Convex_Bid_Weight:" + # m.DR_Convex_Bid_Weight.pprint() # get new bids from the demand system at the current prices bids = get_bids(m) @@ -402,26 +495,34 @@ def total_direct_costs_per_year(m, period): in each zone.) """ return value( - sum(getattr(m, annual_cost)[period] for annual_cost in m.Cost_Components_Per_Period) + sum( + getattr(m, annual_cost)[period] + for annual_cost in m.Cost_Components_Per_Period + ) + sum( getattr(m, tp_cost)[t] * m.tp_weight_in_year[t] for t in m.TPS_IN_PERIOD[period] - for tp_cost in m.Cost_Components_Per_TP - if tp_cost != "DR_Welfare_Cost" + for tp_cost in m.Cost_Components_Per_TP + if tp_cost != "DR_Welfare_Cost" ) ) + def electricity_marginal_cost(m, z, tp): """Return marginal cost of production per MWh in load_zone z during timepoint tp.""" - return m.dual[m.Energy_Balance[z, tp]]/m.bring_timepoint_costs_to_base_year[tp] + return m.dual[m.Energy_Balance[z, tp]] / m.bring_timepoint_costs_to_base_year[tp] + def electricity_demand(m, z, tp): """Return total electricity consumption by customers in load_zone z during timepoint tp.""" - return value(sum( - getattr(m, component)[z, tp] - for component in ('zone_demand_mw', 'FlexibleDemand') - if component in m.Zone_Power_Withdrawals - )) + return value( + sum( + getattr(m, component)[z, tp] + for component in ("zone_demand_mw", "FlexibleDemand") + if component in m.Zone_Power_Withdrawals + ) + ) + def make_prices(m): """Calculate hourly prices for customers, based on the current model configuration. @@ -434,20 +535,20 @@ def make_prices(m): # calculate the ratio between potential revenue # at marginal-cost pricing and total costs for each period mc_annual_revenue = { - (z, p): - sum( + (z, p): sum( electricity_demand(m, z, tp) * electricity_marginal_cost(m, z, tp) * m.tp_weight_in_year[tp] for tp in m.TPS_IN_PERIOD[p] ) - for z in m.LOAD_ZONES for p in m.PERIODS + for z in m.LOAD_ZONES + for p in m.PERIODS } # note: it would be nice to do this on a zonal basis, but production costs # are only available model-wide. 
price_scalar = { p: total_direct_costs_per_year(m, p) - / sum(mc_annual_revenue[z, p] for z in m.LOAD_ZONES) + / sum(mc_annual_revenue[z, p] for z in m.LOAD_ZONES) for p in m.PERIODS } else: @@ -457,41 +558,43 @@ def make_prices(m): # calculate hourly prices hourly_prices = { (z, tp): price_scalar[m.tp_period[tp]] * electricity_marginal_cost(m, z, tp) - for z in m.LOAD_ZONES for tp in m.TIMEPOINTS + for z in m.LOAD_ZONES + for tp in m.TIMEPOINTS } if m.options.dr_flat_pricing: # use flat prices each year # calculate annual average prices (total revenue / total kWh) average_prices = { - (z, p): - sum( + (z, p): sum( hourly_prices[z, tp] * electricity_demand(m, z, tp) * m.tp_weight_in_year[tp] for tp in m.TPS_IN_PERIOD[p] ) - / - sum( - electricity_demand(m, z, tp) - * m.tp_weight_in_year[tp] + / sum( + electricity_demand(m, z, tp) * m.tp_weight_in_year[tp] for tp in m.TPS_IN_PERIOD[p] ) - for z in m.LOAD_ZONES for p in m.PERIODS + for z in m.LOAD_ZONES + for p in m.PERIODS } prices = { (z, tp): average_prices[z, m.tp_period[tp]] - for z in m.LOAD_ZONES for tp in m.TIMEPOINTS + for z in m.LOAD_ZONES + for tp in m.TIMEPOINTS } else: prices = hourly_prices return prices + annual_revenue = None + def calibrate_model(m): - global annual_revenue # save a copy for debugging later + global annual_revenue # save a copy for debugging later """ Calibrate the demand system and add it to the model. Also calculate other_costs (utility costs not modeled by Switch). @@ -510,26 +613,31 @@ def calibrate_model(m): # For now, we just assume the base price was $180/MWh, which is HECO's average price in # 2007 according to EIA form 826. # TODO: add in something for the fixed costs, to make marginal cost commensurate with the base_price - #baseCosts = [m.dual[m.EnergyBalance[z, tp]] for z in m.LOAD_ZONES for tp in m.TIMEPOINTS] + # baseCosts = [m.dual[m.EnergyBalance[z, tp]] for z in m.LOAD_ZONES for tp in m.TIMEPOINTS] base_price = 180 # average retail price for 2007 ($/MWh) - m.base_data = [( - z, - ts, - [m.zone_demand_mw[z, tp] for tp in m.TPS_IN_TS[ts]], - [base_price] * len(m.TPS_IN_TS[ts]) - ) for z in m.LOAD_ZONES for ts in m.TIMESERIES] + m.base_data = [ + ( + z, + ts, + [m.zone_demand_mw[z, tp] for tp in m.TPS_IN_TS[ts]], + [base_price] * len(m.TPS_IN_TS[ts]), + ) + for z in m.LOAD_ZONES + for ts in m.TIMESERIES + ] # make a dict of base_data, indexed by load_zone and timepoint, for later reference m.base_data_dict = { (z, tp): (m.zone_demand_mw[z, tp], base_price) - for z in m.LOAD_ZONES for tp in m.TIMEPOINTS + for z in m.LOAD_ZONES + for tp in m.TIMEPOINTS } # calculate costs that are included in the base prices but not reflected in Switch. # note: during the first iteration, other_costs = 0, so this calculates a value for # other_costs that will bring total_direct_costs_per_year() up to the baseline # annual_revenue level. 
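# Sketch of the calibration identity used below (hypothetical totals):
# other_costs absorbs whatever baseline revenue is not explained by modeled
# costs, so total_direct_costs_per_year() matches baseline revenue per period.
baseline_revenue = 2.0e9  # sum of load * base_price * tp_weight_in_year
modeled_costs = 1.7e9     # total_direct_costs_per_year(m, p) on first iteration
other_costs_p = baseline_revenue - modeled_costs  # 3.0e8, recovered via prices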
- annual_revenue = dict(zip(list(m.PERIODS), [0.0]*len(m.PERIODS))) + annual_revenue = dict(zip(list(m.PERIODS), [0.0] * len(m.PERIODS))) for (z, tp), (load, price) in utilities.iteritems(m.base_data_dict): annual_revenue[m.tp_period[tp]] += load * price * m.tp_weight_in_year[tp] for p in m.PERIODS: @@ -538,7 +646,7 @@ def calibrate_model(m): m.other_costs[p] = 0.0 # calibrate the demand module - #demand_module.calibrate(m.base_data, m.options.dr_elasticity_scenario) + # demand_module.calibrate(m.base_data, m.options.dr_elasticity_scenario) demand_module.calibrate(m, m.base_data) @@ -556,7 +664,6 @@ def get_bids(m): # TODO: change make_prices to use base_price in iteration 0, # instead of doing it below - for i, (z, ts, base_load, base_price) in enumerate(m.base_data): # if i < 2: @@ -613,8 +720,8 @@ def add_bids(m, bids): # print "timepoints[i+1]: "+str(timepoints[i+1]) # note: demand is a python list or array, which uses 0-based indexing, but # timepoints is a pyomo set, which uses 1-based indexing, so we have to shift the index by 1. - m.dr_bid[b, z, timepoints[i+1]] = d - m.dr_price[b, z, timepoints[i+1]] = prices[i] + m.dr_bid[b, z, timepoints[i + 1]] = d + m.dr_price[b, z, timepoints[i + 1]] = prices[i] print("len(m.DR_BID_LIST): {l}".format(l=len(m.DR_BID_LIST))) print("m.DR_BID_LIST: {b}".format(b=[x for x in m.DR_BID_LIST])) @@ -634,6 +741,7 @@ def add_bids(m, bids): m.SystemCostPerPeriod.reconstruct() m.SystemCost.reconstruct() + def reconstruct_energy_balance(m): """Reconstruct Energy_Balance constraint, preserving dual values (if present).""" # copy the existing Energy_Balance object @@ -661,28 +769,34 @@ def write_batch_results(m): util.append_table(m, output_file=output_file, values=lambda m: summary_values(m)) + def summary_headers(m): return ( ("tag", "iteration", "total_cost") - +tuple('total_direct_costs_per_year_'+str(p) for p in m.PERIODS) - +tuple('other_costs_'+str(p) for p in m.PERIODS) - +tuple('DR_Welfare_Cost_'+str(p) for p in m.PERIODS) - +tuple('customer_payments_'+str(p) for p in m.PERIODS) - +tuple('MWh_sold_'+str(p) for p in m.PERIODS) + + tuple("total_direct_costs_per_year_" + str(p) for p in m.PERIODS) + + tuple("other_costs_" + str(p) for p in m.PERIODS) + + tuple("DR_Welfare_Cost_" + str(p) for p in m.PERIODS) + + tuple("customer_payments_" + str(p) for p in m.PERIODS) + + tuple("MWh_sold_" + str(p) for p in m.PERIODS) ) + def summary_values(m): demand_components = [ - c for c in ('zone_demand_mw', 'ShiftDemand', 'ChargeEVs', 'FlexibleDemand') if hasattr(m, c) + c + for c in ("zone_demand_mw", "ShiftDemand", "ChargeEVs", "FlexibleDemand") + if hasattr(m, c) ] values = [] # tag (configuration) - values.extend([ - m.options.scenario_name, - m.iteration_number, - m.SystemCost # total cost (all periods) - ]) + values.extend( + [ + m.options.scenario_name, + m.iteration_number, + m.SystemCost,  # total cost (all periods) + ] + ) # direct costs (including "other") values.extend([total_direct_costs_per_year(m, p) for p in m.PERIODS]) @@ -691,10 +805,15 @@ def summary_values(m): values.extend([m.other_costs[p] for p in m.PERIODS]) # DR_Welfare_Cost - values.extend([ - sum(m.DR_Welfare_Cost[t] * m.tp_weight_in_year[t] for t in m.TPS_IN_PERIOD[p]) - for p in m.PERIODS - ]) + values.extend( + [ + sum( + m.DR_Welfare_Cost[t] * m.tp_weight_in_year[t] + for t in m.TPS_IN_PERIOD[p] + ) + for p in m.PERIODS + ] + ) # payments by customers ([expected load] * [price offered for that load]) # TODO: this uses the price from just _before_ the final solution. 
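# Toy example of the customer-payment term tabulated below: expected load
# times the price quoted for that load, expanded to annual terms (all values
# hypothetical).
load_mw, price_per_mwh = 950.0, 132.0  # electricity_demand and dr_price
tp_weight_in_year = 8.0                # hours per year this timepoint represents
payment = load_mw * price_per_mwh * tp_weight_in_year  # 1003200.0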
@@ -704,71 +823,95 @@ def summary_values(m): if m.iteration_number == 0: values.extend([None for p in m.PERIODS]) else: - values.extend([ + values.extend( + [ + sum( + electricity_demand(m, z, tp) + * m.dr_price[last_bid, z, tp] + * m.tp_weight_in_year[tp] + for z in m.LOAD_ZONES + for tp in m.TPS_IN_PERIOD[p] + ) + for p in m.PERIODS + ] + ) + + # total MWh delivered each year + values.extend( + [ sum( - electricity_demand(m, z, tp) * m.dr_price[last_bid, z, tp] * m.tp_weight_in_year[tp] - for z in m.LOAD_ZONES for tp in m.TPS_IN_PERIOD[p] + electricity_demand(m, z, tp) * m.tp_weight_in_year[tp] + for z in m.LOAD_ZONES + for tp in m.TPS_IN_PERIOD[p] ) for p in m.PERIODS - ]) - - # total MWh delivered each year - values.extend([ - sum( - electricity_demand(m, z, tp) * m.tp_weight_in_year[tp] - for z in m.LOAD_ZONES for tp in m.TPS_IN_PERIOD[p] - ) - for p in m.PERIODS - ]) + ] + ) return values + def write_results(m): outputs_dir = m.options.outputs_dir tag = filename_tag(m) - avg_ts_scale = float(sum(m.ts_scale_to_year[ts] for ts in m.TIMESERIES))/len(m.TIMESERIES) + avg_ts_scale = float(sum(m.ts_scale_to_year[ts] for ts in m.TIMESERIES)) / len( + m.TIMESERIES + ) last_bid = m.DR_BID_LIST.last() util.write_table( - m, m.LOAD_ZONES, m.TIMEPOINTS, + m, + m.LOAD_ZONES, + m.TIMEPOINTS, output_file=os.path.join(outputs_dir, "energy_sources{t}.csv".format(t=tag)), - headings= - ("load_zone", "period", "timepoint_label") - +tuple(m.FUELS) - +tuple(m.NON_FUEL_ENERGY_SOURCES) - +tuple("curtail_"+s for s in m.NON_FUEL_ENERGY_SOURCES) - +tuple(m.Zone_Power_Injections) - +tuple(m.Zone_Power_Withdrawals) - +("marginal_cost","final_marginal_cost","price","bid_load","peak_day","base_load","base_price"), - values=lambda m, z, t: - (z, m.tp_period[t], m.tp_timestamp[t]) - +tuple( - sum(get(m.DispatchGenByFuel, (p, t, f), 0.0) for p in m.GENS_BY_FUEL[f]) - for f in m.FUELS - ) - +tuple( - sum(get(m.DispatchGen, (p, t), 0.0) for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s]) - for s in m.NON_FUEL_ENERGY_SOURCES - ) - +tuple( - sum( - get(m.DispatchUpperLimit, (p, t), 0.0) - get(m.DispatchGen, (p, t), 0.0) - for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s] - ) - for s in m.NON_FUEL_ENERGY_SOURCES + headings=("load_zone", "period", "timepoint_label") + + tuple(m.FUELS) + + tuple(m.NON_FUEL_ENERGY_SOURCES) + + tuple("curtail_" + s for s in m.NON_FUEL_ENERGY_SOURCES) + + tuple(m.Zone_Power_Injections) + + tuple(m.Zone_Power_Withdrawals) + + ( + "marginal_cost", + "final_marginal_cost", + "price", + "bid_load", + "peak_day", + "base_load", + "base_price", + ), + values=lambda m, z, t: (z, m.tp_period[t], m.tp_timestamp[t]) + + tuple( + sum(get(m.DispatchGenByFuel, (p, t, f), 0.0) for p in m.GENS_BY_FUEL[f]) + for f in m.FUELS + ) + + tuple( + sum( + get(m.DispatchGen, (p, t), 0.0) + for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s] ) - +tuple(getattr(m, component)[z, t] for component in m.Zone_Power_Injections) - +tuple(getattr(m, component)[z, t] for component in m.Zone_Power_Withdrawals) - +( - m.prev_marginal_cost[z, t], - electricity_marginal_cost(m, z, t), - m.dr_price[last_bid, z, t], - m.dr_bid[last_bid, z, t], - 'peak' if m.ts_scale_to_year[m.tp_ts[t]] < 0.5*avg_ts_scale else 'typical', - m.base_data_dict[z, t][0], - m.base_data_dict[z, t][1], + for s in m.NON_FUEL_ENERGY_SOURCES + ) + + tuple( + sum( + get(m.DispatchUpperLimit, (p, t), 0.0) - get(m.DispatchGen, (p, t), 0.0) + for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s] ) + for s in m.NON_FUEL_ENERGY_SOURCES + ) + + tuple(getattr(m, component)[z, t] for component in 
m.Zone_Power_Injections) + + tuple(getattr(m, component)[z, t] for component in m.Zone_Power_Withdrawals) + + ( + m.prev_marginal_cost[z, t], + electricity_marginal_cost(m, z, t), + m.dr_price[last_bid, z, t], + m.dr_bid[last_bid, z, t], + "peak" + if m.ts_scale_to_year[m.tp_ts[t]] < 0.5 * avg_ts_scale + else "typical", + m.base_data_dict[z, t][0], + m.base_data_dict[z, t][1], + ), ) # import pprint @@ -776,6 +919,7 @@ def write_results(m): # bt=set(x[3] for x in b) # technologies # pprint([(t, sum(x[2] for x in b if x[3]==t), sum(x[4] for x in b if x[3]==t)/sum(1.0 for x in b if x[3]==t)) for t in bt]) + def write_dual_costs(m): outputs_dir = m.options.outputs_dir tag = filename_tag(m) @@ -796,7 +940,7 @@ def write_dual_costs(m): outfile = os.path.join(outputs_dir, "dual_costs{t}.csv".format(t=tag)) dual_data = [] start_time = time.time() - print("Writing {} ... ".format(outfile), end=' ') + print("Writing {} ... ".format(outfile), end=" ") def add_dual(const, lbound, ubound, duals): if const in duals: @@ -810,12 +954,17 @@ def add_dual(const, lbound, ubound, duals): if bound is None: # Variable is unbounded; dual should be 0.0 or possibly a tiny non-zero value. if not (-1e-5 < dual < 1e-5): - raise ValueError("{} has no {} bound but has a non-zero dual value {}.".format( - const.cname(), "lower" if dual > 0 else "upper", dual)) + raise ValueError( + "{} has no {} bound but has a non-zero dual value {}.".format( + const.cname(), "lower" if dual > 0 else "upper", dual + ) + ) else: total_cost = dual * bound if total_cost != 0.0: - dual_data.append((const.cname(), direction, bound, dual, total_cost)) + dual_data.append( + (const.cname(), direction, bound, dual, total_cost) + ) for comp in m.component_objects(ctype=Var): for idx in comp: @@ -826,12 +975,15 @@ def add_dual(const, lbound, ubound, duals): constr = comp[idx] add_dual(constr, value(constr.lower), value(constr.upper), m.dual) - dual_data.sort(key=lambda r: (not r[0].startswith('DR_Convex_'), r[3] >= 0)+r) + dual_data.sort(key=lambda r: (not r[0].startswith("DR_Convex_"), r[3] >= 0) + r) + + with open(outfile, "w") as f: + f.write( + ",".join(["constraint", "direction", "bound", "dual", "total_cost"]) + "\n" + ) + f.writelines(",".join(map(str, r)) + "\n" for r in dual_data) + print("time taken: {dur:.2f}s".format(dur=time.time() - start_time)) - with open(outfile, 'w') as f: - f.write(','.join(['constraint', 'direction', 'bound', 'dual', 'total_cost']) + '\n') - f.writelines(','.join(map(str, r)) + '\n' for r in dual_data) - print("time taken: {dur:.2f}s".format(dur=time.time()-start_time)) def filename_tag(m): if m.options.scenario_name: diff --git a/switch_model/hawaii/demand_response_simple.py b/switch_model/hawaii/demand_response_simple.py index a4f1d14d4..9b954e277 100644 --- a/switch_model/hawaii/demand_response_simple.py +++ b/switch_model/hawaii/demand_response_simple.py @@ -3,20 +3,35 @@ from pyomo.environ import * from switch_model.financials import capital_recovery_factor as crf +from switch_model.balancing.demand_response.iterative import ( + register_demand_response_reserves, +) + + def define_arguments(argparser): - argparser.add_argument('--demand-response-share', type=float, default=0.30, - help="Fraction of hourly load that can be shifted to other times of day (default=0.30)") - argparser.add_argument('--demand-response-reserve-types', nargs='+', default=['spinning'], - help= - "Type(s) of reserves to provide from demand response (e.g., 'contingency' or 'regulation'). " - "Specify 'none' to disable." 
+ argparser.add_argument( + "--demand-response-share", + type=float, + default=0.30, + help="Fraction of hourly load that can be shifted to other times of day (default=0.30)", + ) + argparser.add_argument( + "--demand-response-reserve-types", + nargs="+", + default=[], + help="Type(s) of reserves to provide from demand response (e.g., 'contingency' or 'regulation'). " + "Specify 'none' to disable. Default is 'spinning' if an operating reserve module is used, " + "otherwise it is 'none'.", ) + def define_components(m): # maximum share of hourly load that can be rescheduled # this is mutable so various values can be tested - m.demand_response_max_share = Param(default=m.options.demand_response_share, mutable=True) + m.demand_response_max_share = Param( + default=m.options.demand_response_share, mutable=True, within=PercentFraction + ) # maximum amount of load that can be _added_ each hour; we assume # it is 8x the maximum reduction, which is roughly equivalent to @@ -26,85 +41,48 @@ def define_components(m): # give negative down reserves when shifted demand exceeded this quantity, # which would have to come from somewhere else. m.demand_response_max_increase = Param( - rule=lambda m: m.demand_response_max_share * 24 / 3 + within=NonNegativeReals, rule=lambda m: m.demand_response_max_share * 24 / 3 ) # adjustment to demand during each hour (positive = higher demand) m.ShiftDemand = Var( - m.LOAD_ZONES, m.TIMEPOINTS, within=Reals, - bounds=lambda m, z, t: - ( - (-1.0) * m.demand_response_max_share * m.zone_demand_mw[z, t], - m.demand_response_max_increase * m.zone_demand_mw[z, t] - ) + m.LOAD_ZONES, + m.TIMEPOINTS, + within=Reals, + bounds=lambda m, z, t: ( + (-1.0) * m.demand_response_max_share * m.zone_demand_mw[z, t], + m.demand_response_max_increase * m.zone_demand_mw[z, t], + ), ) # all changes to demand must balance out over the course of the day - m.Demand_Response_Net_Zero = Constraint(m.LOAD_ZONES, m.TIMESERIES, rule=lambda m, z, ts: - sum(m.ShiftDemand[z, tp] for tp in m.TPS_IN_TS[ts]) == 0.0 + m.Demand_Response_Net_Zero = Constraint( + m.LOAD_ZONES, + m.TIMESERIES, + rule=lambda m, z, ts: sum(m.ShiftDemand[z, tp] for tp in m.TPS_IN_TS[ts]) + == 0.0, ) # add demand response to the zonal energy balance - m.Zone_Power_Withdrawals.append('ShiftDemand') + m.Zone_Power_Withdrawals.append("ShiftDemand") - if [rt.lower() for rt in m.options.demand_response_reserve_types] != ['none']: - # Register with spinning reserves - if hasattr(m, 'Spinning_Reserve_Up_Provisions'): - # calculate available slack from demand response - # (from supply perspective, so "up" means less load) - m.DemandResponseSlackUp = Expression(m.BALANCING_AREA_TIMEPOINTS, rule=lambda m, b, t: - sum( - m.ShiftDemand[z, t] - m.ShiftDemand[z, t].lb - for z in m.ZONES_IN_BALANCING_AREA[b] - ) - ) - m.DemandResponseSlackDown = Expression(m.BALANCING_AREA_TIMEPOINTS, rule=lambda m, b, tp: - sum( - # difference between scheduled load and max allowed - m.demand_response_max_increase * m.zone_demand_mw[z, tp] - - m.ShiftDemand[z, tp] - for z in m.ZONES_IN_BALANCING_AREA[b] - ) - ) - if hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'): - # using advanced formulation, index by reserve type, balancing area, timepoint - # define variables for each type of reserves to be provided - # choose how to allocate the slack between the different reserve products - m.DR_SPINNING_RESERVE_TYPES = Set( - initialize=m.options.demand_response_reserve_types - ) - m.DemandResponseSpinningReserveUp = Var( - m.DR_SPINNING_RESERVE_TYPES, 
m.BALANCING_AREA_TIMEPOINTS, - within=NonNegativeReals - ) - m.DemandResponseSpinningReserveDown = Var( - m.DR_SPINNING_RESERVE_TYPES, m.BALANCING_AREA_TIMEPOINTS, - within=NonNegativeReals - ) - # constrain reserve provision within available slack - m.Limit_DemandResponseSpinningReserveUp = Constraint( - m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, ba, tp: - sum( - m.DemandResponseSpinningReserveUp[rt, ba, tp] - for rt in m.DR_SPINNING_RESERVE_TYPES - ) <= m.DemandResponseSlackUp[ba, tp] - ) - m.Limit_DemandResponseSpinningReserveDown = Constraint( - m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, ba, tp: - sum( - m.DemandResponseSpinningReserveDown[rt, ba, tp] - for rt in m.DR_SPINNING_RESERVE_TYPES - ) <= m.DemandResponseSlackDown[ba, tp] - ) - m.Spinning_Reserve_Up_Provisions.append('DemandResponseSpinningReserveUp') - m.Spinning_Reserve_Down_Provisions.append('DemandResponseSpinningReserveDown') - else: - # using older formulation, only one type of spinning reserves, indexed by balancing area, timepoint - if m.options.demand_response_reserve_types != ['spinning']: - raise ValueError( - 'Unable to use reserve types other than "spinning" with simple spinning reserves module.' - ) - m.Spinning_Reserve_Up_Provisions.append('DemandResponseSlackUp') - m.Spinning_Reserve_Down_Provisions.append('DemandResponseSlackDown') + if hasattr(m, "ZONES_IN_BALANCING_AREA"): + # calculate available slack from demand response + # (from supply perspective, so "up" means less load) + m.DemandResponseSlackUp = Expression( + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, b, t: sum( + m.ShiftDemand[z, t] - m.ShiftDemand[z, t].lb + for z in m.ZONES_IN_BALANCING_AREA[b] + ), + ) + m.DemandResponseSlackDown = Expression( + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, b, tp: sum( + # difference between scheduled load and max allowed + m.demand_response_max_increase * m.zone_demand_mw[z, tp] + - m.ShiftDemand[z, tp] + for z in m.ZONES_IN_BALANCING_AREA[b] + ), + ) + register_demand_response_reserves(m) diff --git a/switch_model/hawaii/emission_rules.py b/switch_model/hawaii/emission_rules.py index 3f7722c2f..517486851 100644 --- a/switch_model/hawaii/emission_rules.py +++ b/switch_model/hawaii/emission_rules.py @@ -1,21 +1,27 @@ from pyomo.environ import * + def define_components(m): """ prevent non-cogen plants from burning pure LSFO after 2017 due to MATS emission restrictions """ # TODO: move this set into a parameter list in fuels.csv, e.g, 'banned_after', which can be a year or NULL - m.FUEL_BANS = Set(dimen=2, initialize=[('LSFO', 2017)]) + m.FUEL_BANS = Set(dimen=2, initialize=[("LSFO", 2017)]) - m.BANNED_FUEL_DISPATCH_POINTS = Set(dimen=3, initialize=lambda m: - [(g, tp, f) + m.BANNED_FUEL_DISPATCH_POINTS = Set( + dimen=3, + initialize=lambda m: [ + (g, tp, f) for (f, y) in m.FUEL_BANS - for g in m.GENS_BY_FUEL[f] # if not m.gen_is_cogen[g] - for pe in m.PERIODS if m.period_end[pe] >= y - for tp in m.TPS_IN_PERIOD[pe] if (g, tp) in m.GEN_TPS - ] + for g in m.GENS_BY_FUEL[f] # if not m.gen_is_cogen[g] + for pe in m.PERIODS + if m.period_end[pe] >= y + for tp in m.TPS_IN_PERIOD[pe] + if (g, tp) in m.GEN_TPS + ], ) - m.ENFORCE_FUEL_BANS = Constraint(m.BANNED_FUEL_DISPATCH_POINTS, rule = lambda m, g, tp, f: - m.DispatchGenByFuel[g, tp, f] == 0 + m.ENFORCE_FUEL_BANS = Constraint( + m.BANNED_FUEL_DISPATCH_POINTS, + rule=lambda m, g, tp, f: m.DispatchGenByFuel[g, tp, f] == 0, ) diff --git a/switch_model/hawaii/ev.py b/switch_model/hawaii/ev.py index be4047117..bdaa6f33b 100644 --- a/switch_model/hawaii/ev.py +++ 
b/switch_model/hawaii/ev.py @@ -4,43 +4,80 @@ from pyomo.environ import * from switch_model import timescales + def define_arguments(argparser): - argparser.add_argument("--ev-timing", choices=['bau', 'flat', 'optimal'], default='optimal', - help="Rule for when to charge EVs -- business-as-usual (upon arrival), flat around the clock, or optimal (default).") - argparser.add_argument('--ev-reserve-types', nargs='+', default=['spinning'], - help= - "Type(s) of reserves to provide from electric-vehicle charging (e.g., 'contingency' or 'regulation')." - "Default is generic 'spinning'. Specify 'none' to disable. Only takes effect with '--ev-timing optimal'." + argparser.add_argument( + "--ev-timing", + default=["optimal"], + nargs="+", + help="Rule(s) for when to charge EVs -- bau=business-as-usual (upon arrival), " + "flat=around the clock, or optimal (default). You may also specify " + "multiple options in the form --ev-timing bau=0.32 optimal=0.68 to " + "use more than one mode. Modes without shares assigned will receive " + "equal fractions of the unallocated charging.", + ) + argparser.add_argument( + "--ev-reserve-types", + nargs="+", + default=["spinning"], + help="Type(s) of reserves to provide from electric-vehicle charging (e.g., 'contingency' or 'regulation')." + "Default is generic 'spinning'. Specify 'none' to disable. Only takes effect with '--ev-timing optimal'.", ) + def define_components(m): # setup various parameters describing the EV and ICE fleet each year - for p in ["ev_share", "ice_miles_per_gallon", "ev_miles_per_kwh", "ev_extra_cost_per_vehicle_year", "n_all_vehicles", "vmt_per_vehicle"]: - setattr(m, p, Param(m.LOAD_ZONES, m.PERIODS)) + m.ev_share = Param(m.LOAD_ZONES, m.PERIODS, within=PercentFraction) + m.ice_miles_per_gallon = Param(m.LOAD_ZONES, m.PERIODS, within=NonNegativeReals) + m.ev_miles_per_kwh = Param(m.LOAD_ZONES, m.PERIODS, within=NonNegativeReals) + m.ev_extra_cost_per_vehicle_year = Param(m.LOAD_ZONES, m.PERIODS, within=Reals) + m.n_all_vehicles = Param(m.LOAD_ZONES, m.PERIODS, within=NonNegativeReals) + m.vmt_per_vehicle = Param(m.LOAD_ZONES, m.PERIODS, within=NonNegativeReals) - m.ev_bau_mw = Param(m.LOAD_ZONES, m.TIMEPOINTS) + m.ev_bau_mw = Param(m.LOAD_ZONES, m.TIMEPOINTS, within=Reals) # calculate the extra annual cost (non-fuel) of having EVs, relative to ICEs (mostly for batteries, could also be chargers) - m.ev_extra_annual_cost = Param(m.PERIODS, initialize=lambda m, p: - sum(m.ev_extra_cost_per_vehicle_year[z, p] * m.ev_share[z, p] * m.n_all_vehicles[z, p] for z in m.LOAD_ZONES) + m.ev_extra_annual_cost = Param( + m.PERIODS, + within=Reals, + initialize=lambda m, p: sum( + m.ev_extra_cost_per_vehicle_year[z, p] + * m.ev_share[z, p] + * m.n_all_vehicles[z, p] + for z in m.LOAD_ZONES + ), ) # calculate total fuel cost for ICE (non-EV) VMTs - # We assume gasoline for the ICE vehicles costs the same as diesel - # note: this is the utility price, which is actually lower than retail gasoline if hasattr(m, "rfm_supply_tier_cost"): - ice_fuel_cost_func = lambda m, z, p: m.rfm_supply_tier_cost['Hawaii_Diesel', p, 'base'] - else: - ice_fuel_cost_func = lambda m, z, p: m.fuel_cost[z, "Diesel", p] - - m.ice_annual_fuel_cost = Param(m.PERIODS, initialize=lambda m, p: - sum( - (1.0 - m.ev_share[z, p]) * m.n_all_vehicles[z, p] * m.vmt_per_vehicle[z, p] + # using fuel_costs.markets + ice_fuel_cost_func = lambda m, z, p: m.rfm_supply_tier_cost[ + "Hawaii_Motor_Gasoline", p, "base" + ] + elif hasattr(m, "ZONE_FUEL_PERIODS"): + # using fuel_costs.simple + 
ice_fuel_cost_func = lambda m, z, p: m.fuel_cost[z, "Motor_Gasoline", p] + elif hasattr(m, "ZONE_FUEL_TIMEPOINTS"): + # using fuel_costs.simple_per_timepoint + ice_fuel_cost_func = ( + lambda m, z, p: sum( + m.tp_weight[t] * m.fuel_cost_per_timepoint[z, "Motor_Gasoline", t] + for t in m.TPS_IN_PERIOD[p] + ) + / m.period_length_hours[p] + ) + m.ice_annual_fuel_cost = Param( + m.PERIODS, + within=NonNegativeReals, + initialize=lambda m, p: sum( + (1.0 - m.ev_share[z, p]) + * m.n_all_vehicles[z, p] + * m.vmt_per_vehicle[z, p] / m.ice_miles_per_gallon[z, p] - * 0.114 # 0.114 MBtu/gal gasoline + * 0.114 # 0.114 MBtu/gal gasoline * ice_fuel_cost_func(m, z, p) - for z in m.LOAD_ZONES - ) + for z in m.LOAD_ZONES + ), ) # add cost components to account for the vehicle miles traveled via EV or ICE @@ -49,8 +86,12 @@ def define_components(m): # m.Cost_Components_Per_Period.append('ice_annual_fuel_cost') # calculate the amount of energy used during each timeseries under business-as-usual charging - m.ev_mwh_ts = Param(m.LOAD_ZONES, m.TIMESERIES, initialize=lambda m, z, ts: - sum(m.ev_bau_mw[z, tp] for tp in m.TPS_IN_TS[ts]) * m.ts_duration_of_tp[ts] + m.ev_mwh_ts = Param( + m.LOAD_ZONES, + m.TIMESERIES, + within=NonNegativeReals, + initialize=lambda m, z, ts: sum(m.ev_bau_mw[z, tp] for tp in m.TPS_IN_TS[ts]) + * m.ts_duration_of_tp[ts], ) # decide when to provide the EV energy @@ -59,81 +100,129 @@ def define_components(m): # make sure to charge all EVs at some point during the day # (they must always consume the same amount per day as under business-as-usual, # but there may be some room to reschedule it.) - m.ChargeEVs_min = Constraint(m.LOAD_ZONES, m.TIMESERIES, rule=lambda m, z, ts: - sum(m.ChargeEVs[z, tp] for tp in m.TPS_IN_TS[ts]) * m.ts_duration_of_tp[ts] - == m.ev_mwh_ts[z, ts] + m.ChargeEVs_min = Constraint( + m.LOAD_ZONES, + m.TIMESERIES, + rule=lambda m, z, ts: sum(m.ChargeEVs[z, tp] for tp in m.TPS_IN_TS[ts]) + * m.ts_duration_of_tp[ts] + == m.ev_mwh_ts[z, ts], ) # set rules for when to charge EVs - if m.options.ev_timing == "optimal": - if m.options.verbose: - print("Charging EVs at best time each day.") - # no extra code needed - elif m.options.ev_timing == "flat": - if m.options.verbose: - print("Charging EVs as baseload.") - m.ChargeEVs_flat = Constraint( - m.LOAD_ZONES, m.TIMEPOINTS, - rule=lambda m, z, tp: - m.ChargeEVs[z, tp] == m.ev_mwh_ts[z, m.tp_ts[tp]] / m.ts_duration_hrs[m.tp_ts[tp]] + mode_shares = {"optimal": 0.0, "flat": 0.0, "bau": 0.0} + for tag in m.options.ev_timing: + try: + mode, share = tag.split("=", 2) + try: + share = float(share) + except: + print( + "\nInvalid share for EV charging mode {}: ({}).".format(mode, share) + ) + raise + except ValueError: + mode = tag + share = None + if mode in mode_shares: + mode_shares[mode] = share + else: + raise ValueError( + "Invalid EV charging mode specified for --ev-timing: {}".format(mode) + ) + fillers = [mode for (mode, share) in mode_shares.items() if share is None] + allocated_shares = sum(share for share in mode_shares.values() if share is not None) + if allocated_shares >= 1.00001: + raise ValueError( + "Shares assigned with --ev-timing flag add up to {}. 
" + "They must sum to 1.0 (or less if a catch-all mode is specified).".format( + allocated_shares + ) ) - elif m.options.ev_timing == "bau": - if m.options.verbose: - print("Charging EVs at business-as-usual times of day.") - m.ChargeEVs_bau = Constraint( - m.LOAD_ZONES, m.TIMEPOINTS, - rule=lambda m, z, tp: - m.ChargeEVs[z, tp] == m.ev_bau_mw[z, tp] + if allocated_shares <= 0.99999 and not fillers: + raise ValueError( + "Shares assigned with --ev-timing flag add up to {}. " + "They must sum to 1.0 if no catch-all mode is specified.".format( + allocated_shares + ) ) - else: - # should never happen - raise ValueError("Invalid value specified for --ev-timing: {}".format(str(m.options.ev_timing))) + for mode in fillers: + mode_shares[mode] = (1 - allocated_shares) / len(fillers) + + if m.options.verbose: + for mode, tag in [ + ("optimal", "at best time each day"), + ("flat", "round the clock each day"), + ("bau", "at business-as-usual times each day"), + ]: + if mode_shares[mode] > 0: + print("Charging {:.1%} of EVs {}.".format(mode_shares[mode], tag)) + + # force the minimum amount of charging required for the bau and flat modes; + # all other charging will be allocated optimally among hours + m.Min_EV_Charging = Expression( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, tp: mode_shares["flat"] + * (m.ev_mwh_ts[z, m.tp_ts[tp]] / m.ts_duration_hrs[m.tp_ts[tp]]) + + mode_shares["bau"] * m.ev_bau_mw[z, tp], + ) + m.Enforce_EV_Charging_Modes = Constraint( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, tp: m.ChargeEVs[z, tp] >= m.Min_EV_Charging[z, tp], + ) # add the EV load to the model's energy balance - m.Zone_Power_Withdrawals.append('ChargeEVs') + m.Zone_Power_Withdrawals.append("ChargeEVs") - # Register with spinning reserves if it is available and optimal EV charging is enabled. - if [rt.lower() for rt in m.options.ev_reserve_types] != ['none'] and m.options.ev_timing == "optimal": - if hasattr(m, 'Spinning_Reserve_Up_Provisions'): + # Register with spinning reserves if it is available and any optimal EV charging is enabled. 
+ if [rt.lower() for rt in m.options.ev_reserve_types] != ["none"] and mode_shares[ + "optimal" + ] > 0: + if hasattr(m, "Spinning_Reserve_Up_Provisions"): # calculate available slack from EV charging # (from supply perspective, so "up" means less load) m.EVSlackUp = Expression( m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, b, t: - sum(m.ChargeEVs[z, t] for z in m.ZONES_IN_BALANCING_AREA[b]) + rule=lambda m, b, t: sum( + m.ChargeEVs[z, t] - m.Min_EV_Charging[z, t] + for z in m.ZONES_IN_BALANCING_AREA[b] + ), ) # note: we currently ignore down-reserves (option of increasing consumption) # from EVs since it's not clear how high they could go; we could revisit this if # down-reserves have a positive price at equilibrium (probably won't) - if hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'): + # print("\n\nNeed to define Spinning_Reserve_Down_Provisions for EVs.\n") + # import time; time.sleep(3) + if hasattr(m, "GEN_SPINNING_RESERVE_TYPES"): # using advanced formulation, index by reserve type, balancing area, timepoint # define variables for each type of reserves to be provided # choose how to allocate the slack between the different reserve products m.EV_SPINNING_RESERVE_TYPES = Set( - initialize=m.options.ev_reserve_types + dimen=1, initialize=m.options.ev_reserve_types ) m.EVSpinningReserveUp = Var( - m.EV_SPINNING_RESERVE_TYPES, m.BALANCING_AREA_TIMEPOINTS, - within=NonNegativeReals + m.EV_SPINNING_RESERVE_TYPES, + m.BALANCING_AREA_TIMEPOINTS, + within=NonNegativeReals, ) # constrain reserve provision within available slack m.Limit_EVSpinningReserveUp = Constraint( m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, ba, tp: - sum( - m.EVSpinningReserveUp[rt, ba, tp] - for rt in m.EV_SPINNING_RESERVE_TYPES - ) <= m.EVSlackUp[ba, tp] + rule=lambda m, ba, tp: sum( + m.EVSpinningReserveUp[rt, ba, tp] + for rt in m.EV_SPINNING_RESERVE_TYPES + ) + <= m.EVSlackUp[ba, tp], ) - m.Spinning_Reserve_Up_Provisions.append('EVSpinningReserveUp') + m.Spinning_Reserve_Up_Provisions.append("EVSpinningReserveUp") else: # using older formulation, only one type of spinning reserves, indexed by balancing area, timepoint - if m.options.ev_reserve_types != ['spinning']: + if m.options.ev_reserve_types != ["spinning"]: raise ValueError( 'Unable to use reserve types other than "spinning" with simple spinning reserves module.' ) - m.Spinning_Reserve_Up_Provisions.append('EVSlackUp') - + m.Spinning_Reserve_Up_Provisions.append("EVSlackUp") def load_inputs(m, switch_data, inputs_dir): @@ -141,18 +230,21 @@ def load_inputs(m, switch_data, inputs_dir): Import ev data from .csv files. 
""" switch_data.load_aug( - filename=os.path.join(inputs_dir, 'ev_fleet_info.csv'), - auto_select=True, + filename=os.path.join(inputs_dir, "ev_fleet_info.csv"), param=[ getattr(m, p) - for p in - ["ev_share", "ice_miles_per_gallon", "ev_miles_per_kwh", "ev_extra_cost_per_vehicle_year", "n_all_vehicles", "vmt_per_vehicle"] - ] + for p in [ + "ev_share", + "ice_miles_per_gallon", + "ev_miles_per_kwh", + "ev_extra_cost_per_vehicle_year", + "n_all_vehicles", + "vmt_per_vehicle", + ] + ], ) # print "loading ev_bau_load.csv" # import pdb; pdb.set_trace() switch_data.load_aug( - filename=os.path.join(inputs_dir, 'ev_bau_load.csv'), - auto_select=True, - param=m.ev_bau_mw + filename=os.path.join(inputs_dir, "ev_bau_load.csv"), param=m.ev_bau_mw ) diff --git a/switch_model/hawaii/ev_advanced.py b/switch_model/hawaii/ev_advanced.py index 0123bada5..0817e90e6 100644 --- a/switch_model/hawaii/ev_advanced.py +++ b/switch_model/hawaii/ev_advanced.py @@ -1,37 +1,50 @@ from __future__ import print_function import os from pyomo.environ import * +from switch_model.utilities import unique_list + def define_arguments(argparser): - argparser.add_argument("--ev-timing", choices=['bau', 'optimal'], default='optimal', - help="Rule for when to charge EVs -- business-as-usual (upon arrival), flat around the clock, or optimal (default).") - argparser.add_argument('--ev-reserve-types', nargs='+', default=['spinning'], - help= - "Type(s) of reserves to provide from electric-vehicle charging (e.g., 'contingency' or 'regulation')." - "Default is generic 'spinning'. Specify 'none' to disable. Only takes effect with '--ev-timing optimal'." + argparser.add_argument( + "--ev-timing", + choices=["bau", "optimal"], + default="optimal", + help="Rule for when to charge EVs -- business-as-usual (upon arrival), flat around the clock, or optimal (default).", + ) + argparser.add_argument( + "--ev-reserve-types", + nargs="+", + default=["spinning"], + help="Type(s) of reserves to provide from electric-vehicle charging (e.g., 'contingency' or 'regulation')." + "Default is generic 'spinning'. Specify 'none' to disable. 
Only takes effect with '--ev-timing optimal'.", ) + # parameters describing the EV and ICE fleet each year, all indexed by zone, # vehicle type and period ev_zone_type_period_params = [ "n_vehicles", - "ice_gals_per_year", "ice_fuel", "ev_kwh_per_year", - "ev_extra_cost_per_vehicle_year" + "ice_gals_per_year", + "ice_fuel", + "ev_kwh_per_year", + "ev_extra_cost_per_vehicle_year", ] + def define_components(m): - # indexing set for EV bids, decomposed to get sets of EV bid numbers and EV types - m.EV_ZONE_TYPE_BID_TP = Set(dimen=4) # load zone, vehicle type, bid number, timepoint - def rule(m): - bids = m.EV_BID_NUMS_set = set() - types = m.EV_TYPES_set = set() - for z, t, n, tp in m.EV_ZONE_TYPE_BID_TP: - bids.add(n) - types.add(t) - m.Split_EV_Sets = BuildAction(rule=rule) - m.EV_BID_NUMS = Set(initialize=lambda m: m.EV_BID_NUMS_set) - m.EV_TYPES = Set(initialize=lambda m: m.EV_TYPES_set) + # indexing set for EV bids, filtered to get sets of EV bid numbers and EV types + m.EV_ZONE_TYPE_BID_TP = Set( + dimen=4 + ) # load zone, vehicle type, bid number, timepoint + m.EV_BID_NUMS = Set( + dimen=1, + initialize=lambda m: unique_list(n for z, t, n, tp in m.EV_ZONE_TYPE_BID_TP), + ) + m.EV_TYPES = Set( + dimen=1, + initialize=lambda m: unique_list(t for z, t, n, tp in m.EV_ZONE_TYPE_BID_TP), + ) # parameters describing the EV and ICE fleet each year @@ -39,54 +52,67 @@ def rule(m): # (could eventually be a decision variable) m.ev_share = Param(m.LOAD_ZONES, m.PERIODS, within=PercentFraction) for p in ev_zone_type_period_params: - setattr(m, p, Param(m.LOAD_ZONES, m.EV_TYPES, m.PERIODS)) + # ice_fuel holds fuel names (strings), so it cannot use a numeric domain + setattr( + m, + p, + Param( + m.LOAD_ZONES, + m.EV_TYPES, + m.PERIODS, + within=Any if p == "ice_fuel" else NonNegativeReals, + ), + ) # calculate the extra annual cost (non-fuel) of having EVs, relative to ICEs, # for batteries and chargers m.ev_extra_annual_cost = Param( - m.PERIODS, initialize=lambda m, p: - sum( + m.PERIODS, + initialize=lambda m, p: sum( m.ev_share[z, p] * m.n_vehicles[z, t, p] * m.ev_extra_cost_per_vehicle_year[z, t, p] for z in m.LOAD_ZONES for t in m.EV_TYPES - ) + ), + within=NonNegativeReals, ) # calculate total fuel usage, cost and emissions for ICE (non-EV) vehicles motor_fuel_mmbtu_per_gallon = { # from https://www.eia.gov/Energyexplained/?page=about_energy_units "Motor_Gasoline": 0.120476, - "Motor_Diesel": 0.137452 + "Motor_Diesel": 0.137452, } m.ice_annual_fuel_mmbtu = Param( - m.LOAD_ZONES, m.EV_TYPES, m.PERIODS, - initialize=lambda m, z, evt, p: + m.LOAD_ZONES, + m.EV_TYPES, + m.PERIODS, + within=NonNegativeReals, + initialize=lambda m, z, evt, p: ( (1.0 - m.ev_share[z, p]) * m.n_vehicles[z, evt, p] * m.ice_gals_per_year[z, evt, p] * motor_fuel_mmbtu_per_gallon[m.ice_fuel[z, evt, p]] + ), ) # non-EV fuel cost if hasattr(m, "rfm_supply_tier_cost"): - ice_fuel_cost_func = lambda m, z, p, f: m.rfm_supply_tier_cost[m.zone_rfm[z, f], p, 'base'] + ice_fuel_cost_func = lambda m, z, p, f: m.rfm_supply_tier_cost[ + m.zone_fuel_rfm[z, f], p, "base" + ] else: ice_fuel_cost_func = lambda m, z, p, f: m.fuel_cost[z, f, p] - m.ice_annual_fuel_cost = Param(m.PERIODS, initialize=lambda m, p: - sum( + m.ice_annual_fuel_cost = Param( + m.PERIODS, + within=Reals, + initialize=lambda m, p: sum( m.ice_annual_fuel_mmbtu[z, evt, p] * ice_fuel_cost_func(m, z, p, m.ice_fuel[z, evt, p]) for z in m.LOAD_ZONES for evt in m.EV_TYPES - ) + ), ) # non-EV annual emissions (currently only used for reporting via # --save-expression ice_annual_emissions # TODO: find a way to add this to the AnnualEmissions expression (maybe); # at present, this doesn't 
affect the system emissions or emission cost - m.ice_annual_emissions = Param(m.PERIODS, initialize = lambda m, p: - sum( + m.ice_annual_emissions = Param( + m.PERIODS, + within=NonNegativeReals, + initialize=lambda m, p: sum( m.ice_annual_fuel_mmbtu[z, evt, p] * ( m.f_co2_intensity[m.ice_fuel[z, evt, p]] @@ -94,54 +120,63 @@ def rule(m): ) for z in m.LOAD_ZONES for evt in m.EV_TYPES - ) + ), ) # add cost components to account for the vehicle miles traveled via EV or ICE # (not used because it interferes with calculation of cost per kWh for electricity) - m.Cost_Components_Per_Period.append('ev_extra_annual_cost') - m.Cost_Components_Per_Period.append('ice_annual_fuel_cost') + m.Cost_Components_Per_Period.append("ev_extra_annual_cost") + m.Cost_Components_Per_Period.append("ice_annual_fuel_cost") # EV bid data -- total MW used by 100% EV fleet, for each zone, veh type, # bid number, timepoint - m.ev_bid_by_type = Param(m.EV_ZONE_TYPE_BID_TP) + m.ev_bid_by_type = Param(m.EV_ZONE_TYPE_BID_TP, within=NonNegativeReals) # aggregate across vehicle types (types are only needed for reporting) m.ev_bid_mw = Param( - m.LOAD_ZONES, m.EV_BID_NUMS, m.TIMEPOINTS, - initialize=lambda m, z, n, tp: - sum(m.ev_bid_by_type[z, t, n, tp] for t in m.EV_TYPES) + m.LOAD_ZONES, + m.EV_BID_NUMS, + m.TIMEPOINTS, + initialize=lambda m, z, n, tp: sum( + m.ev_bid_by_type[z, t, n, tp] for t in m.EV_TYPES + ), ) # find lowest and highest possible charging in each timepoint, used for reserve calcs m.ev_charge_min = Param( - m.LOAD_ZONES, m.TIMEPOINTS, - initialize=lambda m, z, tp: - m.ev_share[z, m.tp_period[tp]] - * min(m.ev_bid_mw[z, n, tp] for n in m.EV_BID_NUMS) + m.LOAD_ZONES, + m.TIMEPOINTS, + within=Reals, + initialize=lambda m, z, tp: m.ev_share[z, m.tp_period[tp]] + * min(m.ev_bid_mw[z, n, tp] for n in m.EV_BID_NUMS), ) m.ev_charge_max = Param( - m.LOAD_ZONES, m.TIMEPOINTS, - initialize=lambda m, z, tp: - m.ev_share[z, m.tp_period[tp]] - * max(m.ev_bid_mw[z, n, tp] for n in m.EV_BID_NUMS) + m.LOAD_ZONES, + m.TIMEPOINTS, + within=Reals, + initialize=lambda m, z, tp: m.ev_share[z, m.tp_period[tp]] + * max(m.ev_bid_mw[z, n, tp] for n in m.EV_BID_NUMS), ) # decide which share of the fleet to allocate to each charging bid - m.EVBidWeight = Var(m.LOAD_ZONES, m.TIMESERIES, m.EV_BID_NUMS, within=PercentFraction) + m.EVBidWeight = Var( + m.LOAD_ZONES, m.TIMESERIES, m.EV_BID_NUMS, within=PercentFraction + ) m.Charge_Enough_EVs = Constraint( - m.LOAD_ZONES, m.TIMESERIES, - rule=lambda m, z, ts: - sum(m.EVBidWeight[z, ts, n] for n in m.EV_BID_NUMS) == m.ev_share[z, m.ts_period[ts]] + m.LOAD_ZONES, + m.TIMESERIES, + rule=lambda m, z, ts: sum(m.EVBidWeight[z, ts, n] for n in m.EV_BID_NUMS) + == m.ev_share[z, m.ts_period[ts]], ) # calculate total EV charging m.ChargeEVs = Expression( - m.LOAD_ZONES, m.TIMEPOINTS, + m.LOAD_ZONES, + m.TIMEPOINTS, rule=lambda m, z, tp: sum( m.EVBidWeight[z, m.tp_ts[tp], n] * m.ev_bid_mw[z, n, tp] for n in m.EV_BID_NUMS - ) + ), ) # set rules for when to charge EVs @@ -155,22 +190,30 @@ def rule(m): print("Charging EVs at business-as-usual times of day.") # give full weight to BAU bid (number 0) m.ChargeEVs_bau = Constraint( - m.LOAD_ZONES, m.EV_BID_NUMS, m.TIMESERIES, + m.LOAD_ZONES, + m.EV_BID_NUMS, + m.TIMESERIES, rule=lambda m, z, n, ts: ( m.EVBidWeight[z, ts, n] == (m.ev_share[z, m.ts_period[ts]] if n == 0 else 0) - ) + ), ) else: # should never happen - raise ValueError("Invalid value specified for --ev-timing: {}".format(str(m.options.ev_timing))) + raise ValueError( + "Invalid value 
specified for --ev-timing: {}".format( + str(m.options.ev_timing) + ) + ) # add the EV load to the model's energy balance - m.Zone_Power_Withdrawals.append('ChargeEVs') + m.Zone_Power_Withdrawals.append("ChargeEVs") # Register with spinning reserves if it is available and optimal EV charging is enabled. - if [rt.lower() for rt in m.options.ev_reserve_types] != ['none'] and m.options.ev_timing == "optimal": - if hasattr(m, 'Spinning_Reserve_Up_Provisions'): + if [rt.lower() for rt in m.options.ev_reserve_types] != [ + "none" + ] and m.options.ev_timing == "optimal": + if hasattr(m, "Spinning_Reserve_Up_Provisions"): # calculate available slack from EV charging # (from supply perspective, so "up" means less load) m.EVSlackUp = Expression( @@ -178,57 +221,59 @@ def rule(m): rule=lambda m, b, t: sum( m.ChargeEVs[z, t] - m.ev_charge_min[z, t] for z in m.ZONES_IN_BALANCING_AREA[b] - ) + ), ) m.EVSlackDown = Expression( m.BALANCING_AREA_TIMEPOINTS, rule=lambda m, b, t: sum( m.ev_charge_max[z, t] - m.ChargeEVs[z, t] for z in m.ZONES_IN_BALANCING_AREA[b] - ) + ), ) - if hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'): + if hasattr(m, "GEN_SPINNING_RESERVE_TYPES"): # using advanced formulation, index by reserve type, balancing area, timepoint. # define variables for each type of reserves to be provided # choose how to allocate the slack between the different reserve products m.EV_SPINNING_RESERVE_TYPES = Set( - initialize=m.options.ev_reserve_types + dimen=1, initialize=m.options.ev_reserve_types ) m.EVSpinningReserveUp = Var( - m.EV_SPINNING_RESERVE_TYPES, m.BALANCING_AREA_TIMEPOINTS, - within=NonNegativeReals + m.EV_SPINNING_RESERVE_TYPES, + m.BALANCING_AREA_TIMEPOINTS, + within=NonNegativeReals, ) m.EVSpinningReserveDown = Var( - m.EV_SPINNING_RESERVE_TYPES, m.BALANCING_AREA_TIMEPOINTS, - within=NonNegativeReals + m.EV_SPINNING_RESERVE_TYPES, + m.BALANCING_AREA_TIMEPOINTS, + within=NonNegativeReals, ) # constrain reserve provision within available slack m.Limit_EVSpinningReserveUp = Constraint( m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, ba, tp: - sum( - m.EVSpinningReserveUp[rt, ba, tp] - for rt in m.EV_SPINNING_RESERVE_TYPES - ) <= m.EVSlackUp[ba, tp] + rule=lambda m, ba, tp: sum( + m.EVSpinningReserveUp[rt, ba, tp] + for rt in m.EV_SPINNING_RESERVE_TYPES + ) + <= m.EVSlackUp[ba, tp], ) m.Limit_EVSpinningReserveDown = Constraint( m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, ba, tp: - sum( - m.EVSpinningReserveDown[rt, ba, tp] - for rt in m.EV_SPINNING_RESERVE_TYPES - ) <= m.EVSlackDown[ba, tp] + rule=lambda m, ba, tp: sum( + m.EVSpinningReserveDown[rt, ba, tp] + for rt in m.EV_SPINNING_RESERVE_TYPES + ) + <= m.EVSlackDown[ba, tp], ) - m.Spinning_Reserve_Up_Provisions.append('EVSpinningReserveUp') - m.Spinning_Reserve_Down_Provisions.append('EVSpinningReserveDown') + m.Spinning_Reserve_Up_Provisions.append("EVSpinningReserveUp") + m.Spinning_Reserve_Down_Provisions.append("EVSpinningReserveDown") else: # using older formulation, only one type of spinning reserves, indexed by balancing area, timepoint - if m.options.ev_reserve_types != ['spinning']: + if m.options.ev_reserve_types != ["spinning"]: raise ValueError( 'Unable to use reserve types other than "spinning" with simple spinning reserves module.' 
                )
-            m.Spinning_Reserve_Up_Provisions.append('EVSlackUp')
-            m.Spinning_Reserve_Down_Provisions.append('EVSlacDown')
+            m.Spinning_Reserve_Up_Provisions.append("EVSlackUp")
+            m.Spinning_Reserve_Down_Provisions.append("EVSlackDown")


 def load_inputs(m, switch_data, inputs_dir):
@@ -236,18 +281,14 @@ def load_inputs(m, switch_data, inputs_dir):
     """
    Import ev data from .csv files.
     """
     switch_data.load_aug(
-        filename=os.path.join(inputs_dir, 'ev_share.csv'),
-        auto_select=True,
-        param=m.ev_share
+        filename=os.path.join(inputs_dir, "ev_share.csv"), param=m.ev_share
     )
     switch_data.load_aug(
-        filename=os.path.join(inputs_dir, 'ev_fleet_info_advanced.csv'),
-        auto_select=True,
-        param=[getattr(m, p) for p in ev_zone_type_period_params]
+        filename=os.path.join(inputs_dir, "ev_fleet_info_advanced.csv"),
+        param=[getattr(m, p) for p in ev_zone_type_period_params],
     )
     switch_data.load_aug(
-        filename=os.path.join(inputs_dir, 'ev_charging_bids.csv'),
-        auto_select=True,
+        filename=os.path.join(inputs_dir, "ev_charging_bids.csv"),
         param=m.ev_bid_by_type,
-        index=m.EV_ZONE_TYPE_BID_TP
+        index=m.EV_ZONE_TYPE_BID_TP,
     )
diff --git a/switch_model/hawaii/fed_subsidies.py b/switch_model/hawaii/fed_subsidies.py
index cfcc16a63..35cfac8e8 100644
--- a/switch_model/hawaii/fed_subsidies.py
+++ b/switch_model/hawaii/fed_subsidies.py
@@ -1,57 +1,108 @@
 from __future__ import absolute_import
+from __future__ import print_function
 from pyomo.environ import *
 from .util import get
+import time
+

 def define_components(m):
     """
     incorporate the effect of federal subsidies
     """
+    m.logger.warning(
+        "WARNING: {} module does not account for storage attached to renewable projects.".format(
+            __name__
+        )
+    )
+    m.logger.warning(
+        "WARNING: {} module should use 10% ITC for FlatDistPV in 2022 and later; see https://www.energy.gov/eere/solar/downloads/residential-and-commercial-itc-factsheets".format(
+            __name__
+        )
+    )
+
     # note: wind/solar/geothermal production tax credit expires in 2017-2019,
     # so we ignore that (http://programs.dsireusa.org/system/program/detail/734)
     # TODO: move these values into data files
     itc_rates = {
         # DistPV from http://programs.dsireusa.org/system/program/detail/1235
-        (2018, 'DistPV'): 0.3,
-        (2019, 'DistPV'): 0.3,
-        (2020, 'DistPV'): 0.3,
-        (2021, 'DistPV'): 0.3,
+        (2018, "DistPV"): 0.3,
+        (2019, "DistPV"): 0.3,
+        (2020, "DistPV"): 0.26,
+        (2021, "DistPV"): 0.22,
         # Wind, Solar and Geothermal ITC from
         # http://programs.dsireusa.org/system/program/detail/658
-        (2018, 'CentralTrackingPV'): 0.3,
-        (2019, 'CentralTrackingPV'): 0.3,
-        (2020, 'CentralTrackingPV'): 0.26,
-        (2021, 'CentralTrackingPV'): 0.22,
-        (2022, 'CentralTrackingPV'): 0.10,
-        (2018, 'OnshoreWind'): 0.22,
-        (2019, 'OnshoreWind'): 0.12,
-        (2018, 'OffshoreWind'): 0.22,
-        (2019, 'OffshoreWind'): 0.12,
+        (2018, "CentralTrackingPV"): 0.3,
+        (2019, "CentralTrackingPV"): 0.3,
+        (2020, "CentralTrackingPV"): 0.26,
+        (2021, "CentralTrackingPV"): 0.22,
+        (2022, "CentralTrackingPV"): 0.10,
+        (2018, "OnshoreWind"): 0.22,
+        (2019, "OnshoreWind"): 0.12,
+        (2018, "OffshoreWind"): 0.22,
+        (2019, "OffshoreWind"): 0.12,
     }
-    itc_rates.update({
-        (y, 'CentralTrackingPV'): 0.1
-        for y in range(2023, 2051)
-    })
-    itc_rates.update({ # clone the CentralTrackingPV entries
-        (y, 'CentralFixedPV'): itc_rates[y, 'CentralTrackingPV']
-        for y in range(2018, 2051)
-    })
-    itc_rates.update({
-        (y, 'Geothermal'): 0.1
-        for y in range(2018, 2051)
-    })
-
-    # model the renewable investment tax credit as simply prorating the annual capital cost
-    m.Federal_Investment_Tax_Credit_Annual = 
Expression( - m.PERIODS, - rule=lambda m, pe: sum( - -itc_rates[bld_yr, m.gen_tech[g]] + itc_rates.update({(y, "CentralTrackingPV"): 0.1 for y in range(2023, 2051)}) + itc_rates.update({(y, "Geothermal"): 0.1 for y in range(2018, 2051)}) + + # clone entries for similar technologies + clones = [ + ("DistPV", "FlatDistPV"), + ("DistPV", "SlopedDistPV"), + ("CentralTrackingPV", "CentralFixedPV"), + ] + for src, dest in clones: + itc_rates.update( + {(y, dest): rate for (y, tech), rate in itc_rates.items() if tech == src} + ) + + def rule(m): + subsidized_techs = {k for (y, k) in itc_rates} + missing_techs = [ + t + for t in m.GENERATION_TECHNOLOGIES + if ( + any(x in t.lower() for x in ["pv", "solar", "wind", "geo"]) + and t not in subsidized_techs + ) + ] + if missing_techs: + print("") + print("=" * 80) + print( + "WARNING: these technologies are not listed in {}\n" + "but may need to be: \n" + "{}".format(__name__, ", ".join(missing_techs)) + ) + print("=" * 80) + print("") + time.sleep(3) + + m.fed_subsidies_check_techs = BuildAction(rule=rule) + + m.gen_investment_subsidy_fraction = Param( + m.GEN_BLD_YRS, + within=Reals, + rule=lambda m, g, bld_yr: itc_rates.get((bld_yr, m.gen_tech[g]), 0.0), + ) + # model the renewable investment tax credit as simply prorating the + # annual capital cost (done per generator to simplify reporting) + # TODO: apply to storage energy too + m.GenCapitalCostsSubsidy = Expression( + m.GEN_PERIODS, + rule=lambda m, g, p: sum( + -m.gen_investment_subsidy_fraction[g, bld_yr] * m.BuildGen[g, bld_yr] * m.gen_capital_cost_annual[g, bld_yr] - for g in m.NON_FUEL_BASED_GENS - for bld_yr in m.BLD_YRS_FOR_GEN_PERIOD[g, pe] - if (bld_yr, m.gen_tech[g]) in itc_rates - ) + for bld_yr in m.BLD_YRS_FOR_GEN_PERIOD[g, p] + ), + ) + + m.TotalGenCapitalCostsSubsidy = Expression( + m.PERIODS, + rule=lambda m, p: sum( + m.GenCapitalCostsSubsidy[g, p] for g in m.GENS_IN_PERIOD[p] + ), ) - m.Cost_Components_Per_Period.append('Federal_Investment_Tax_Credit_Annual') + m.Cost_Components_Per_Period.append("TotalGenCapitalCostsSubsidy") diff --git a/switch_model/hawaii/fuel_markets_expansion.py b/switch_model/hawaii/fuel_markets_expansion.py index 6e9c8a157..61036b752 100644 --- a/switch_model/hawaii/fuel_markets_expansion.py +++ b/switch_model/hawaii/fuel_markets_expansion.py @@ -11,7 +11,8 @@ import os from pyomo.environ import * -inf = float('inf') +inf = float("inf") + def define_components(m): @@ -23,15 +24,23 @@ def define_components(m): # are generators (fuel-based or intermittent), and some are storage), fuel-supply projects, # transmission lines, etc. 
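To make the accounting below concrete: each supply tier contributes an annual fixed cost equal to its per-mmBtu fixed cost times its capacity limit, prorated by a 0-1 activation fraction, and unlimited tiers must carry a zero fixed cost so the model never evaluates 0.0 * inf. A minimal plain-Python sketch of that logic, with illustrative numbers that are not part of the module:

    inf = float("inf")

    # (capacity limit in mmBtu/year, fixed cost per mmBtu/year, activation 0-1)
    tiers = [
        (1000000.0, 0.5, 1.0),  # finite tier, fully activated
        (inf, 0.0, 1.0),        # unlimited tier: fixed cost must be zero
    ]
    total = sum(
        fixed_cost * activate * limit
        for (limit, fixed_cost, activate) in tiers
        if fixed_cost != 0.0  # skip zero-cost tiers to avoid 0.0 * inf
    )
    print(total)  # 500000.0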
- # fixed cost (per mmBtu/year of capacity) of having each tier in service during each period # note: this must be zero if a tier has unlimited capacity, to avoid having infinite cost - m.rfm_supply_tier_fixed_cost = Param(m.RFM_SUPPLY_TIERS, default=0.0, - validate=lambda m, v, r, p, st: v == 0.0 or m.rfm_supply_tier_limit[r, p, st] < inf) + m.rfm_supply_tier_fixed_cost = Param( + m.RFM_SUPPLY_TIERS, + within=Reals, + default=0.0, + validate=lambda m, v, r, p, st: v == 0.0 + or m.rfm_supply_tier_limit[r, p, st] < inf, + ) # lifetime for each tier, once it is placed in service # (default is one period) - m.rfm_supply_tier_max_age = Param(m.RFM_SUPPLY_TIERS, default=lambda m, r, p, st: m.period_length_years[p]) + m.rfm_supply_tier_max_age = Param( + m.RFM_SUPPLY_TIERS, + within=NonNegativeReals, + default=lambda m, r, p, st: m.period_length_years[p], + ) # Note: in large regions, a tier represents a block of expandable capacity, # so this could be continuous, but then you could just lump the fixed cost @@ -42,23 +51,27 @@ def define_components(m): m.RFMSupplyTierActivate = Var(m.RFM_SUPPLY_TIERS, within=PercentFraction) # force activation to match build decision - m.RFM_Build_Activate_Consistency = Constraint(m.RFM_SUPPLY_TIERS, rule=lambda m, r, p, st: - m.RFMSupplyTierActivate[r, p, st] - == - sum( + m.RFM_Build_Activate_Consistency = Constraint( + m.RFM_SUPPLY_TIERS, + rule=lambda m, r, p, st: m.RFMSupplyTierActivate[r, p, st] + == sum( m.RFMBuildSupplyTier[r, vintage, st] - for vintage in m.PERIODS - if vintage < m.period_start[p] + m.period_length_years[p] # starts before end of current period - and vintage + m.rfm_supply_tier_max_age[r, vintage, st] > m.period_start[p] # ends after start of current period - ) + for vintage in m.PERIODS + if vintage + < m.period_start[p] + + m.period_length_years[p] # starts before end of current period + and vintage + m.rfm_supply_tier_max_age[r, vintage, st] + > m.period_start[p] # ends after start of current period + ), ) # force all unlimited tiers to be activated (since they must have no cost, # and to avoid a limit of 0.0 * inf in the constraint below) - m.Force_Activate_Unlimited_RFM_Supply_Tier = Constraint(m.RFM_SUPPLY_TIERS, - rule=lambda m, r, p, st: - (m.RFMSupplyTierActivate[r, p, st] == 1) if (m.rfm_supply_tier_limit[r, p, st] == inf) - else Constraint.Skip + m.Force_Activate_Unlimited_RFM_Supply_Tier = Constraint( + m.RFM_SUPPLY_TIERS, + rule=lambda m, r, p, st: (m.RFMSupplyTierActivate[r, p, st] == 1) + if (m.rfm_supply_tier_limit[r, p, st] == inf) + else Constraint.Skip, ) # only allow delivery from activated tiers @@ -66,12 +79,12 @@ def define_components(m): # note: this could be merged with the previous constraint, since they are complementary m.Enforce_RFM_Supply_Tier_Activated = Constraint( m.RFM_SUPPLY_TIERS, - rule=lambda m, r, p, st: - ( - m.ConsumeFuelTier[r, p, st] - <= - m.RFMSupplyTierActivate[r, p, st] * m.rfm_supply_tier_limit[r, p, st] - ) if m.rfm_supply_tier_limit[r, p, st] < inf else Constraint.Skip + rule=lambda m, r, p, st: ( + m.ConsumeFuelTier[r, p, st] + <= m.RFMSupplyTierActivate[r, p, st] * m.rfm_supply_tier_limit[r, p, st] + ) + if m.rfm_supply_tier_limit[r, p, st] < inf + else Constraint.Skip, ) # Eventually, when we add capital costs for capacity expansion, we will need a @@ -87,18 +100,24 @@ def define_components(m): rule=lambda m, p: sum( ( # note: we dance around projects with unlimited supply and 0.0 fixed cost - 0.0 if m.rfm_supply_tier_fixed_cost[rfm_st] == 0.0 + 0.0 + if 
m.rfm_supply_tier_fixed_cost[rfm_st] == 0.0 else m.rfm_supply_tier_fixed_cost[rfm_st] - * m.RFMSupplyTierActivate[rfm_st] * m.rfm_supply_tier_limit[rfm_st] + * m.RFMSupplyTierActivate[rfm_st] + * m.rfm_supply_tier_limit[rfm_st] ) for r in m.REGIONAL_FUEL_MARKETS - for rfm_st in m.SUPPLY_TIERS_FOR_RFM_PERIOD[r, p])) + for rfm_st in m.SUPPLY_TIERS_FOR_RFM_PERIOD[r, p] + ), + ) + + m.Cost_Components_Per_Period.append("RFM_Fixed_Costs_Annual") - m.Cost_Components_Per_Period.append('RFM_Fixed_Costs_Annual') def load_inputs(m, switch_data, inputs_dir): switch_data.load_aug( optional=True, - filename=os.path.join(inputs_dir, 'fuel_supply_curves.csv'), - select=('regional_fuel_market', 'period', 'tier', 'fixed_cost', 'max_age'), - param=(m.rfm_supply_tier_fixed_cost,m.rfm_supply_tier_max_age)) + filename=os.path.join(inputs_dir, "fuel_supply_curves.csv"), + select=("regional_fuel_market", "period", "tier", "fixed_cost", "max_age"), + param=(m.rfm_supply_tier_fixed_cost, m.rfm_supply_tier_max_age), + ) diff --git a/switch_model/hawaii/heco_outlook_2019.py b/switch_model/hawaii/heco_outlook_2019.py new file mode 100644 index 000000000..624b4f95e --- /dev/null +++ b/switch_model/hawaii/heco_outlook_2019.py @@ -0,0 +1,732 @@ +from __future__ import division +from __future__ import print_function +from collections import defaultdict +from textwrap import dedent +import os +from pyomo.environ import * +import pandas as pd +import time + + +def TODO(note): + raise NotImplementedError(dedent(note).strip()) + + +def NOTE(note): + print("=" * 80) + print("{}:".format(__name__)) + print(dedent(note).strip()) + print("=" * 80) + print() + # time.sleep(2) + + +def define_arguments(argparser): + argparser.add_argument( + "--psip-force", + action="store_true", + default=False, + help="Force following of PSIP plans (building exact amounts of certain technologies).", + ) + argparser.add_argument( + "--psip-relax", + dest="psip_force", + action="store_false", + help="Relax PSIP plans, to find a more optimal strategy.", + ) + argparser.add_argument( + "--psip-minimal-renewables", + action="store_true", + default=False, + help="Use only the amount of renewables shown in PSIP plans, and no more (should be combined with --psip-relax).", + ) + argparser.add_argument( + "--force-build", + nargs=3, + default=None, + help="Force construction of at least a certain quantity of a particular technology during certain years. Space-separated list of year, technology and quantity.", + ) + argparser.add_argument( + "--psip-relax-after", + type=float, + default=None, + help="Follow the PSIP plan up to and including the specified year, then optimize construction in later years. 
Should be combined with --psip-force.", + ) + + +def is_renewable(tech): + return any(txt in tech for txt in ("PV", "Wind", "Solar")) + + +def is_battery(tech): + return "battery" in tech.lower() + + +def define_components(m): + ################### + # resource rules to match HECO's forecast as of late 2019 or + # (optionally) 2016-12 PSIP + ################## + + # decide whether to enforce the PSIP preferred plan + # if an environment variable is set, that takes precedence + # (e.g., on a cluster to override options.txt) + psip_env_var = os.environ.get("USE_PSIP_PLAN") + if psip_env_var is None: + # no environment variable; use the --psip-relax flag + psip = m.options.psip_force + elif psip_env_var.lower() in ["1", "true", "y", "yes", "on"]: + psip = True + elif psip_env_var.lower() in ["0", "false", "n", "no", "off"]: + psip = False + else: + raise ValueError( + "Unrecognized value for environment variable USE_PSIP_PLAN={} (should be 0 or 1)".format( + psip_env_var + ) + ) + + if m.options.verbose: + if psip: + print("Using PSIP construction plan.") + else: + print( + "Relaxing PSIP construction plan (optimizing around forecasted adoption)." + ) + + # make sure LNG is turned off + if ( + psip + and "LNG" in m.FUELS + and getattr(m.options, "force_lng_tier", []) != ["none"] + ): + raise RuntimeError( + "To match the PSIP with LNG available, you must use the lng_conversion " + 'module and set "--force-lng-tier none".' + ) + + # use cases: + # DistPV fixed all the way through for most-likely scenarios and PSIP scenarios but not for general Switch-Oahu + # Distributed storage fixed all the way through in most-likely and PSIP but not Switch-Oahu + # Centralized storage Battery_Bulk at lower limit all the way through (representing distributed storage) in + # Large PV, Onshore Wind, Offshore Wind, centralized storage fixed for some early years in most-likely case and PSIP, maybe in Switch-Oahu + # Other technologies at fixed levels in PSIP but not most-likely case + # In most-likely and PSIP scenarios, all renewables already in place plus everything specified in targets gets rebuilt at retirement. + + # Plan: + # - each year is either fixed or flexible, i.e., early years will have predetermined build or not + # - when PSIP is in effect, all targets are exact -- no construction possible except what's listed + # - when PSIP is relaxed, definite targets are applied exactly up until last year for which targets + # are specified, then extra capacity can be added freely + # - this locks in DistPV forecast and other "definite" construction elements + # - this also allows specifying early construction either here or in existing plants tables, + # with similar effect + # - "most-likely" (PBR) targets are listed as "definite" targets, applied when PSIP flag turned off + # - This module introduces a new treatment of the definite targets compared to the older psip_2012_12: + # they are treated as exact targets between the start of the study and the last date specified, but + # then more can be added in later years. 
+ # - Battery_Bulk is cloned as DistBattery and targets are set for that (may be excluded from non-PSIP/PBR scenarios) + # - this allows fixed targets for DistBattery in same years as free investment in Battery_Bulk + # - DistPV and DistBattery are listed as definite targets through 2045 + # - PSIP thermal plants are listed in PSIP targets only + # - early-years storage and renewables automatically get rebuilt in later years, but we don't consider the + # rebuild targets when calculating the fixed-construction period for these technologies, so these are used + # as lower limits, not fixed targets. + + # * Alternative strategy (abandoned): start from scratch, modifying gen_predetermined_build + # * create input spreadsheet showing forecasted capacity for various technology groups in each zone, + # grouped into different adoption forecasts (tech_forecast_scenario) + # * store this spreadsheet in a table in the back-end database + # * store average cap factor of each project in project table + # * scenario_data translates this into construction plans + # * rank projects in each technology group by levelized cost + # * assign capacity target step-ups first to existing projects, then to lowest-cost project as of that year + # * assign reconstruction dates to continue capacity step-ups in later years + # * capacity step-downs can't be handled because it's not clear which projects should be retired, + # and they may be infeasible; they also don't fit with the idea that these tranches last forever + # * write all the construction steps into gen_predetermined_build + # * can't create construction plans in import_data because they must avoid rebuilding in occupied + # projects, which depends on asset life, which depends on tech_scen_id, not known till scenario_data runs + # * this approach could also be used to handle all the existing builds, instead of the current existing projects system + # * but we're back to an old problem then -- what about cases where these are floors but not upper limits, + # e.g., want to force in one CC plant, but open to having more than that? + # * could handle that by moving the predetermined part into a separate project, but then project definitions + # must depend on tech_forecast_scenario + + # NOTE: RESOLVE used different wind and solar profiles from Switch. + # Switch profiles seem to be more accurate, so we optimize against them + # and show that this may give (small) savings vs. the RESOLVE plan. + + # TODO: Should I use Switch to investigate how much of HECO's poor performance is due + # to using bad resource profiles (small onshore wind that doesn't rise in the rankings), + # how much is due to capping PV at 300 MW in 2020, + # how much is due to non-integrality in RESOLVE (fixed by later jimmying by HECO), and + # how much is due to forcing in elements before and after the optimization? + + # TODO (maybe): set project-specific targets, so that DistPV targets can be spread among tranches + # and specific projects in the PSIP can be represented accurately (really just NPM wind). This + # might also allow reconstruction of exactly the same existing or PSIP project when retired + # (as specified in the PSIP). Currently the code below lets Switch choose the best project with the + # same technology when it replaces retired renewable projects. 
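The target semantics described in the notes above reduce to a simple rule, sketched here with hypothetical data (the real constraints, built in tech_group_target_rule further below, also aggregate build years within each period and account for retirements):

    # (year, technology, MW) definite targets, as in the lists below
    definite_targets = [(2025, "DistPV", 24.322), (2030, "DistPV", 30.522)]
    last_definite_year = {}
    for y, t, q in definite_targets:
        last_definite_year[t] = max(y, last_definite_year.get(t, 0))

    def build_ok(tech, year, built_mw):
        target = sum(q for y, t, q in definite_targets if t == tech and y == year)
        if year <= last_definite_year.get(tech, 0):
            return built_mw == target  # exact through the last targeted year
        return built_mw >= target      # afterwards the target is only a floor

    print(build_ok("DistPV", 2025, 24.322))  # True: must match the forecast
    print(build_ok("DistPV", 2035, 50.0))    # True: free expansion after 2030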
+ + # targets for individual generation technologies + # (year, technology, MW added) + # For storage technologies with flexible energy value (no + # gen_storage_energy_to_power_ratio provided), MW added should be replaced + # by a tuple of (MW, hours). + + # Technologies that are forecasted to be built in "most-likely" scenarios. + # These apply whenever this module is used, even if rest of PSIP plan is + # ignored by turning off psip flag. Like PSIP targets, these are assumed + # to be rebuilt at retirement until the end of the study. + # NOTE(""" + # Need to get Switch to model solar+storage using normal storage module; + # model AC limit and allow unlimited DC on back side. Then use this to + # model RFP PV+BESS and forecasted DGPV+DESS. + # """) + tech_group_targets_definite = [ + # HECO June 2018 forecast, saved on shared drive in PBR docket + # See the following: + # email from Doug Codiga 11/19/19: "FW: October RWG and PWG Meeting Follow-Ups" + # forecasts stored in https://drive.google.com/open?id=1ToL7x-m17M2t0Cfd5k6w8no0rDiPy31l + # "/s/data/Generator Info/HECO Dist PV Forecast Jun 2018.xlsx" + # We assume all DistPV and DistBattery are used efficiently/optimally, + # i.e., we do not attempt to model non-optimal pairing of DistPV with + # DistBattery or curtailment on self-supply tariffs. + (2020, "DistPV", 118.336), # net of 444 in existing capacity + (2021, "DistPV", 29.51), + (2022, "DistPV", 22.835), + (2023, "DistPV", 19.168), + (2024, "DistPV", 23.087), + (2025, "DistPV", 24.322), + (2026, "DistPV", 25.888), + (2027, "DistPV", 27.24), + (2028, "DistPV", 28.387), + (2029, "DistPV", 29.693), + (2030, "DistPV", 30.522), + (2031, "DistPV", 31.32), + (2032, "DistPV", 32.234), + (2033, "DistPV", 32.42), + (2034, "DistPV", 32.98), + (2035, "DistPV", 33.219), + (2036, "DistPV", 32.785), + (2037, "DistPV", 33.175), + (2038, "DistPV", 33.011), + (2039, "DistPV", 33.101), + (2040, "DistPV", 33.262), + (2041, "DistPV", 33.457), + (2042, "DistPV", 33.343), + (2043, "DistPV", 34.072), + (2044, "DistPV", 34.386), + (2045, "DistPV", 35.038), + # note: HECO provides a MWh forecast; we assume inverters are large + # enough to charge in 4h + (2020, "DistBattery", (31.941, 4)), + (2021, "DistBattery", (12.968, 4)), + (2022, "DistBattery", (9.693, 4)), + (2023, "DistBattery", (3.135, 4)), + (2024, "DistBattery", (3.732, 4)), + (2025, "DistBattery", (4.542, 4)), + (2026, "DistBattery", (5.324, 4)), + (2027, "DistBattery", (6.115, 4)), + (2028, "DistBattery", (6.719, 4)), + (2029, "DistBattery", (7.316, 4)), + (2030, "DistBattery", (7.913, 4)), + (2031, "DistBattery", (8.355, 4)), + (2032, "DistBattery", (8.723, 4)), + (2033, "DistBattery", (9.006, 4)), + (2034, "DistBattery", (9.315, 4)), + (2035, "DistBattery", (9.49, 4)), + (2036, "DistBattery", (9.556, 4)), + (2037, "DistBattery", (9.688, 4)), + (2038, "DistBattery", (9.777, 4)), + (2039, "DistBattery", (9.827, 4)), + (2040, "DistBattery", (9.874, 4)), + (2041, "DistBattery", (9.939, 4)), + (2042, "DistBattery", (10.098, 4)), + (2043, "DistBattery", (10.238, 4)), + (2044, "DistBattery", (10.37, 4)), + (2045, "DistBattery", (10.478, 4)), + # Na Pua Makani (NPM) wind + # 2018/24 MW in PSIP, but still under construction in late 2019; + # Reported as 24 MW to be online in 2020 in + # https://www.hawaiianelectric.com/clean-energy-hawaii/our-clean-energy-portfolio/renewable-project-status-board (accessed 10/22/19) + # Listed as 27 MW with operation beginning by summer 2020 on https://www.napuamakanihawaii.org/fact-sheet/ + # TODO: Is Na Pua Makani 
24 MW or 27 MW? + (2020, "OnshoreWind", 24), + # PSIP 2016: (2018, 'OnshoreWind', 24), + # HECO feed-in tariff (FIT) projects under construction as of 10/22/19, from + # https://www.hawaiianelectric.com/clean-energy-hawaii/our-clean-energy-portfolio/renewable-project-status-board + # NOTE: PSIP Figure J-10 says these are in addition to the customer DGPV + # adoption forecast, so we model them as standard PV generation projects. + # TODO: move these to existing-projects tables + # TODO: model some of these as flat dist PV or utility-scale fixed-slope PV + ( + 2020, + "LargePV", + 5, + ), # Aloha Solar; actually fixed tilt at 10 degrees, facing south: https://dbedt.hawaii.gov/hcda/files/2017/12/KAL-17-017-ASEF-II-Development-Permit-Application.pdf + ( + 2020, + "LargePV", + 3.5, + ), # Mauka FIT 1, Kahuku, Tax Map Key (1)5-6-005:014; see PUC docket 2018-0056; can't find info on project geometry (fixed vs tracking), but probably fixed + # TODO: these weren't in the psip_2016_12 module; were they part of the PSIP DER forecast or did I mistakenly omit them? + # CBRE wind and PV + # Final order given allowing HECO to proceed with standardized contracts + # in June 2018: https://cca.hawaii.gov/dca/files/2018/07/Order-No-35560-HECO-CBRE.pdf + # "At the ten-month milestone [June 2019], three projects have half-executed standard + # form contracts ("SFCs") and interconnection agreements." None had subscribers or were + # under construction at this point. + # https://dms.puc.hawaii.gov/dms/DocumentViewer?pid=A1001001A19G15A93031F00794 + # In Oct. 2019, HECO's website said it had agreement(s) in place for 4990 kW + # of the 5000 MW solar allowed in Phase 1, with 330 kW in queue. I think the + # June 2018 D&O said this will roll over to Phase 2. No mention of wind on + # the HECO program website. + # https://www.hawaiianelectric.com/products-and-services/customer-renewable-programs/community-solar + # According to HECO press release, the first phase includes (only) 8 MW + # of solar on all islands (5 MW on Oahu). Other techs will be included + # in phase 2, which will begin "about two years" from 7/2018. + # https://www.hawaiianelectric.com/regulators-approve-community-solar-plans + # In 11/19/19 data sharing, HECO reported "One project for CBRE Phase 1 + # on O'ahu is slated to be installed by Q4 of 2019. Five Phase 1 + # projects are estimated to be installed in 2020 (one in Q2 2020 and + # four in Q4 2020). Lastly, two projects are estimated to be installed + # in Q3 of 2021.". We assume all these projects are equal size. + (2019, "LargePV", 4.990 * 1 / 8), # CBRE Phase 1 + (2020, "LargePV", 4.990 * 5 / 8), # CBRE Phase 1 + (2021, "LargePV", 4.990 * 2 / 8), # CBRE Phase 1 + # Original CBRE program design had only 72 MW in phase 1 and 2 (leaving + # 64 MW for phase 2), but HECO suggested increasing this to 235 MW over + # 5 years. HECO said this was because of projected shortfalls in DER + # program. Joint Parties say it should be possible to accept all of this + # earlier and expand the program if it goes quickly, and this should not + # be used to limit DER adoption. + # https://dms.puc.hawaii.gov/dms/DocumentViewer?pid=A1001001A19H20B01349C00185 + # **** questions: + # **** Should we reduce DER forecast in light of HECO's projected shortfall reported in CBRE proceeding? + # **** How much solar should we expect on Oahu in CBRE Phase 2 and when? + # **** Do we expect any wind on Oahu in CBRE Phase 2, and if so, when? + # Until we answer the questions above, this is a placeholder Oahu CBRE Phase 2. 
+ # This is in addition to RFPs noted below. + (2022, "LargePV", 150), # CBRE Phase 2 + # PSIP 2016: (2018, 'OnshoreWind', 10), + # PSIP 2016: (2018, 'LargePV', 15), + # 2018-2019 RFPs (docket 2017-0352) + # These replace large PV and bulk batteries reported in PSIP for 2020 and 2022. + # TODO: maybe move these to existing plants tables + # "On March 25, 2019, the commission approved six ... grid-scale, + # solar-plus-storage projects.... Cumulatively, the projects will add 247 + # megawatts ("MW") of solar energy with almost 1 gigawatt hour of + # storage to the HECO Companies' grids." + # -- D&O 36604, https://dms.puc.hawaii.gov/dms/DocumentViewer?pid=A1001001A19J10A90756F00117 + # First 6 approved projects (dockets 2018-0430, -0431, -0432, -0434, -0435, and -0436) are listed at + # -- https://www.hawaiianelectric.com/six-low-priced-solar-plus-storage-projects-approved-for-oahu-maui-and-hawaii-islands + # On 8/20/19, PUC approved 7th project, 12.5 MW/50 MWh AES solar+storage (docket 2019-0050, order 36480) + # -- https://dms.puc.hawaii.gov/dms/DocumentViewer?pid=A1001001A19H21B03929E00301 + # -- https://www.hawaiianelectric.com/puc-approves-grid-scale-solar-project-in-west-oahu + # As of 10/22/19, 8th project, 15 MW/60 MWh solar+storage on Maui, is still under review (docket 2018-0433) + # Status of all approved projects and in-service data are listed at + # https://www.hawaiianelectric.com/clean-energy-hawaii/our-clean-energy-portfolio/renewable-project-status-board + (2021, "LargePV", 12.5), # AES West Oahu Solar + (2021, "LargePV", 52), # Hoohana Solar 1 + (2021, "LargePV", 39), # Mililani I Solar + (2021, "LargePV", 36), # Waiawa Solar + # storage associated with large PV projects; we assume this will be used + # efficiently, so we model it along with other large-scale storage. + (2021, "Battery_Bulk", (12.5 + 52 + 39 + 36, 4)), + # Placeholder for Oahu portion of RFP Phase 2. + # Proposals due 11/5/2019 for up to 1,300,000 MWh/year of solar according to + # https://www.hawaiianelectric.com/clean-energy-hawaii/our-clean-energy-portfolio/renewable-project-status-board + # avg. cap factor for 560 MW starting after 390 best MW have been installed + # (existing projects + FIT + CBRE 1 + half of CBRE 2 + RFP 1) is 26.6%; see + # "select site, max_capacity, avg(cap_factor) from cap_factor natural join project where technology = 'CentralTrackingPV' group by 1, 2 order by 3 desc;" + # and (120*.271+247*.265+193*.264)/(120+247+193) + # Then (1,300,000 MWh/y)/(.266 * 8766 h/y) = 558 MW + (2022, "LargePV", 560), + # TODO: will this be only wind or also other technologies? + # For now, we assume solar-only. + # TODO: how much storage is anticipated as part of RFP Phase 2? + # For now, we let Switch choose. + # PSIP 2016-12-23 Table 4-1 included 90 MW of contingency battery in 2019 + # and https://www.hawaiianelectric.com/documents/clean_energy_hawaii/selling_power_to_the_utility/competitive_bidding/20190207_tri_company_future_procurement.pdf + # says the 2016-12 plan was to do 70 MW contingency in 2019 and more contingency/regulation in 2020 + # There has been no further discussion of these as of 10/22/19, so we assume they are + # replaced by storage that comes with the PV systems. 
+ # PSIP 2016: (2019, 'Battery_Conting', 90), + ] + [ + # No new generation in 2020-2022 beyond what's shown above + (y, t, 0.0) + for y in [2020, 2021, 2022] + for t in ["OnshoreWind", "OffshoreWind", "IC_Barge", "IC_MCBH", "IC_Schofield"] + ] + + # add targets specified on the command line + # TODO: allow repeated invocation + if m.options.force_build is not None: + b = list(m.options.force_build) + build = ( + int(b[0]), # year + b[1], # tech + # quantity + float(b[2]) if len(b) == 3 else (float(b[2]), float(b[3])), + ) + print("Forcing build: {}".format(build)) + tech_group_targets_definite.append(build) + + # technologies proposed in PSIP but which may not be built if a better plan is found. + # All from final plan in Table 4-1 of PSIP 2016-12-23 sometimes cross-referenced with PLEXOS inputs. + # These differ somewhat from inputs to RESOLVE or the RESOLVE plans in Table 3-1 and 3-4, but + # they represent HECO's final plan as reported in the PSIP. + tech_group_targets_psip = [ + (2022, "IC_Barge", 100.0), # JBPHH plant + # note: we moved IC_MCBH one year earlier than PSIP to reduce infeasibility in 2022 + (2022, "IC_MCBH", 54.0), + (2025, "LargePV", 200), + (2025, "OffshoreWind", 200), + (2040, "LargePV", 280), + (2045, "LargePV", 1180), + (2045, "IC_MCBH", 68.0), # proxy for 68 MW of generic ICE capacity + # batteries (MW) + # from PSIP 2016-12-23 Table 4-1; also see energy ("capacity") and power files in + # "data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/DBEDT-IR-12/Input/Oahu/Oahu E3 Plan Input/CSV files/Battery" + # (note: we mistakenly treated these as MWh quantities instead of MW before 2018-02-20) + (2025, "Battery_Bulk", (29, 4)), + (2030, "Battery_Bulk", (165, 4)), + (2035, "Battery_Bulk", (168, 4)), + (2040, "Battery_Bulk", (420, 4)), + (2045, "Battery_Bulk", (1525, 4)), + # RESOLVE modeled 4-hour batteries as being capable of providing reserves, + # and didn't model contingency batteries (see data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/CA-IR-1/Input + # and Output Files by Case/E3 and Company Defined Cases/Market DGPV (Reference)/OA_NOLNG/technologies.tab). + # Then HECO added a 90 MW contingency battery (table 4-1 of PSIP 2016-12-23). + # Note: RESOLVE can get reserves from batteries (they only considered 4-hour batteries), but not + # from EVs or flexible demand. + # DR: Looking at RESOLVE inputs, it seems like they take roughly 4% of load, and allow it to be doubled + # or cut to zero each hour (need to double-check this beyond first day). Maybe this includes EVs? + # (no separate sign of EVs). + # TODO: check Resolve load levels against Switch. + # TODO: maybe I should switch over to using the ABC curves and load profiles that HECO used with PLEXOS + # (for all islands). + # TODO: Did HECO assume 4-hour batteries, demand response or EVs could provide reserves when running PLEXOS? + # - all of these seem unlikely, but we have to ask HECO to find out; PLEXOS files are unclear. 
+ ] + + if psip: + if m.options.psip_relax_after is not None: + # NOTE: this could be moved later, if we want this flag to relax + # both the definite and psip targets + psip_targets = [ + t for t in tech_group_targets_psip if t[0] <= m.options.psip_relax_after + ] + else: + psip_targets = tech_group_targets_psip + tech_group_targets = tech_group_targets_definite + psip_targets + else: + tech_group_targets = tech_group_targets_definite + + # Show which technologies can contribute to the target for each technology + # group and which group each technology contributes to + techs_for_tech_group = { + "DistPV": ["DistPV", "SlopedDistPV", "FlatDistPV"], + "LargePV": ["CentralTrackingPV", "CentralFixedPV"], + } + # use the rest as-is + missing_techs = {t for y, t, s in tech_group_targets}.difference( + techs_for_tech_group.keys() + ) + techs_for_tech_group.update({t: [t] for t in missing_techs}) + # create a reverse mapping + tech_tech_group = { + tech: tech_group + for tech_group, techs in techs_for_tech_group.items() + for tech in techs + } + + # Rebuild renewable projects and forecasted technologies at retirement. + # In the future we may be able to simplify this by enforcing capacity targets + # instead of construction targets. + + # note: this behavior is consistent with the following: + # discussion on p. 3-8 of PSIP 2016-12-23 vol. 1. + # Resolve applied planned wind and solar as set levels through 2045, not set additions in each year. + # Table 4-1 shows final plans that were sent to Plexos; Plexos input files in + # data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/DBEDT-IR-12/Input/Oahu/Oahu E3 Plan Input/CSV files/Theme 5 + # show optional capacity built in 2020 or 2025 (in list below) continuing in service in 2045. + # and Plexos input files in data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/DBEDT-IR-12/Input/Oahu/Oahu E3 Plan Input/CSV files/PSIP Max Capacity.csv + # don't show any retirements of wind and solar included as "planned" in RESOLVE and "existing" in Switch + # (Waivers PV1, West Loch; Kawailoa may be omitted?) + # also note: Plexos input files in XX + # show max battery capacity equal to sum of all prior additions + + # m = lambda: 3; m.options = m; m.options.inputs_dir = '/Users/matthias/Dropbox/Research/Ulupono/Enovation Model/pbr_scenario/inputs' + gen_info = pd.read_csv(os.path.join(m.options.inputs_dir, "gen_info.csv")) + gen_info["tech_group"] = gen_info["gen_tech"].map(tech_tech_group) + gen_info = gen_info[gen_info["tech_group"].notna()] + # existing technologies are also subject to rebuilding + existing_techs = ( + pd.read_csv(os.path.join(m.options.inputs_dir, "gen_build_predetermined.csv")) + .merge(gen_info, how="inner") + .groupby(["build_year", "tech_group"])["build_gen_predetermined"] + .sum() + .reset_index() + ) + assert not any( + is_battery(t) for i, y, t, q in existing_techs.itertuples() + ), "Must update {} to handle pre-existing batteries.".format(__name__) + ages = gen_info.groupby("tech_group")["gen_max_age"].agg(["min", "max", "mean"]) + assert all(ages["min"] == ages["max"]), "Some psip technologies have mixed ages." 
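The rebuild-at-retirement scheduling used just below can be isolated in a few lines; here with illustrative numbers (a 24 MW build in 2021, an assumed 20-year life, and a 2045 final period):

    build_year, max_age, cap, last_period = 2021, 20, 24.0, 2045
    targets = [(build_year, "OnshoreWind", cap)]
    rebuild = 1
    while build_year + rebuild * max_age <= last_period:
        targets.append((build_year + rebuild * max_age, "OnshoreWind", cap))
        rebuild += 1
    print(targets)  # [(2021, 'OnshoreWind', 24.0), (2041, 'OnshoreWind', 24.0)]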
+    last_period = pd.read_csv(os.path.join(m.options.inputs_dir, "periods.csv")).iloc[
+        -1, 0
+    ]
+
+    # rebuild all renewables and batteries in place before the start of the study,
+    # plus any technologies with targets specified here
+    rebuildable_targets = [
+        (y, t, q)
+        for i, y, t, q in existing_techs.itertuples()
+        if is_renewable(t) or is_battery(t)
+    ] + tech_group_targets
+    tech_life = dict()
+    for build_year, tech_group, cap in rebuildable_targets:
+        if tech_group not in ages.index:
+            raise ValueError(
+                "A target has been specified for {} but there are no matching "
+                "technologies in gen_info.csv.".format(tech_group)
+            )
+        max_age = ages.loc[tech_group, "mean"]
+        tech_life[tech_group] = max_age
+        rebuild = 1
+        while build_year + rebuild * max_age <= last_period:
+            tech_group_targets.append((build_year + rebuild * max_age, tech_group, cap))
+            rebuild += 1
+    del gen_info, existing_techs, ages, rebuildable_targets
+
+    # we also convert to normal python datatypes to support serialization
+    tech_group_power_targets = [
+        (int(y), t, float(q[0] if type(q) is tuple else q))
+        for y, t, q in tech_group_targets
+    ]
+    tech_group_energy_targets = [
+        (int(y), t, float(q[0] * q[1]))
+        for y, t, q in tech_group_targets
+        if type(q) is tuple
+    ]
+    # Save targets and group definitions for future reference
+    import json
+
+    os.makedirs(m.options.outputs_dir, exist_ok=True)  # avoid errors with new dir
+    with open(os.path.join(m.options.outputs_dir, "heco_outlook.json"), "w") as f:
+        json.dump(
+            {
+                "tech_group_power_targets": tech_group_power_targets,
+                "tech_group_energy_targets": tech_group_energy_targets,
+                "techs_for_tech_group": techs_for_tech_group,
+                "tech_tech_group": tech_tech_group,
+            },
+            f,
+            indent=4,
+        )
+
+    m.FORECASTED_TECH_GROUPS = Set(
+        dimen=1, initialize=list(techs_for_tech_group.keys())
+    )
+    m.FORECASTED_TECH_GROUP_TECHS = Set(
+        m.FORECASTED_TECH_GROUPS, dimen=1, initialize=techs_for_tech_group
+    )
+    m.FORECASTED_TECHS = Set(dimen=1, initialize=list(tech_tech_group.keys()))
+    m.tech_tech_group = Param(
+        m.FORECASTED_TECHS, within=Any, initialize=tech_tech_group
+    )
+
+    # make a list of renewable technologies
+    m.RENEWABLE_TECH_GROUPS = Set(
+        dimen=1,
+        initialize=m.FORECASTED_TECH_GROUPS,
+        filter=lambda m, tg: is_renewable(tg),
+    )
+
+    def tech_group_target(m, per, tech, targets):
+        """Find the amount of each technology that is targeted to be built
+        between the start of the previous period and the start of the current
+        period and not yet retired."""
+        start = 0 if per == m.PERIODS.first() else m.PERIODS.prev(per)
+        end = per
+        target = sum(
+            q
+            for (tyear, ttech, q) in targets
+            if ttech == tech
+            and start < tyear
+            and tyear <= end
+            and tyear + tech_life[ttech] > end
+        )
+        return target
+
+    def rule(m, per, tech):
+        return tech_group_target(m, per, tech, tech_group_power_targets)
+
+    m.tech_group_power_target = Param(
+        m.PERIODS, m.FORECASTED_TECH_GROUPS, within=NonNegativeReals, initialize=rule
+    )
+
+    def rule(m, per, tech):
+        return tech_group_target(m, per, tech, tech_group_energy_targets)
+
+    m.tech_group_energy_target = Param(
+        m.PERIODS, m.FORECASTED_TECH_GROUPS, within=NonNegativeReals, initialize=rule
+    )
+
+    def MakeTechGroupDicts_rule(m):
+        # get unit sizes of all technologies
+        unit_sizes = m.tech_group_unit_size_dict = defaultdict(float)
+        for g, unit_size in m.gen_unit_size.items():
+            tech = m.gen_tech[g]
+            if tech in m.FORECASTED_TECHS:
+                tech_group = m.tech_tech_group[tech]
+                if tech_group in unit_sizes:
+                    if 
unit_sizes[tech_group] != unit_size:
+                        raise ValueError(
+                            "Generation technology {} uses different unit sizes for different projects.".format(
+                                tech_group
+                            )
+                        )
+                else:
+                    unit_sizes[tech_group] = unit_size
+        # get predetermined capacity for all technologies
+        m.tech_group_predetermined_power_cap_dict = defaultdict(float)
+        for (g, per), cap in m.build_gen_predetermined.items():
+            tech = m.gen_tech[g]
+            if tech in m.FORECASTED_TECHS:
+                tech_group = m.tech_tech_group[tech]
+                m.tech_group_predetermined_power_cap_dict[tech_group, per] += cap
+        m.tech_group_predetermined_energy_cap_dict = defaultdict(float)
+        for (g, per), cap in m.build_gen_predetermined.items():
+            tech = m.gen_tech[g]
+            if tech in m.FORECASTED_TECHS and g in m.STORAGE_GENS:
+                # Need to get predetermined energy capacity here, but there's no
+                # param for it yet, so currently these can only be implemented
+                # as technologies with fixed gen_storage_energy_to_power_ratio,
+                # in which case users should only provide a power target, not
+                # an energy target in this file. In the future, there may be
+                # a way to provide predetermined power and energy params, so we
+                # watch out for that here.
+                if m.gen_storage_energy_to_power_ratio[g] == float("inf"):
+                    TODO(
+                        "Need to lookup predetermined energy capacity for storage technologies."
+                    )
+                    # m.tech_group_predetermined_energy_cap_dict[tech_group, per] +=
+
+    m.MakeTechGroupDicts = BuildAction(rule=MakeTechGroupDicts_rule)
+
+    # Find last date for which a definite target was specified for each tech group.
+    # This sets the last year when construction of a technology is fixed at a
+    # predetermined level in the "most-likely" (non-PSIP) cases.
+    # This ignores PSIP targets, since _all_ construction is frozen when those are
+    # used, and ignores reconstruction targets, because those just follow on from
+    # the early-years construction, and we don't want to freeze construction all
+    # the way through.
+    last_definite_target = dict()
+    for y, t, q in tech_group_targets_definite:
+        last_definite_target[t] = max(y, last_definite_target.get(t, 0))
+
+    def tech_group_target_rule(m, per, tech_group, build_var, target):
+        """
+        Enforce targets for each technology.
+
+        with PSIP: build is zero except for tech_group_power_targets
+        (sum during each period or before first period)
+        without PSIP: build is == definite targets during time range when targets specified
+                      build is >= target later;
+        Note: in the last case the target is the sum of targets between start of prior period and start of this one
+        """
+        build = sum(
+            build_var[g, per]
+            for g in m.GENERATION_PROJECTS
+            if m.gen_tech[g] in m.FORECASTED_TECHS
+            and m.tech_tech_group[m.gen_tech[g]] == tech_group
+            and (g, per) in build_var
+        )
+
+        if type(build) is int and build == 0:
+            # no matching projects found
+            if target == 0:
+                return Constraint.Skip
+            else:
+                raise ValueError(
+                    "Target was set for {} in {}, but no matching projects are available.".format(
+                        tech_group, per
+                    )
+                )
+
+        if psip and (
+            m.options.psip_relax_after is None or per <= m.options.psip_relax_after
+        ):
+            # PSIP in effect: exactly match the target (possibly zero)
+            return build == target
+        elif per <= last_definite_target.get(tech_group, 0):
+            # PSIP not in effect, but a definite target is
+            return build == target
+        elif (
+            m.options.psip_minimal_renewables and tech_group in m.RENEWABLE_TECH_GROUPS
+        ):
+            # Only build the specified amount of renewables, no more.
+            # This is used to apply the definite targets, but otherwise minimize renewable development. 
+ return build == target + else: + # treat the target as a lower bound + return build >= target + + def rule(m, per, tech_group): + # get target, including any capacity specified in the predetermined builds, + # so the target will be additional to those + target = ( + m.tech_group_power_target[per, tech_group] + + m.tech_group_predetermined_power_cap_dict[tech_group, per] + ) + return tech_group_target_rule(m, per, tech_group, m.BuildGen, target) + + m.Enforce_Tech_Group_Power_Target = Constraint( + m.PERIODS, m.FORECASTED_TECH_GROUPS, rule=rule + ) + + def rule(m, per, tech_group): + # get target, including any capacity specified in the predetermined builds, + # so the target will be additional to those + target = ( + m.tech_group_energy_target[per, tech_group] + + m.tech_group_predetermined_energy_cap_dict[tech_group, per] + ) + return tech_group_target_rule(m, per, tech_group, m.BuildStorageEnergy, target) + + m.Enforce_Tech_Group_Energy_Target = Constraint( + m.PERIODS, m.FORECASTED_TECH_GROUPS, rule=rule + ) + + if psip: + TODO( + """ + Need to force construction to zero for technologies without targets + in the PSIP. + """ + ) + # don't allow construction of other technologies (e.g., pumped hydro, fuel cells) + advanced_tech_vars = [ + "BuildPumpedHydroMW", + "BuildAnyPumpedHydro", + "BuildElectrolyzerMW", + "BuildLiquifierKgPerHour", + "BuildLiquidHydrogenTankKg", + "BuildFuelCellMW", + ] + + def no_advanced_tech_rule_factory(v): + return lambda m, *k: (getattr(m, v)[k] == 0) + + for v in advanced_tech_vars: + try: + var = getattr(m, v) + setattr( + m, + "PSIP_No_" + v, + Constraint(var._index, rule=no_advanced_tech_rule_factory(v)), + ) + except AttributeError: + pass # model doesn't have this var diff --git a/switch_model/hawaii/heco_outlook_2020_06.py b/switch_model/hawaii/heco_outlook_2020_06.py new file mode 100644 index 000000000..b6cf00f51 --- /dev/null +++ b/switch_model/hawaii/heco_outlook_2020_06.py @@ -0,0 +1,926 @@ +from __future__ import division +from __future__ import print_function +from collections import defaultdict +from textwrap import dedent +import os +from pyomo.environ import * +import pandas as pd +import time + +# This module represents our best forecasts of capacity additions on Oahu as of +# June 2020. There are near-term forecasts through 2025 for all technologies and +# long-term forecasts for DGPV and distributed batteries. These are close to the +# forecasts HECO used for their modeling, but sometimes more up-to-date or +# realistic. 
The forecasts HECO used for their modeling at this time are in +# heco_plan_2020_06.py + + +def TODO(note): + raise NotImplementedError(dedent(note).strip()) + + +def NOTE(note): + print("=" * 80) + print("{}:".format(__name__)) + print(dedent(note).strip()) + print("=" * 80) + print() + # time.sleep(2) + + +def define_arguments(argparser): + argparser.add_argument( + "--psip-force", + action="store_true", + default=False, + help="Force following of PSIP plans (building exact amounts of certain technologies).", + ) + argparser.add_argument( + "--psip-relax", + dest="psip_force", + action="store_false", + help="Relax PSIP plans, to find a more optimal strategy.", + ) + argparser.add_argument( + "--psip-minimal-renewables", + action="store_true", + default=False, + help="Use only the amount of renewables shown in PSIP plans, and no more (should be combined with --psip-relax).", + ) + argparser.add_argument( + "--force-build", + nargs=3, + default=None, + help="Force construction of at least a certain quantity of a particular technology during certain years. Space-separated list of year, technology and quantity.", + ) + argparser.add_argument( + "--psip-relax-after", + type=float, + default=None, + help="Follow the PSIP plan up to and including the specified year, then optimize construction in later years. Should be combined with --psip-force.", + ) + + argparser.add_argument( + "--psip-allow-more-solar-2025", + action="store_true", + default=False, + help="Treat 2025 target for LargePV as lower limit, not exact target.", + ) + argparser.add_argument( + "--psip-no-additional-onshore-wind", + action="store_true", + default=False, + help="Don't allow construction of any onshore wind beyond the current plan.", + ) + + +def is_renewable(tech): + return any(txt in tech for txt in ("PV", "Wind", "Solar")) + + +def is_battery(tech): + return "battery" in tech.lower() + + +def define_components(m): + ################### + # resource rules to match HECO's forecast as of late 2019 or + # (optionally) 2016-12 PSIP + ################## + + # decide whether to enforce the PSIP preferred plan + # if an environment variable is set, that takes precedence + # (e.g., on a cluster to override options.txt) + psip_env_var = os.environ.get("USE_PSIP_PLAN") + if psip_env_var is None: + # no environment variable; use the --psip-relax flag + psip = m.options.psip_force + elif psip_env_var.lower() in ["1", "true", "y", "yes", "on"]: + psip = True + elif psip_env_var.lower() in ["0", "false", "n", "no", "off"]: + psip = False + else: + raise ValueError( + "Unrecognized value for environment variable USE_PSIP_PLAN={} (should be 0 or 1)".format( + psip_env_var + ) + ) + + if m.options.verbose: + if psip: + print("Using PSIP construction plan.") + else: + print( + "Relaxing PSIP construction plan (optimizing around forecasted adoption)." + ) + + # make sure LNG is turned off + if ( + psip + and "LNG" in m.FUELS + and getattr(m.options, "force_lng_tier", []) != ["none"] + ): + raise RuntimeError( + "To match the PSIP with LNG available, you must use the lng_conversion " + 'module and set "--force-lng-tier none".' 
+ ) + + # use cases: + # DistPV fixed all the way through for most-likely scenarios and PSIP scenarios but not for general Switch-Oahu + # Distributed storage fixed all the way through in most-likely and PSIP but not Switch-Oahu + # Centralized storage Battery_Bulk at lower limit all the way through (representing distributed storage) in + # Large PV, Onshore Wind, Offshore Wind, centralized storage fixed for some early years in most-likely case and PSIP, maybe in Switch-Oahu + # Other technologies at fixed levels in PSIP but not most-likely case + # In most-likely and PSIP scenarios, all renewables already in place plus everything specified in targets gets rebuilt at retirement. + + # Plan: + # - each year is either fixed or flexible, i.e., early years will have predetermined build or not + # - when PSIP is in effect, all targets are exact -- no construction possible except what's listed + # - when PSIP is relaxed, definite targets are applied exactly up until last year for which targets + # are specified, then extra capacity can be added freely + # - this locks in DistPV forecast and other "definite" construction elements + # - this also allows specifying early construction either here or in existing plants tables, + # with similar effect + # - "most-likely" (PBR) targets are listed as "definite" targets, applied when PSIP flag turned off + # - This module introduces a new treatment of the definite targets compared to the older psip_2012_12: + # they are treated as exact targets between the start of the study and the last date specified, but + # then more can be added in later years. + # - Battery_Bulk is cloned as DistBattery and targets are set for that (may be excluded from non-PSIP/PBR scenarios) + # - this allows fixed targets for DistBattery in same years as free investment in Battery_Bulk + # - DistPV and DistBattery are listed as definite targets through 2045 + # - PSIP thermal plants are listed in PSIP targets only + # - early-years storage and renewables automatically get rebuilt in later years, but we don't consider the + # rebuild targets when calculating the fixed-construction period for these technologies, so these are used + # as lower limits, not fixed targets. 
+ + # * Alternative strategy (abandoned): start from scratch, modifying gen_predetermined_build + # * create input spreadsheet showing forecasted capacity for various technology groups in each zone, + # grouped into different adoption forecasts (tech_forecast_scenario) + # * store this spreadsheet in a table in the back-end database + # * store average cap factor of each project in project table + # * scenario_data translates this into construction plans + # * rank projects in each technology group by levelized cost + # * assign capacity target step-ups first to existing projects, then to lowest-cost project as of that year + # * assign reconstruction dates to continue capacity step-ups in later years + # * capacity step-downs can't be handled because it's not clear which projects should be retired, + # and they may be infeasible; they also don't fit with the idea that these tranches last forever + # * write all the construction steps into gen_predetermined_build + # * can't create construction plans in import_data because they must avoid rebuilding in occupied + # projects, which depends on asset life, which depends on tech_scen_id, not known till scenario_data runs + # * this approach could also be used to handle all the existing builds, instead of the current existing projects system + # * but we're back to an old problem then -- what about cases where these are floors but not upper limits, + # e.g., want to force in one CC plant, but open to having more than that? + # * could handle that by moving the predetermined part into a separate project, but then project definitions + # must depend on tech_forecast_scenario + + # NOTE: RESOLVE used different wind and solar profiles from Switch. + # Switch profiles seem to be more accurate, so we optimize against them + # and show that this may give (small) savings vs. the RESOLVE plan. + + # TODO: Should I use Switch to investigate how much of HECO's poor performance is due + # to using bad resource profiles (small onshore wind that doesn't rise in the rankings), + # how much is due to capping PV at 300 MW in 2020, + # how much is due to non-integrality in RESOLVE (fixed by later jimmying by HECO), and + # how much is due to forcing in elements before and after the optimization? + + # TODO (maybe): set project-specific targets, so that DistPV targets can be spread among tranches + # and specific projects in the PSIP can be represented accurately (really just NPM wind). This + # might also allow reconstruction of exactly the same existing or PSIP project when retired + # (as specified in the PSIP). Currently the code below lets Switch choose the best project with the + # same technology when it replaces retired renewable projects. + + # targets for individual generation technologies + # (year, technology, MW added) + # For storage technologies with flexible energy value (no + # gen_storage_energy_to_power_ratio provided), MW added should be replaced + # by a tuple of (MW, hours). + + # Technologies that are forecasted to be built in "most-likely" scenarios. + # These apply whenever this module is used, even if rest of PSIP plan is + # ignored by turning off psip flag. Like PSIP targets, these are assumed + # to be rebuilt at retirement until the end of the study. + # NOTE(""" + # Need to get Switch to model solar+storage using normal storage module; + # model AC limit and allow unlimited DC on back side. Then use this to + # model RFP PV+BESS and forecasted DGPV+DESS. 
+ # """) + NOTE( + """ + ***** For future work, use the newer start-of-year existing PV capacity + ***** in Existing Plants.xlsx and smooth the transition from the + ***** actual value at start of 2020 (674) to the 2020 forecast (562). + ***** Maybe do the same for large solar, i.e., shift everything that was + ***** online at start of 2020 into the "existing" category and/or make + ***** the first optimized year 2021. + """ + ) + tech_group_targets_definite = [ + # HECO June 2018 forecast, saved on shared drive in PBR docket + # See the following: + # email from Doug Codiga 11/19/19: "FW: October RWG and PWG Meeting Follow-Ups" + # forecasts stored in https://drive.google.com/open?id=1ToL7x-m17M2t0Cfd5k6w8no0rDiPy31l + # "/s/data/Generator Info/HECO Dist PV Forecast Jun 2018.xlsx" + # We assume all DistPV and DistBattery are used efficiently/optimally, + # i.e., we do not attempt to model non-optimal pairing of DistPV with + # DistBattery or curtailment on self-supply tariffs. + # NOTE: HECO sent a new forecast on 2020-03-18 (see email from Murray + # Clay at Ulupono that day), but we don't use it because it seems + # unrealistic. (See email from Murray Clay (Ulupono) 2020-04-14 13:58 + # and /s/data/Generator Info/HECO Dist PV Forecast 2018-03-17.xlsx) + (2020, "DistPV", 15.336, "DER forecast"), # net of 547 in existing capacity + (2021, "DistPV", 29.51, "DER forecast"), + (2022, "DistPV", 22.835, "DER forecast"), + (2023, "DistPV", 19.168, "DER forecast"), + (2024, "DistPV", 23.087, "DER forecast"), + (2025, "DistPV", 24.322, "DER forecast"), + (2026, "DistPV", 25.888, "DER forecast"), + (2027, "DistPV", 27.24, "DER forecast"), + (2028, "DistPV", 28.387, "DER forecast"), + (2029, "DistPV", 29.693, "DER forecast"), + (2030, "DistPV", 30.522, "DER forecast"), + (2031, "DistPV", 31.32, "DER forecast"), + (2032, "DistPV", 32.234, "DER forecast"), + (2033, "DistPV", 32.42, "DER forecast"), + (2034, "DistPV", 32.98, "DER forecast"), + (2035, "DistPV", 33.219, "DER forecast"), + (2036, "DistPV", 32.785, "DER forecast"), + (2037, "DistPV", 33.175, "DER forecast"), + (2038, "DistPV", 33.011, "DER forecast"), + (2039, "DistPV", 33.101, "DER forecast"), + (2040, "DistPV", 33.262, "DER forecast"), + (2041, "DistPV", 33.457, "DER forecast"), + (2042, "DistPV", 33.343, "DER forecast"), + (2043, "DistPV", 34.072, "DER forecast"), + (2044, "DistPV", 34.386, "DER forecast"), + (2045, "DistPV", 35.038, "DER forecast"), + # note: HECO provides a MWh forecast; we assume inverters are large + # enough to charge in 4h + (2020, "DistBattery", (31.941, 4), "DER forecast"), + (2021, "DistBattery", (12.968, 4), "DER forecast"), + (2022, "DistBattery", (9.693, 4), "DER forecast"), + (2023, "DistBattery", (3.135, 4), "DER forecast"), + (2024, "DistBattery", (3.732, 4), "DER forecast"), + (2025, "DistBattery", (4.542, 4), "DER forecast"), + (2026, "DistBattery", (5.324, 4), "DER forecast"), + (2027, "DistBattery", (6.115, 4), "DER forecast"), + (2028, "DistBattery", (6.719, 4), "DER forecast"), + (2029, "DistBattery", (7.316, 4), "DER forecast"), + (2030, "DistBattery", (7.913, 4), "DER forecast"), + (2031, "DistBattery", (8.355, 4), "DER forecast"), + (2032, "DistBattery", (8.723, 4), "DER forecast"), + (2033, "DistBattery", (9.006, 4), "DER forecast"), + (2034, "DistBattery", (9.315, 4), "DER forecast"), + (2035, "DistBattery", (9.49, 4), "DER forecast"), + (2036, "DistBattery", (9.556, 4), "DER forecast"), + (2037, "DistBattery", (9.688, 4), "DER forecast"), + (2038, "DistBattery", (9.777, 4), "DER forecast"), + 
(2039, "DistBattery", (9.827, 4), "DER forecast"), + (2040, "DistBattery", (9.874, 4), "DER forecast"), + (2041, "DistBattery", (9.939, 4), "DER forecast"), + (2042, "DistBattery", (10.098, 4), "DER forecast"), + (2043, "DistBattery", (10.238, 4), "DER forecast"), + (2044, "DistBattery", (10.37, 4), "DER forecast"), + (2045, "DistBattery", (10.478, 4), "DER forecast"), + # HECO feed-in tariff (FIT) projects under construction as of 10/22/19, from + # https://www.hawaiianelectric.com/clean-energy-hawaii/our-clean-energy-portfolio/renewable-project-status-board + # NOTE: PSIP Figure J-10 says these are in addition to the customer DGPV + # adoption forecast but they are not in "HECO construction plan 2020-03-17.docx". + # Samantha Ruiz (Ulupono) recommended in email 5/26/20 to count them as + # non-DER esp. since HECO's March 2020 DER forecast is flat in early + # years. Note: these are probably fixed-axis rather than tracking (i.e., + # more like DistPV than LargePV), and they are at particular locations. + # But we include them here as LargePV and put them here instead of in + # existing projects because (a) they don't reduce available roof + # inventory and (b) counting them as existing but built in 2020 would + # block construction of additional large PV in 2020. + ( + 2020, + "LargePV", + 5, + "Aloha Solar II", + ), # Aloha Solar Energy Fund II, online 4/2/20 + (2021, "LargePV", 3.5, "Mauka FIT 1"), # Mauka FIT 1 + # note: Mauka FIT 1 and Na Pua Makani (below) are scheduled to come online + # in 2020, but they are not online yet as of 6/4/2020, so we model them + # as starting 1/1/2021. + # Na Pua Makani (NPM) wind + # 2018/24 MW in PSIP, but still under construction in late 2019; + # Reported as 24 MW to be online in 2020 in + # https://www.hawaiianelectric.com/clean-energy-hawaii/our-clean-energy-portfolio/renewable-project-status-board (accessed 10/22/19) + # Listed as 27 MW with operation beginning by summer 2020 on https://www.napuamakanihawaii.org/fact-sheet/ + # TODO: Is Na Pua Makani 24 MW or 27 MW? + (2021, "OnshoreWind", 24, "Na Pua Makani"), + # PSIP 2016: (2018, 'OnshoreWind', 24), + # CBRE wind and PV + # Final order given allowing HECO to proceed with standardized contracts + # in June 2018: https://cca.hawaii.gov/dca/files/2018/07/Order-No-35560-HECO-CBRE.pdf + # "At the ten-month milestone [June 2019], three projects have half-executed standard + # form contracts ("SFCs") and interconnection agreements." None had subscribers or were + # under construction at this point. + # https://dms.puc.hawaii.gov/dms/DocumentViewer?pid=A1001001A19G15A93031F00794 + # In Oct. 2019, HECO's website said it had agreement(s) in place for 4990 kW + # of the 5000 MW solar allowed in Phase 1, with 330 kW in queue. I think the + # June 2018 D&O said this will roll over to Phase 2. No mention of wind on + # the HECO program website. + # https://www.hawaiianelectric.com/products-and-services/customer-renewable-programs/community-solar + # According to HECO press release, the first phase includes (only) 8 MW + # of solar on all islands (5 MW on Oahu). Other techs will be included + # in phase 2, which will begin "about two years" from 7/2018. + # https://www.hawaiianelectric.com/regulators-approve-community-solar-plans + # In 11/19/19 data sharing, HECO reported "One project for CBRE Phase 1 + # on O'ahu is slated to be installed by Q4 of 2019. Five Phase 1 + # projects are estimated to be installed in 2020 (one in Q2 2020 and + # four in Q4 2020). 
Lastly, two projects are estimated to be installed + # in Q3 of 2021.". In heco_outlook_2019 we broke these up into + # installations in 2019, 2020 and 2021, but in "HECO construction plan 2020-03-17.docx" + # they treat them all as being installed in 2020, so we do that now. + (2020, "LargePV", 5, "CBRE Phase 1"), # CBRE Phase 1 + # Original CBRE program design had only 72 MW in phase 1 and 2 (leaving + # 64 MW for phase 2), but HECO suggested increasing this to 235 MW over + # 5 years. HECO said this was because of projected shortfalls in DER + # program. Joint Parties say it should be possible to accept all of this + # earlier and expand the program if it goes quickly, and this should not + # be used to limit DER adoption. + # https://dms.puc.hawaii.gov/dms/DocumentViewer?pid=A1001001A19H20B01349C00185 + # **** questions: + # **** Should we reduce DER forecast in light of HECO's projected shortfall reported in CBRE proceeding? + # **** How much solar should we expect on Oahu in CBRE Phase 2 and when? + # **** Do we expect any wind on Oahu in CBRE Phase 2, and if so, when? + # In heco_outlook_2019, we used 150 MW in 2022 as a placeholder Oahu CBRE Phase 2. + # In this version, we switch to 43.5 in 2025, as shown in "HECO construction plan 2020-03-17.docx" + # This is in addition to RFPs noted below. + # (2025, 'LargePV', 43.5), # CBRE Phase 2 + # According to Murray Clay email 4/14/20, PUC Order No. 37070 in Docket + # 2015-0389 specified Oahu Phase 2 as 170 MW. "In tranche 1 there is an + # RFP process for 75 MW and 15 MW through an expedited (small project + # process). Tranche 2 is again 75 MW for RFP and 5 MW for expedited + # small projects for the 170 MW Oahu total. I think the tariff for the + # expedited small projects has to be filed in Sept 2020. RFP 1 is + # second half of 2020 and RFP 2 is second half of 2021. ... CBRE often + # takes a lot of time to be deployed... + # Based on this and later discussion with Ulupono (Samantha Ruiz email + # 2020-05-26 23:15), we adopted the CBRE phase 2 forecast below: + ( + 2023, + "LargePV", + 15, + "CBRE phase 2, small", + ), # small, expedited procurement in order + ( + 2024, + "LargePV", + 5, + "CBRE phase 2, small", + ), # small, expedited procurement in order + (2025, "LargePV", 150, "CBRE phase 2"), # larger, slower + # 2018-2019 RFPs (docket 2017-0352) + # These replace large PV and bulk batteries reported in PSIP for 2020 and 2022. + # TODO: maybe move these to existing plants tables + # "On March 25, 2019, the commission approved six ... grid-scale, + # solar-plus-storage projects.... Cumulatively, the projects will add 247 + # megawatts ("MW") of solar energy with almost 1 gigawatt hour of + # storage to the HECO Companies' grids." 
+ # -- D&O 36604, https://dms.puc.hawaii.gov/dms/DocumentViewer?pid=A1001001A19J10A90756F00117 + # First 6 approved projects (dockets 2018-0430, -0431, -0432, -0434, -0435, and -0436) are listed at + # -- https://www.hawaiianelectric.com/six-low-priced-solar-plus-storage-projects-approved-for-oahu-maui-and-hawaii-islands + # On 8/20/19, PUC approved 7th project, 12.5 MW/50 MWh AES solar+storage (docket 2019-0050, order 36480) + # -- https://dms.puc.hawaii.gov/dms/DocumentViewer?pid=A1001001A19H21B03929E00301 + # -- https://www.hawaiianelectric.com/puc-approves-grid-scale-solar-project-in-west-oahu + # As of 10/22/19, 8th project, 15 MW/60 MWh solar+storage on Maui, is still under review (docket 2018-0433) + # Status of all approved projects and in-service data are listed at + # https://www.hawaiianelectric.com/clean-energy-hawaii/our-clean-energy-portfolio/renewable-project-status-board + # They are also shown in "HECO construction plan 2020-03-17.docx". + # As of 2020-05-25, both sources say the first one will be online in + # 2021 and the other three will be online in 2022. But in an email to + # Ulupono May 23, 2020, Rod Aoki (HECO) said all RFP 1 projects would + # be online at the end of 2022. In an email to M Fripp 5/21/20, + # Samantha Ruiz (Ulupono) said they would likely come online in 2022. + # In an email to M Fripp 5/23/20, quoting the Rod Aoki email, Murray + # Clay (Ulupono) recommended they counting them as coming online at + # start of 2022, not end. Taking account of all this, we set them all + # to start in 2022. + (2022, "LargePV", 12.5, "RFP stage 1"), # AES West Oahu Solar + (2022, "LargePV", 52, "RFP stage 1"), # Hoohana Solar 1 + (2022, "LargePV", 39, "RFP stage 1"), # Mililani I Solar + (2022, "LargePV", 36, "RFP stage 1"), # Waiawa Solar + # storage associated with large PV projects; we assume this will be used + # efficiently, so we model it along with other large-scale storage. + (2022, "Battery_Bulk", (12.5, 4), "RFP stage 1"), # AES West Oahu Solar + (2022, "Battery_Bulk", (52, 4), "RFP stage 1"), # Hoohana Solar 1 + (2022, "Battery_Bulk", (39, 4), "RFP stage 1"), # Mililani I Solar + (2022, "Battery_Bulk", (36, 4), "RFP stage 1"), # Waiawa Solar + # Oahu RFP Stage 2 projects, retrieved 2020-06-04 from + # https://www.hawaiianelectric.com/clean-energy-hawaii/our-clean-energy-portfolio/renewable-project-status-board + # We assume they come online at _end_ of year (start of next year). + # See "data/Generator Info/HECO RFP Stage 2 summary.xlsx" to generate this code. 
+ # Also see https://www.hawaiianelectric.com/hawaiian-electric-selects-16-projects-in-largest-quest-for-renewable-energy-energy-storage-for-3-islands
+ (2023, "LargePV", 6, "RFP stage 2"), # Kaukonahua Solar
+ (2023, "LargePV", 60, "RFP stage 2"), # Kupehau Solar
+ (2023, "LargePV", 42, "RFP stage 2"), # Kupono Solar
+ (2023, "LargePV", 6.6, "RFP stage 2"), # Mehana Solar
+ (2024, "LargePV", 15, "RFP stage 2"), # Barbers Point Solar
+ (2024, "LargePV", 120, "RFP stage 2"), # Mahi Solar
+ (2024, "LargePV", 7, "RFP stage 2"), # Mountain View Solar
+ (2024, "LargePV", 30, "RFP stage 2"), # Waiawa Phase 2 Solar
+ (2023, "Battery_Bulk", (185, 3.054), "RFP stage 2"), # Kapolei Energy Storage
+ (2023, "Battery_Bulk", (6, 4.233), "RFP stage 2"), # Kaukonahua Solar
+ (2023, "Battery_Bulk", (60, 4), "RFP stage 2"), # Kupehau Solar
+ (2023, "Battery_Bulk", (42, 4), "RFP stage 2"), # Kupono Solar
+ (2023, "Battery_Bulk", (6.6, 4), "RFP stage 2"), # Mehana Solar
+ (2024, "Battery_Bulk", (15, 4), "RFP stage 2"), # Barbers Point Solar
+ (2024, "Battery_Bulk", (120, 4), "RFP stage 2"), # Mahi Solar
+ (2024, "Battery_Bulk", (7, 5), "RFP stage 2"), # Mountain View Solar
+ (2024, "Battery_Bulk", (30, 8), "RFP stage 2"), # Waiawa Phase 2 Solar
+ # NOTE: Samantha Ruiz (Ulupono) email 2020-05-21 and 5/26/20 14:36 says
+ # PUC directed HECO to install these by 2022, but she thinks 2023-24 is
+ # more likely. In email to Ulupono 5/23/20, Rod Aoki (HECO) said all RFP
+ # 2 projects would come online at end of 2025 (forwarded by Murray Clay,
+ # Ulupono, 5/23/20).
+ # Note: HECO said in "HECO construction plan 2020-03-17.docx" that RFP 2
+ # would add 1,300 GWh/year in 2025; their renewable project status board
+ # (10/2019-5/2020) says the same amount in 2022-25. (Proposals were due
+ # 11/5/19 for up to this amount.) This would be about 560 MW (calculation
+ # below), or 594 MW according to HECO
+ # https://www.hawaiianelectric.com/hawaiis-largest-renewable-energy-push-detailed-in-new-procurement-plan
+ # That is larger than what they ended up procuring. Original plan could
+ # also have been a mix of wind and solar, but final procurement was only
+ # solar.
+ # avg. cap factor for 560 MW starting after 390 best MW have been installed
+ # (existing projects + FIT + CBRE 1 + half of CBRE 2 + RFP 1) is 26.6%; see
+ # "select site, max_capacity, avg(cap_factor) from cap_factor natural join project where technology = 'CentralTrackingPV' group by 1, 2 order by 3 desc;"
+ # and (120*.271+247*.265+193*.264)/(120+247+193)
+ # Then (1,300,000 MWh/y)/(.266 * 8766 h/y) = 558 MW
+ # Apply an extra chunk of PV in 2025. This could be done for either of
+ # two reasons:
+ # (a) If there is no forecast for 2025, then Switch would build a lot
+ # in 2025 and then we would interpolate that to 2023-25. This would be
+ # an unrealistic installation rate and would clobber the 2023-24
+ # forecasts.
+ # (b) If there is a forecast for 2025 that is lower than 2024 and lower
+ # than the 2026 value chosen by Switch (0.2 * 2030 value), then that
+ # leaves a weird gap that we want to fill. (For now, we handle that case
+ # by potentially interpolating back from 2030 to 2025 instead of 2026.)
+ # (2025, 'LargePV', 50), + # PSIP 2016-12-23 Table 4-1 included 90 MW of contingency battery in 2019 + # and https://www.hawaiianelectric.com/documents/clean_energy_hawaii/selling_power_to_the_utility/competitive_bidding/20190207_tri_company_future_procurement.pdf + # says the 2016-12 plan was to do 70 MW contingency in 2019 and more contingency/regulation in 2020 + # There has been no further discussion of these as of 10/22/19, so we assume they are + # replaced by storage that comes with the PV systems. + # PSIP 2016: (2019, 'Battery_Conting', 90), + ] + [ + # Assume no new distributed generation or batteries after 2045 + # (we need some forecast to avoid picking winners between large PV + # and dist PV, and forecasting continuous increases in distpv would + # be redundant with already adequate large-renewables) + (y, t, 0.0, "late freeze") + for y in range(2046, 2060) + for t in ["DistPV", "DistBattery"] + ] + # No new generation in early years beyond what's shown above + # (this will also block construction of these techs in all years if the + # --psip-force flag is set) + tech_group_targets_definite += [ + (y, t, 0.0, "early freeze") + for techs, years in [ + (("OnshoreWind", "OffshoreWind", "LargePV"), range(2020, 2025 + 1)), + ( + ( + "IC_Barge", + "IC_MCBH", + "IC_Schofield", + "CC_152", + "Battery_Conting", + "Battery_Reg", + ), + range(2020, 2023 + 1), + ), + ] + for t in techs + for y in years + ] + + if m.options.psip_no_additional_onshore_wind: + tech_group_targets_definite += [ + (y, "OnshoreWind", 0.0, "block onshore wind") for y in range(2020, 2056) + ] + + # add targets specified on the command line + # TODO: allow repeated invocation + if m.options.force_build is not None: + b = list(m.options.force_build) + build = ( + int(b[0]), # year + b[1], # tech + # quantity + float(b[2]) if len(b) == 3 else (float(b[2]), float(b[3])), + "manual override", + ) + print("Forcing build: {}".format(build)) + tech_group_targets_definite.append(build) + + # technologies proposed in "HECO construction plan 2020-03-17.docx" but which may not be built if a better plan is found. + tech_group_targets_psip = [ + (2026, "CC_152", 150.586, "HECO plan 3/17/20"), + (2028, "CC_152", 150.586, "HECO plan 3/17/20"), + (2030, "Battery_Bulk", (165, 4), "HECO plan 3/17/20"), + (2032, "CC_152", 2 * 150.586, "HECO plan 3/17/20"), + (2035, "Battery_Bulk", (168, 4), "HECO plan 3/17/20"), + (2040, "LargePV", 280, "HECO plan 3/17/20"), + (2040, "Battery_Bulk", (420, 4), "HECO plan 3/17/20"), + (2045, "LargePV", 1180, "HECO plan 3/17/20"), + (2045, "Battery_Bulk", (1525, 4), "HECO plan 3/17/20"), + ( + 2045, + "IC_Barge", + 4 * 16.786392, + "HECO plan 3/17/20", + ), # proxy for 4*17 MW of generic ICE capacity + # RESOLVE modeled 4-hour batteries as being capable of providing reserves, + # and didn't model contingency batteries (see data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/CA-IR-1/Input + # and Output Files by Case/E3 and Company Defined Cases/Market DGPV (Reference)/OA_NOLNG/technologies.tab). + # Then HECO added a 90 MW contingency battery (table 4-1 of PSIP 2016-12-23). + # Note: RESOLVE can get reserves from batteries (they only considered 4-hour batteries), but not + # from EVs or flexible demand. + # DR: Looking at RESOLVE inputs, it seems like they take roughly 4% of load, and allow it to be doubled + # or cut to zero each hour (need to double-check this beyond first day). Maybe this includes EVs? + # (no separate sign of EVs). + # TODO: check Resolve load levels against Switch. 
+ # TODO: maybe I should switch over to using the ABC curves and load profiles that HECO used with PLEXOS + # (for all islands). + # TODO: Did HECO assume 4-hour batteries, demand response or EVs could provide reserves when running PLEXOS? + # - all of these seem unlikely, but we have to ask HECO to find out; PLEXOS files are unclear. + ] + + if psip: + if m.options.psip_relax_after is not None: + # NOTE: this could be moved later, if we want this flag to relax + # both the definite and psip targets + psip_targets = [ + t for t in tech_group_targets_psip if t[0] <= m.options.psip_relax_after + ] + else: + psip_targets = tech_group_targets_psip.copy() + tech_group_targets = tech_group_targets_definite + psip_targets + else: + # must make a copy here so that rebuilds will be added to + # tech_group_targets but not tech_group_targets_definite + tech_group_targets = tech_group_targets_definite.copy() + + # Show which technologies can contribute to the target for each technology + # group and which group each technology contributes to + techs_for_tech_group = { + "DistPV": ["DistPV", "SlopedDistPV", "FlatDistPV"], + "LargePV": ["CentralTrackingPV", "CentralFixedPV"], + } + # use the rest as-is + missing_techs = {t for y, t, s, l in tech_group_targets}.difference( + techs_for_tech_group.keys() + ) + techs_for_tech_group.update({t: [t] for t in missing_techs}) + # create a reverse mapping + tech_tech_group = { + tech: tech_group + for tech_group, techs in techs_for_tech_group.items() + for tech in techs + } + + # Rebuild renewable projects and forecasted technologies at retirement. + # In the future we may be able to simplify this by enforcing capacity targets + # instead of construction targets. + + # note: this behavior is consistent with the following: + # discussion on p. 3-8 of PSIP 2016-12-23 vol. 1. + # Resolve applied planned wind and solar as set levels through 2045, not set additions in each year. + # Table 4-1 shows final plans that were sent to Plexos; Plexos input files in + # data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/DBEDT-IR-12/Input/Oahu/Oahu E3 Plan Input/CSV files/Theme 5 + # show optional capacity built in 2020 or 2025 (in list below) continuing in service in 2045. + # and Plexos input files in data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/DBEDT-IR-12/Input/Oahu/Oahu E3 Plan Input/CSV files/PSIP Max Capacity.csv + # don't show any retirements of wind and solar included as "planned" in RESOLVE and "existing" in Switch + # (Waivers PV1, West Loch; Kawailoa may be omitted?) 
+ # also note: Plexos input files in XX + # show max battery capacity equal to sum of all prior additions + + # m = lambda: 3; m.options = m; m.options.inputs_dir = '/Users/matthias/Dropbox/Research/Ulupono/Enovation Model/pbr_scenario/inputs' + gen_info = pd.read_csv(os.path.join(m.options.inputs_dir, "gen_info.csv")) + gen_info["tech_group"] = gen_info["gen_tech"].map(tech_tech_group) + gen_info = gen_info[gen_info["tech_group"].notna()] + # existing technologies are also subject to rebuilding + existing_techs = ( + pd.read_csv(os.path.join(m.options.inputs_dir, "gen_build_predetermined.csv")) + .merge(gen_info, how="inner") + .groupby(["build_year", "tech_group"])["build_gen_predetermined"] + .sum() + .reset_index() + ) + assert not any( + is_battery(t) for i, y, t, q in existing_techs.itertuples() + ), "Must update {} to handle pre-existing batteries.".format(__name__) + ages = gen_info.groupby("tech_group")["gen_max_age"].agg(["min", "max", "mean"]) + assert all(ages["min"] == ages["max"]), "Some psip technologies have mixed ages." + last_period = pd.read_csv(os.path.join(m.options.inputs_dir, "periods.csv")).iloc[ + -1, 0 + ] + + # rebuild all renewables and batteries in place before the start of the study, + # plus any technologies with targets specified here + rebuildable_targets = [ + (y, t, q, "existing") + for i, y, t, q in existing_techs.itertuples() + if is_renewable(t) or is_battery(t) + ] + tech_group_targets + tech_life = dict() + for build_year, tech_group, cap, label in rebuildable_targets: + if tech_group not in ages.index: + raise ValueError( + "A target has been specified for {} but there are no matching " + "technologies in gen_info.csv.".format(tech_group) + ) + max_age = ages.loc[tech_group, "mean"] + tech_life[tech_group] = max_age + rebuild_year = build_year + max_age + while rebuild_year <= last_period: + tech_group_targets.append( + (rebuild_year, tech_group, cap, "rebuild " + label) + ) + rebuild_year += max_age + del gen_info, existing_techs, ages, rebuildable_targets + + # we also convert to normal python datatypes to support serialization + tech_group_power_targets = [ + (int(y), t, float(q[0] if type(q) is tuple else q), l) + for y, t, q, l in tech_group_targets + ] + tech_group_energy_targets = [ + (int(y), t, float(q[0] * q[1]), l) + for y, t, q, l in tech_group_targets + if type(q) is tuple + ] + + m.FORECASTED_TECH_GROUPS = Set( + dimen=1, initialize=list(techs_for_tech_group.keys()) + ) + m.FORECASTED_TECH_GROUP_TECHS = Set( + m.FORECASTED_TECH_GROUPS, dimen=1, initialize=techs_for_tech_group + ) + m.FORECASTED_TECHS = Set(dimen=1, initialize=list(tech_tech_group.keys())) + m.tech_tech_group = Param( + m.FORECASTED_TECHS, within=Any, initialize=tech_tech_group + ) + + # make a list of renewable technologies + m.RENEWABLE_TECH_GROUPS = Set( + dimen=1, + initialize=m.FORECASTED_TECH_GROUPS, + filter=lambda m, tg: is_renewable(tg), + ) + + def tech_group_target(m, per, tech, targets): + """Find the amount of each technology that is targeted to be built + between the start of the previous period and the start of the current + period and not yet retired.""" + start = 0 if per == m.PERIODS.first() else m.PERIODS.prev(per) + end = per + target = sum( + q + for (tyear, ttech, q, l) in targets + if ttech == tech + and start < tyear + and tyear <= end + and tyear + tech_life[ttech] > end + ) + return target + + def rule(m, per, tech): + return tech_group_target(m, per, tech, tech_group_power_targets) + + m.tech_group_power_target = Param( + m.PERIODS, 
m.FORECASTED_TECH_GROUPS, within=Reals, initialize=rule
+ )
+
+ def rule(m, per, tech):
+ return tech_group_target(m, per, tech, tech_group_energy_targets)
+
+ m.tech_group_energy_target = Param(
+ m.PERIODS, m.FORECASTED_TECH_GROUPS, within=Reals, initialize=rule
+ )
+
+ def MakeTechGroupDicts_rule(m):
+ # get unit sizes of all technologies
+ unit_sizes = m.tech_group_unit_size_dict = defaultdict(float)
+ for g, unit_size in m.gen_unit_size.items():
+ tech = m.gen_tech[g]
+ if tech in m.FORECASTED_TECHS:
+ tech_group = m.tech_tech_group[tech]
+ if tech_group in unit_sizes:
+ if unit_sizes[tech_group] != unit_size:
+ raise ValueError(
+ "Generation technology {} uses different unit sizes for different projects.".format(tech_group)
+ )
+ else:
+ unit_sizes[tech_group] = unit_size
+ # get predetermined capacity for all technologies
+ m.tech_group_predetermined_power_cap_dict = defaultdict(float)
+ for (g, per), cap in m.build_gen_predetermined.items():
+ tech = m.gen_tech[g]
+ if tech in m.FORECASTED_TECHS:
+ tech_group = m.tech_tech_group[tech]
+ m.tech_group_predetermined_power_cap_dict[tech_group, per] += cap
+ m.tech_group_predetermined_energy_cap_dict = defaultdict(float)
+ for (g, per), cap in m.build_gen_predetermined.items():
+ tech = m.gen_tech[g]
+ if tech in m.FORECASTED_TECHS and g in m.STORAGE_GENS:
+ # Need to get predetermined energy capacity here, but there's no
+ # param for it yet, so currently these can only be implemented
+ # as technologies with fixed gen_storage_energy_to_power_ratio,
+ # in which case users should only provide a power target, not
+ # an energy target in this file. In the future, there may be
+ # a way to provide predetermined power and energy params, so we
+ # watch out for that here.
+ if m.gen_storage_energy_to_power_ratio[g] == float("inf"):
+ TODO(
+ "Need to look up predetermined energy capacity for storage technologies."
+ )
+ # m.tech_group_predetermined_energy_cap_dict[tech_group, per] +=
+
+ m.MakeTechGroupDicts = BuildAction(rule=MakeTechGroupDicts_rule)
+
+ # Find last date for which a definite target was specified for each tech group.
+ # This sets the last year when construction of a technology is fixed at a
+ # predetermined level in the "most-likely" (non-PSIP) cases.
+ # This ignores PSIP targets, since _all_ construction is frozen when those are
+ # used, and ignores reconstruction targets, because those just follow on from
+ # the early-years construction, and we don't want to freeze construction all
+ # the way through.
+ last_definite_target = dict()
+ for y, t, q, l in tech_group_targets_definite:
+ last_definite_target[t] = max(y, last_definite_target.get(t, 0))
+
+ # Save targets and group definitions for future reference
+ import json
+
+ os.makedirs(m.options.outputs_dir, exist_ok=True) # avoid errors with new dir
+ with open(os.path.join(m.options.outputs_dir, "heco_outlook.json"), "w") as f:
+ json.dump(
+ {
+ "tech_group_power_targets": tech_group_power_targets,
+ "tech_group_energy_targets": tech_group_energy_targets,
+ "techs_for_tech_group": techs_for_tech_group,
+ "tech_tech_group": tech_tech_group,
+ "last_definite_target": last_definite_target,
+ },
+ f,
+ indent=4,
+ )
+
+ # def build_tech_group_in_period(m, tech_group, period):
+ # """
+ # How much capacity is added in this tech_group in this period?
+ # Returns literal 0 if and only if there are no matching projects.
+ # Otherwise returns a Pyomo expression.
+ # """ + # return sum( + # build_var[g, period] + # for g in m.GENERATION_PROJECTS + # if m.gen_tech[g] in m.FORECASTED_TECHS + # and m.tech_tech_group[m.gen_tech[g]] == tech_group + # and (g, period) in build_var, + # 0 + # ) + + # # allow extra solar in 2025, up to the point of straight-line additions + # # between 2025 and 2030 (inclusive) + # ####### We don't do this here, we just interpolate back from 2030 to 2025 + # ####### instead of 2026 (slighly less optimal, but much simpler) + # if last_definite_target['LargePV'] == 2025: + # last_definite_target['LargePV'] = 2024 # use target as lower bound in 2025 + # print("="*80) + # print("NOTE: Using HECO 2025 LargePV plan as lower bound, not fixed target.") + # print("="*80) + # ##### slack variable to allow 2025 to overshoot 20% of 2030 if needed + # m.SolarOvershoot2025 = Var(within=NonNegativeReals) + # def rule(m): + # build2025 = build_tech_group_in_period['LargePV', 2025] + # build2030 = build_tech_group_in_period['LargePV', 2030] + # ####### This doesn't work, needs a big-M constraint to force + # ####### build2025 to be below the max of the target or 0.2 * build2030 + # return build2025 - m.SolarOvershoot2025 <= 0.2 * build2030 + # m.Even_Increment_Solar_2025 = Constraint(rule=rule) + # else: + # raise ValueError( + # 'Expected last HECO target for LargePV to be in 2025, but it is in {}.' + # .format(last_definite_target['LargePV']) + # ) + + def tech_group_target_rule(m, per, tech_group, build_var, target): + """ + Enforce targets for each technology. + + with PSIP: build is zero except for tech_group_power_targets + (sum during each period or before first period) + without PSIP: build is == definite targets during time range when targets specified + build is >= target later; + Note: in the last case the target is the sum of targets between start of prior period and start of this one + """ + build = sum( + build_var[g, per] + for g in m.GENERATION_PROJECTS + if m.gen_tech[g] in m.FORECASTED_TECHS + and m.tech_tech_group[m.gen_tech[g]] == tech_group + and (g, per) in build_var + ) + + if isinstance(build, int) and build == 0: + # no matching projects found, left with literal 0 + if target == 0: + return Constraint.Skip + else: + raise ValueError( + "Target was set for {} in {}, but no matching projects are available.".format( + tech_group, per + ) + ) + + if psip and ( + m.options.psip_relax_after is None or per <= m.options.psip_relax_after + ): + # PSIP in effect: exactly match the target (possibly zero) + return build == target + elif per <= last_definite_target.get(tech_group, 0): + # PSIP not in effect, but a definite target is + return build == target + elif ( + m.options.psip_minimal_renewables and tech_group in m.RENEWABLE_TECH_GROUPS + ): + # Only build the specified amount of renewables, no more. + # This is used to apply the definite targets, but otherwise minimize renewable development. 
+ return build == target + else: + # treat the target as a lower bound + return build >= target + + def rule(m, per, tech_group): + # get target, including any capacity specified in the predetermined builds, + # so the target will be additional to those + target = ( + m.tech_group_power_target[per, tech_group] + + m.tech_group_predetermined_power_cap_dict[tech_group, per] + ) + return tech_group_target_rule(m, per, tech_group, m.BuildGen, target) + + m.Enforce_Tech_Group_Power_Target = Constraint( + m.PERIODS, m.FORECASTED_TECH_GROUPS, rule=rule + ) + + def rule(m, per, tech_group): + # get target, including any capacity specified in the predetermined builds, + # so the target will be additional to those + target = ( + m.tech_group_energy_target[per, tech_group] + + m.tech_group_predetermined_energy_cap_dict[tech_group, per] + ) + return tech_group_target_rule(m, per, tech_group, m.BuildStorageEnergy, target) + + m.Enforce_Tech_Group_Energy_Target = Constraint( + m.PERIODS, m.FORECASTED_TECH_GROUPS, rule=rule + ) + + if psip: + + def rule(m): + buildable_techs = set(m.gen_tech[g] for (g, y) in m.NEW_GEN_BLD_YRS) + if buildable_techs - set(m.FORECASTED_TECHS): + # TODO: automatically add zero-targets + m.logger.error( + "\nERROR: You need to provide at least one zero target for " + "each technology without targets in the PSIP to prevent it " + "from being built." + ) + return False + else: + return True + + m.Check_For_Buildable_Techs_Under_PSIP = BuildCheck(rule=rule) + + # don't allow construction of other technologies (e.g., pumped hydro, fuel cells) + advanced_tech_vars = [ + "BuildPumpedHydroMW", + "BuildAnyPumpedHydro", + "BuildElectrolyzerMW", + "BuildLiquifierKgPerHour", + "BuildLiquidHydrogenTankKg", + "BuildFuelCellMW", + ] + + def no_advanced_tech_rule_factory(v): + return lambda m, *k: (getattr(m, v)[k] == 0) + + for v in advanced_tech_vars: + try: + var = getattr(m, v) + setattr( + m, + "PSIP_No_" + v, + Constraint(var._index, rule=no_advanced_tech_rule_factory(v)), + ) + except AttributeError: + pass # model doesn't have this var diff --git a/switch_model/hawaii/heco_outlook_2020_08.py b/switch_model/hawaii/heco_outlook_2020_08.py new file mode 100644 index 000000000..9ee632cfd --- /dev/null +++ b/switch_model/hawaii/heco_outlook_2020_08.py @@ -0,0 +1,915 @@ +from __future__ import division +from __future__ import print_function +from collections import defaultdict +from textwrap import dedent +from math import isnan +import os +from pyomo.environ import * +import pandas as pd +import time + +# This module represents our best forecasts of capacity additions on Oahu as of +# August 2020. There are near-term forecasts for 2021-2025 for all technologies and +# long-term forecasts for DGPV and distributed batteries. These are close to the +# forecasts HECO used for their modeling, but sometimes more up-to-date or +# realistic. 
The forecasts HECO used for their modeling at this time are in +# heco_plan_2020_08.py + + +def TODO(note): + raise NotImplementedError(dedent(note).strip()) + + +def NOTE(note): + print("=" * 80) + print("{}:".format(__name__)) + print(dedent(note).strip()) + print("=" * 80) + print() + # time.sleep(2) + + +def define_arguments(argparser): + argparser.add_argument( + "--psip-force", + action="store_true", + default=False, + help="Force following of PSIP plans (building exact amounts of certain technologies).", + ) + argparser.add_argument( + "--psip-relax", + dest="psip_force", + action="store_false", + help="Relax PSIP plans, to find a more optimal strategy.", + ) + argparser.add_argument( + "--psip-minimal-renewables", + action="store_true", + default=False, + help="Use only the amount of renewables shown in PSIP plans, and no more (should be combined with --psip-relax).", + ) + argparser.add_argument( + "--force-build", + nargs=3, + default=None, + help="Force construction of at least a certain quantity of a particular technology during certain years. Space-separated list of year, technology and quantity.", + ) + argparser.add_argument( + "--psip-relax-after", + type=float, + default=None, + help="Follow the PSIP plan up to and including the specified year, then optimize construction in later years. Should be combined with --psip-force.", + ) + + argparser.add_argument( + "--psip-allow-more-solar-2025", + action="store_true", + default=False, + help="Treat 2025 target for LargePV as lower limit, not exact target.", + ) + argparser.add_argument( + "--psip-no-additional-onshore-wind", + action="store_true", + default=False, + help="Don't allow construction of any onshore wind beyond the current plan.", + ) + + +def is_renewable(tech): + return any(txt in tech for txt in ("PV", "Wind", "Solar")) + + +def is_battery(tech): + return "battery" in tech.lower() + + +def define_components(m): + ################### + # resource rules to match HECO's forecast as of late 2019 or + # (optionally) 2016-12 PSIP + ################## + + # decide whether to enforce the PSIP preferred plan + # if an environment variable is set, that takes precedence + # (e.g., on a cluster to override options.txt) + psip_env_var = os.environ.get("USE_PSIP_PLAN") + if psip_env_var is None: + # no environment variable; use the --psip-relax flag + psip = m.options.psip_force + elif psip_env_var.lower() in ["1", "true", "y", "yes", "on"]: + psip = True + elif psip_env_var.lower() in ["0", "false", "n", "no", "off"]: + psip = False + else: + raise ValueError( + "Unrecognized value for environment variable USE_PSIP_PLAN={} (should be 0 or 1)".format( + psip_env_var + ) + ) + + if m.options.verbose: + if psip: + print("Using PSIP construction plan.") + else: + print( + "Relaxing PSIP construction plan (optimizing around forecasted adoption)." + ) + + # make sure LNG is turned off + if ( + psip + and "LNG" in m.FUELS + and getattr(m.options, "force_lng_tier", []) != ["none"] + ): + raise RuntimeError( + "To match the PSIP with LNG available, you must use the lng_conversion " + 'module and set "--force-lng-tier none".' 
+ )
+
+ # use cases:
+ # DistPV fixed all the way through for most-likely scenarios and PSIP scenarios but not for general Switch-Oahu
+ # Distributed storage fixed all the way through in most-likely and PSIP but not Switch-Oahu
+ # Centralized storage Battery_Bulk at lower limit all the way through (representing distributed storage) in
+ # Large PV, Onshore Wind, Offshore Wind, centralized storage fixed for some early years in most-likely case and PSIP, maybe in Switch-Oahu
+ # Other technologies at fixed levels in PSIP but not most-likely case
+ # In most-likely and PSIP scenarios, all renewables already in place plus everything specified in targets gets rebuilt at retirement.
+
+ # Plan:
+ # - each year is either fixed or flexible, i.e., early years will have predetermined build or not
+ # - when PSIP is in effect, all targets are exact -- no construction possible except what's listed
+ # - when PSIP is relaxed, definite targets are applied exactly up until last year for which targets
+ # are specified, then extra capacity can be added freely
+ # - this locks in DistPV forecast and other "definite" construction elements
+ # - this also allows specifying early construction either here or in existing plants tables,
+ # with similar effect
+ # - "most-likely" (PBR) targets are listed as "definite" targets, applied when PSIP flag turned off
+ # - This module introduces a new treatment of the definite targets compared to the older psip_2016_12:
+ # they are treated as exact targets between the start of the study and the last date specified, but
+ # then more can be added in later years.
+ # - Battery_Bulk is cloned as DistBattery and targets are set for that (may be excluded from non-PSIP/PBR scenarios)
+ # - this allows fixed targets for DistBattery in same years as free investment in Battery_Bulk
+ # - DistPV and DistBattery are listed as definite targets through 2045
+ # - PSIP thermal plants are listed in PSIP targets only
+ # - early-years storage and renewables automatically get rebuilt in later years, but we don't consider the
+ # rebuild targets when calculating the fixed-construction period for these technologies, so these are used
+ # as lower limits, not fixed targets.
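+
+ # Illustrative sketch of how the plan above plays out (editor's example with
+ # hypothetical values, not from any HECO filing): suppose LargePV has
+ # definite targets through 2025 and DistPV through 2045. Then:
+ # --psip-force: BuildGen == target for every tech group in every period
+ # (zero unless a target is listed), so the plan is followed exactly.
+ # --psip-relax, period <= 2025: BuildGen == definite target for LargePV,
+ # so early construction is locked in.
+ # --psip-relax, period > 2025: BuildGen >= target for LargePV, so the
+ # optimizer may add more; DistPV stays locked through 2045 because its
+ # definite targets run to the end of the study.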
+ + # * Alternative strategy (abandoned): start from scratch, modifying gen_predetermined_build + # * create input spreadsheet showing forecasted capacity for various technology groups in each zone, + # grouped into different adoption forecasts (tech_forecast_scenario) + # * store this spreadsheet in a table in the back-end database + # * store average cap factor of each project in project table + # * scenario_data translates this into construction plans + # * rank projects in each technology group by levelized cost + # * assign capacity target step-ups first to existing projects, then to lowest-cost project as of that year + # * assign reconstruction dates to continue capacity step-ups in later years + # * capacity step-downs can't be handled because it's not clear which projects should be retired, + # and they may be infeasible; they also don't fit with the idea that these tranches last forever + # * write all the construction steps into gen_predetermined_build + # * can't create construction plans in import_data because they must avoid rebuilding in occupied + # projects, which depends on asset life, which depends on tech_scen_id, not known till scenario_data runs + # * this approach could also be used to handle all the existing builds, instead of the current existing projects system + # * but we're back to an old problem then -- what about cases where these are floors but not upper limits, + # e.g., want to force in one CC plant, but open to having more than that? + # * could handle that by moving the predetermined part into a separate project, but then project definitions + # must depend on tech_forecast_scenario + + # NOTE: RESOLVE used different wind and solar profiles from Switch. + # Switch profiles seem to be more accurate, so we optimize against them + # and show that this may give (small) savings vs. the RESOLVE plan. + + # TODO: Should I use Switch to investigate how much of HECO's poor performance is due + # to using bad resource profiles (small onshore wind that doesn't rise in the rankings), + # how much is due to capping PV at 300 MW in 2020, + # how much is due to non-integrality in RESOLVE (fixed by later jimmying by HECO), and + # how much is due to forcing in elements before and after the optimization? + + # TODO (maybe): set project-specific targets, so that DistPV targets can be spread among tranches + # and specific projects in the PSIP can be represented accurately (really just NPM wind). This + # might also allow reconstruction of exactly the same existing or PSIP project when retired + # (as specified in the PSIP). Currently the code below lets Switch choose the best project with the + # same technology when it replaces retired renewable projects. + + # targets for individual generation technologies + # (year, technology, MW added) + # For storage technologies with flexible energy value (no + # gen_storage_energy_to_power_ratio provided), MW added should be replaced + # by a tuple of (MW, hours). + + # Technologies that are forecasted to be built in "most-likely" scenarios. + # These apply whenever this module is used, even if rest of PSIP plan is + # ignored by turning off psip flag. Like PSIP targets, these are assumed + # to be rebuilt at retirement until the end of the study. + # NOTE(""" + # Need to get Switch to model solar+storage using normal storage module; + # model AC limit and allow unlimited DC on back side. Then use this to + # model RFP PV+BESS and forecasted DGPV+DESS. 
+ # """) + + tech_group_targets_definite = [ + # HECO June 2018 forecast, saved on shared drive in PBR docket + # See the following: + # email from Doug Codiga 11/19/19: "FW: October RWG and PWG Meeting Follow-Ups" + # forecasts stored in https://drive.google.com/open?id=1ToL7x-m17M2t0Cfd5k6w8no0rDiPy31l + # "/s/data/Generator Info/HECO Dist PV Forecast Jun 2018.xlsx" + # We assume all DistPV and DistBattery are used efficiently/optimally, + # i.e., we do not attempt to model non-optimal pairing of DistPV with + # DistBattery or curtailment on self-supply tariffs. + # NOTE: HECO sent a new forecast on 2020-03-18 (see email from Murray + # Clay at Ulupono that day), but we don't use it because it seems + # unrealistic. (See email from Murray Clay (Ulupono) 2020-04-14 13:58 + # and /s/data/Generator Info/HECO Dist PV Forecast 2018-03-17.xlsx) + # NOTE: the HECO forecast for 2020 is below actual installed capacity + # (in Existing Plant Data.xlsx); so we just apply the HECO additions on + # top of the existing capacity, which gives total installed capacity + # 112 MW (PV) and 6.1 MW (Battery) higher than the HECO forecast. + (2021, "DistPV", 29.51, "DER forecast"), + (2022, "DistPV", 22.835, "DER forecast"), + (2023, "DistPV", 19.168, "DER forecast"), + (2024, "DistPV", 23.087, "DER forecast"), + (2025, "DistPV", 24.322, "DER forecast"), + (2026, "DistPV", 25.888, "DER forecast"), + (2027, "DistPV", 27.24, "DER forecast"), + (2028, "DistPV", 28.387, "DER forecast"), + (2029, "DistPV", 29.693, "DER forecast"), + (2030, "DistPV", 30.522, "DER forecast"), + (2031, "DistPV", 31.32, "DER forecast"), + (2032, "DistPV", 32.234, "DER forecast"), + (2033, "DistPV", 32.42, "DER forecast"), + (2034, "DistPV", 32.98, "DER forecast"), + (2035, "DistPV", 33.219, "DER forecast"), + (2036, "DistPV", 32.785, "DER forecast"), + (2037, "DistPV", 33.175, "DER forecast"), + (2038, "DistPV", 33.011, "DER forecast"), + (2039, "DistPV", 33.101, "DER forecast"), + (2040, "DistPV", 33.262, "DER forecast"), + (2041, "DistPV", 33.457, "DER forecast"), + (2042, "DistPV", 33.343, "DER forecast"), + (2043, "DistPV", 34.072, "DER forecast"), + (2044, "DistPV", 34.386, "DER forecast"), + (2045, "DistPV", 35.038, "DER forecast"), + # note: HECO provides a MWh forecast; we assume inverters are large + # enough to charge in 4h + (2021, "DistBattery", (12.968, 4), "DER forecast"), + (2022, "DistBattery", (9.693, 4), "DER forecast"), + (2023, "DistBattery", (3.135, 4), "DER forecast"), + (2024, "DistBattery", (3.732, 4), "DER forecast"), + (2025, "DistBattery", (4.542, 4), "DER forecast"), + (2026, "DistBattery", (5.324, 4), "DER forecast"), + (2027, "DistBattery", (6.115, 4), "DER forecast"), + (2028, "DistBattery", (6.719, 4), "DER forecast"), + (2029, "DistBattery", (7.316, 4), "DER forecast"), + (2030, "DistBattery", (7.913, 4), "DER forecast"), + (2031, "DistBattery", (8.355, 4), "DER forecast"), + (2032, "DistBattery", (8.723, 4), "DER forecast"), + (2033, "DistBattery", (9.006, 4), "DER forecast"), + (2034, "DistBattery", (9.315, 4), "DER forecast"), + (2035, "DistBattery", (9.49, 4), "DER forecast"), + (2036, "DistBattery", (9.556, 4), "DER forecast"), + (2037, "DistBattery", (9.688, 4), "DER forecast"), + (2038, "DistBattery", (9.777, 4), "DER forecast"), + (2039, "DistBattery", (9.827, 4), "DER forecast"), + (2040, "DistBattery", (9.874, 4), "DER forecast"), + (2041, "DistBattery", (9.939, 4), "DER forecast"), + (2042, "DistBattery", (10.098, 4), "DER forecast"), + (2043, "DistBattery", (10.238, 4), "DER forecast"), + 
(2044, "DistBattery", (10.37, 4), "DER forecast"), + (2045, "DistBattery", (10.478, 4), "DER forecast"), + # Mauka Fit 1 and Na Pua Makani are scheduled to come online in 2020 but + # are still under construction as of 8/7/20 according to + # https://www.hawaiianelectric.com/clean-energy-hawaii/our-clean-energy-portfolio/renewable-project-status-board + # In the HECO plan (Docket 2018-0088 HECO SOP 2, Exhibit P2), Mauka Fit + # 1 is online in 2021 and Na Pua Makani and CBRE Phase 1 are online in + # 2020. Since none of these are online by Aug. 2020, we model them as + # starting 1/1/2021. + # NOTE: PSIP Figure J-10 says FIT projects (Mauka FIT and Aloha Solar II (in Existing Plant Data) are in addition to the customer DGPV + # adoption forecast, but they are not in "HECO construction plan 2020-03-17.docx". + # Samantha Ruiz (Ulupono) recommended in email 5/26/20 to count them as + # non-DER esp. since HECO's March 2020 DER forecast is flat in early + # years. Note: these are probably fixed-axis rather than tracking (i.e., + # more like DistPV than LargePV), but we include them as LargePV because + # they don't reduce available roof inventory. + # NOTE: Mauka FIT and Na Pua Makani are at particular locations but we + # include them here because counting them as existing capacity in 2021 + # would block construction of additional generators in 2021. + (2021, "LargePV", 3.5, "Mauka FIT 1"), # Mauka FIT 1 + # Na Pua Makani (NPM) wind + # Reported as 24 MW in + # https://www.hawaiianelectric.com/clean-energy-hawaii/our-clean-energy-portfolio/renewable-project-status-board + # (accessed 10/22/19) but 27 MW on + # https://www.napuamakanihawaii.org/fact-sheet/. HECO confirmed by email + # that it is 24 MW. + (2021, "OnshoreWind", 24, "Na Pua Makani"), + # CBRE wind and PV + # Final order given allowing HECO to proceed with standardized contracts + # in June 2018: https://cca.hawaii.gov/dca/files/2018/07/Order-No-35560-HECO-CBRE.pdf + # "At the ten-month milestone [June 2019], three projects have half-executed standard + # form contracts ("SFCs") and interconnection agreements." None had subscribers or were + # under construction at this point. + # https://dms.puc.hawaii.gov/dms/DocumentViewer?pid=A1001001A19G15A93031F00794 + # In Oct. 2019, HECO's website said it had agreement(s) in place for 4990 kW + # of the 5000 MW solar allowed in Phase 1, with 330 kW in queue. I think the + # June 2018 D&O said this will roll over to Phase 2. No mention of wind on + # the HECO program website. + # https://www.hawaiianelectric.com/products-and-services/customer-renewable-programs/community-solar + # According to HECO press release, the first phase includes (only) 8 MW + # of solar on all islands (5 MW on Oahu). Other techs will be included + # in phase 2, which will begin "about two years" from 7/2018. + # https://www.hawaiianelectric.com/regulators-approve-community-solar-plans + # In 11/19/19 data sharing, HECO reported "One project for CBRE Phase 1 + # on O'ahu is slated to be installed by Q4 of 2019. Five Phase 1 + # projects are estimated to be installed in 2020 (one in Q2 2020 and + # four in Q4 2020). Lastly, two projects are estimated to be installed + # in Q3 of 2021.". In heco_outlook_2019 we broke these up into + # installations in 2019, 2020 and 2021. In "HECO construction plan 2020-03-17.docx" + # they treat them all as being installed in 2020. 
+ # As of 8/7/2020, HECO website (https://www.hawaiianelectric.com/products-and-services/customer-renewable-programs/community-solar/cbre-phase-1) + # says "Some projects for Phase 1 are still in development under the + # original rules. Hawaiian Electric expects two Phase 1 CBRE projects to + # go online in 2020 Q4. The unallocated capacity from Phase 1 has been + # rolled over into Phase 2. No Phase 1 capacity is currently available." + # We haven't found names, sizes or installation dates for individual + # projects so we can't easily include them in Existing Capacity. So to + # simplify modeling we move the online date to 2021. + (2021, "LargePV", 5, "CBRE Phase 1"), # CBRE Phase 1 + # Original CBRE program design had only 72 MW in phase 1 and 2 (leaving + # 64 MW for phase 2), but HECO suggested increasing this to 235 MW over + # 5 years. HECO said this was because of projected shortfalls in DER + # program. Joint Parties say it should be possible to accept all of this + # earlier and expand the program if it goes quickly, and this should not + # be used to limit DER adoption. + # https://dms.puc.hawaii.gov/dms/DocumentViewer?pid=A1001001A19H20B01349C00185 + # **** questions: + # **** Should we reduce DER forecast in light of HECO's projected shortfall reported in CBRE proceeding? + # **** How much solar should we expect on Oahu in CBRE Phase 2 and when? + # **** Do we expect any wind on Oahu in CBRE Phase 2, and if so, when? + # In heco_outlook_2019, we used 150 MW in 2022 as a placeholder Oahu CBRE Phase 2. + # In this version, we switch to 43.5 in 2025, as shown in "HECO construction plan 2020-03-17.docx" + # This is in addition to RFPs noted below. + # (2025, 'LargePV', 43.5), # CBRE Phase 2 + # According to Murray Clay email 4/14/20, PUC Order No. 37070 in Docket + # 2015-0389 specified Oahu Phase 2 as 170 MW. "In tranche 1 there is an + # RFP process for 75 MW and 15 MW through an expedited (small project + # process). Tranche 2 is again 75 MW for RFP and 5 MW for expedited + # small projects for the 170 MW Oahu total. I think the tariff for the + # expedited small projects has to be filed in Sept 2020. RFP 1 is + # second half of 2020 and RFP 2 is second half of 2021. ... CBRE often + # takes a lot of time to be deployed... + # Based on this and later discussion with Ulupono (Samantha Ruiz email + # 2020-05-26 23:15), we adopted the CBRE phase 2 forecast below: + ( + 2023, + "LargePV", + 15, + "CBRE phase 2, small", + ), # small, expedited procurement in order + ( + 2024, + "LargePV", + 5, + "CBRE phase 2, small", + ), # small, expedited procurement in order + (2025, "LargePV", 150, "CBRE phase 2"), # larger, slower + # 2018-2019 RFPs (docket 2017-0352) + # These replace large PV and bulk batteries reported in PSIP for 2020 and 2022. + # TODO: maybe move these to existing plants tables + # "On March 25, 2019, the commission approved six ... grid-scale, + # solar-plus-storage projects.... Cumulatively, the projects will add 247 + # megawatts ("MW") of solar energy with almost 1 gigawatt hour of + # storage to the HECO Companies' grids." 
+ # -- D&O 36604, https://dms.puc.hawaii.gov/dms/DocumentViewer?pid=A1001001A19J10A90756F00117 + # First 6 approved projects (dockets 2018-0430, -0431, -0432, -0434, -0435, and -0436) are listed at + # -- https://www.hawaiianelectric.com/six-low-priced-solar-plus-storage-projects-approved-for-oahu-maui-and-hawaii-islands + # On 8/20/19, PUC approved 7th project, 12.5 MW/50 MWh AES solar+storage (docket 2019-0050, order 36480) + # -- https://dms.puc.hawaii.gov/dms/DocumentViewer?pid=A1001001A19H21B03929E00301 + # -- https://www.hawaiianelectric.com/puc-approves-grid-scale-solar-project-in-west-oahu + # As of 10/22/19, 8th project, 15 MW/60 MWh solar+storage on Maui, is still under review (docket 2018-0433) + # In an email to Ulupono May 23, 2020, Rod Aoki (HECO) said all RFP 1 + # projects would be online at the end of 2022. However, HECO project + # status board + # (https://www.hawaiianelectric.com/clean-energy-hawaii/our-clean-energy-portfolio/renewable-project-status-board, + # 2020-02-27 - 2020-08-07) says they all come online in 2021. "HECO + # construction plan 2020-03-17.docx" and HECO SOP 2 Exhibit P2 + # (https://dms.puc.hawaii.gov/dms/DocumentViewer?pid=A1001001A20F19A83157F00805) + # sort of split the difference -- AES West Oahu online in 2021, others + # in 2022. So we use that as the best estimate for modeling. + (2021, "LargePV", 12.5, "RFP stage 1"), # AES West Oahu Solar + (2022, "LargePV", 52, "RFP stage 1"), # Hoohana Solar 1 + (2022, "LargePV", 39, "RFP stage 1"), # Mililani I Solar + (2022, "LargePV", 36, "RFP stage 1"), # Waiawa Solar + # storage associated with large PV projects; we assume this will be used + # efficiently, so we model it along with other large-scale storage. + (2021, "Battery_Bulk", (12.5, 4), "RFP stage 1"), # AES West Oahu Solar + (2022, "Battery_Bulk", (52, 4), "RFP stage 1"), # Hoohana Solar 1 + (2022, "Battery_Bulk", (39, 4), "RFP stage 1"), # Mililani I Solar + (2022, "Battery_Bulk", (36, 4), "RFP stage 1"), # Waiawa Solar + # Oahu RFP Stage 2 projects, retrieved 2020-08-06 from + # https://www.hawaiianelectric.com/clean-energy-hawaii/our-clean-energy-portfolio/renewable-project-status-board + # We assume they come online at _end_ of year (start of next year). + # See "data/Generator Info/HECO RFP Stage 2 summary 2020-08-06.xlsx" to generate this code. + # Also see https://www.hawaiianelectric.com/hawaiian-electric-selects-16-projects-in-largest-quest-for-renewable-energy-energy-storage-for-3-islands + # (That included two more projects which were subsequently withdrawn.) 
+ (2023, "LargePV", 60, "RFP stage 2"), # Kupehau Solar + (2023, "LargePV", 42, "RFP stage 2"), # Kupono Solar + (2024, "LargePV", 15, "RFP stage 2"), # Barbers Point Solar + (2024, "LargePV", 120, "RFP stage 2"), # Mahi Solar + (2024, "LargePV", 7, "RFP stage 2"), # Mountain View Solar + (2024, "LargePV", 30, "RFP stage 2"), # Waiawa Phase 2 Solar + (2023, "Battery_Bulk", (185, 3.054), "RFP stage 2"), # Kapolei Energy Storage + (2023, "Battery_Bulk", (60, 4), "RFP stage 2"), # Kupehau Solar + (2023, "Battery_Bulk", (42, 4), "RFP stage 2"), # Kupono Solar + (2024, "Battery_Bulk", (15, 4), "RFP stage 2"), # Barbers Point Solar + (2024, "Battery_Bulk", (120, 4), "RFP stage 2"), # Mahi Solar + (2024, "Battery_Bulk", (7, 5), "RFP stage 2"), # Mountain View Solar + (2024, "Battery_Bulk", (30, 8), "RFP stage 2"), # Waiawa Phase 2 Solar + # NOTE: Samantha Ruiz (Ulupono) email 2020-05-21 and 5/26/20 14:36 says + # PUC directed HECO to install these by 2022, but she thinks 2023-24 is + # more likely. In email to Ulupono 5/23/20, Rod Aoki (HECO) said all RFP + # 2 projects would come online at end of 2025 (forwarded by Murray Clay, + # Ulupono, 5/23/20). + # Note: HECO said in "HECO construction plan 2020-03-17.docx" that RFP 2 + # would add 1,300 GWh/year in 2025; their renewable project status board + # (10/2019-5/2020) says the same amount in 2022-25. (Proposals were due + # 11/5/19 for up to this amount.) This would be about 560 GW (calculation + # below), or 594 GWh according to HECO + # https://www.hawaiianelectric.com/hawaiis-largest-renewable-energy-push-detailed-in-new-procurement-plan + # That is larger than what they ended up procuring. Original plan could + # also have been a mix of wind and solar, but final procurement was only + # solar. + # avg. cap factor for 560 MW starting after 390 best MW have been installed + # (existing projects + FIT + CBRE 1 + half of CBRE 2 + RFP 1) is 26.6%; see + # "select site, max_capacity, avg(cap_factor) from cap_factor natural join project where technology = 'CentralTrackingPV' group by 1, 2 order by 3 desc;" + # and (120*.271+247*.265+193*.264)/(120+247+193) + # Then (1,300,000 MWh/y)/(.266 * 8766 h/y) = 558 MW + # Apply an extra chunk of PV in 2025. This could be done for either of + # two reasons: + # (a) If there is no forecast for 2025, then Switch would build a lot + # in 2025 and then we would interpolate that to 2023-25. This would be + # an unrealistic installatino rate and would clobber the 2023-24 + # forecasts. + # (b) If there is a forecast for 2025 that is lower than 2024 and lower + # than the 2026 value chosen by Switch (0.2 * 2030 value), then that + # leaves a weird gap that we want to fill. (For now, we handle that case + # by potentially interpolating back from 2030 to 2025 instead of 2026.) + # (2025, 'LargePV', 50), + # PSIP 2016-12-23 Table 4-1 included 90 MW of contingency battery in 2019 + # and https://www.hawaiianelectric.com/documents/clean_energy_hawaii/selling_power_to_the_utility/competitive_bidding/20190207_tri_company_future_procurement.pdf + # says the 2016-12 plan was to do 70 MW contingency in 2019 and more contingency/regulation in 2020 + # There has been no further discussion of these as of 10/22/19, so we assume they are + # replaced by storage that comes with the PV systems. 
+ # PSIP 2016: (2019, 'Battery_Conting', 90), + ] + [ + # Assume no new distributed generation or batteries after 2045 + # (we need some forecast to avoid picking winners between large PV + # and dist PV, and forecasting continuous increases in distpv would + # be redundant with already adequate large-renewables) + (y, t, 0.0, "late freeze") + for y in range(2046, 2060) + for t in ["DistPV", "DistBattery"] + ] + # No new generation in early years beyond what's shown above + # (this will also block construction of these techs in all years if the + # --psip-force flag is set) + tech_group_targets_definite += [ + (y, t, 0.0, "early freeze") + for techs, years in [ + (("OnshoreWind", "OffshoreWind", "LargePV"), range(2020, 2025 + 1)), + ( + ( + "IC_Barge", + "IC_MCBH", + "IC_Schofield", + "CC_152", + "Battery_Conting", + "Battery_Reg", + ), + range(2020, 2023 + 1), + ), + ] + for t in techs + for y in years + ] + + if m.options.psip_no_additional_onshore_wind: + tech_group_targets_definite += [ + (y, "OnshoreWind", 0.0, "block onshore wind") for y in range(2020, 2056) + ] + + # add targets specified on the command line + # TODO: allow repeated invocation + if m.options.force_build is not None: + b = list(m.options.force_build) + build = ( + int(b[0]), # year + b[1], # tech + # quantity + float(b[2]) if len(b) == 3 else (float(b[2]), float(b[3])), + "manual override", + ) + print("Forcing build: {}".format(build)) + tech_group_targets_definite.append(build) + + # technologies proposed in "HECO construction plan 2020-03-17.docx" but which may not be built if a better plan is found. + tech_group_targets_psip = [ + (2026, "CC_152", 150.586, "HECO plan 3/17/20"), + (2028, "CC_152", 150.586, "HECO plan 3/17/20"), + (2030, "Battery_Bulk", (165, 4), "HECO plan 3/17/20"), + (2032, "CC_152", 2 * 150.586, "HECO plan 3/17/20"), + (2035, "Battery_Bulk", (168, 4), "HECO plan 3/17/20"), + (2040, "LargePV", 280, "HECO plan 3/17/20"), + (2040, "Battery_Bulk", (420, 4), "HECO plan 3/17/20"), + (2045, "LargePV", 1180, "HECO plan 3/17/20"), + (2045, "Battery_Bulk", (1525, 4), "HECO plan 3/17/20"), + ( + 2045, + "IC_Barge", + 4 * 16.786392, + "HECO plan 3/17/20", + ), # proxy for 4*17 MW of generic ICE capacity + # RESOLVE modeled 4-hour batteries as being capable of providing reserves, + # and didn't model contingency batteries (see data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/CA-IR-1/Input + # and Output Files by Case/E3 and Company Defined Cases/Market DGPV (Reference)/OA_NOLNG/technologies.tab). + # Then HECO added a 90 MW contingency battery (table 4-1 of PSIP 2016-12-23). + # Note: RESOLVE can get reserves from batteries (they only considered 4-hour batteries), but not + # from EVs or flexible demand. + # DR: Looking at RESOLVE inputs, it seems like they take roughly 4% of load, and allow it to be doubled + # or cut to zero each hour (need to double-check this beyond first day). Maybe this includes EVs? + # (no separate sign of EVs). + # TODO: check Resolve load levels against Switch. + # TODO: maybe I should switch over to using the ABC curves and load profiles that HECO used with PLEXOS + # (for all islands). + # TODO: Did HECO assume 4-hour batteries, demand response or EVs could provide reserves when running PLEXOS? + # - all of these seem unlikely, but we have to ask HECO to find out; PLEXOS files are unclear. 
+ ] + + if psip: + if m.options.psip_relax_after is not None: + # NOTE: this could be moved later, if we want this flag to relax + # both the definite and psip targets + psip_targets = [ + t for t in tech_group_targets_psip if t[0] <= m.options.psip_relax_after + ] + else: + psip_targets = tech_group_targets_psip.copy() + tech_group_targets = tech_group_targets_definite + psip_targets + else: + # must make a copy here so that rebuilds will be added to + # tech_group_targets but not tech_group_targets_definite + tech_group_targets = tech_group_targets_definite.copy() + + # Show which technologies can contribute to the target for each technology + # group and which group each technology contributes to + techs_for_tech_group = { + "DistPV": ["DistPV", "SlopedDistPV", "FlatDistPV"], + "LargePV": ["CentralTrackingPV", "CentralFixedPV"], + } + # use the rest as-is + missing_techs = {t for y, t, s, l in tech_group_targets}.difference( + techs_for_tech_group.keys() + ) + techs_for_tech_group.update({t: [t] for t in missing_techs}) + # create a reverse mapping + tech_tech_group = { + tech: tech_group + for tech_group, techs in techs_for_tech_group.items() + for tech in techs + } + + # Rebuild renewable projects and forecasted technologies at retirement. + # In the future we may be able to simplify this by enforcing capacity targets + # instead of construction targets. + + # note: this behavior is consistent with the following: + # discussion on p. 3-8 of PSIP 2016-12-23 vol. 1. + # Resolve applied planned wind and solar as set levels through 2045, not set additions in each year. + # Table 4-1 shows final plans that were sent to Plexos; Plexos input files in + # data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/DBEDT-IR-12/Input/Oahu/Oahu E3 Plan Input/CSV files/Theme 5 + # show optional capacity built in 2020 or 2025 (in list below) continuing in service in 2045. + # and Plexos input files in data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/DBEDT-IR-12/Input/Oahu/Oahu E3 Plan Input/CSV files/PSIP Max Capacity.csv + # don't show any retirements of wind and solar included as "planned" in RESOLVE and "existing" in Switch + # (Waivers PV1, West Loch; Kawailoa may be omitted?) + # also note: Plexos input files in XX + # show max battery capacity equal to sum of all prior additions + + # m = lambda: 3; m.options = m; m.options.inputs_dir = '/Users/matthias/Dropbox/Research/Ulupono/Enovation Model/pbr_scenario/inputs' + gen_info = pd.read_csv(os.path.join(m.options.inputs_dir, "gen_info.csv")) + gen_info["tech_group"] = gen_info["gen_tech"].map(tech_tech_group) + gen_info = gen_info[gen_info["tech_group"].notna()] + # existing technologies are also subject to rebuilding + existing_techs = ( + pd.read_csv( + os.path.join(m.options.inputs_dir, "gen_build_predetermined.csv"), + na_values=["."], + ) + .merge(gen_info, how="inner") + .groupby(["build_year", "tech_group"])[ + ["build_gen_predetermined", "build_gen_energy_predetermined"] + ] + .agg(lambda x: x.sum(skipna=False)) + .reset_index() + ) + ages = gen_info.groupby("tech_group")["gen_max_age"].agg(["min", "max", "mean"]) + assert all(ages["min"] == ages["max"]), "Some psip technologies have mixed ages." 
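+    # Illustration of the rebuild logic below (hypothetical numbers): a tech
+    # group with gen_max_age = 30 and a 20 MW target in 2022 gets the same
+    # 20 MW target re-added in 2052, 2082, ... up to the last study period,
+    # so capacity reaching end of life is always replaced in kind. Existing
+    # capacity is first converted to the same MW or (MW, hours) form used by
+    # the targets above.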
+ last_period = pd.read_csv(os.path.join(m.options.inputs_dir, "periods.csv")).iloc[ + -1, 0 + ] + + # rebuild all renewables and batteries in place before the start of the study, + # plus any technologies with targets specified here + rebuildable_targets = [ + (y, t, (mw if isnan(mwh) else (mw, mwh / mw)), "existing") + for i, y, t, mw, mwh in existing_techs.itertuples() + if is_renewable(t) or is_battery(t) + ] + tech_group_targets + tech_life = dict() + for build_year, tech_group, cap, label in rebuildable_targets: + if tech_group not in ages.index: + raise ValueError( + "A target has been specified for {} but there are no matching " + "technologies in gen_info.csv.".format(tech_group) + ) + max_age = ages.loc[tech_group, "mean"] + tech_life[tech_group] = max_age + rebuild_year = build_year + max_age + while rebuild_year <= last_period: + tech_group_targets.append( + (rebuild_year, tech_group, cap, "rebuild " + label) + ) + rebuild_year += max_age + del gen_info, existing_techs, ages, rebuildable_targets + + # we also convert to normal python datatypes to support serialization + tech_group_power_targets = [ + (int(y), t, float(q[0] if type(q) is tuple else q), l) + for y, t, q, l in tech_group_targets + ] + tech_group_energy_targets = [ + (int(y), t, float(q[0] * q[1]), l) + for y, t, q, l in tech_group_targets + if type(q) is tuple + ] + + # import pdb; pdb.set_trace() + + m.FORECASTED_TECH_GROUPS = Set( + dimen=1, initialize=list(techs_for_tech_group.keys()) + ) + m.FORECASTED_TECH_GROUP_TECHS = Set( + m.FORECASTED_TECH_GROUPS, dimen=1, initialize=techs_for_tech_group + ) + m.FORECASTED_TECHS = Set(dimen=1, initialize=list(tech_tech_group.keys())) + m.tech_tech_group = Param( + m.FORECASTED_TECHS, within=Any, initialize=tech_tech_group + ) + + # make a list of renewable technologies + m.RENEWABLE_TECH_GROUPS = Set( + dimen=1, + initialize=m.FORECASTED_TECH_GROUPS, + filter=lambda m, tg: is_renewable(tg), + ) + + def tech_group_target(m, per, tech, targets): + """Find the amount of each technology that is targeted to be built + between the start of the previous period and the start of the current + period and not yet retired.""" + start = 0 if per == m.PERIODS.first() else m.PERIODS.prev(per) + end = per + target = sum( + q + for (tyear, ttech, q, l) in targets + if ttech == tech + and start < tyear + and tyear <= end + and tyear + tech_life[ttech] > end + ) + return target + + def rule(m, per, tech): + return tech_group_target(m, per, tech, tech_group_power_targets) + + m.tech_group_power_target = Param( + m.PERIODS, m.FORECASTED_TECH_GROUPS, within=Reals, initialize=rule + ) + + def rule(m, per, tech): + return tech_group_target(m, per, tech, tech_group_energy_targets) + + m.tech_group_energy_target = Param( + m.PERIODS, m.FORECASTED_TECH_GROUPS, within=Reals, initialize=rule + ) + + def MakeTechGroupDicts_rule(m): + # get unit sizes of all technologies + unit_sizes = m.tech_group_unit_size_dict = defaultdict(float) + for g, unit_size in m.gen_unit_size.items(): + tech = m.gen_tech[g] + if tech in m.FORECASTED_TECHS: + tech_group = m.tech_tech_group[tech] + if tech_group in unit_sizes: + if unit_sizes[tech_group] != unit_size: + raise ValueError( + "Generation technology {} uses different unit sizes for different projects." 
+                            .format(tech_group)
+                        )
+                else:
+                    unit_sizes[tech_group] = unit_size
+        # get predetermined capacity for all technologies
+        m.tech_group_predetermined_power_cap_dict = defaultdict(float)
+        for (g, per), cap in m.build_gen_predetermined.items():
+            tech = m.gen_tech[g]
+            if tech in m.FORECASTED_TECHS:
+                tech_group = m.tech_tech_group[tech]
+                m.tech_group_predetermined_power_cap_dict[tech_group, per] += cap
+        m.tech_group_predetermined_energy_cap_dict = defaultdict(float)
+        for (g, per), cap in m.build_gen_energy_predetermined.items():
+            tech = m.gen_tech[g]
+            # only count storage whose energy capacity is sized independently
+            # of its power rating (no fixed energy-to-power ratio)
+            if (
+                tech in m.FORECASTED_TECHS
+                and g in m.STORAGE_GENS
+                and m.gen_storage_energy_to_power_ratio[g] == float("inf")
+            ):
+                tech_group = m.tech_tech_group[tech]
+                m.tech_group_predetermined_energy_cap_dict[tech_group, per] += cap
+
+    m.MakeTechGroupDicts = BuildAction(rule=MakeTechGroupDicts_rule)
+
+    # Find last date for which a definite target was specified for each tech group.
+    # This sets the last year when construction of a technology is fixed at a
+    # predetermined level in the "most-likely" (non-PSIP) cases.
+    # This ignores PSIP targets, since _all_ construction is frozen when those are
+    # used, and ignores reconstruction targets, because those just follow on from
+    # the early-years construction, and we don't want to freeze construction all
+    # the way through.
+    last_definite_target = dict()
+    for y, t, q, l in tech_group_targets_definite:
+        last_definite_target[t] = max(y, last_definite_target.get(t, 0))
+
+    # Save targets and group definitions for future reference
+    import json
+
+    os.makedirs(m.options.outputs_dir, exist_ok=True)  # avoid errors with new dir
+    with open(os.path.join(m.options.outputs_dir, "heco_outlook.json"), "w") as f:
+        json.dump(
+            {
+                "tech_group_power_targets": tech_group_power_targets,
+                "tech_group_energy_targets": tech_group_energy_targets,
+                "techs_for_tech_group": techs_for_tech_group,
+                "tech_tech_group": tech_tech_group,
+                "last_definite_target": last_definite_target,
+            },
+            f,
+            indent=4,
+        )
+
+    # def build_tech_group_in_period(m, tech_group, period):
+    #     """
+    #     How much capacity is added in this tech_group in this period?
+    #     Returns literal 0 if and only if there are no matching projects.
+    #     Otherwise returns a Pyomo expression.
+    #     """
+    #     return sum(
+    #         build_var[g, period]
+    #         for g in m.GENERATION_PROJECTS
+    #         if m.gen_tech[g] in m.FORECASTED_TECHS
+    #         and m.tech_tech_group[m.gen_tech[g]] == tech_group
+    #         and (g, period) in build_var,
+    #         0
+    #     )
+
+    # # allow extra solar in 2025, up to the point of straight-line additions
+    # # between 2025 and 2030 (inclusive)
+    # ####### We don't do this here, we just interpolate back from 2030 to 2025
+    # ####### instead of 2026 (slightly less optimal, but much simpler)
+    # if last_definite_target['LargePV'] == 2025:
+    #     last_definite_target['LargePV'] = 2024  # use target as lower bound in 2025
+    #     print("="*80)
+    #     print("NOTE: Using HECO 2025 LargePV plan as lower bound, not fixed target.")
+    #     print("="*80)
+    #     ##### slack variable to allow 2025 to overshoot 20% of 2030 if needed
+    #     m.SolarOvershoot2025 = Var(within=NonNegativeReals)
+    #     def rule(m):
+    #         build2025 = build_tech_group_in_period['LargePV', 2025]
+    #         build2030 = build_tech_group_in_period['LargePV', 2030]
+    #         ####### This doesn't work, needs a big-M constraint to force
+    #         ####### build2025 to be below the max of the target or 0.2 * build2030
+    #         return build2025 - m.SolarOvershoot2025 <= 0.2 * build2030
+    #     m.Even_Increment_Solar_2025 = Constraint(rule=rule)
+    # else:
+    #     raise ValueError(
+    #         'Expected last HECO target for LargePV to be in 2025, but it is in {}.'
+    #         .format(last_definite_target['LargePV'])
+    #     )
+
+    def tech_group_target_rule(m, per, tech_group, build_var, target):
+        """
+        Enforce the construction target for each technology group.
+
+        With PSIP in effect, construction must exactly match the targets
+        (possibly zero). Without PSIP, construction must equal the definite
+        targets during the time range when they are specified and must be at
+        least the target level in later periods. In each case, the target for
+        a period is the sum of the targets that fall between the start of the
+        prior period and the start of this one (targets dated before the
+        first period count toward the first period).
+        """
+        build = sum(
+            build_var[g, per]
+            for g in m.GENERATION_PROJECTS
+            if m.gen_tech[g] in m.FORECASTED_TECHS
+            and m.tech_tech_group[m.gen_tech[g]] == tech_group
+            and (g, per) in build_var
+        )
+
+        if isinstance(build, int) and build == 0:
+            # no matching projects found, left with literal 0
+            if target == 0:
+                return Constraint.Skip
+            else:
+                raise ValueError(
+                    "Target was set for {} in {}, but no matching projects are available.".format(
+                        tech_group, per
+                    )
+                )
+
+        if psip and (
+            m.options.psip_relax_after is None or per <= m.options.psip_relax_after
+        ):
+            # PSIP in effect: exactly match the target (possibly zero)
+            return build == target
+        elif per <= last_definite_target.get(tech_group, 0):
+            # PSIP not in effect, but a definite target is
+            return build == target
+        elif (
+            m.options.psip_minimal_renewables and tech_group in m.RENEWABLE_TECH_GROUPS
+        ):
+            # Only build the specified amount of renewables, no more.
+            # This is used to apply the definite targets, but otherwise minimize renewable development.
+ return build == target + else: + # treat the target as a lower bound + return build >= target + + def rule(m, per, tech_group): + # get target, including any capacity specified in the predetermined builds, + # so the target will be additional to those + target = ( + m.tech_group_power_target[per, tech_group] + + m.tech_group_predetermined_power_cap_dict[tech_group, per] + ) + return tech_group_target_rule(m, per, tech_group, m.BuildGen, target) + + m.Enforce_Tech_Group_Power_Target = Constraint( + m.PERIODS, m.FORECASTED_TECH_GROUPS, rule=rule + ) + + def rule(m, per, tech_group): + # get target, including any capacity specified in the predetermined builds, + # so the target will be additional to those + target = ( + m.tech_group_energy_target[per, tech_group] + + m.tech_group_predetermined_energy_cap_dict[tech_group, per] + ) + return tech_group_target_rule(m, per, tech_group, m.BuildStorageEnergy, target) + + m.Enforce_Tech_Group_Energy_Target = Constraint( + m.PERIODS, m.FORECASTED_TECH_GROUPS, rule=rule + ) + + if psip: + + def rule(m): + buildable_techs = set(m.gen_tech[g] for (g, y) in m.NEW_GEN_BLD_YRS) + if buildable_techs - set(m.FORECASTED_TECHS): + # TODO: automatically add zero-targets + m.logger.error( + "\nERROR: You need to provide at least one zero target for " + "each technology without targets in the PSIP to prevent it " + "from being built." + ) + return False + else: + return True + + m.Check_For_Buildable_Techs_Under_PSIP = BuildCheck(rule=rule) + + # don't allow construction of other technologies (e.g., pumped hydro, fuel cells) + advanced_tech_vars = [ + "BuildPumpedHydroMW", + "BuildAnyPumpedHydro", + "BuildElectrolyzerMW", + "BuildLiquifierKgPerHour", + "BuildLiquidHydrogenTankKg", + "BuildFuelCellMW", + ] + + def no_advanced_tech_rule_factory(v): + return lambda m, *k: (getattr(m, v)[k] == 0) + + for v in advanced_tech_vars: + try: + var = getattr(m, v) + setattr( + m, + "PSIP_No_" + v, + Constraint(var._index, rule=no_advanced_tech_rule_factory(v)), + ) + except AttributeError: + pass # model doesn't have this var diff --git a/switch_model/hawaii/heco_plan_2020_06.py b/switch_model/hawaii/heco_plan_2020_06.py new file mode 100644 index 000000000..fc593418f --- /dev/null +++ b/switch_model/hawaii/heco_plan_2020_06.py @@ -0,0 +1,768 @@ +from __future__ import division +from __future__ import print_function +from collections import defaultdict +from textwrap import dedent +import os +from pyomo.environ import * +import pandas as pd +import time + +# This module represents HECO's outlook as described in their modeling work in +# March-June 2020. Use the --psip-force flag to apply the plan they specified +# for that work too. 
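+#
+# Example invocation (a sketch, not a tested recipe; it assumes this module
+# is activated via modules.txt or Switch's --include-modules option):
+#   switch solve --include-modules switch_model.hawaii.heco_plan_2020_06 --psip-force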
+ +# See psip_2016_12 and heco_outlook_2020_06 for documentation of general structure + + +def TODO(note): + raise NotImplementedError(dedent(note).strip()) + + +def NOTE(note): + print("=" * 80) + print("{}:".format(__name__)) + print(dedent(note).strip()) + print("=" * 80) + print() + # time.sleep(2) + + +def define_arguments(argparser): + argparser.add_argument( + "--psip-force", + action="store_true", + default=False, + help="Force following of PSIP plans (building exact amounts of certain technologies).", + ) + argparser.add_argument( + "--psip-relax", + dest="psip_force", + action="store_false", + help="Relax PSIP plans, to find a more optimal strategy.", + ) + argparser.add_argument( + "--psip-minimal-renewables", + action="store_true", + default=False, + help="Use only the amount of renewables shown in PSIP plans, and no more (should be combined with --psip-relax).", + ) + argparser.add_argument( + "--force-build", + nargs=3, + default=None, + help="Force construction of at least a certain quantity of a particular technology during certain years. Space-separated list of year, technology and quantity.", + ) + argparser.add_argument( + "--psip-relax-after", + type=float, + default=None, + help="Follow the PSIP plan up to and including the specified year, then optimize construction in later years. Should be combined with --psip-force.", + ) + + argparser.add_argument( + "--psip-allow-more-solar-2025", + action="store_true", + default=False, + help="Treat 2025 target for LargePV as lower limit, not exact target.", + ) + argparser.add_argument( + "--psip-no-additional-onshore-wind", + action="store_true", + default=False, + help="Don't allow construction of any onshore wind beyond the current plan.", + ) + + +def is_renewable(tech): + return any(txt in tech for txt in ("PV", "Wind", "Solar")) + + +def is_battery(tech): + return "battery" in tech.lower() + + +def define_components(m): + + # decide whether to enforce the PSIP preferred plan + # if an environment variable is set, that takes precedence + # (e.g., on a cluster to override options.txt) + psip_env_var = os.environ.get("USE_PSIP_PLAN") + if psip_env_var is None: + # no environment variable; use the --psip-relax flag + psip = m.options.psip_force + elif psip_env_var.lower() in ["1", "true", "y", "yes", "on"]: + psip = True + elif psip_env_var.lower() in ["0", "false", "n", "no", "off"]: + psip = False + else: + raise ValueError( + "Unrecognized value for environment variable USE_PSIP_PLAN={} (should be 0 or 1)".format( + psip_env_var + ) + ) + + if m.options.verbose: + if psip: + print("Using PSIP construction plan.") + else: + print( + "Relaxing PSIP construction plan (optimizing around forecasted adoption)." + ) + + # make sure LNG is turned off + if ( + psip + and "LNG" in m.FUELS + and getattr(m.options, "force_lng_tier", []) != ["none"] + ): + raise RuntimeError( + "To match the PSIP with LNG available, you must use the lng_conversion " + 'module and set "--force-lng-tier none".' + ) + + tech_group_targets_definite = [ + # HECO seems to have left Pearl City Peninsula Solar Park out of their plan + # (they call it "other solar"), so we cancel it out here + (2020, "LargePV", -1, "missing Pearl City Solar"), + # HECO March 2020 forecast + # /s/data/Generator Info/HECO Dist PV Forecast 2018-03-17.xlsx + # We assume all DistPV and DistBattery are used efficiently/optimally, + # i.e., we do not attempt to model non-optimal pairing of DistPV with + # DistBattery or curtailment on self-supply tariffs. 
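+        # Each target below is (build_year, tech_group, quantity, label);
+        # quantity is either MW, or a (MW, hours) tuple for storage, in which
+        # case the energy (MWh) target is MW * hours.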
+ (2020, "DistPV", 0, "DER forecast"), + (2021, "DistPV", 0, "DER forecast"), + (2022, "DistPV", 15.3, "DER forecast"), + (2023, "DistPV", 0, "DER forecast"), + (2024, "DistPV", 29.5, "DER forecast"), + (2025, "DistPV", 22.9, "DER forecast"), + (2026, "DistPV", 19.1, "DER forecast"), + (2027, "DistPV", 23.1, "DER forecast"), + (2028, "DistPV", 24.4, "DER forecast"), + (2029, "DistPV", 25.8, "DER forecast"), + (2030, "DistPV", 27.3, "DER forecast"), + (2031, "DistPV", 28.4, "DER forecast"), + (2032, "DistPV", 29.7, "DER forecast"), + (2033, "DistPV", 30.5, "DER forecast"), + (2034, "DistPV", 31.3, "DER forecast"), + (2035, "DistPV", 32.2, "DER forecast"), + (2036, "DistPV", 32.5, "DER forecast"), + (2037, "DistPV", 32.9, "DER forecast"), + (2038, "DistPV", 33.3, "DER forecast"), + (2039, "DistPV", 32.7, "DER forecast"), + (2040, "DistPV", 33.2, "DER forecast"), + (2041, "DistPV", 33, "DER forecast"), + (2042, "DistPV", 33.1, "DER forecast"), + (2043, "DistPV", 33.3, "DER forecast"), + (2044, "DistPV", 33.5, "DER forecast"), + (2045, "DistPV", 33.3, "DER forecast"), + # note: HECO provides a MWh forecast; we assume inverters are large + # enough to charge in 4h + (2020, "DistBattery", (240.85, 4), "DER forecast"), + (2021, "DistBattery", (0, 4), "DER forecast"), + (2022, "DistBattery", (18.1, 4), "DER forecast"), + (2023, "DistBattery", (0, 4), "DER forecast"), + (2024, "DistBattery", (13.675, 4), "DER forecast"), + (2025, "DistBattery", (11.625, 4), "DER forecast"), + (2026, "DistBattery", (7.7, 4), "DER forecast"), + (2027, "DistBattery", (9.1, 4), "DER forecast"), + (2028, "DistBattery", (10.325, 4), "DER forecast"), + (2029, "DistBattery", (10.15, 4), "DER forecast"), + (2030, "DistBattery", (11.425, 4), "DER forecast"), + (2031, "DistBattery", (12.325, 4), "DER forecast"), + (2032, "DistBattery", (12.825, 4), "DER forecast"), + (2033, "DistBattery", (12.5, 4), "DER forecast"), + (2034, "DistBattery", (13.325, 4), "DER forecast"), + (2035, "DistBattery", (13.875, 4), "DER forecast"), + (2036, "DistBattery", (14.175, 4), "DER forecast"), + (2037, "DistBattery", (13.25, 4), "DER forecast"), + (2038, "DistBattery", (14.275, 4), "DER forecast"), + (2039, "DistBattery", (14.4, 4), "DER forecast"), + (2040, "DistBattery", (13.925, 4), "DER forecast"), + (2041, "DistBattery", (13.325, 4), "DER forecast"), + (2042, "DistBattery", (13.825, 4), "DER forecast"), + (2043, "DistBattery", (14.475, 4), "DER forecast"), + (2044, "DistBattery", (13.725, 4), "DER forecast"), + (2045, "DistBattery", (13.075, 4), "DER forecast"), + # HECO feed-in tariff (FIT) projects under construction as of 10/22/19, from + # https://www.hawaiianelectric.com/clean-energy-hawaii/our-clean-energy-portfolio/renewable-project-status-board + # NOTE: PSIP Figure J-10 says these are in addition to the customer DGPV + # adoption forecast but they are not in "HECO construction plan 2020-03-17.docx". + # Samantha Ruiz (Ulupono) recommended in email 5/26/20 to count them as + # non-DER esp. since HECO's March 2020 DER forecast is flat in early + # years. Note: these are probably fixed-axis rather than tracking (i.e., + # more like DistPV than LargePV), and they are at particular locations. + # But we include them here as LargePV and put them here instead of in + # existing projects because (a) they don't reduce available roof + # inventory and (b) counting them as existing but built in 2020 would + # block construction of additional large PV in 2020. 
+        (
+            2021,
+            "LargePV",
+            5,
+            "Aloha Solar II",
+        ),  # Aloha Solar Energy Fund II, online 4/2/20
+        (2021, "LargePV", 3.5, "Mauka FIT 1"),  # Mauka FIT 1
+        # note: Mauka FIT 1 and Na Pua Makani (below) are scheduled to come online
+        # in 2020, but they are not online yet as of 6/4/2020, so we model them
+        # as starting 1/1/2021.
+        # Na Pua Makani (NPM) wind
+        # 24 MW scheduled for 2018 in the PSIP, but still under construction in late 2019;
+        # Reported as 24 MW to be online in 2020 in
+        # https://www.hawaiianelectric.com/clean-energy-hawaii/our-clean-energy-portfolio/renewable-project-status-board (accessed 10/22/19)
+        # Listed as 27 MW with operation beginning by summer 2020 on https://www.napuamakanihawaii.org/fact-sheet/
+        # TODO: Is Na Pua Makani 24 MW or 27 MW?
+        (2020, "OnshoreWind", 24, "Na Pua Makani"),
+        # PSIP 2016: (2018, 'OnshoreWind', 24),
+        # CBRE wind and PV
+        # Final order given allowing HECO to proceed with standardized contracts
+        # in June 2018: https://cca.hawaii.gov/dca/files/2018/07/Order-No-35560-HECO-CBRE.pdf
+        # "At the ten-month milestone [June 2019], three projects have half-executed standard
+        # form contracts ("SFCs") and interconnection agreements." None had subscribers or were
+        # under construction at this point.
+        # https://dms.puc.hawaii.gov/dms/DocumentViewer?pid=A1001001A19G15A93031F00794
+        # In Oct. 2019, HECO's website said it had agreement(s) in place for 4,990 kW
+        # of the 5,000 kW of solar allowed in Phase 1, with 330 kW in queue. I think the
+        # June 2018 D&O said this will roll over to Phase 2. No mention of wind on
+        # the HECO program website.
+        # https://www.hawaiianelectric.com/products-and-services/customer-renewable-programs/community-solar
+        # According to a HECO press release, the first phase includes (only) 8 MW
+        # of solar on all islands (5 MW on Oahu). Other techs will be included
+        # in phase 2, which will begin "about two years" from 7/2018.
+        # https://www.hawaiianelectric.com/regulators-approve-community-solar-plans
+        # In 11/19/19 data sharing, HECO reported "One project for CBRE Phase 1
+        # on O'ahu is slated to be installed by Q4 of 2019. Five Phase 1
+        # projects are estimated to be installed in 2020 (one in Q2 2020 and
+        # four in Q4 2020). Lastly, two projects are estimated to be installed
+        # in Q3 of 2021.". In heco_outlook_2019 we broke these up into
+        # installations in 2019, 2020 and 2021, but in "HECO construction plan 2020-03-17.docx"
+        # they treat them all as being installed in 2020, so we do that now.
+        (2020, "LargePV", 5, "CBRE Phase 1"),  # CBRE Phase 1
+        # Original CBRE program design had only 72 MW in phase 1 and 2 (leaving
+        # 64 MW for phase 2), but HECO suggested increasing this to 235 MW over
+        # 5 years. HECO said this was because of projected shortfalls in the DER
+        # program. Joint Parties say it should be possible to accept all of this
+        # earlier and expand the program if it goes quickly, and this should not
+        # be used to limit DER adoption.
+        # https://dms.puc.hawaii.gov/dms/DocumentViewer?pid=A1001001A19H20B01349C00185
+        # **** questions:
+        # **** Should we reduce DER forecast in light of HECO's projected shortfall reported in CBRE proceeding?
+        # **** How much solar should we expect on Oahu in CBRE Phase 2 and when?
+        # **** Do we expect any wind on Oahu in CBRE Phase 2, and if so, when?
+        # In heco_outlook_2019, we used 150 MW in 2022 as a placeholder Oahu CBRE Phase 2.
+        # In this version, we switch to 43.5 in 2025, as shown in "HECO construction plan 2020-03-17.docx".
+        # This is in addition to the RFPs noted below.
+        # (2025, 'LargePV', 43.5), # CBRE Phase 2
+        (2025, "LargePV", 43.5, "CBRE phase 2"),
+        # 2018-2019 RFPs (docket 2017-0352)
+        # These replace large PV and bulk batteries reported in PSIP for 2020 and 2022.
+        # TODO: maybe move these to existing plants tables
+        # "On March 25, 2019, the commission approved six ... grid-scale,
+        # solar-plus-storage projects.... Cumulatively, the projects will add 247
+        # megawatts ("MW") of solar energy with almost 1 gigawatt hour of
+        # storage to the HECO Companies' grids."
+        # -- D&O 36604, https://dms.puc.hawaii.gov/dms/DocumentViewer?pid=A1001001A19J10A90756F00117
+        # First 6 approved projects (dockets 2018-0430, -0431, -0432, -0434, -0435, and -0436) are listed at
+        # -- https://www.hawaiianelectric.com/six-low-priced-solar-plus-storage-projects-approved-for-oahu-maui-and-hawaii-islands
+        # On 8/20/19, PUC approved 7th project, 12.5 MW/50 MWh AES solar+storage (docket 2019-0050, order 36480)
+        # -- https://dms.puc.hawaii.gov/dms/DocumentViewer?pid=A1001001A19H21B03929E00301
+        # -- https://www.hawaiianelectric.com/puc-approves-grid-scale-solar-project-in-west-oahu
+        # As of 10/22/19, 8th project, 15 MW/60 MWh solar+storage on Maui, is still under review (docket 2018-0433)
+        # Status of all approved projects and in-service dates are listed at
+        # https://www.hawaiianelectric.com/clean-energy-hawaii/our-clean-energy-portfolio/renewable-project-status-board
+        # They are also shown in "HECO construction plan 2020-03-17.docx".
+        # As of 2020-05-25, both sources say the first one will be online in
+        # 2021 and the other three will be online in 2022. But in an email to
+        # Ulupono May 23, 2020, Rod Aoki (HECO) said all RFP 1 projects would
+        # be online at the end of 2022. In an email to M Fripp 5/21/20,
+        # Samantha Ruiz (Ulupono) said they would likely come online in 2022.
+        # In an email to M Fripp 5/23/20, quoting the Rod Aoki email, Murray
+        # Clay (Ulupono) recommended counting them as coming online at the
+        # start of 2022, not the end. Taking account of all this, we set them
+        # all to start in 2022.
+        (2021, "LargePV", 12.5, "RFP stage 1"),  # AES West Oahu Solar
+        (2022, "LargePV", 52, "RFP stage 1"),  # Hoohana Solar 1
+        (2022, "LargePV", 39, "RFP stage 1"),  # Mililani I Solar
+        (2022, "LargePV", 36, "RFP stage 1"),  # Waiawa Solar
+        # storage associated with large PV projects; we assume this will be used
+        # efficiently, so we model it along with other large-scale storage.
+        (2021, "Battery_Bulk", (12.5, 4), "RFP stage 1"),  # AES West Oahu Solar
+        (2022, "Battery_Bulk", (52, 4), "RFP stage 1"),  # Hoohana Solar 1
+        (2022, "Battery_Bulk", (39, 4), "RFP stage 1"),  # Mililani I Solar
+        (2022, "Battery_Bulk", (36, 4), "RFP stage 1"),  # Waiawa Solar
+        # 200 MW / 6 hour BESS in HECO Phase 2 SOP Exhibit P2 Attachment 1
+        (2022, "Battery_Bulk", (200, 6), "HECO plan"),
+        # Note: HECO said in "HECO construction plan 2020-03-17.docx" that RFP 2
+        # would add 1,300 GWh/year in 2025; their renewable project status board
+        # (10/2019-5/2020) says the same amount in 2022-25.
+        # PBR Phase 2 SOP Attachment 1 says this too.
+        # We think this would be 560 MW, but they think it is 594 MW (see p. 9 of Exhibit A of Dkt 2018-0088 2020-06-18 HECO Phase 2 SOP.pdf)
+        # We use 594 MW, because that meshes better with the total MW reported in their plan.
+        (2025, "LargePV", 594, "RFP stage 2"),
+        # avg.
cap factor for 560 MW starting after 390 best MW have been installed + # (existing projects + FIT + CBRE 1 + half of CBRE 2 + RFP 1) is 26.6%; see + # "select site, max_capacity, avg(cap_factor) from cap_factor natural join project where technology = 'CentralTrackingPV' group by 1, 2 order by 3 desc;" + # and (120*.271+247*.265+193*.264)/(120+247+193) + # Then (1,300,000 MWh/y)/(.266 * 8766 h/y) = 558 MW + # PSIP 2016-12-23 Table 4-1 included 90 MW of contingency battery in 2019 + # and https://www.hawaiianelectric.com/documents/clean_energy_hawaii/selling_power_to_the_utility/competitive_bidding/20190207_tri_company_future_procurement.pdf + # says the 2016-12 plan was to do 70 MW contingency in 2019 and more contingency/regulation in 2020 + # There has been no further discussion of these as of 10/22/19, so we assume they are + # replaced by storage that comes with the PV systems. + # PSIP 2016: (2019, 'Battery_Conting', 90), + ] + [ + # Assume no new distributed generation or batteries after 2045 + # (we need some forecast to avoid picking winners between large PV + # and dist PV, and forecasting continuous increases in distpv would + # be redundant with already adequate large-renewables) + (y, t, 0.0, "late freeze") + for y in range(2046, 2060) + for t in ["DistPV", "DistBattery"] + ] + # No new generation in early years beyond what's shown above + # (this will also block construction of these techs in all years if the + # --psip-force flag is set) + tech_group_targets_definite += [ + (y, t, 0.0, "early freeze") + for techs, years in [ + (("OnshoreWind", "OffshoreWind", "LargePV"), range(2020, 2025 + 1)), + ( + ( + "IC_Barge", + "IC_MCBH", + "IC_Schofield", + "CC_152", + "Battery_Conting", + "Battery_Reg", + ), + range(2020, 2023 + 1), + ), + ] + for t in techs + for y in years + ] + + if m.options.psip_no_additional_onshore_wind: + tech_group_targets_definite += [ + (y, "OnshoreWind", 0.0, "block onshore wind") for y in range(2020, 2056) + ] + + # add targets specified on the command line + # TODO: allow repeated invocation + if m.options.force_build is not None: + b = list(m.options.force_build) + build = ( + int(b[0]), # year + b[1], # tech + # quantity + float(b[2]) if len(b) == 3 else (float(b[2]), float(b[3])), + "manual override", + ) + print("Forcing build: {}".format(build)) + tech_group_targets_definite.append(build) + + # technologies proposed in "HECO construction plan 2020-03-17.docx" but which may not be built if a better plan is found. + tech_group_targets_psip = [ + (2026, "CC_152", 150.586, "HECO plan 3/17/20"), + (2028, "CC_152", 150.586, "HECO plan 3/17/20"), + (2030, "Battery_Bulk", (165, 4), "HECO plan 3/17/20"), + (2032, "CC_152", 2 * 150.586, "HECO plan 3/17/20"), + (2035, "Battery_Bulk", (168, 4), "HECO plan 3/17/20"), + (2040, "LargePV", 280, "HECO plan 3/17/20"), + (2040, "Battery_Bulk", (420, 4), "HECO plan 3/17/20"), + (2045, "LargePV", 1180, "HECO plan 3/17/20"), + (2045, "Battery_Bulk", (1525, 4), "HECO plan 3/17/20"), + ( + 2045, + "IC_Barge", + 4 * 16.786392, + "HECO plan 3/17/20", + ), # proxy for 4*17 MW of generic ICE capacity + # RESOLVE modeled 4-hour batteries as being capable of providing reserves, + # and didn't model contingency batteries (see data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/CA-IR-1/Input + # and Output Files by Case/E3 and Company Defined Cases/Market DGPV (Reference)/OA_NOLNG/technologies.tab). + # Then HECO added a 90 MW contingency battery (table 4-1 of PSIP 2016-12-23). 
+ # Note: RESOLVE can get reserves from batteries (they only considered 4-hour batteries), but not + # from EVs or flexible demand. + # DR: Looking at RESOLVE inputs, it seems like they take roughly 4% of load, and allow it to be doubled + # or cut to zero each hour (need to double-check this beyond first day). Maybe this includes EVs? + # (no separate sign of EVs). + # TODO: check Resolve load levels against Switch. + # TODO: maybe I should switch over to using the ABC curves and load profiles that HECO used with PLEXOS + # (for all islands). + # TODO: Did HECO assume 4-hour batteries, demand response or EVs could provide reserves when running PLEXOS? + # - all of these seem unlikely, but we have to ask HECO to find out; PLEXOS files are unclear. + ] + + if psip: + if m.options.psip_relax_after is not None: + # NOTE: this could be moved later, if we want this flag to relax + # both the definite and psip targets + psip_targets = [ + t for t in tech_group_targets_psip if t[0] <= m.options.psip_relax_after + ] + else: + psip_targets = tech_group_targets_psip.copy() + tech_group_targets = tech_group_targets_definite + psip_targets + else: + # must make a copy here so that rebuilds will be added to + # tech_group_targets but not tech_group_targets_definite + tech_group_targets = tech_group_targets_definite.copy() + + # Show which technologies can contribute to the target for each technology + # group and which group each technology contributes to + techs_for_tech_group = { + "DistPV": ["DistPV", "SlopedDistPV", "FlatDistPV"], + "LargePV": ["CentralTrackingPV", "CentralFixedPV"], + } + # use the rest as-is + missing_techs = {t for y, t, s, l in tech_group_targets}.difference( + techs_for_tech_group.keys() + ) + techs_for_tech_group.update({t: [t] for t in missing_techs}) + # create a reverse mapping + tech_tech_group = { + tech: tech_group + for tech_group, techs in techs_for_tech_group.items() + for tech in techs + } + + # Rebuild renewable projects and forecasted technologies at retirement. + # In the future we may be able to simplify this by enforcing capacity targets + # instead of construction targets. + + # note: this behavior is consistent with the following: + # discussion on p. 3-8 of PSIP 2016-12-23 vol. 1. + # Resolve applied planned wind and solar as set levels through 2045, not set additions in each year. + # Table 4-1 shows final plans that were sent to Plexos; Plexos input files in + # data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/DBEDT-IR-12/Input/Oahu/Oahu E3 Plan Input/CSV files/Theme 5 + # show optional capacity built in 2020 or 2025 (in list below) continuing in service in 2045. + # and Plexos input files in data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/DBEDT-IR-12/Input/Oahu/Oahu E3 Plan Input/CSV files/PSIP Max Capacity.csv + # don't show any retirements of wind and solar included as "planned" in RESOLVE and "existing" in Switch + # (Waivers PV1, West Loch; Kawailoa may be omitted?) 
+ # also note: Plexos input files in XX + # show max battery capacity equal to sum of all prior additions + + # m = lambda: 3; m.options = m; m.options.inputs_dir = '/Users/matthias/Dropbox/Research/Ulupono/Enovation Model/pbr_scenario/inputs' + gen_info = pd.read_csv(os.path.join(m.options.inputs_dir, "gen_info.csv")) + gen_info["tech_group"] = gen_info["gen_tech"].map(tech_tech_group) + gen_info = gen_info[gen_info["tech_group"].notna()] + # existing technologies are also subject to rebuilding + existing_techs = ( + pd.read_csv(os.path.join(m.options.inputs_dir, "gen_build_predetermined.csv")) + .merge(gen_info, how="inner") + .groupby(["build_year", "tech_group"])["build_gen_predetermined"] + .sum() + .reset_index() + ) + assert not any( + is_battery(t) for i, y, t, q in existing_techs.itertuples() + ), "Must update {} to handle pre-existing batteries.".format(__name__) + ages = gen_info.groupby("tech_group")["gen_max_age"].agg(["min", "max", "mean"]) + assert all(ages["min"] == ages["max"]), "Some psip technologies have mixed ages." + last_period = pd.read_csv(os.path.join(m.options.inputs_dir, "periods.csv")).iloc[ + -1, 0 + ] + + # rebuild all renewables and batteries in place before the start of the study, + # plus any technologies with targets specified here + rebuildable_targets = [ + (y, t, q, "existing") + for i, y, t, q in existing_techs.itertuples() + if is_renewable(t) or is_battery(t) + ] + tech_group_targets + tech_life = dict() + for build_year, tech_group, cap, label in rebuildable_targets: + if tech_group not in ages.index: + raise ValueError( + "A target has been specified for {} but there are no matching " + "technologies in gen_info.csv.".format(tech_group) + ) + max_age = ages.loc[tech_group, "mean"] + tech_life[tech_group] = max_age + rebuild_year = build_year + max_age + while rebuild_year <= last_period: + tech_group_targets.append( + (rebuild_year, tech_group, cap, "rebuild " + label) + ) + rebuild_year += max_age + del gen_info, existing_techs, ages, rebuildable_targets + + # we also convert to normal python datatypes to support serialization + tech_group_power_targets = [ + (int(y), t, float(q[0] if type(q) is tuple else q), l) + for y, t, q, l in tech_group_targets + ] + tech_group_energy_targets = [ + (int(y), t, float(q[0] * q[1]), l) + for y, t, q, l in tech_group_targets + if type(q) is tuple + ] + + m.FORECASTED_TECH_GROUPS = Set( + dimen=1, initialize=list(techs_for_tech_group.keys()) + ) + m.FORECASTED_TECH_GROUP_TECHS = Set( + m.FORECASTED_TECH_GROUPS, dimen=1, initialize=techs_for_tech_group + ) + m.FORECASTED_TECHS = Set(dimen=1, initialize=list(tech_tech_group.keys())) + m.tech_tech_group = Param( + m.FORECASTED_TECHS, within=Any, initialize=tech_tech_group + ) + + # make a list of renewable technologies + m.RENEWABLE_TECH_GROUPS = Set( + dimen=1, + initialize=m.FORECASTED_TECH_GROUPS, + filter=lambda m, tg: is_renewable(tg), + ) + + def tech_group_target(m, per, tech, targets): + """Find the amount of each technology that is targeted to be built + between the start of the previous period and the start of the current + period and not yet retired.""" + start = 0 if per == m.PERIODS.first() else m.PERIODS.prev(per) + end = per + target = sum( + q + for (tyear, ttech, q, l) in targets + if ttech == tech + and start < tyear + and tyear <= end + and tyear + tech_life[ttech] > end + ) + return target + + def rule(m, per, tech): + return tech_group_target(m, per, tech, tech_group_power_targets) + + m.tech_group_power_target = Param( + m.PERIODS, 
m.FORECASTED_TECH_GROUPS, within=Reals, initialize=rule
+    )
+
+    def rule(m, per, tech):
+        return tech_group_target(m, per, tech, tech_group_energy_targets)
+
+    m.tech_group_energy_target = Param(
+        m.PERIODS, m.FORECASTED_TECH_GROUPS, within=Reals, initialize=rule
+    )
+
+    def MakeTechGroupDicts_rule(m):
+        # get unit sizes of all technologies
+        unit_sizes = m.tech_group_unit_size_dict = defaultdict(float)
+        for g, unit_size in m.gen_unit_size.items():
+            tech = m.gen_tech[g]
+            if tech in m.FORECASTED_TECHS:
+                tech_group = m.tech_tech_group[tech]
+                if tech_group in unit_sizes:
+                    if unit_sizes[tech_group] != unit_size:
+                        raise ValueError(
+                            "Generation technology {} uses different unit sizes "
+                            "for different projects.".format(tech_group)
+                        )
+                else:
+                    unit_sizes[tech_group] = unit_size
+        # get predetermined capacity for all technologies
+        m.tech_group_predetermined_power_cap_dict = defaultdict(float)
+        for (g, per), cap in m.build_gen_predetermined.items():
+            tech = m.gen_tech[g]
+            if tech in m.FORECASTED_TECHS:
+                tech_group = m.tech_tech_group[tech]
+                m.tech_group_predetermined_power_cap_dict[tech_group, per] += cap
+        m.tech_group_predetermined_energy_cap_dict = defaultdict(float)
+        for (g, per), cap in m.build_gen_predetermined.items():
+            tech = m.gen_tech[g]
+            if tech in m.FORECASTED_TECHS and g in m.STORAGE_GENS:
+                # Need to get predetermined energy capacity here, but there's no
+                # param for it yet, so currently these can only be implemented
+                # as technologies with fixed gen_storage_energy_to_power_ratio,
+                # in which case users should only provide a power target, not
+                # an energy target in this file. In the future, there may be
+                # a way to provide predetermined power and energy params, so we
+                # watch out for that here.
+                if m.gen_storage_energy_to_power_ratio[g] == float("inf"):
+                    TODO(
+                        "Need to lookup predetermined energy capacity for storage technologies."
+                    )
+                    # m.tech_group_predetermined_energy_cap_dict[tech_group, per] +=
+
+    m.MakeTechGroupDicts = BuildAction(rule=MakeTechGroupDicts_rule)
+
+    # Find last date for which a definite target was specified for each tech group.
+    # This sets the last year when construction of a technology is fixed at a
+    # predetermined level in the "most-likely" (non-PSIP) cases.
+    # This ignores PSIP targets, since _all_ construction is frozen when those are
+    # used, and ignores reconstruction targets, because those just follow on from
+    # the early-years construction, and we don't want to freeze construction all
+    # the way through.
+    last_definite_target = dict()
+    for y, t, q, l in tech_group_targets_definite:
+        last_definite_target[t] = max(y, last_definite_target.get(t, 0))
+
+    # Save targets and group definitions for future reference
+    import json
+
+    os.makedirs(m.options.outputs_dir, exist_ok=True)  # avoid errors with new dir
+    with open(os.path.join(m.options.outputs_dir, "heco_outlook.json"), "w") as f:
+        json.dump(
+            {
+                "tech_group_power_targets": tech_group_power_targets,
+                "tech_group_energy_targets": tech_group_energy_targets,
+                "techs_for_tech_group": techs_for_tech_group,
+                "tech_tech_group": tech_tech_group,
+                "last_definite_target": last_definite_target,
+            },
+            f,
+            indent=4,
+        )
+
+    # def build_tech_group_in_period(m, tech_group, period):
+    #     """
+    #     How much capacity is added in this tech_group in this period?
+    #     Returns literal 0 if and only if there are no matching projects.
+    #     Otherwise returns a Pyomo expression.
+    #     """
+    #     return sum(
+    #         build_var[g, period]
+    #         for g in m.GENERATION_PROJECTS
+    #         if m.gen_tech[g] in m.FORECASTED_TECHS
+    #         and m.tech_tech_group[m.gen_tech[g]] == tech_group
+    #         and (g, period) in build_var,
+    #         0
+    #     )
+
+    # # allow extra solar in 2025, up to the point of straight-line additions
+    # # between 2025 and 2030 (inclusive)
+    # ####### We don't do this here, we just interpolate back from 2030 to 2025
+    # ####### instead of 2026 (slightly less optimal, but much simpler)
+    # if last_definite_target['LargePV'] == 2025:
+    #     last_definite_target['LargePV'] = 2024  # use target as lower bound in 2025
+    #     print("="*80)
+    #     print("NOTE: Using HECO 2025 LargePV plan as lower bound, not fixed target.")
+    #     print("="*80)
+    #     ##### slack variable to allow 2025 to overshoot 20% of 2030 if needed
+    #     m.SolarOvershoot2025 = Var(within=NonNegativeReals)
+    #     def rule(m):
+    #         build2025 = build_tech_group_in_period['LargePV', 2025]
+    #         build2030 = build_tech_group_in_period['LargePV', 2030]
+    #         ####### This doesn't work, needs a big-M constraint to force
+    #         ####### build2025 to be below the max of the target or 0.2 * build2030
+    #         return build2025 - m.SolarOvershoot2025 <= 0.2 * build2030
+    #     m.Even_Increment_Solar_2025 = Constraint(rule=rule)
+    # else:
+    #     raise ValueError(
+    #         'Expected last HECO target for LargePV to be in 2025, but it is in {}.'
+    #         .format(last_definite_target['LargePV'])
+    #     )
+
+    def tech_group_target_rule(m, per, tech_group, build_var, target):
+        """
+        Enforce the construction target for each technology group.
+
+        With PSIP in effect, construction must exactly match the targets
+        (possibly zero). Without PSIP, construction must equal the definite
+        targets during the time range when they are specified and must be at
+        least the target level in later periods. In each case, the target for
+        a period is the sum of the targets that fall between the start of the
+        prior period and the start of this one (targets dated before the
+        first period count toward the first period).
+        """
+        build = sum(
+            build_var[g, per]
+            for g in m.GENERATION_PROJECTS
+            if m.gen_tech[g] in m.FORECASTED_TECHS
+            and m.tech_tech_group[m.gen_tech[g]] == tech_group
+            and (g, per) in build_var
+        )
+
+        if isinstance(build, int) and build == 0:
+            # no matching projects found, left with literal 0
+            if target == 0:
+                return Constraint.Skip
+            else:
+                raise ValueError(
+                    "Target was set for {} in {}, but no matching projects are available.".format(
+                        tech_group, per
+                    )
+                )
+
+        if psip and (
+            m.options.psip_relax_after is None or per <= m.options.psip_relax_after
+        ):
+            # PSIP in effect: exactly match the target (possibly zero)
+            return build == target
+        elif per <= last_definite_target.get(tech_group, 0):
+            # PSIP not in effect, but a definite target is
+            return build == target
+        elif (
+            m.options.psip_minimal_renewables and tech_group in m.RENEWABLE_TECH_GROUPS
+        ):
+            # Only build the specified amount of renewables, no more.
+            # This is used to apply the definite targets, but otherwise minimize renewable development.
+ return build == target + else: + # treat the target as a lower bound + return build >= target + + def rule(m, per, tech_group): + # get target, including any capacity specified in the predetermined builds, + # so the target will be additional to those + target = ( + m.tech_group_power_target[per, tech_group] + + m.tech_group_predetermined_power_cap_dict[tech_group, per] + ) + return tech_group_target_rule(m, per, tech_group, m.BuildGen, target) + + m.Enforce_Tech_Group_Power_Target = Constraint( + m.PERIODS, m.FORECASTED_TECH_GROUPS, rule=rule + ) + + def rule(m, per, tech_group): + # get target, including any capacity specified in the predetermined builds, + # so the target will be additional to those + target = ( + m.tech_group_energy_target[per, tech_group] + + m.tech_group_predetermined_energy_cap_dict[tech_group, per] + ) + return tech_group_target_rule(m, per, tech_group, m.BuildStorageEnergy, target) + + m.Enforce_Tech_Group_Energy_Target = Constraint( + m.PERIODS, m.FORECASTED_TECH_GROUPS, rule=rule + ) + + if psip: + + def rule(m): + buildable_techs = set(m.gen_tech[g] for (g, y) in m.NEW_GEN_BLD_YRS) + if buildable_techs - set(m.FORECASTED_TECHS): + # TODO: automatically add zero-targets + m.logger.error( + "\nERROR: You need to provide at least one zero target for " + "each technology without targets in the PSIP to prevent it " + "from being built." + ) + return False + else: + return True + + m.Check_For_Buildable_Techs_Under_PSIP = BuildCheck(rule=rule) + + # don't allow construction of other technologies (e.g., pumped hydro, fuel cells) + advanced_tech_vars = [ + "BuildPumpedHydroMW", + "BuildAnyPumpedHydro", + "BuildElectrolyzerMW", + "BuildLiquifierKgPerHour", + "BuildLiquidHydrogenTankKg", + "BuildFuelCellMW", + ] + + def no_advanced_tech_rule_factory(v): + return lambda m, *k: (getattr(m, v)[k] == 0) + + for v in advanced_tech_vars: + try: + var = getattr(m, v) + setattr( + m, + "PSIP_No_" + v, + Constraint(var._index, rule=no_advanced_tech_rule_factory(v)), + ) + except AttributeError: + pass # model doesn't have this var diff --git a/switch_model/hawaii/heco_plan_2020_08.py b/switch_model/hawaii/heco_plan_2020_08.py new file mode 100644 index 000000000..f5a3c0e93 --- /dev/null +++ b/switch_model/hawaii/heco_plan_2020_08.py @@ -0,0 +1,762 @@ +from __future__ import division +from __future__ import print_function +from collections import defaultdict +from textwrap import dedent +from math import isnan +import os +from pyomo.environ import * +import pandas as pd +import time + +# This module represents HECO's outlook as described in their modeling work in +# March-June 2020. Use the --psip-force flag to apply the plan they specified +# for that work too. 
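+#
+# Note: the USE_PSIP_PLAN environment variable (read in define_components
+# below) takes precedence over the --psip-force/--psip-relax flags, e.g., on
+# a cluster:
+#   USE_PSIP_PLAN=1 switch solve ...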
+ +# See psip_2016_12 and heco_outlook_2020_06 for documentation of general structure + + +def TODO(note): + raise NotImplementedError(dedent(note).strip()) + + +def NOTE(note): + print("=" * 80) + print("{}:".format(__name__)) + print(dedent(note).strip()) + print("=" * 80) + print() + # time.sleep(2) + + +def define_arguments(argparser): + argparser.add_argument( + "--psip-force", + action="store_true", + default=False, + help="Force following of PSIP plans (building exact amounts of certain technologies).", + ) + argparser.add_argument( + "--psip-relax", + dest="psip_force", + action="store_false", + help="Relax PSIP plans, to find a more optimal strategy.", + ) + argparser.add_argument( + "--psip-minimal-renewables", + action="store_true", + default=False, + help="Use only the amount of renewables shown in PSIP plans, and no more (should be combined with --psip-relax).", + ) + argparser.add_argument( + "--force-build", + nargs=3, + default=None, + help="Force construction of at least a certain quantity of a particular technology during certain years. Space-separated list of year, technology and quantity.", + ) + argparser.add_argument( + "--psip-relax-after", + type=float, + default=None, + help="Follow the PSIP plan up to and including the specified year, then optimize construction in later years. Should be combined with --psip-force.", + ) + + argparser.add_argument( + "--psip-allow-more-solar-2025", + action="store_true", + default=False, + help="Treat 2025 target for LargePV as lower limit, not exact target.", + ) + argparser.add_argument( + "--psip-no-additional-onshore-wind", + action="store_true", + default=False, + help="Don't allow construction of any onshore wind beyond the current plan.", + ) + + +def is_renewable(tech): + return any(txt in tech for txt in ("PV", "Wind", "Solar")) + + +def is_battery(tech): + return "battery" in tech.lower() + + +def define_components(m): + + # decide whether to enforce the PSIP preferred plan + # if an environment variable is set, that takes precedence + # (e.g., on a cluster to override options.txt) + psip_env_var = os.environ.get("USE_PSIP_PLAN") + if psip_env_var is None: + # no environment variable; use the --psip-relax flag + psip = m.options.psip_force + elif psip_env_var.lower() in ["1", "true", "y", "yes", "on"]: + psip = True + elif psip_env_var.lower() in ["0", "false", "n", "no", "off"]: + psip = False + else: + raise ValueError( + "Unrecognized value for environment variable USE_PSIP_PLAN={} (should be 0 or 1)".format( + psip_env_var + ) + ) + + if m.options.verbose: + if psip: + print("Using PSIP construction plan.") + else: + print( + "Relaxing PSIP construction plan (optimizing around forecasted adoption)." + ) + + # make sure LNG is turned off + if ( + psip + and "LNG" in m.FUELS + and getattr(m.options, "force_lng_tier", []) != ["none"] + ): + raise RuntimeError( + "To match the PSIP with LNG available, you must use the lng_conversion " + 'module and set "--force-lng-tier none".' + ) + + tech_group_targets_definite = [ + # HECO seems to have left Pearl City Peninsula Solar Park out of their plan + # (they call it "other solar"), so we cancel it out here + # (2021, 'LargePV', -1, 'missing Pearl City Solar'), + # Actually we don't, because it is uncancellable at this point; we just + # assume they proceed with the additional solar installations they report, on top of this. 
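+        # Each target below is (build_year, tech_group, quantity, label);
+        # quantity is either MW, or a (MW, hours) tuple for storage, in which
+        # case the energy (MWh) target is MW * hours.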
+        # HECO March 2020 forecast
+        # /s/data/Generator Info/HECO Dist PV Forecast 2020-03-17.xlsx
+        # We assume all DistPV and DistBattery are used efficiently/optimally,
+        # i.e., we do not attempt to model non-optimal pairing of DistPV with
+        # DistBattery or curtailment on self-supply tariffs.
+        (2021, "DistPV", 0, "DER forecast"),
+        (2022, "DistPV", 0, "DER forecast"),
+        (2023, "DistPV", 0, "DER forecast"),
+        (2024, "DistPV", 0, "DER forecast"),
+        (2025, "DistPV", 0, "DER forecast"),
+        (2026, "DistPV", 0, "DER forecast"),
+        (2027, "DistPV", 0, "DER forecast"),
+        (2028, "DistPV", 7.3, "DER forecast"),
+        (2029, "DistPV", 25.8, "DER forecast"),
+        (2030, "DistPV", 27.3, "DER forecast"),
+        (2031, "DistPV", 28.4, "DER forecast"),
+        (2032, "DistPV", 29.7, "DER forecast"),
+        (2033, "DistPV", 30.5, "DER forecast"),
+        (2034, "DistPV", 31.3, "DER forecast"),
+        (2035, "DistPV", 32.2, "DER forecast"),
+        (2036, "DistPV", 32.5, "DER forecast"),
+        (2037, "DistPV", 32.9, "DER forecast"),
+        (2038, "DistPV", 33.3, "DER forecast"),
+        (2039, "DistPV", 32.7, "DER forecast"),
+        (2040, "DistPV", 33.2, "DER forecast"),
+        (2041, "DistPV", 33, "DER forecast"),
+        (2042, "DistPV", 33.1, "DER forecast"),
+        (2043, "DistPV", 33.3, "DER forecast"),
+        (2044, "DistPV", 33.5, "DER forecast"),
+        (2045, "DistPV", 33.3, "DER forecast"),
+        # note: HECO provides a MWh forecast; we assume inverters are large
+        # enough to charge in 4h
+        (2021, "DistBattery", (0, 4), "DER forecast"),
+        (2022, "DistBattery", (0, 4), "DER forecast"),
+        (2023, "DistBattery", (0, 4), "DER forecast"),
+        (2024, "DistBattery", (6.812, 4), "DER forecast"),
+        (2025, "DistBattery", (9.693, 4), "DER forecast"),
+        (2026, "DistBattery", (3.135, 4), "DER forecast"),
+        (2027, "DistBattery", (3.732, 4), "DER forecast"),
+        (2028, "DistBattery", (4.542, 4), "DER forecast"),
+        (2029, "DistBattery", (5.324, 4), "DER forecast"),
+        (2030, "DistBattery", (6.115, 4), "DER forecast"),
+        (2031, "DistBattery", (6.719, 4), "DER forecast"),
+        (2032, "DistBattery", (7.316, 4), "DER forecast"),
+        (2033, "DistBattery", (7.913, 4), "DER forecast"),
+        (2034, "DistBattery", (8.355, 4), "DER forecast"),
+        (2035, "DistBattery", (8.723, 4), "DER forecast"),
+        (2036, "DistBattery", (9.006, 4), "DER forecast"),
+        (2037, "DistBattery", (9.315, 4), "DER forecast"),
+        (2038, "DistBattery", (9.49, 4), "DER forecast"),
+        (2039, "DistBattery", (9.556, 4), "DER forecast"),
+        (2040, "DistBattery", (9.688, 4), "DER forecast"),
+        (2041, "DistBattery", (9.777, 4), "DER forecast"),
+        (2042, "DistBattery", (9.827, 4), "DER forecast"),
+        (2043, "DistBattery", (9.874, 4), "DER forecast"),
+        (2044, "DistBattery", (9.939, 4), "DER forecast"),
+        (2045, "DistBattery", (10.098, 4), "DER forecast"),
+        # Mauka FIT 1 and Na Pua Makani are scheduled to come online in 2020 but
+        # are still under construction as of 8/7/20 according to
+        # https://www.hawaiianelectric.com/clean-energy-hawaii/our-clean-energy-portfolio/renewable-project-status-board
+        # In the HECO plan (Docket 2018-0088 HECO SOP 2, Exhibit P2), Mauka FIT 1
+        # is online in 2021 and Na Pua Makani and CBRE Phase 1 are online in
+        # 2020. Since none of these are online by Aug. 2020, we model them as
+        # starting 1/1/2021.
+        # NOTE: PSIP Figure J-10 says the FIT projects (Mauka FIT 1 and Aloha
+        # Solar II, in Existing Plant Data) are in addition to the customer DGPV
+        # adoption forecast, but they are not in "HECO construction plan 2020-03-17.docx".
+        # Samantha Ruiz (Ulupono) recommended in email 5/26/20 to count them as
+        # non-DER, especially
since HECO's March 2020 DER forecast is flat in early
+        # years. Note: these are probably fixed-axis rather than tracking (i.e.,
+        # more like DistPV than LargePV), but we include them as LargePV because
+        # they don't reduce available roof inventory.
+        # NOTE: Mauka FIT and Na Pua Makani are at particular locations but we
+        # include them here because counting them as existing capacity in 2021
+        # would block construction of additional generators in 2021.
+        (2021, "LargePV", 3.5, "Mauka FIT 1"),  # Mauka FIT 1
+        # Na Pua Makani (NPM) wind
+        # Reported as 24 MW in
+        # https://www.hawaiianelectric.com/clean-energy-hawaii/our-clean-energy-portfolio/renewable-project-status-board
+        # (accessed 10/22/19) but 27 MW on
+        # https://www.napuamakanihawaii.org/fact-sheet/. HECO confirmed by email
+        # that it is 24 MW.
+        (2021, "OnshoreWind", 24, "Na Pua Makani"),
+        # CBRE wind and PV
+        # Final order given allowing HECO to proceed with standardized contracts
+        # in June 2018: https://cca.hawaii.gov/dca/files/2018/07/Order-No-35560-HECO-CBRE.pdf
+        # "At the ten-month milestone [June 2019], three projects have half-executed standard
+        # form contracts ("SFCs") and interconnection agreements." None had subscribers or were
+        # under construction at this point.
+        # https://dms.puc.hawaii.gov/dms/DocumentViewer?pid=A1001001A19G15A93031F00794
+        # In Oct. 2019, HECO's website said it had agreement(s) in place for 4,990 kW
+        # of the 5,000 kW of solar allowed in Phase 1, with 330 kW in queue. I think the
+        # June 2018 D&O said this will roll over to Phase 2. No mention of wind on
+        # the HECO program website.
+        # https://www.hawaiianelectric.com/products-and-services/customer-renewable-programs/community-solar
+        # According to a HECO press release, the first phase includes (only) 8 MW
+        # of solar on all islands (5 MW on Oahu). Other techs will be included
+        # in phase 2, which will begin "about two years" from 7/2018.
+        # https://www.hawaiianelectric.com/regulators-approve-community-solar-plans
+        # In 11/19/19 data sharing, HECO reported "One project for CBRE Phase 1
+        # on O'ahu is slated to be installed by Q4 of 2019. Five Phase 1
+        # projects are estimated to be installed in 2020 (one in Q2 2020 and
+        # four in Q4 2020). Lastly, two projects are estimated to be installed
+        # in Q3 of 2021.". In heco_outlook_2019 we broke these up into
+        # installations in 2019, 2020 and 2021, but in "HECO construction plan 2020-03-17.docx"
+        # they treat them all as being installed in 2020, so we do that now.
+        (2021, "LargePV", 5, "CBRE Phase 1"),  # CBRE Phase 1
+        # Original CBRE program design had only 72 MW in phase 1 and 2 (leaving
+        # 64 MW for phase 2), but HECO suggested increasing this to 235 MW over
+        # 5 years. HECO said this was because of projected shortfalls in the DER
+        # program. Joint Parties say it should be possible to accept all of this
+        # earlier and expand the program if it goes quickly, and this should not
+        # be used to limit DER adoption.
+        # https://dms.puc.hawaii.gov/dms/DocumentViewer?pid=A1001001A19H20B01349C00185
+        # **** questions:
+        # **** Should we reduce DER forecast in light of HECO's projected shortfall reported in CBRE proceeding?
+        # **** How much solar should we expect on Oahu in CBRE Phase 2 and when?
+        # **** Do we expect any wind on Oahu in CBRE Phase 2, and if so, when?
+        # In heco_outlook_2019, we used 150 MW in 2022 as a placeholder Oahu CBRE Phase 2.
+        # In this version, we switch to 43.5 in 2025, as shown in "HECO construction plan 2020-03-17.docx"
+        # This is in addition to RFPs noted below.
+        # (2025, 'LargePV', 43.5), # CBRE Phase 2
+        (2025, "LargePV", 43.5, "CBRE phase 2"),
+        # 2018-2019 RFPs (docket 2017-0352)
+        # These replace large PV and bulk batteries reported in PSIP for 2020 and 2022.
+        # TODO: maybe move these to existing plants tables
+        # "On March 25, 2019, the commission approved six ... grid-scale,
+        # solar-plus-storage projects.... Cumulatively, the projects will add 247
+        # megawatts ("MW") of solar energy with almost 1 gigawatt hour of
+        # storage to the HECO Companies' grids."
+        # -- D&O 36604, https://dms.puc.hawaii.gov/dms/DocumentViewer?pid=A1001001A19J10A90756F00117
+        # First 6 approved projects (dockets 2018-0430, -0431, -0432, -0434, -0435, and -0436) are listed at
+        # -- https://www.hawaiianelectric.com/six-low-priced-solar-plus-storage-projects-approved-for-oahu-maui-and-hawaii-islands
+        # On 8/20/19, PUC approved 7th project, 12.5 MW/50 MWh AES solar+storage (docket 2019-0050, order 36480)
+        # -- https://dms.puc.hawaii.gov/dms/DocumentViewer?pid=A1001001A19H21B03929E00301
+        # -- https://www.hawaiianelectric.com/puc-approves-grid-scale-solar-project-in-west-oahu
+        # As of 10/22/19, 8th project, 15 MW/60 MWh solar+storage on Maui, is still under review (docket 2018-0433)
+        # Status of all approved projects and in-service data are listed at
+        # https://www.hawaiianelectric.com/clean-energy-hawaii/our-clean-energy-portfolio/renewable-project-status-board
+        # They are also shown in "HECO construction plan 2020-03-17.docx".
+        # As of 2020-05-25, both sources say the first one will be online in
+        # 2021 and the other three will be online in 2022. But in an email to
+        # Ulupono May 23, 2020, Rod Aoki (HECO) said all RFP 1 projects would
+        # be online at the end of 2022. In an email to M Fripp 5/21/20,
+        # Samantha Ruiz (Ulupono) said they would likely come online in 2022.
+        # In an email to M Fripp 5/23/20, quoting the Rod Aoki email, Murray
+        # Clay (Ulupono) recommended counting them as coming online at the
+        # start of 2022, not the end. Taking account of all this, we set them
+        # all to start in 2022.
+        (2021, "LargePV", 12.5, "RFP stage 1"),  # AES West Oahu Solar
+        (2022, "LargePV", 52, "RFP stage 1"),  # Hoohana Solar 1
+        (2022, "LargePV", 39, "RFP stage 1"),  # Mililani I Solar
+        (2022, "LargePV", 36, "RFP stage 1"),  # Waiawa Solar
+        # storage associated with large PV projects; we assume this will be used
+        # efficiently, so we model it along with other large-scale storage.
+        (2021, "Battery_Bulk", (12.5, 4), "RFP stage 1"),  # AES West Oahu Solar
+        (2022, "Battery_Bulk", (52, 4), "RFP stage 1"),  # Hoohana Solar 1
+        (2022, "Battery_Bulk", (39, 4), "RFP stage 1"),  # Mililani I Solar
+        (2022, "Battery_Bulk", (36, 4), "RFP stage 1"),  # Waiawa Solar
+        # 200 MW / 6 hour BESS in HECO Phase 2 SOP Exhibit P2 Attachment 1
+        (2022, "Battery_Bulk", (200, 6), "HECO plan"),
+        # Note: HECO said in "HECO construction plan 2020-03-17.docx" that RFP 2
+        # would add 1,300 GWh/year in 2025; their renewable project status board
+        # (10/2019-5/2020) says the same amount in 2022-25.
+        # PBR Phase 2 SOP Attachment 1 says this too.
+        # We think this would be 560 MW, but they think it is 594 MW (see p. 9 of Exhibit A of Dkt 2018-0088 2020-06-18 HECO Phase 2 SOP.pdf)
+        # We use 594 MW, because that meshes better with the total MW reported in their plan.
+        (2025, "LargePV", 594, "RFP stage 2"),
+        # avg.
cap factor for 560 MW starting after 390 best MW have been installed + # (existing projects + FIT + CBRE 1 + half of CBRE 2 + RFP 1) is 26.6%; see + # "select site, max_capacity, avg(cap_factor) from cap_factor natural join project where technology = 'CentralTrackingPV' group by 1, 2 order by 3 desc;" + # and (120*.271+247*.265+193*.264)/(120+247+193) + # Then (1,300,000 MWh/y)/(.266 * 8766 h/y) = 558 MW + # PSIP 2016-12-23 Table 4-1 included 90 MW of contingency battery in 2019 + # and https://www.hawaiianelectric.com/documents/clean_energy_hawaii/selling_power_to_the_utility/competitive_bidding/20190207_tri_company_future_procurement.pdf + # says the 2016-12 plan was to do 70 MW contingency in 2019 and more contingency/regulation in 2020 + # There has been no further discussion of these as of 10/22/19, so we assume they are + # replaced by storage that comes with the PV systems. + # PSIP 2016: (2019, 'Battery_Conting', 90), + ] + [ + # Assume no new distributed generation or batteries after 2045 + # (we need some forecast to avoid picking winners between large PV + # and dist PV, and forecasting continuous increases in distpv would + # be redundant with already adequate large-renewables) + (y, t, 0.0, "late freeze") + for y in range(2046, 2060) + for t in ["DistPV", "DistBattery"] + ] + # No new generation in early years beyond what's shown above + # (this will also block construction of these techs in all years if the + # --psip-force flag is set) + tech_group_targets_definite += [ + (y, t, 0.0, "early freeze") + for techs, years in [ + (("OnshoreWind", "OffshoreWind", "LargePV"), range(2020, 2025 + 1)), + ( + ( + "IC_Barge", + "IC_MCBH", + "IC_Schofield", + "CC_152", + "Battery_Conting", + "Battery_Reg", + ), + range(2020, 2023 + 1), + ), + ] + for t in techs + for y in years + ] + + if m.options.psip_no_additional_onshore_wind: + tech_group_targets_definite += [ + (y, "OnshoreWind", 0.0, "block onshore wind") for y in range(2020, 2056) + ] + + # add targets specified on the command line + # TODO: allow repeated invocation + if m.options.force_build is not None: + b = list(m.options.force_build) + build = ( + int(b[0]), # year + b[1], # tech + # quantity + float(b[2]) if len(b) == 3 else (float(b[2]), float(b[3])), + "manual override", + ) + print("Forcing build: {}".format(build)) + tech_group_targets_definite.append(build) + + # technologies proposed in "HECO construction plan 2020-03-17.docx" but which may not be built if a better plan is found. + tech_group_targets_psip = [ + (2026, "CC_152", 150.586, "HECO plan 3/17/20"), + (2028, "CC_152", 150.586, "HECO plan 3/17/20"), + (2030, "Battery_Bulk", (165, 4), "HECO plan 3/17/20"), + (2032, "CC_152", 2 * 150.586, "HECO plan 3/17/20"), + (2035, "Battery_Bulk", (168, 4), "HECO plan 3/17/20"), + (2040, "LargePV", 280, "HECO plan 3/17/20"), + (2040, "Battery_Bulk", (420, 4), "HECO plan 3/17/20"), + (2045, "LargePV", 1180, "HECO plan 3/17/20"), + (2045, "Battery_Bulk", (1525, 4), "HECO plan 3/17/20"), + ( + 2045, + "IC_Barge", + 4 * 16.786392, + "HECO plan 3/17/20", + ), # proxy for 4*17 MW of generic ICE capacity + # RESOLVE modeled 4-hour batteries as being capable of providing reserves, + # and didn't model contingency batteries (see data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/CA-IR-1/Input + # and Output Files by Case/E3 and Company Defined Cases/Market DGPV (Reference)/OA_NOLNG/technologies.tab). + # Then HECO added a 90 MW contingency battery (table 4-1 of PSIP 2016-12-23). 
+ # Note: RESOLVE can get reserves from batteries (they only considered 4-hour batteries), but not + # from EVs or flexible demand. + # DR: Looking at RESOLVE inputs, it seems like they take roughly 4% of load, and allow it to be doubled + # or cut to zero each hour (need to double-check this beyond first day). Maybe this includes EVs? + # (no separate sign of EVs). + # TODO: check Resolve load levels against Switch. + # TODO: maybe I should switch over to using the ABC curves and load profiles that HECO used with PLEXOS + # (for all islands). + # TODO: Did HECO assume 4-hour batteries, demand response or EVs could provide reserves when running PLEXOS? + # - all of these seem unlikely, but we have to ask HECO to find out; PLEXOS files are unclear. + ] + + if psip: + if m.options.psip_relax_after is not None: + # NOTE: this could be moved later, if we want this flag to relax + # both the definite and psip targets + psip_targets = [ + t for t in tech_group_targets_psip if t[0] <= m.options.psip_relax_after + ] + else: + psip_targets = tech_group_targets_psip.copy() + tech_group_targets = tech_group_targets_definite + psip_targets + else: + # must make a copy here so that rebuilds will be added to + # tech_group_targets but not tech_group_targets_definite + tech_group_targets = tech_group_targets_definite.copy() + + # Show which technologies can contribute to the target for each technology + # group and which group each technology contributes to + techs_for_tech_group = { + "DistPV": ["DistPV", "SlopedDistPV", "FlatDistPV"], + "LargePV": ["CentralTrackingPV", "CentralFixedPV"], + } + # use the rest as-is + missing_techs = {t for y, t, s, l in tech_group_targets}.difference( + techs_for_tech_group.keys() + ) + techs_for_tech_group.update({t: [t] for t in missing_techs}) + # create a reverse mapping + tech_tech_group = { + tech: tech_group + for tech_group, techs in techs_for_tech_group.items() + for tech in techs + } + + # Rebuild renewable projects and forecasted technologies at retirement. + # In the future we may be able to simplify this by enforcing capacity targets + # instead of construction targets. + + # note: this behavior is consistent with the following: + # discussion on p. 3-8 of PSIP 2016-12-23 vol. 1. + # Resolve applied planned wind and solar as set levels through 2045, not set additions in each year. + # Table 4-1 shows final plans that were sent to Plexos; Plexos input files in + # data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/DBEDT-IR-12/Input/Oahu/Oahu E3 Plan Input/CSV files/Theme 5 + # show optional capacity built in 2020 or 2025 (in list below) continuing in service in 2045. + # and Plexos input files in data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/DBEDT-IR-12/Input/Oahu/Oahu E3 Plan Input/CSV files/PSIP Max Capacity.csv + # don't show any retirements of wind and solar included as "planned" in RESOLVE and "existing" in Switch + # (Waivers PV1, West Loch; Kawailoa may be omitted?) 
+    # also note: Plexos input files in XX
+    # show max battery capacity equal to sum of all prior additions
+
+    # m = lambda: 3; m.options = m; m.options.inputs_dir = '/Users/matthias/Dropbox/Research/Ulupono/Enovation Model/pbr_scenario/inputs'
+    gen_info = pd.read_csv(os.path.join(m.options.inputs_dir, "gen_info.csv"))
+    gen_info["tech_group"] = gen_info["gen_tech"].map(tech_tech_group)
+    gen_info = gen_info[gen_info["tech_group"].notna()]
+    # existing technologies are also subject to rebuilding
+    existing_techs = (
+        pd.read_csv(
+            os.path.join(m.options.inputs_dir, "gen_build_predetermined.csv"),
+            na_values=["."],
+        )
+        .merge(gen_info, how="inner")
+        .groupby(["build_year", "tech_group"])[
+            ["build_gen_predetermined", "build_gen_energy_predetermined"]
+        ]
+        .agg(lambda x: x.sum(skipna=False))
+        .reset_index()
+    )
+    ages = gen_info.groupby("tech_group")["gen_max_age"].agg(["min", "max", "mean"])
+    assert all(ages["min"] == ages["max"]), "Some psip technologies have mixed ages."
+    last_period = pd.read_csv(os.path.join(m.options.inputs_dir, "periods.csv")).iloc[
+        -1, 0
+    ]
+
+    # rebuild all renewables and batteries in place before the start of the study,
+    # plus any technologies with targets specified here
+    rebuildable_targets = [
+        (y, t, (mw if isnan(mwh) else (mw, mwh / mw)), "existing")
+        for i, y, t, mw, mwh in existing_techs.itertuples()
+        if is_renewable(t) or is_battery(t)
+    ] + tech_group_targets
+    tech_life = dict()
+    for build_year, tech_group, cap, label in rebuildable_targets:
+        if tech_group not in ages.index:
+            raise ValueError(
+                "A target has been specified for {} but there are no matching "
+                "technologies in gen_info.csv.".format(tech_group)
+            )
+        max_age = ages.loc[tech_group, "mean"]
+        tech_life[tech_group] = max_age
+        rebuild_year = build_year + max_age
+        while rebuild_year <= last_period:
+            tech_group_targets.append(
+                (rebuild_year, tech_group, cap, "rebuild " + label)
+            )
+            rebuild_year += max_age
+    del gen_info, existing_techs, ages, rebuildable_targets
+
+    # we also convert to normal python datatypes to support serialization
+    tech_group_power_targets = [
+        (int(y), t, float(q[0] if type(q) is tuple else q), l)
+        for y, t, q, l in tech_group_targets
+    ]
+    tech_group_energy_targets = [
+        (int(y), t, float(q[0] * q[1]), l)
+        for y, t, q, l in tech_group_targets
+        if type(q) is tuple
+    ]
+
+    # import pdb; pdb.set_trace()
+
+    m.FORECASTED_TECH_GROUPS = Set(
+        dimen=1, initialize=list(techs_for_tech_group.keys())
+    )
+    m.FORECASTED_TECH_GROUP_TECHS = Set(
+        m.FORECASTED_TECH_GROUPS, dimen=1, initialize=techs_for_tech_group
+    )
+    m.FORECASTED_TECHS = Set(dimen=1, initialize=list(tech_tech_group.keys()))
+    m.tech_tech_group = Param(
+        m.FORECASTED_TECHS, within=Any, initialize=tech_tech_group
+    )
+
+    # make a list of renewable technologies
+    m.RENEWABLE_TECH_GROUPS = Set(
+        dimen=1,
+        initialize=m.FORECASTED_TECH_GROUPS,
+        filter=lambda m, tg: is_renewable(tg),
+    )
+
+    def tech_group_target(m, per, tech, targets):
+        """Find the amount of each technology that is targeted to be built
+        between the start of the previous period and the start of the current
+        period and not yet retired."""
+        start = 0 if per == m.PERIODS.first() else m.PERIODS.prev(per)
+        end = per
+        target = sum(
+            q
+            for (tyear, ttech, q, l) in targets
+            if ttech == tech
+            and start < tyear
+            and tyear <= end
+            and tyear + tech_life[ttech] > end
+        )
+        return target
+
+    def rule(m, per, tech):
+        return tech_group_target(m, per, tech, tech_group_power_targets)
+
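+    # Worked example (hypothetical study design; the target entry is from the
+    # list above): a (2022, "Battery_Bulk", (200, 6)) target is serialized as
+    # 200 MW of power and 200 * 6 = 1200 MWh of energy. With periods 2020 and
+    # 2025 and a hypothetical 15-year battery life, it counts toward the 2025
+    # targets (2020 < 2022 <= 2025 and 2022 + 15 > 2025) and is scheduled for
+    # rebuilding in 2037 and 2052 if the study extends that far.
+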
+    m.tech_group_power_target = Param(
+        m.PERIODS, m.FORECASTED_TECH_GROUPS, within=NonNegativeReals, initialize=rule
+    )
+
+    def rule(m, per, tech):
+        return tech_group_target(m, per, tech, tech_group_energy_targets)
+
+    m.tech_group_energy_target = Param(
+        m.PERIODS, m.FORECASTED_TECH_GROUPS, within=NonNegativeReals, initialize=rule
+    )
+
+    def MakeTechGroupDicts_rule(m):
+        # get unit sizes of all technologies
+        unit_sizes = m.tech_group_unit_size_dict = defaultdict(float)
+        for g, unit_size in m.gen_unit_size.items():
+            tech = m.gen_tech[g]
+            if tech in m.FORECASTED_TECHS:
+                tech_group = m.tech_tech_group[tech]
+                if tech_group in unit_sizes:
+                    if unit_sizes[tech_group] != unit_size:
+                        raise ValueError(
+                            "Generation technology {} uses different unit sizes "
+                            "for different projects.".format(tech_group)
+                        )
+                else:
+                    unit_sizes[tech_group] = unit_size
+        # get predetermined capacity for all technologies
+        m.tech_group_predetermined_power_cap_dict = defaultdict(float)
+        for (g, per), cap in m.build_gen_predetermined.items():
+            tech = m.gen_tech[g]
+            if tech in m.FORECASTED_TECHS:
+                tech_group = m.tech_tech_group[tech]
+                m.tech_group_predetermined_power_cap_dict[tech_group, per] += cap
+        m.tech_group_predetermined_energy_cap_dict = defaultdict(float)
+        for (g, per), cap in m.build_gen_energy_predetermined.items():
+            tech = m.gen_tech[g]
+            if (
+                tech in m.FORECASTED_TECHS
+                and g in m.STORAGE_GENS
+                and m.gen_storage_energy_to_power_ratio[g] == float("inf")
+            ):
+                tech_group = m.tech_tech_group[tech]
+                m.tech_group_predetermined_energy_cap_dict[tech_group, per] += cap
+
+    m.MakeTechGroupDicts = BuildAction(rule=MakeTechGroupDicts_rule)
+
+    # Find the last date for which a definite target was specified for each
+    # tech group. This sets the last year when construction of a technology is
+    # fixed at a predetermined level in the "most-likely" (non-PSIP) cases.
+    # This ignores PSIP targets, since _all_ construction is frozen when those
+    # are used, and ignores reconstruction targets, because those just follow
+    # on from the early-years construction, and we don't want to freeze
+    # construction all the way through.
+    last_definite_target = dict()
+    for y, t, q, l in tech_group_targets_definite:
+        last_definite_target[t] = max(y, last_definite_target.get(t, 0))
+
+    # Save targets and group definitions for future reference
+    import json
+
+    os.makedirs(m.options.outputs_dir, exist_ok=True)  # avoid errors with new dir
+    with open(os.path.join(m.options.outputs_dir, "heco_outlook.json"), "w") as f:
+        json.dump(
+            {
+                "tech_group_power_targets": tech_group_power_targets,
+                "tech_group_energy_targets": tech_group_energy_targets,
+                "techs_for_tech_group": techs_for_tech_group,
+                "tech_tech_group": tech_tech_group,
+                "last_definite_target": last_definite_target,
+            },
+            f,
+            indent=4,
+        )
+
+    # def build_tech_group_in_period(m, tech_group, period):
+    #     """
+    #     How much capacity is added in this tech_group in this period?
+    #     Returns literal 0 if and only if there are no matching projects.
+    #     Otherwise returns a Pyomo expression.
+ # """ + # return sum( + # build_var[g, period] + # for g in m.GENERATION_PROJECTS + # if m.gen_tech[g] in m.FORECASTED_TECHS + # and m.tech_tech_group[m.gen_tech[g]] == tech_group + # and (g, period) in build_var, + # 0 + # ) + + # # allow extra solar in 2025, up to the point of straight-line additions + # # between 2025 and 2030 (inclusive) + # ####### We don't do this here, we just interpolate back from 2030 to 2025 + # ####### instead of 2026 (slighly less optimal, but much simpler) + # if last_definite_target['LargePV'] == 2025: + # last_definite_target['LargePV'] = 2024 # use target as lower bound in 2025 + # print("="*80) + # print("NOTE: Using HECO 2025 LargePV plan as lower bound, not fixed target.") + # print("="*80) + # ##### slack variable to allow 2025 to overshoot 20% of 2030 if needed + # m.SolarOvershoot2025 = Var(within=NonNegativeReals) + # def rule(m): + # build2025 = build_tech_group_in_period['LargePV', 2025] + # build2030 = build_tech_group_in_period['LargePV', 2030] + # ####### This doesn't work, needs a big-M constraint to force + # ####### build2025 to be below the max of the target or 0.2 * build2030 + # return build2025 - m.SolarOvershoot2025 <= 0.2 * build2030 + # m.Even_Increment_Solar_2025 = Constraint(rule=rule) + # else: + # raise ValueError( + # 'Expected last HECO target for LargePV to be in 2025, but it is in {}.' + # .format(last_definite_target['LargePV']) + # ) + + def tech_group_target_rule(m, per, tech_group, build_var, target): + """ + Enforce targets for each technology. + + with PSIP: build is zero except for tech_group_power_targets + (sum during each period or before first period) + without PSIP: build is == definite targets during time range when targets specified + build is >= target later; + Note: in the last case the target is the sum of targets between start of prior period and start of this one + """ + build = sum( + build_var[g, per] + for g in m.GENERATION_PROJECTS + if m.gen_tech[g] in m.FORECASTED_TECHS + and m.tech_tech_group[m.gen_tech[g]] == tech_group + and (g, per) in build_var + ) + + if isinstance(build, int) and build == 0: + # no matching projects found, left with literal 0 + if target == 0: + return Constraint.Skip + else: + raise ValueError( + "Target was set for {} in {}, but no matching projects are available.".format( + tech_group, per + ) + ) + + if psip and ( + m.options.psip_relax_after is None or per <= m.options.psip_relax_after + ): + # PSIP in effect: exactly match the target (possibly zero) + return build == target + elif per <= last_definite_target.get(tech_group, 0): + # PSIP not in effect, but a definite target is + return build == target + elif ( + m.options.psip_minimal_renewables and tech_group in m.RENEWABLE_TECH_GROUPS + ): + # Only build the specified amount of renewables, no more. + # This is used to apply the definite targets, but otherwise minimize renewable development. 
+ return build == target + else: + # treat the target as a lower bound + return build >= target + + def rule(m, per, tech_group): + # get target, including any capacity specified in the predetermined builds, + # so the target will be additional to those + target = ( + m.tech_group_power_target[per, tech_group] + + m.tech_group_predetermined_power_cap_dict[tech_group, per] + ) + return tech_group_target_rule(m, per, tech_group, m.BuildGen, target) + + m.Enforce_Tech_Group_Power_Target = Constraint( + m.PERIODS, m.FORECASTED_TECH_GROUPS, rule=rule + ) + + def rule(m, per, tech_group): + # get target, including any capacity specified in the predetermined builds, + # so the target will be additional to those + target = ( + m.tech_group_energy_target[per, tech_group] + + m.tech_group_predetermined_energy_cap_dict[tech_group, per] + ) + return tech_group_target_rule(m, per, tech_group, m.BuildStorageEnergy, target) + + m.Enforce_Tech_Group_Energy_Target = Constraint( + m.PERIODS, m.FORECASTED_TECH_GROUPS, rule=rule + ) + + if psip: + + def rule(m): + buildable_techs = set(m.gen_tech[g] for (g, y) in m.NEW_GEN_BLD_YRS) + if buildable_techs - set(m.FORECASTED_TECHS): + # TODO: automatically add zero-targets + m.logger.error( + "\nERROR: You need to provide at least one zero target for " + "each technology without targets in the PSIP to prevent it " + "from being built." + ) + return False + else: + return True + + m.Check_For_Buildable_Techs_Under_PSIP = BuildCheck(rule=rule) + + # don't allow construction of other technologies (e.g., pumped hydro, fuel cells) + advanced_tech_vars = [ + "BuildPumpedHydroMW", + "BuildAnyPumpedHydro", + "BuildElectrolyzerMW", + "BuildLiquifierKgPerHour", + "BuildLiquidHydrogenTankKg", + "BuildFuelCellMW", + ] + + def no_advanced_tech_rule_factory(v): + return lambda m, *k: (getattr(m, v)[k] == 0) + + for v in advanced_tech_vars: + try: + var = getattr(m, v) + setattr( + m, + "PSIP_No_" + v, + Constraint(var._index, rule=no_advanced_tech_rule_factory(v)), + ) + except AttributeError: + pass # model doesn't have this var diff --git a/switch_model/hawaii/hi_spinning_reserves.py b/switch_model/hawaii/hi_spinning_reserves.py index 2cbdcfcda..3a323e988 100644 --- a/switch_model/hawaii/hi_spinning_reserves.py +++ b/switch_model/hawaii/hi_spinning_reserves.py @@ -1,4 +1,4 @@ -# Copyright (c) 2015-2019 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. 
""" This customizes the behavior of balancing.operating_reserves.spinning_reserve @@ -8,15 +8,15 @@ from pyomo.environ import * dependencies = ( - 'switch_model.timescales', - 'switch_model.balancing.load_zones', - 'switch_model.balancing.operating_reserves.areas', - 'switch_model.financials', - 'switch_model.energy_sources.properties', - 'switch_model.generators.core.build', - 'switch_model.generators.core.dispatch', - 'switch_model.generators.core.commit.operate', - 'switch_model.balancing.operating_reserves.spinning_reserve', + "switch_model.timescales", + "switch_model.balancing.load_zones", + "switch_model.balancing.operating_reserves.areas", + "switch_model.financials", + "switch_model.energy_sources.properties", + "switch_model.generators.core.build", + "switch_model.generators.core.dispatch", + "switch_model.generators.core.commit.operate", + "switch_model.balancing.operating_reserves.spinning_reserve", ) @@ -31,21 +31,29 @@ def define_components(m): # TODO: supply these parameters in input files # regulating reserves required, as fraction of potential output (up to limit) - m.var_gen_power_reserve = Param(['Central_PV', 'CentralTrackingPV', 'DistPV', 'OnshoreWind', 'OffshoreWind'], initialize={ - 'Central_PV': 1.0, - 'CentralTrackingPV': 1.0, - 'DistPV': 1.0, # 0.81270193, - 'OnshoreWind': 1.0, - 'OffshoreWind': 1.0, # assumed equal to OnshoreWind - }) + m.var_gen_power_reserve = Param( + ["Central_PV", "CentralTrackingPV", "DistPV", "OnshoreWind", "OffshoreWind"], + within=NonNegativeReals, + initialize={ + "Central_PV": 1.0, + "CentralTrackingPV": 1.0, + "DistPV": 1.0, # 0.81270193, + "OnshoreWind": 1.0, + "OffshoreWind": 1.0, # assumed equal to OnshoreWind + }, + ) # maximum regulating reserves required, as fraction of installed capacity - m.var_gen_cap_reserve_limit = Param(['Central_PV', 'CentralTrackingPV', 'DistPV', 'OnshoreWind', 'OffshoreWind'], initialize={ - 'Central_PV': 0.21288916, - 'CentralTrackingPV': 0.21288916, - 'DistPV': 0.21288916, # 0.14153171, - 'OnshoreWind': 0.21624407, - 'OffshoreWind': 0.21624407, # assumed equal to OnshoreWind - }) + m.var_gen_cap_reserve_limit = Param( + ["Central_PV", "CentralTrackingPV", "DistPV", "OnshoreWind", "OffshoreWind"], + within=NonNegativeReals, + initialize={ + "Central_PV": 0.21288916, + "CentralTrackingPV": 0.21288916, + "DistPV": 0.21288916, # 0.14153171, + "OnshoreWind": 0.21624407, + "OffshoreWind": 0.21624407, # assumed equal to OnshoreWind + }, + ) # more conservative values (found by giving 10x weight to times when we provide less reserves than GE): # [1., 1., 1., 0.25760558, 0.18027923, 0.49123101] @@ -54,23 +62,32 @@ def define_components(m): rule=lambda m, b, t: sum( m.ProjCapacityTP[g, t] * min( - m.var_gen_power_reserve[m.proj_gen_tech[g]] * m.proj_max_capacity_factor[g, t], - m.var_gen_cap_reserve_limit[m.proj_gen_tech[g]] + m.var_gen_power_reserve[m.proj_gen_tech[g]] + * m.proj_max_capacity_factor[g, t], + m.var_gen_cap_reserve_limit[m.proj_gen_tech[g]], ) for g in m.VARIABLE_PROJECTS - if (g, t) in m.VAR_DISPATCH_POINTS and b == m.zone_balancing_area[m.proj_load_zone[g]]), - doc="The spinning reserves for backing up variable generation with Hawaii rules." 
+ if (g, t) in m.VAR_DISPATCH_POINTS + and b == m.zone_balancing_area[m.proj_load_zone[g]] + ), + doc="The spinning reserves for backing up variable generation with Hawaii rules.", + ) + m.Spinning_Reserve_Up_Requirements.append( + "HawaiiVarGenUpSpinningReserveRequirement" ) - m.Spinning_Reserve_Up_Requirements.append('HawaiiVarGenUpSpinningReserveRequirement') def HawaiiLoadDownSpinningReserveRequirement_rule(m, b, t): try: load = m.WithdrawFromCentralGrid except AttributeError: load = m.lz_demand_mw - return 0.10 * sum(load[z, t] for z in m.LOAD_ZONES if b == m.zone_balancing_area[z]) + return 0.10 * sum( + load[z, t] for z in m.LOAD_ZONES if b == m.zone_balancing_area[z] + ) + m.HawaiiLoadDownSpinningReserveRequirement = Expression( - m.BALANCING_AREA_TIMEPOINTS, - rule=HawaiiLoadDownSpinningReserveRequirement_rule + m.BALANCING_AREA_TIMEPOINTS, rule=HawaiiLoadDownSpinningReserveRequirement_rule + ) + m.Spinning_Reserve_Down_Requirements.append( + "HawaiiLoadDownSpinningReserveRequirement" ) - m.Spinning_Reserve_Down_Requirements.append('HawaiiLoadDownSpinningReserveRequirement') diff --git a/switch_model/hawaii/hydrogen.py b/switch_model/hawaii/hydrogen.py index e6ca248b4..df1e6b2a4 100644 --- a/switch_model/hawaii/hydrogen.py +++ b/switch_model/hawaii/hydrogen.py @@ -3,34 +3,58 @@ from pyomo.environ import * from switch_model.financials import capital_recovery_factor as crf + def define_arguments(argparser): - argparser.add_argument('--hydrogen-reserve-types', nargs='+', default=['spinning'], - help= - "Type(s) of reserves to provide from hydrogen infrastructure (e.g., 'contingency regulation'). " - "Specify 'none' to disable." + argparser.add_argument( + "--hydrogen-reserve-types", + nargs="+", + default=["spinning"], + help="Type(s) of reserves to provide from hydrogen infrastructure (e.g., 'contingency regulation'). " + "Specify 'none' to disable.", ) - argparser.add_argument('--no-hydrogen', action='store_true', default=False, - help="Don't allow construction of any hydrogen infrastructure." 
+ argparser.add_argument( + "--no-hydrogen", + action="store_true", + default=False, + help="Don't allow construction of any hydrogen infrastructure.", ) + def define_components(m): if not m.options.no_hydrogen: define_hydrogen_components(m) + def define_hydrogen_components(m): # electrolyzer details - m.hydrogen_electrolyzer_capital_cost_per_mw = Param() - m.hydrogen_electrolyzer_fixed_cost_per_mw_year = Param(default=0.0) - m.hydrogen_electrolyzer_variable_cost_per_kg = Param(default=0.0) # assumed to include any refurbishment needed - m.hydrogen_electrolyzer_kg_per_mwh = Param() # assumed to deliver H2 at enough pressure for liquifier and daily buffering - m.hydrogen_electrolyzer_life_years = Param() + m.hydrogen_electrolyzer_capital_cost_per_mw = Param(within=NonNegativeReals) + m.hydrogen_electrolyzer_fixed_cost_per_mw_year = Param( + within=NonNegativeReals, default=0.0 + ) + # assumed to include any refurbishment needed + m.hydrogen_electrolyzer_variable_cost_per_kg = Param( + within=NonNegativeReals, default=0.0 + ) + # assumed to deliver H2 at enough pressure for liquifier and daily buffering + m.hydrogen_electrolyzer_kg_per_mwh = Param(within=NonNegativeReals) + m.hydrogen_electrolyzer_life_years = Param(within=NonNegativeReals) m.BuildElectrolyzerMW = Var(m.LOAD_ZONES, m.PERIODS, within=NonNegativeReals) - m.ElectrolyzerCapacityMW = Expression(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: - sum(m.BuildElectrolyzerMW[z, p_] for p_ in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p])) + m.ElectrolyzerCapacityMW = Expression( + m.LOAD_ZONES, + m.PERIODS, + rule=lambda m, z, p: sum( + m.BuildElectrolyzerMW[z, p_] + for p_ in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p] + ), + ) m.RunElectrolyzerMW = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=NonNegativeReals) - m.ProduceHydrogenKgPerHour = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.RunElectrolyzerMW[z, t] * m.hydrogen_electrolyzer_kg_per_mwh) + m.ProduceHydrogenKgPerHour = Expression( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.RunElectrolyzerMW[z, t] + * m.hydrogen_electrolyzer_kg_per_mwh, + ) # note: we assume there is a gaseous hydrogen storage tank that is big enough to buffer # daily production, storage and withdrawals of hydrogen, but we don't include a cost @@ -38,224 +62,325 @@ def define_hydrogen_components(m): # This allows the system to do some intra-day arbitrage without going all the way to liquification # liquifier details - m.hydrogen_liquifier_capital_cost_per_kg_per_hour = Param() - m.hydrogen_liquifier_fixed_cost_per_kg_hour_year = Param(default=0.0) - m.hydrogen_liquifier_variable_cost_per_kg = Param(default=0.0) - m.hydrogen_liquifier_mwh_per_kg = Param() - m.hydrogen_liquifier_life_years = Param() - m.BuildLiquifierKgPerHour = Var(m.LOAD_ZONES, m.PERIODS, within=NonNegativeReals) # capacity to build, measured in kg/hour of throughput - m.LiquifierCapacityKgPerHour = Expression(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: - sum(m.BuildLiquifierKgPerHour[z, p_] for p_ in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p])) - m.LiquifyHydrogenKgPerHour = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=NonNegativeReals) - m.LiquifyHydrogenMW = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.LiquifyHydrogenKgPerHour[z, t] * m.hydrogen_liquifier_mwh_per_kg + m.hydrogen_liquifier_capital_cost_per_kg_per_hour = Param(within=NonNegativeReals) + m.hydrogen_liquifier_fixed_cost_per_kg_hour_year = Param( + within=NonNegativeReals, default=0.0 + ) + m.hydrogen_liquifier_variable_cost_per_kg = Param( + 
within=NonNegativeReals, default=0.0 + ) + m.hydrogen_liquifier_mwh_per_kg = Param(within=NonNegativeReals) + m.hydrogen_liquifier_life_years = Param(within=NonNegativeReals) + m.BuildLiquifierKgPerHour = Var( + m.LOAD_ZONES, m.PERIODS, within=NonNegativeReals + ) # capacity to build, measured in kg/hour of throughput + m.LiquifierCapacityKgPerHour = Expression( + m.LOAD_ZONES, + m.PERIODS, + rule=lambda m, z, p: sum( + m.BuildLiquifierKgPerHour[z, p_] + for p_ in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p] + ), + ) + m.LiquifyHydrogenKgPerHour = Var( + m.LOAD_ZONES, m.TIMEPOINTS, within=NonNegativeReals + ) + m.LiquifyHydrogenMW = Expression( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.LiquifyHydrogenKgPerHour[z, t] + * m.hydrogen_liquifier_mwh_per_kg, ) # storage tank details - m.liquid_hydrogen_tank_capital_cost_per_kg = Param() - m.liquid_hydrogen_tank_minimum_size_kg = Param(default=0.0) - m.liquid_hydrogen_tank_life_years = Param() - m.BuildLiquidHydrogenTankKg = Var(m.LOAD_ZONES, m.PERIODS, within=NonNegativeReals) # in kg - m.LiquidHydrogenTankCapacityKg = Expression(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: - sum(m.BuildLiquidHydrogenTankKg[z, p_] for p_ in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p])) - m.StoreLiquidHydrogenKg = Expression(m.LOAD_ZONES, m.TIMESERIES, rule=lambda m, z, ts: - m.ts_duration_of_tp[ts] * sum(m.LiquifyHydrogenKgPerHour[z, tp] for tp in m.TPS_IN_TS[ts]) + m.liquid_hydrogen_tank_capital_cost_per_kg = Param(within=NonNegativeReals) + m.liquid_hydrogen_tank_minimum_size_kg = Param(within=NonNegativeReals, default=0.0) + m.liquid_hydrogen_tank_life_years = Param(within=NonNegativeReals) + m.BuildLiquidHydrogenTankKg = Var( + m.LOAD_ZONES, m.PERIODS, within=NonNegativeReals + ) # in kg + m.LiquidHydrogenTankCapacityKg = Expression( + m.LOAD_ZONES, + m.PERIODS, + rule=lambda m, z, p: sum( + m.BuildLiquidHydrogenTankKg[z, p_] + for p_ in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p] + ), + ) + m.StoreLiquidHydrogenKg = Expression( + m.LOAD_ZONES, + m.TIMESERIES, + rule=lambda m, z, ts: m.ts_duration_of_tp[ts] + * sum(m.LiquifyHydrogenKgPerHour[z, tp] for tp in m.TPS_IN_TS[ts]), + ) + m.WithdrawLiquidHydrogenKg = Var( + m.LOAD_ZONES, m.TIMESERIES, within=NonNegativeReals ) - m.WithdrawLiquidHydrogenKg = Var(m.LOAD_ZONES, m.TIMESERIES, within=NonNegativeReals) # note: we assume the system will be large enough to neglect boil-off # fuel cell details - m.hydrogen_fuel_cell_capital_cost_per_mw = Param() - m.hydrogen_fuel_cell_fixed_cost_per_mw_year = Param(default=0.0) - m.hydrogen_fuel_cell_variable_cost_per_mwh = Param(default=0.0) # assumed to include any refurbishment needed - m.hydrogen_fuel_cell_mwh_per_kg = Param() - m.hydrogen_fuel_cell_life_years = Param() + m.hydrogen_fuel_cell_capital_cost_per_mw = Param(within=NonNegativeReals) + m.hydrogen_fuel_cell_fixed_cost_per_mw_year = Param( + within=NonNegativeReals, default=0.0 + ) + # assumed to include any refurbishment needed + m.hydrogen_fuel_cell_variable_cost_per_mwh = Param( + within=NonNegativeReals, default=0.0 + ) + m.hydrogen_fuel_cell_mwh_per_kg = Param(within=NonNegativeReals) + m.hydrogen_fuel_cell_life_years = Param(within=NonNegativeReals) m.BuildFuelCellMW = Var(m.LOAD_ZONES, m.PERIODS, within=NonNegativeReals) - m.FuelCellCapacityMW = Expression(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: - sum(m.BuildFuelCellMW[z, p_] for p_ in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p])) + m.FuelCellCapacityMW = Expression( + m.LOAD_ZONES, + m.PERIODS, + rule=lambda m, z, p: sum( + 
m.BuildFuelCellMW[z, p_] for p_ in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p] + ), + ) m.DispatchFuelCellMW = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=NonNegativeReals) - m.ConsumeHydrogenKgPerHour = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.DispatchFuelCellMW[z, t] / m.hydrogen_fuel_cell_mwh_per_kg + m.ConsumeHydrogenKgPerHour = Expression( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.DispatchFuelCellMW[z, t] + / m.hydrogen_fuel_cell_mwh_per_kg, ) # hydrogen mass balances # note: this allows for buffering of same-day production and consumption # of hydrogen without ever liquifying it - m.Hydrogen_Conservation_of_Mass_Daily = Constraint(m.LOAD_ZONES, m.TIMESERIES, rule=lambda m, z, ts: - m.StoreLiquidHydrogenKg[z, ts] - m.WithdrawLiquidHydrogenKg[z, ts] - == - m.ts_duration_of_tp[ts] * sum( + m.Hydrogen_Conservation_of_Mass_Daily = Constraint( + m.LOAD_ZONES, + m.TIMESERIES, + rule=lambda m, z, ts: m.StoreLiquidHydrogenKg[z, ts] + - m.WithdrawLiquidHydrogenKg[z, ts] + == m.ts_duration_of_tp[ts] + * sum( m.ProduceHydrogenKgPerHour[z, tp] - m.ConsumeHydrogenKgPerHour[z, tp] for tp in m.TPS_IN_TS[ts] - ) + ), ) - m.Hydrogen_Conservation_of_Mass_Annual = Constraint(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: - sum( + m.Hydrogen_Conservation_of_Mass_Annual = Constraint( + m.LOAD_ZONES, + m.PERIODS, + rule=lambda m, z, p: sum( (m.StoreLiquidHydrogenKg[z, ts] - m.WithdrawLiquidHydrogenKg[z, ts]) - * m.ts_scale_to_year[ts] + * m.ts_scale_to_year[ts] for ts in m.TS_IN_PERIOD[p] - ) == 0 + ) + == 0, ) # limits on equipment - m.Max_Run_Electrolyzer = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.RunElectrolyzerMW[z, t] <= m.ElectrolyzerCapacityMW[z, m.tp_period[t]]) - m.Max_Run_Fuel_Cell = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.DispatchFuelCellMW[z, t] <= m.FuelCellCapacityMW[z, m.tp_period[t]]) - m.Max_Run_Liquifier = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.LiquifyHydrogenKgPerHour[z, t] <= m.LiquifierCapacityKgPerHour[z, m.tp_period[t]]) - - # minimum size for hydrogen tank - m.BuildAnyLiquidHydrogenTank = Var(m.LOAD_ZONES, m.PERIODS, within=Binary) - m.Set_BuildAnyLiquidHydrogenTank_Flag = Constraint(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: - Constraint.Skip if m.liquid_hydrogen_tank_minimum_size_kg == 0.0 - else ( - m.BuildLiquidHydrogenTankKg[z, p] - <= - 1000 * m.BuildAnyLiquidHydrogenTank[z, p] * m.liquid_hydrogen_tank_minimum_size_kg - ) + m.Max_Run_Electrolyzer = Constraint( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.RunElectrolyzerMW[z, t] + <= m.ElectrolyzerCapacityMW[z, m.tp_period[t]], ) - m.Build_Minimum_Liquid_Hydrogen_Tank = Constraint(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: - Constraint.Skip if m.liquid_hydrogen_tank_minimum_size_kg == 0.0 - else ( - m.BuildLiquidHydrogenTankKg[z, p] - >= - m.BuildAnyLiquidHydrogenTank[z, p] * m.liquid_hydrogen_tank_minimum_size_kg - ) + m.Max_Run_Fuel_Cell = Constraint( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.DispatchFuelCellMW[z, t] + <= m.FuelCellCapacityMW[z, m.tp_period[t]], + ) + m.Max_Run_Liquifier = Constraint( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.LiquifyHydrogenKgPerHour[z, t] + <= m.LiquifierCapacityKgPerHour[z, m.tp_period[t]], ) + # Enforce minimum size for hydrogen tank if specified. We only define these + # variables and constraints if needed, to avoid warnings about variables + # with no values assigned. 
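+    # Sketch of the semi-continuous pattern used below (hypothetical numbers):
+    # with liquid_hydrogen_tank_minimum_size_kg = 100,000 kg, either the binary
+    # flag is 0, which forces BuildLiquidHydrogenTankKg to 0, or the flag is 1
+    # and the tank must be between 100,000 kg and the 1000x big-M upper bound
+    # of 100,000,000 kg.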
+ def action(m): + if m.liquid_hydrogen_tank_minimum_size_kg != 0.0: + m.BuildAnyLiquidHydrogenTank = Var(m.LOAD_ZONES, m.PERIODS, within=Binary) + m.Set_BuildAnyLiquidHydrogenTank_Flag = Constraint( + m.LOAD_ZONES, + m.PERIODS, + rule=lambda m, z, p: m.BuildLiquidHydrogenTankKg[z, p] + <= 1000 + * m.BuildAnyLiquidHydrogenTank[z, p] + * m.liquid_hydrogen_tank_minimum_size_kg, + ) + m.Build_Minimum_Liquid_Hydrogen_Tank = Constraint( + m.LOAD_ZONES, + m.PERIODS, + rule=lambda m, z, p: m.BuildLiquidHydrogenTankKg[z, p] + >= m.BuildAnyLiquidHydrogenTank[z, p] + * m.liquid_hydrogen_tank_minimum_size_kg, + ) + + m.Apply_liquid_hydrogen_tank_minimum_size = BuildAction(rule=action) + # maximum amount that hydrogen fuel cells can contribute to system reserves # Note: we assume we can't use fuel cells for reserves unless we've also built at least half # as much electrolyzer capacity and a tank that can provide the reserves for 12 hours # (this is pretty arbitrary, but avoids just installing a fuel cell as a "free" source of reserves) m.HydrogenFuelCellMaxReservePower = Var(m.LOAD_ZONES, m.TIMEPOINTS) - m.Hydrogen_FC_Reserve_Capacity_Limit = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.HydrogenFuelCellMaxReservePower[z, t] - <= - m.FuelCellCapacityMW[z, m.tp_period[t]] + m.Hydrogen_FC_Reserve_Capacity_Limit = Constraint( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.HydrogenFuelCellMaxReservePower[z, t] + <= m.FuelCellCapacityMW[z, m.tp_period[t]], ) - m.Hydrogen_FC_Reserve_Storage_Limit = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.HydrogenFuelCellMaxReservePower[z, t] - <= - m.LiquidHydrogenTankCapacityKg[z, m.tp_period[t]] * m.hydrogen_fuel_cell_mwh_per_kg / 12.0 + m.Hydrogen_FC_Reserve_Storage_Limit = Constraint( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.HydrogenFuelCellMaxReservePower[z, t] + <= m.LiquidHydrogenTankCapacityKg[z, m.tp_period[t]] + * m.hydrogen_fuel_cell_mwh_per_kg + / 12.0, ) - m.Hydrogen_FC_Reserve_Electrolyzer_Limit = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.HydrogenFuelCellMaxReservePower[z, t] - <= - 2.0 * m.ElectrolyzerCapacityMW[z, m.tp_period[t]] + m.Hydrogen_FC_Reserve_Electrolyzer_Limit = Constraint( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.HydrogenFuelCellMaxReservePower[z, t] + <= 2.0 * m.ElectrolyzerCapacityMW[z, m.tp_period[t]], ) # how much extra power could hydrogen equipment produce or absorb on short notice (for reserves) - m.HydrogenSlackUp = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.RunElectrolyzerMW[z, t] + m.HydrogenSlackUp = Expression( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.RunElectrolyzerMW[z, t] + m.LiquifyHydrogenMW[z, t] + m.HydrogenFuelCellMaxReservePower[z, t] - - m.DispatchFuelCellMW[z, t] + - m.DispatchFuelCellMW[z, t], ) - m.HydrogenSlackDown = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.ElectrolyzerCapacityMW[z, m.tp_period[t]] - m.RunElectrolyzerMW[z, t] + m.HydrogenSlackDown = Expression( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.ElectrolyzerCapacityMW[z, m.tp_period[t]] + - m.RunElectrolyzerMW[z, t] # ignore liquifier potential since it's small and this is a low-value reserve product - + m.DispatchFuelCellMW[z, t] + + m.DispatchFuelCellMW[z, t], ) # there must be enough storage to hold _all_ the production each period (net of same-day consumption) # note: this assumes we cycle the system only once per year (store all energy, then release all energy) # alternatives: 
allow monthly or seasonal cycling, or directly model the whole year with inter-day linkages - m.Max_Store_Liquid_Hydrogen = Constraint(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: - sum(m.StoreLiquidHydrogenKg[z, ts] * m.ts_scale_to_year[ts] for ts in m.TS_IN_PERIOD[p]) - <= m.LiquidHydrogenTankCapacityKg[z, p] + m.Max_Store_Liquid_Hydrogen = Constraint( + m.LOAD_ZONES, + m.PERIODS, + rule=lambda m, z, p: sum( + m.StoreLiquidHydrogenKg[z, ts] * m.ts_scale_to_year[ts] + for ts in m.TS_IN_PERIOD[p] + ) + <= m.LiquidHydrogenTankCapacityKg[z, p], ) # add electricity consumption and production to the zonal energy balance - m.Zone_Power_Withdrawals.append('RunElectrolyzerMW') - m.Zone_Power_Withdrawals.append('LiquifyHydrogenMW') - m.Zone_Power_Injections.append('DispatchFuelCellMW') + m.Zone_Power_Withdrawals.append("RunElectrolyzerMW") + m.Zone_Power_Withdrawals.append("LiquifyHydrogenMW") + m.Zone_Power_Injections.append("DispatchFuelCellMW") # add costs to the model - m.HydrogenVariableCost = Expression(m.TIMEPOINTS, rule=lambda m, t: - sum( - m.ProduceHydrogenKgPerHour[z, t] * m.hydrogen_electrolyzer_variable_cost_per_kg - + m.LiquifyHydrogenKgPerHour[z, t] * m.hydrogen_liquifier_variable_cost_per_kg + m.HydrogenVariableCost = Expression( + m.TIMEPOINTS, + rule=lambda m, t: sum( + m.ProduceHydrogenKgPerHour[z, t] + * m.hydrogen_electrolyzer_variable_cost_per_kg + + m.LiquifyHydrogenKgPerHour[z, t] + * m.hydrogen_liquifier_variable_cost_per_kg + m.DispatchFuelCellMW[z, t] * m.hydrogen_fuel_cell_variable_cost_per_mwh for z in m.LOAD_ZONES - ) + ), ) - m.HydrogenFixedCostAnnual = Expression(m.PERIODS, rule=lambda m, p: - sum( - m.ElectrolyzerCapacityMW[z, p] * ( - m.hydrogen_electrolyzer_capital_cost_per_mw * crf(m.interest_rate, m.hydrogen_electrolyzer_life_years) - + m.hydrogen_electrolyzer_fixed_cost_per_mw_year) - + m.LiquifierCapacityKgPerHour[z, p] * ( - m.hydrogen_liquifier_capital_cost_per_kg_per_hour * crf(m.interest_rate, m.hydrogen_liquifier_life_years) - + m.hydrogen_liquifier_fixed_cost_per_kg_hour_year) - + m.LiquidHydrogenTankCapacityKg[z, p] * ( - m.liquid_hydrogen_tank_capital_cost_per_kg * crf(m.interest_rate, m.liquid_hydrogen_tank_life_years)) - + m.FuelCellCapacityMW[z, p] * ( - m.hydrogen_fuel_cell_capital_cost_per_mw * crf(m.interest_rate, m.hydrogen_fuel_cell_life_years) - + m.hydrogen_fuel_cell_fixed_cost_per_mw_year) + m.HydrogenFixedCostAnnual = Expression( + m.PERIODS, + rule=lambda m, p: sum( + m.ElectrolyzerCapacityMW[z, p] + * ( + m.hydrogen_electrolyzer_capital_cost_per_mw + * crf(m.interest_rate, m.hydrogen_electrolyzer_life_years) + + m.hydrogen_electrolyzer_fixed_cost_per_mw_year + ) + + m.LiquifierCapacityKgPerHour[z, p] + * ( + m.hydrogen_liquifier_capital_cost_per_kg_per_hour + * crf(m.interest_rate, m.hydrogen_liquifier_life_years) + + m.hydrogen_liquifier_fixed_cost_per_kg_hour_year + ) + + m.LiquidHydrogenTankCapacityKg[z, p] + * ( + m.liquid_hydrogen_tank_capital_cost_per_kg + * crf(m.interest_rate, m.liquid_hydrogen_tank_life_years) + ) + + m.FuelCellCapacityMW[z, p] + * ( + m.hydrogen_fuel_cell_capital_cost_per_mw + * crf(m.interest_rate, m.hydrogen_fuel_cell_life_years) + + m.hydrogen_fuel_cell_fixed_cost_per_mw_year + ) for z in m.LOAD_ZONES - ) + ), ) - m.Cost_Components_Per_TP.append('HydrogenVariableCost') - m.Cost_Components_Per_Period.append('HydrogenFixedCostAnnual') + m.Cost_Components_Per_TP.append("HydrogenVariableCost") + m.Cost_Components_Per_Period.append("HydrogenFixedCostAnnual") # Register with spinning reserves if it is available - 
if [rt.lower() for rt in m.options.hydrogen_reserve_types] != ['none']: + if [rt.lower() for rt in m.options.hydrogen_reserve_types] != ["none"]: # Register with spinning reserves - if hasattr(m, 'Spinning_Reserve_Up_Provisions'): + if hasattr(m, "Spinning_Reserve_Up_Provisions"): # calculate available slack from hydrogen equipment m.HydrogenSlackUpForArea = Expression( m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, b, t: - sum(m.HydrogenSlackUp[z, t] for z in m.ZONES_IN_BALANCING_AREA[b]) + rule=lambda m, b, t: sum( + m.HydrogenSlackUp[z, t] for z in m.ZONES_IN_BALANCING_AREA[b] + ), ) m.HydrogenSlackDownForArea = Expression( m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, b, t: - sum(m.HydrogenSlackDown[z, t] for z in m.ZONES_IN_BALANCING_AREA[b]) + rule=lambda m, b, t: sum( + m.HydrogenSlackDown[z, t] for z in m.ZONES_IN_BALANCING_AREA[b] + ), ) - if hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'): + if hasattr(m, "GEN_SPINNING_RESERVE_TYPES"): # using advanced formulation, index by reserve type, balancing area, timepoint # define variables for each type of reserves to be provided # choose how to allocate the slack between the different reserve products m.HYDROGEN_SPINNING_RESERVE_TYPES = Set( - initialize=m.options.hydrogen_reserve_types + dimen=1, initialize=m.options.hydrogen_reserve_types ) m.HydrogenSpinningReserveUp = Var( - m.HYDROGEN_SPINNING_RESERVE_TYPES, m.BALANCING_AREA_TIMEPOINTS, - within=NonNegativeReals + m.HYDROGEN_SPINNING_RESERVE_TYPES, + m.BALANCING_AREA_TIMEPOINTS, + within=NonNegativeReals, ) m.HydrogenSpinningReserveDown = Var( - m.HYDROGEN_SPINNING_RESERVE_TYPES, m.BALANCING_AREA_TIMEPOINTS, - within=NonNegativeReals + m.HYDROGEN_SPINNING_RESERVE_TYPES, + m.BALANCING_AREA_TIMEPOINTS, + within=NonNegativeReals, ) # constrain reserve provision within available slack m.Limit_HydrogenSpinningReserveUp = Constraint( m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, ba, tp: - sum( - m.HydrogenSpinningReserveUp[rt, ba, tp] - for rt in m.HYDROGEN_SPINNING_RESERVE_TYPES - ) <= m.HydrogenSlackUpForArea[ba, tp] + rule=lambda m, ba, tp: sum( + m.HydrogenSpinningReserveUp[rt, ba, tp] + for rt in m.HYDROGEN_SPINNING_RESERVE_TYPES + ) + <= m.HydrogenSlackUpForArea[ba, tp], ) m.Limit_HydrogenSpinningReserveDown = Constraint( m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, ba, tp: - sum( - m.HydrogenSpinningReserveDown[rt, ba, tp] - for rt in m.HYDROGEN_SPINNING_RESERVE_TYPES - ) <= m.HydrogenSlackDownForArea[ba, tp] + rule=lambda m, ba, tp: sum( + m.HydrogenSpinningReserveDown[rt, ba, tp] + for rt in m.HYDROGEN_SPINNING_RESERVE_TYPES + ) + <= m.HydrogenSlackDownForArea[ba, tp], ) - m.Spinning_Reserve_Up_Provisions.append('HydrogenSpinningReserveUp') - m.Spinning_Reserve_Down_Provisions.append('HydrogenSpinningReserveDown') + m.Spinning_Reserve_Up_Provisions.append("HydrogenSpinningReserveUp") + m.Spinning_Reserve_Down_Provisions.append("HydrogenSpinningReserveDown") else: # using older formulation, only one type of spinning reserves, indexed by balancing area, timepoint - if m.options.hydrogen_reserve_types != ['spinning']: + if m.options.hydrogen_reserve_types != ["spinning"]: raise ValueError( 'Unable to use reserve types other than "spinning" with simple spinning reserves module.' 
) - m.Spinning_Reserve_Up_Provisions.append('HydrogenSlackUpForArea') - m.Spinning_Reserve_Down_Provisions.append('HydrogenSlackDownForArea') + m.Spinning_Reserve_Up_Provisions.append("HydrogenSlackUpForArea") + m.Spinning_Reserve_Down_Provisions.append("HydrogenSlackDownForArea") def load_inputs(m, switch_data, inputs_dir): @@ -265,8 +390,8 @@ def load_inputs(m, switch_data, inputs_dir): """ if not m.options.no_hydrogen: switch_data.load_aug( - filename=os.path.join(inputs_dir, 'hydrogen.csv'), - optional=False, auto_select=True, + filename=os.path.join(inputs_dir, "hydrogen.csv"), + optional=False, param=( m.hydrogen_electrolyzer_capital_cost_per_mw, m.hydrogen_electrolyzer_fixed_cost_per_mw_year, @@ -286,5 +411,5 @@ def load_inputs(m, switch_data, inputs_dir): m.liquid_hydrogen_tank_capital_cost_per_kg, m.liquid_hydrogen_tank_life_years, m.liquid_hydrogen_tank_minimum_size_kg, - ) + ), ) diff --git a/switch_model/hawaii/kalaeloa.py b/switch_model/hawaii/kalaeloa.py deleted file mode 100644 index 74d36a9c6..000000000 --- a/switch_model/hawaii/kalaeloa.py +++ /dev/null @@ -1,96 +0,0 @@ -"""Special dispatch/commitment rules for Kalaeloa plant.""" - -import os -from pyomo.environ import * - -def define_arguments(argparser): - argparser.add_argument("--run-kalaeloa-even-with-high-rps", action='store_true', default=False, - help="Enforce the 75 MW minimum-output rule for Kalaeloa in all years (otherwise relaxed " - "if RPS or EV share >= 75%%). Mimics behavior from switch 2.0.0b2.") - -def define_components(m): - # force Kalaeloa_CC3 offline unless 1&2 are at max (per John Cole e-mail 9/28/16) - - # by inspection of figure 8 & 9 in the RPS Study, it appears that Kalaeloa has 3 modes: - # commit unit 1, run between 65 and 90 MW - # commit units 1 & 2, run each between 65 and 90 MW - # run both 1 & 2 at 90 MW, and run 3 at 28 MW - - m.KALAELOA_MAIN_UNITS = Set( - initialize=["Oahu_Kalaeloa_CC1", "Oahu_Kalaeloa_CC2", "Kalaeloa_CC1", "Kalaeloa_CC2"], - filter=lambda m, g: g in m.GENERATION_PROJECTS - ) - m.KALAELOA_DUCT_BURNERS = Set( - initialize=["Oahu_Kalaeloa_CC3", "Kalaeloa_CC3"], - filter=lambda m, g: g in m.GENERATION_PROJECTS - ) - - m.KALAELOA_MAIN_UNIT_DISPATCH_POINTS = Set( - dimen=2, - initialize=lambda m: ( - (g, tp) for g in m.KALAELOA_MAIN_UNITS for tp in m.TPS_FOR_GEN[g] - ) - ) - m.KALAELOA_DUCT_BURNER_DISPATCH_POINTS = Set( - dimen=2, - initialize=lambda m: ( - (g, tp) for g in m.KALAELOA_DUCT_BURNERS for tp in m.TPS_FOR_GEN[g] - ) - ) - m.KALAELOA_ACTIVE_TIMEPOINTS = Set( - initialize=lambda m: set(tp for g, tp in m.KALAELOA_MAIN_UNIT_DISPATCH_POINTS) - ) - - # run kalaeloa at full power or not - # (if linearized, this is the fraction of capacity that is dispatched) - m.RunKalaeloaUnitFull = Var(m.KALAELOA_MAIN_UNIT_DISPATCH_POINTS, within=Binary) - - m.Run_Kalaeloa_Unit_Full_Enforce = Constraint( # big-m constraint - m.KALAELOA_MAIN_UNIT_DISPATCH_POINTS, - rule=lambda m, g, tp: - m.DispatchGen[g, tp] - + (1 - m.RunKalaeloaUnitFull[g, tp]) * m.gen_capacity_limit_mw[g] - >= - m.GenCapacityInTP[g, tp] * m.gen_availability[g] - ) - - # only run duct burner if all main units are full-on - m.Run_Kalaeloa_Duct_Burner_Only_When_Full = Constraint( - m.KALAELOA_DUCT_BURNER_DISPATCH_POINTS, m.KALAELOA_MAIN_UNITS, - rule=lambda m, g_duct, tp, g_main: - m.DispatchGen[g_duct, tp] - <= - m.RunKalaeloaUnitFull[g_main, tp] * m.gen_capacity_limit_mw[g_duct] - ) - - # force at least one Kalaeloa unit to run at full power at all times - # (actually 75 MW, based on fig 9 of RPS Study) - # unless 
they are both on maintenance outage (per John Cole e-mail 9/28/16)
-    def Kalaeloa_Must_Run_rule(m, tp):
-        try:
-            both_units_out = (
-                sum(m.gen_max_commit_fraction[g, tp] for g in m.KALAELOA_MAIN_UNITS)
-                == 0
-            )
-        except AttributeError:
-            both_units_out = False
-
-        # in 2018, fossil fuel consumption was roughly 1M barrels for various
-        # taxable uses, 420k barrels for utility, and maybe 500k barrels for
-        # non-utility electricity production (Kalaeloa)? (It looks like jet
-        # kerosene was brought in directly.) There are two refineries that split
-        # the crude oil into LSFO, gasoline and other products. These are co-products,
-        # so it's probably not cost-effective to keep running any refinery with the
-        # same amount of steam if the demand for either product drops below 25%
-        # of the 2018 level. So we assume that Kalaeloa's must-run rule applies
-        # only until either consumption is below 25% of the starting level.
-        ev_share = m.ev_share['Oahu', m.tp_period[tp]] if hasattr(m, 'ev_share') else 0.0
-        rps_level = m.rps_target_for_period[m.tp_period[tp]] if hasattr(m, 'rps_target_for_period') else 0.0
-
-        if both_units_out or (
-            (ev_share >= 0.75 or rps_level >= 0.75) and not m.options.run_kalaeloa_even_with_high_rps
-        ):
-            return Constraint.Skip
-        else:
-            return (sum(m.DispatchGen[g, tp] for g in m.KALAELOA_MAIN_UNITS) >= 75.0)
-    m.Kalaeloa_Must_Run = Constraint(m.KALAELOA_ACTIVE_TIMEPOINTS, rule=Kalaeloa_Must_Run_rule)
diff --git a/switch_model/hawaii/lake_wilson.py b/switch_model/hawaii/lake_wilson.py
index 2a45e496b..7e9dce7ef 100644
--- a/switch_model/hawaii/lake_wilson.py
+++ b/switch_model/hawaii/lake_wilson.py
@@ -5,23 +5,31 @@ from __future__ import division
 
 from pyomo.environ import *
 
+
 def define_components(m):
     def rule(m):
-        g = 'Oahu_Lake_Wilson'
+        g = "Oahu_Lake_Wilson"
         inflow = 10.0
         if g in m.GENERATION_PROJECTS:
             for t in m.TPS_FOR_GEN[g]:
                 # assign new energy balance with extra inflow, and allow spilling
                 m.Track_State_Of_Charge[g, t] = (
                     m.StateOfCharge[g, t]
-                    <=
-                    m.StateOfCharge[g, m.tp_previous[t]]
-                    + (m.ChargeStorage[g, t] * m.gen_storage_efficiency[g]
-                    - m.DispatchGen[g, t]) * m.tp_duration_hrs[t]
+                    <= m.StateOfCharge[g, m.tp_previous[t]]
+                    + (
+                        m.ChargeStorage[g, t] * m.gen_storage_efficiency[g]
+                        - m.DispatchGen[g, t]
+                    )
+                    * m.tp_duration_hrs[t]
                     # allow inflow only if capacity is built
-                    + inflow * m.tp_duration_hrs * m.GenCapacityInTP[g] / m.gen_unit_size[g]
+                    + inflow
+                    * m.tp_duration_hrs[t]
+                    * m.GenCapacityInTP[g, t]
+                    / m.gen_unit_size[g]
                 )
 
+    m.Add_Lake_Wilson_Inflow = BuildAction(rule=rule)
+
 # TODO: don't allow zero crossing when calculating reserves available
 # see http://www.ucdenver.edu/faculty-staff/dmays/3414/Documents/Antal-MS-2014.pdf
diff --git a/switch_model/hawaii/lng_conversion.py b/switch_model/hawaii/lng_conversion.py
index dc5af0916..4f6a7a733 100644
--- a/switch_model/hawaii/lng_conversion.py
+++ b/switch_model/hawaii/lng_conversion.py
@@ -10,10 +10,16 @@
 from pyomo.environ import *
 
 from switch_model.financials import capital_recovery_factor
+from switch_model.utilities import unique_list
 
 
 def define_arguments(argparser):
-    argparser.add_argument('--force-lng-tier', nargs='*', default=None,
-        help="LNG tier to use: tier [start [stop]] or 'none' to use no LNG. Optimal choices will be made if nothing specified.")
+    argparser.add_argument(
+        "--force-lng-tier",
+        nargs="*",
+        default=None,
+        help="LNG tier to use: tier [start [stop]] or 'none' to use no LNG. 
Optimal choices will be made if nothing specified.", + ) + def define_components(m): @@ -25,25 +31,34 @@ def define_components(m): # (e.g., bringing in containerized LNG for all islands) m.LNG_RFM_SUPPLY_TIERS = Set( + dimen=3, initialize=m.RFM_SUPPLY_TIERS, - filter=lambda m, rfm, per, tier: m.rfm_fuel[rfm].upper() == 'LNG' + filter=lambda m, rfm, per, tier: m.rfm_fuel[rfm].upper() == "LNG", ) m.LNG_REGIONAL_FUEL_MARKETS = Set( - initialize=lambda m: {rfm for rfm, per, tier in m.LNG_RFM_SUPPLY_TIERS} + dimen=1, + initialize=lambda m: unique_list( + rfm for rfm, per, tier in m.LNG_RFM_SUPPLY_TIERS + ), ) m.LNG_TIERS = Set( - initialize=lambda m: {tier for rfm, per, tier in m.LNG_RFM_SUPPLY_TIERS} + dimen=1, + initialize=lambda m: unique_list( + tier for rfm, per, tier in m.LNG_RFM_SUPPLY_TIERS + ), ) # force LNG to be deactivated when RPS is 100%; # this forces recovery of all costs before the 100% RPS takes effect # (otherwise the model sometimes tries to postpone recovery beyond the end of the study) - if hasattr(m, 'RPS_Enforce'): - m.No_LNG_In_100_RPS = Constraint(m.LNG_RFM_SUPPLY_TIERS, - rule=lambda m, rfm, per, tier: - (m.RFMSupplyTierActivate[rfm, per, tier] == 0) - if m.rps_target_for_period[per] >= 1.0 - else Constraint.Skip + if hasattr(m, "RPS_Enforce"): + m.No_LNG_In_100_RPS = Constraint( + m.LNG_RFM_SUPPLY_TIERS, + rule=lambda m, rfm, per, tier: ( + m.RFMSupplyTierActivate[rfm, per, tier] == 0 + ) + if m.rps_target_for_period[per] >= 1.0 + else Constraint.Skip, ) # user can study different LNG durations by specifying a tier to activate and @@ -78,13 +93,23 @@ def Force_LNG_Tier_rule(m, rfm, per, tier): # user specified a tier to activate and possibly a date range # force that active and deactivate all others. force_tier = m.options.force_lng_tier[0] - force_tier_start = float(m.options.force_lng_tier[1]) if len(m.options.force_lng_tier) > 1 else m.PERIODS.first() - force_tier_end = float(m.options.force_lng_tier[2]) if len(m.options.force_lng_tier) > 2 else m.PERIODS.last() - if force_tier.lower() == 'none': + force_tier_start = ( + float(m.options.force_lng_tier[1]) + if len(m.options.force_lng_tier) > 1 + else m.PERIODS.first() + ) + force_tier_end = ( + float(m.options.force_lng_tier[2]) + if len(m.options.force_lng_tier) > 2 + else m.PERIODS.last() + ) + if force_tier.lower() == "none": action = 0 elif force_tier not in m.LNG_TIERS: raise ValueError( - "--force-lng-tier argument '{}' does not match any LNG market tier.".format(force_tier) + "--force-lng-tier argument '{}' does not match any LNG market tier.".format( + force_tier + ) ) elif tier == force_tier and force_tier_start <= per <= force_tier_end: # force tier on @@ -98,16 +123,25 @@ def Force_LNG_Tier_rule(m, rfm, per, tier): result = action else: if m.options.verbose: - print("{} activation of tier {}.".format('Forcing' if action else 'Blocking', (rfm, per, tier))) - result = (m.RFMSupplyTierActivate[rfm, per, tier] == action) + print( + "{} activation of tier {}.".format( + "Forcing" if action else "Blocking", (rfm, per, tier) + ) + ) + result = m.RFMSupplyTierActivate[rfm, per, tier] == action return result - m.Force_LNG_Tier = Constraint(m.LNG_RFM_SUPPLY_TIERS, rule=Force_LNG_Tier_rule) + m.Force_LNG_Tier = Constraint(m.LNG_RFM_SUPPLY_TIERS, rule=Force_LNG_Tier_rule) # list of all projects and timepoints when LNG could potentially be used - m.LNG_GEN_TIMEPOINTS = Set(dimen=2, initialize = lambda m: - ((p, t) for p in m.GENS_BY_FUEL['LNG'] for t in m.TIMEPOINTS - if (p, t) in m.GEN_TPS) + m.LNG_GEN_TIMEPOINTS = 
Set( + dimen=2, + initialize=lambda m: ( + (p, t) + for p in m.GENS_BY_FUEL["LNG"] + for t in m.TIMEPOINTS + if (p, t) in m.GEN_TPS + ), ) # HECO PSIP 2016-04 has only Kahe 5, Kahe 6, Kalaeloa and CC_383 burning LNG, @@ -119,17 +153,24 @@ def Force_LNG_Tier_rule(m, rfm, per, tier): # LNG if we didn't explicitly do the conversions; however, now the conversion costs # are included in the LNG supply tiers, so we don't need to worry about that. m.LNG_CONVERTED_PLANTS = Set( + dimen=1, initialize=[ - 'Oahu_Kahe_K5', 'Oahu_Kahe_K6', - 'Oahu_Kalaeloa_CC1_CC2', # used in some older models - 'Oahu_Kalaeloa_CC1', 'Oahu_Kalaeloa_CC2', 'Oahu_Kalaeloa_CC3', - 'Oahu_CC_383', 'Oahu_CC_152', 'Oahu_CT_100' - ] + "Oahu_Kahe_K5", + "Oahu_Kahe_K6", + "Oahu_Kalaeloa_CC1_CC2", # used in some older models + "Oahu_Kalaeloa_CC1", + "Oahu_Kalaeloa_CC2", + "Oahu_Kalaeloa_CC3", + "Oahu_CC_383", + "Oahu_CC_152", + "Oahu_CT_100", + ], ) - m.LNG_In_Converted_Plants_Only = Constraint(m.LNG_GEN_TIMEPOINTS, - rule=lambda m, g, tp: - Constraint.Skip if g in m.LNG_CONVERTED_PLANTS - else (m.GenFuelUseRate[g, tp, 'LNG'] == 0) + m.LNG_In_Converted_Plants_Only = Constraint( + m.LNG_GEN_TIMEPOINTS, + rule=lambda m, g, tp: Constraint.Skip + if g in m.LNG_CONVERTED_PLANTS + else (m.GenFuelUseRate[g, tp, "LNG"] == 0), ) # CODE BELOW IS DISABLED because we have abandoned the 'container' tier which cost @@ -192,7 +233,7 @@ def Force_LNG_Tier_rule(m, rfm, per, tier): # for f in m.FUELS_FOR_GEN[g] # if f != 'LNG' # ) - # rfm = m.zone_rfm[m.gen_load_zone[g], 'LNG'] + # rfm = m.zone_fuel_rfm[m.gen_load_zone[g], 'LNG'] # lng_market_exhausted = 1 - m.LNG_Has_Slack[rfm, m.tp_period[tp]] # return (non_lng_fuel <= big_project_lng * lng_market_exhausted) # m.Only_LNG_In_Converted_Plants = Constraint( @@ -217,7 +258,7 @@ def Force_LNG_Tier_rule(m, rfm, per, tier): # if g in m.LNG_CONVERTED_PLANTS: # return Constraint.Skip # # otherwise force production up to the maximum if market has slack - # rfm = m.zone_rfm[m.gen_load_zone[g], 'LNG'] + # rfm = m.zone_fuel_rfm[m.gen_load_zone[g], 'LNG'] # lng_market_exhausted = 1 - m.LNG_Has_Slack[rfm, m.tp_period[tp]] # rule = ( # m.DispatchGen[g, tp] diff --git a/switch_model/hawaii/no_central_pv.py b/switch_model/hawaii/no_central_pv.py index 776210844..7d01739f8 100644 --- a/switch_model/hawaii/no_central_pv.py +++ b/switch_model/hawaii/no_central_pv.py @@ -1,17 +1,18 @@ from pyomo.environ import * + def define_components(m): """ prevent construction of any new central PV projects """ # TODO: put these in a data file and share them between rps.py and no_renewables.py - renewable_energy_technologies = ['CentralPV', 'CentralTrackingPV'] + renewable_energy_technologies = ["CentralPV", "CentralTrackingPV"] def No_CentralPV_rule(m, g, bld_yr): if m.gen_tech[g] in renewable_energy_technologies: return m.BuildGen[g, bld_yr] == 0 else: return Constraint.Skip - m.No_CentralPV = Constraint(m.NEW_GEN_BLD_YRS, rule=No_CentralPV_rule) + m.No_CentralPV = Constraint(m.NEW_GEN_BLD_YRS, rule=No_CentralPV_rule) diff --git a/switch_model/hawaii/no_onshore_wind.py b/switch_model/hawaii/no_onshore_wind.py index 29ad7ae7c..156913502 100644 --- a/switch_model/hawaii/no_onshore_wind.py +++ b/switch_model/hawaii/no_onshore_wind.py @@ -1,13 +1,15 @@ from pyomo.environ import * + def define_components(m): """ prevent construction of new onshore wind projects """ + def No_Onshore_Wind_rule(m, g, bld_yr): - if m.gen_tech[g] == 'OnshoreWind': + if m.gen_tech[g] == "OnshoreWind": return m.BuildGen[g, bld_yr] == 0 else: return 
Constraint.Skip - m.No_Onshore_Wind = Constraint(m.NEW_GEN_BLD_YRS, rule=No_Onshore_Wind_rule) + m.No_Onshore_Wind = Constraint(m.NEW_GEN_BLD_YRS, rule=No_Onshore_Wind_rule) diff --git a/switch_model/hawaii/no_renewables.py b/switch_model/hawaii/no_renewables.py index 89af17b13..c1181da9f 100644 --- a/switch_model/hawaii/no_renewables.py +++ b/switch_model/hawaii/no_renewables.py @@ -5,20 +5,18 @@ def define_components(m): - """ - - """ + """ """ ################### # prevent construction of any new renewable projects (useful for "business as usual" baseline) ################## # TODO: put these in a data file and share them between rps.py and no_renewables.py - renewable_energy_sources = ['WND', 'SUN', 'Biocrude', 'Biodiesel', 'MLG'] + renewable_energy_sources = ["WND", "SUN", "Biocrude", "Biodiesel", "MLG"] def No_Renewables_rule(m, g, bld_yr): if m.g_energy_source[m.gen_tech[g]] in renewable_energy_sources: return m.BuildGen[g, bld_yr] == 0 else: return Constraint.Skip - m.No_Renewables = Constraint(m.NEW_GEN_BLD_YRS, rule=No_Renewables_rule) + m.No_Renewables = Constraint(m.NEW_GEN_BLD_YRS, rule=No_Renewables_rule) diff --git a/switch_model/hawaii/no_wind.py b/switch_model/hawaii/no_wind.py index 53f6f15d4..8ead0657b 100644 --- a/switch_model/hawaii/no_wind.py +++ b/switch_model/hawaii/no_wind.py @@ -10,12 +10,12 @@ def define_components(m): """ # TODO: put these in a data file and share them between rps.py and no_renewables.py - renewable_energy_sources = ['WND'] + renewable_energy_sources = ["WND"] def No_Wind_rule(m, g, bld_yr): if m.g_energy_source[m.gen_tech[g]] in renewable_energy_sources: return m.BuildGen[g, bld_yr] == 0 else: return Constraint.Skip - m.No_Wind = Constraint(m.NEW_GEN_BLD_YRS, rule=No_Wind_rule) + m.No_Wind = Constraint(m.NEW_GEN_BLD_YRS, rule=No_Wind_rule) diff --git a/switch_model/hawaii/oahu_plants.py b/switch_model/hawaii/oahu_plants.py new file mode 100644 index 000000000..8bf09ea6f --- /dev/null +++ b/switch_model/hawaii/oahu_plants.py @@ -0,0 +1,230 @@ +"""Special operating rules for individual plants on Oahu.""" + +import os +from pyomo.environ import * +from switch_model.utilities import unique_list + + +def define_arguments(argparser): + argparser.add_argument( + "--run-kalaeloa-even-with-high-rps", + action="store_true", + default=False, + help="Enforce the 75 MW minimum-output rule for Kalaeloa in all years (otherwise relaxed " + "if RPS or EV share >= 75%%). Mimics behavior from switch 2.0.0b2.", + ) + + +def define_components(m): + refineries_closed(m) + kalaeloa(m) + schofield(m) + cogen(m) + + +def refineries_closed(m): + """ + Define the REFINERIES_CLOSED_TPS set, which identifies timepoints when + oil refineries are assumed to be closed. + + In 2018, fossil fuel consumption was roughly 1M barrels for various + taxable uses, 420k barrels for utility, and maybe 500k barrels for + non-utility electricity production (Kalaeloa)? (It looks like jet + kerosene was brought in directly.) There are two refineries that split + the crude oil into LSFO, gasoline and other products. These are co-products, + so it's probably not cost-effective to keep running any refinery with the + same amount of steam if the demand for either product drops too far. + We shut these down if fossil fuel is used for less than 25% of total power + or vehicles. (Maybe 50% would be better?) 
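+ + For example, under this rule a timepoint whose period has an 80% RPS target is treated as refinery-closed even if the EV share is still low, since either share reaching 75% is enough to close the refineries.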
+ """ + + def filter(m, tp): + ev_share = ( + m.ev_share["Oahu", m.tp_period[tp]] if hasattr(m, "ev_share") else 0.0 + ) + rps_level = ( + m.rps_target_for_period[m.tp_period[tp]] + if hasattr(m, "rps_target_for_period") + else 0.0 + ) + return ev_share >= 0.75 or rps_level >= 0.75 + + m.REFINERIES_CLOSED_TPS = Set(dimen=1, initialize=m.TIMEPOINTS, filter=filter) + + +def kalaeloa(m): + """Special dispatch/commitment rules for Kalaeloa plant.""" + # force Kalaeloa_CC3 offline unless 1&2 are at max (per John Cole e-mail 9/28/16) + + # by inspection of figure 8 & 9 in the RPS Study, it appears that Kalaeloa has 3 modes: + # commit unit 1, run between 65 and 90 MW + # commit units 1 & 2, run each between 65 and 90 MW + # run both 1 & 2 at 90 MW, and run 3 at 28 MW + + m.KALAELOA_MAIN_UNITS = Set( + dimen=1, + initialize=[ + "Oahu_Kalaeloa_CC1", + "Oahu_Kalaeloa_CC2", + "Kalaeloa_CC1", + "Kalaeloa_CC2", + ], + filter=lambda m, g: g in m.GENERATION_PROJECTS, + ) + m.KALAELOA_DUCT_BURNERS = Set( + dimen=1, + initialize=["Oahu_Kalaeloa_CC3", "Kalaeloa_CC3"], + filter=lambda m, g: g in m.GENERATION_PROJECTS, + ) + + m.KALAELOA_MAIN_UNIT_DISPATCH_POINTS = Set( + dimen=2, + initialize=lambda m: ( + (g, tp) for g in m.KALAELOA_MAIN_UNITS for tp in m.TPS_FOR_GEN[g] + ), + ) + m.KALAELOA_DUCT_BURNER_DISPATCH_POINTS = Set( + dimen=2, + initialize=lambda m: ( + (g, tp) for g in m.KALAELOA_DUCT_BURNERS for tp in m.TPS_FOR_GEN[g] + ), + ) + m.KALAELOA_ACTIVE_TIMEPOINTS = Set( + dimen=1, + initialize=lambda m: unique_list( + tp for g, tp in m.KALAELOA_MAIN_UNIT_DISPATCH_POINTS + ), + ) + + # run kalaeloa at full power or not + # (if linearized, this is the fraction of capacity that is dispatched) + m.RunKalaeloaUnitFull = Var(m.KALAELOA_MAIN_UNIT_DISPATCH_POINTS, within=Binary) + + m.Run_Kalaeloa_Unit_Full_Enforce = Constraint( # big-m constraint + m.KALAELOA_MAIN_UNIT_DISPATCH_POINTS, + rule=lambda m, g, tp: m.DispatchGen[g, tp] + + (1 - m.RunKalaeloaUnitFull[g, tp]) * m.gen_capacity_limit_mw[g] + >= m.GenCapacityInTP[g, tp] * m.gen_availability[g], + ) + + # only run duct burner if all main units are full-on + m.Run_Kalaeloa_Duct_Burner_Only_When_Full = Constraint( + m.KALAELOA_DUCT_BURNER_DISPATCH_POINTS, + m.KALAELOA_MAIN_UNITS, + rule=lambda m, g_duct, tp, g_main: m.DispatchGen[g_duct, tp] + <= m.RunKalaeloaUnitFull[g_main, tp] * m.gen_capacity_limit_mw[g_duct], + ) + + # force at least one Kalaeloa unit to run at full power at all times + # (actually 75 MW, based on fig 9 of RPS Study) + # unless they are both on maintenance outage (per John Cole e-mail 9/28/16) + def Kalaeloa_Must_Run_rule(m, tp): + try: + both_units_out = ( + sum(m.gen_max_commit_fraction[g, tp] for g in m.KALAELOA_MAIN_UNITS) + == 0 + ) + except AttributeError: + both_units_out = False + + # We assume that Kalaeloa's must-run rule applies only until the + # refineries close, as specified in the m.oahu_refineries_closed parameter + if both_units_out or ( + tp in m.REFINERIES_CLOSED_TPS + and not m.options.run_kalaeloa_even_with_high_rps + ): + return Constraint.Skip + else: + return sum(m.DispatchGen[g, tp] for g in m.KALAELOA_MAIN_UNITS) >= 75.0 + + m.Kalaeloa_Must_Run = Constraint( + m.KALAELOA_ACTIVE_TIMEPOINTS, rule=Kalaeloa_Must_Run_rule + ) + + +def schofield(m): + """ + Require Schofield to run on at least 50% biodiesel (as required by Army). We + generalize that to 50% renewable fuel. + See https://www.power-eng.com/2017/08/21/schofield-generating-station-highlights-value-of-reciprocating-engines/ + and pp. 
18-19 of https://dms.puc.hawaii.gov/dms/DocumentViewer?pid=A1001001A15I30B50504F50301 + and https://www.govtech.com/fs/Power-Plant-in-Hawaii-to-Run-Partly-on-Biofuel.html + """ + + m.SCHOFIELD_GENS = Set( + dimen=1, + initialize=m.GENERATION_PROJECTS, + filter=lambda m, g: "schofield" in g.lower(), + ) + m.One_Schofield = BuildCheck(rule=lambda m: len(m.SCHOFIELD_GENS) == 1) + + if not hasattr(m, "f_rps_eligible"): + raise RuntimeError( + "The {} module requires the hawaii.rps module.".format(__name__) + ) + + def rule(m, g, t): + if (g, t) not in m.GEN_TPS: + return Constraint.Skip # beyond retirement date + all_fuel = sum(m.GenFuelUseRate[g, t, f] for f in m.FUELS_FOR_GEN[g]) + renewable_fuel = sum( + m.GenFuelUseRate[g, t, f] for f in m.FUELS_FOR_GEN[g] if m.f_rps_eligible[f] + ) + return renewable_fuel >= 0.5 * all_fuel + + m.Schofield_50_Percent_Renewable = Constraint( + m.SCHOFIELD_GENS, m.TIMEPOINTS, rule=rule + ) + + +def cogen(m): + """ + Shutdown small cogen plants when refineries are closed. + Don't burn biodiesel in cogen plants. + """ + m.REFINERY_GENS = Set( + dimen=1, + initialize=m.GENERATION_PROJECTS, + filter=lambda m, g: any(rg in g for rg in ["Hawaii_Cogen", "Tesoro_Hawaii"]), + ) + m.Two_Refinery_Gens = BuildCheck(rule=lambda m: len(m.REFINERY_GENS) == 2) + + # relax commitment requirement when refineries are closed + def rule(m, g, tp): + if (g, tp) in m.Enforce_Commit_Lower_Limit: + print("relaxing commitment for {}, {}".format(g, tp)) + m.Enforce_Commit_Lower_Limit[g, tp].deactivate() + + m.Relax_Refinery_Cogen_Baseload_Constraint = BuildAction( + m.REFINERY_GENS, m.REFINERIES_CLOSED_TPS, rule=rule + ) + # force 0 production when refineries are closed + def rule(m, g, t): + if (g, t) not in m.GEN_TPS: + return Constraint.Skip # beyond retirement date + else: + return m.DispatchGen[g, t] == 0 + + m.Shutdown_Refinery_Cogens = Constraint( + m.REFINERY_GENS, m.REFINERIES_CLOSED_TPS, rule=rule + ) + + m.REFINERY_BIOFUELS = Set( + dimen=1, + initialize=lambda m: unique_list( + f + for g in m.REFINERY_GENS + for f in m.FUELS_FOR_GEN[g] + if m.f_rps_eligible[f] + ), + ) + # don't burn biofuels in cogen plants + def rule(m, g, t, f): + if (g, t, f) not in m.GenFuelUseRate: + return Constraint.Skip # beyond retirement date or wrong fuel + else: + return m.GenFuelUseRate[g, t, f] == 0.0 + + m.Cogen_No_Biofuel = Constraint( + m.REFINERY_GENS, m.TIMEPOINTS, m.REFINERY_BIOFUELS, rule=rule + ) diff --git a/switch_model/hawaii/psip_2016_04.py b/switch_model/hawaii/psip_2016_04.py index 2d4e6731c..4ea850db6 100644 --- a/switch_model/hawaii/psip_2016_04.py +++ b/switch_model/hawaii/psip_2016_04.py @@ -3,15 +3,33 @@ import os from pyomo.environ import * + def define_arguments(argparser): - argparser.add_argument('--psip-force', action='store_true', default=True, - help="Force following of PSIP plans (retiring AES and building certain technologies).") - argparser.add_argument('--psip-relax', dest='psip_force', action='store_false', - help="Relax PSIP plans, to find a more optimal strategy.") - argparser.add_argument('--psip-minimal-renewables', action='store_true', default=False, - help="Use only the amount of renewables shown in PSIP plans, and no more (should be combined with --psip-relax).") - argparser.add_argument('--force-build', nargs=3, default=None, - help="Force construction of at least a certain quantity of a particular technology during certain years. 
Space-separated list of year, technology and quantity.") + argparser.add_argument( + "--psip-force", + action="store_true", + default=True, + help="Force following of PSIP plans (retiring AES and building certain technologies).", + ) + argparser.add_argument( + "--psip-relax", + dest="psip_force", + action="store_false", + help="Relax PSIP plans, to find a more optimal strategy.", + ) + argparser.add_argument( + "--psip-minimal-renewables", + action="store_true", + default=False, + help="Use only the amount of renewables shown in PSIP plans, and no more (should be combined with --psip-relax).", + ) + argparser.add_argument( + "--force-build", + nargs=3, + default=None, + help="Force construction of at least a certain quantity of a particular technology during certain years. Space-separated list of year, technology and quantity.", + ) + def define_components(m): ################### @@ -21,7 +39,7 @@ def define_components(m): # decide whether to enforce the PSIP preferred plan # if an environment variable is set, that takes precedence # (e.g., on a cluster to override options.txt) - psip_env_var = os.environ.get('USE_PSIP_PLAN') + psip_env_var = os.environ.get("USE_PSIP_PLAN") if psip_env_var is None: # no environment variable; use the --psip-relax flag psip = m.options.psip_force @@ -30,7 +48,11 @@ def define_components(m): elif psip_env_var.lower() in ["0", "false", "n", "no", "off"]: psip = False else: - raise ValueError('Unrecognized value for environment variable USE_PSIP_PLAN={} (should be 0 or 1)'.format(psip_env_var)) + raise ValueError( + "Unrecognized value for environment variable USE_PSIP_PLAN={} (should be 0 or 1)".format( + psip_env_var + ) + ) if psip: print("Using PSIP construction plan.") @@ -51,63 +73,71 @@ def define_components(m): # are underway and military projects are being built for their own # reasons) technology_targets_definite = [ - (2016, 'CentralTrackingPV', 27.6), # Waianae Solar by Eurus Energy America - (2018, 'IC_Schofield', 54.0), - (2018, 'IC_Barge', 100.0), # JBPHH plant - (2021, 'IC_MCBH', 27.0), + (2016, "CentralTrackingPV", 27.6), # Waianae Solar by Eurus Energy America + (2018, "IC_Schofield", 54.0), + (2018, "IC_Barge", 100.0), # JBPHH plant + (2021, "IC_MCBH", 27.0), # Distributed PV from Figure J-19 - (2016, 'DistPV', 443.993468266547 - 210), # net of 210 MW of pre-existing DistPV - (2017, 'DistPV', 92.751756737742), - (2018, 'DistPV', 27.278236032368), - (2019, 'DistPV', 26.188129564885), + ( + 2016, + "DistPV", + 443.993468266547 - 210, + ), # net of 210 MW of pre-existing DistPV + (2017, "DistPV", 92.751756737742), + (2018, "DistPV", 27.278236032368), + (2019, "DistPV", 26.188129564885), ] # technologies proposed in PSIP but which may not be built if a # better plan is found technology_targets_psip = [ - (2018, 'OnshoreWind', 24), # NPM wind - (2018, 'CentralTrackingPV', 109.6), # replacement for canceled SunEdison projects - (2018, 'OnshoreWind', 10), # CBRE wind - (2018, 'CentralTrackingPV', 15), # CBRE PV - (2020, 'OnshoreWind', 30), - (2020, 'CentralTrackingPV', 60), - (2021, 'CC_383', 383.0), - (2030, 'CentralTrackingPV', 100), - (2030, 'OffshoreWind', 200), - (2040, 'CentralTrackingPV', 200), - (2040, 'OffshoreWind', 200), - (2045, 'CentralTrackingPV', 300), - (2045, 'OffshoreWind', 400), - (2020, 'DistPV', 21.8245069017911), - (2021, 'DistPV', 15.27427771741), - (2022, 'DistPV', 12.0039583149589), - (2023, 'DistPV', 10.910655054315), - (2024, 'DistPV', 10.913851847475), - (2025, 'DistPV', 10.910655054316), - (2026, 'DistPV', 
9.82054858683205), - (2027, 'DistPV', 10.910655054316), - (2028, 'DistPV', 10.910655054315), - (2029, 'DistPV', 14.1873680430859), - (2030, 'DistPV', 9.82054858683205), - (2031, 'DistPV', 10.913851847475), - (2032, 'DistPV', 9.82054858683193), - (2033, 'DistPV', 14.1841712499261), - (2034, 'DistPV', 7.64033565186492), - (2035, 'DistPV', 13.094064782442), - (2036, 'DistPV', 9.82054858683205), - (2037, 'DistPV', 10.9202454337949), - (2038, 'DistPV', 9.66989970917803), - (2039, 'DistPV', 12.1514103994531), - (2040, 'DistPV', 12.2397218104919), - (2041, 'DistPV', 11.7673956211361), - (2042, 'DistPV', 10.9106550543149), - (2043, 'DistPV', 9.82054858683205), - (2044, 'DistPV', 15.27747451057), - (2045, 'DistPV', 10.291675978754), + (2018, "OnshoreWind", 24), # NPM wind + ( + 2018, + "CentralTrackingPV", + 109.6, + ), # replacement for canceled SunEdison projects + (2018, "OnshoreWind", 10), # CBRE wind + (2018, "CentralTrackingPV", 15), # CBRE PV + (2020, "OnshoreWind", 30), + (2020, "CentralTrackingPV", 60), + (2021, "CC_383", 383.0), + (2030, "CentralTrackingPV", 100), + (2030, "OffshoreWind", 200), + (2040, "CentralTrackingPV", 200), + (2040, "OffshoreWind", 200), + (2045, "CentralTrackingPV", 300), + (2045, "OffshoreWind", 400), + (2020, "DistPV", 21.8245069017911), + (2021, "DistPV", 15.27427771741), + (2022, "DistPV", 12.0039583149589), + (2023, "DistPV", 10.910655054315), + (2024, "DistPV", 10.913851847475), + (2025, "DistPV", 10.910655054316), + (2026, "DistPV", 9.82054858683205), + (2027, "DistPV", 10.910655054316), + (2028, "DistPV", 10.910655054315), + (2029, "DistPV", 14.1873680430859), + (2030, "DistPV", 9.82054858683205), + (2031, "DistPV", 10.913851847475), + (2032, "DistPV", 9.82054858683193), + (2033, "DistPV", 14.1841712499261), + (2034, "DistPV", 7.64033565186492), + (2035, "DistPV", 13.094064782442), + (2036, "DistPV", 9.82054858683205), + (2037, "DistPV", 10.9202454337949), + (2038, "DistPV", 9.66989970917803), + (2039, "DistPV", 12.1514103994531), + (2040, "DistPV", 12.2397218104919), + (2041, "DistPV", 11.7673956211361), + (2042, "DistPV", 10.9106550543149), + (2043, "DistPV", 9.82054858683205), + (2044, "DistPV", 15.27747451057), + (2045, "DistPV", 10.291675978754), ] if m.options.force_build is not None: b = list(m.options.force_build) - b[0] = int(b[0]) # year + b[0] = int(b[0]) # year b[2] = float(b[2]) # quantity b = tuple(b) print("Forcing build: {}".format(b)) @@ -123,11 +153,18 @@ def technology_target_init(m, per, tech): start = 2000 if per == m.PERIODS.first() else per end = per + m.period_length_years[per] target = sum( - mw for (tyear, ttech, mw) in technology_targets - if ttech == tech and start <= tyear and tyear < end + mw + for (tyear, ttech, mw) in technology_targets + if ttech == tech and start <= tyear and tyear < end ) return target - m.technology_target = Param(m.PERIODS, m.GENERATION_TECHNOLOGIES, initialize=technology_target_init) + + m.technology_target = Param( + m.PERIODS, + m.GENERATION_TECHNOLOGIES, + within=Reals, + initialize=technology_target_init, + ) # with PSIP: BuildGen is zero except for technology_targets # (sum during each period or before first period) @@ -142,7 +179,9 @@ def adjust_psip_credit(g, target): # needed to exactly meet the target. # This is needed because some of the targets are based on # nominal unit sizes rather than actual max output. 
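+ # Hypothetical worked example: a 180 MW target with 50 MW units is 180/50 = 3.6 nominal units, which rounds to 4 actual units (200 MW); the credit (180/50)/4 = 0.9 then scales the 200 MW actually built back to the 180 MW target.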
- return (target / m.gen_unit_size[g]) / round(target / m.gen_unit_size[g]) + return (target / m.gen_unit_size[g]) / round( + target / m.gen_unit_size[g] + ) else: return 1.0 @@ -164,52 +203,67 @@ def adjust_psip_credit(g, target): ) return Constraint.Infeasible elif psip: - return (build == target) - elif m.options.psip_minimal_renewables and any(txt in tech for txt in ["PV", "Wind", "Solar"]): + return build == target + elif m.options.psip_minimal_renewables and any( + txt in tech for txt in ["PV", "Wind", "Solar"] + ): # only build the specified amount of renewables, no more - return (build == target) + return build == target else: # treat the target as a lower bound - return (build >= target) + return build >= target + m.Enforce_Technology_Target = Constraint( m.PERIODS, m.GENERATION_TECHNOLOGIES, rule=Enforce_Technology_Target_rule ) - aes_g = 'Oahu_AES' + aes_g = "Oahu_AES" aes_size = 180 aes_bld_year = 1992 - m.AES_OPERABLE_PERIODS = Set(initialize = lambda m: - m.PERIODS_FOR_GEN_BLD_YR[aes_g, aes_bld_year] + m.AES_OPERABLE_PERIODS = Set( + dimen=1, initialize=lambda m: m.PERIODS_FOR_GEN_BLD_YR[aes_g, aes_bld_year] ) m.OperateAES = Var(m.AES_OPERABLE_PERIODS, within=Binary) - m.Enforce_AES_Deactivate = Constraint(m.TIMEPOINTS, rule=lambda m, tp: - Constraint.Skip if (aes_g, tp) not in m.GEN_TPS - else (m.DispatchGen[aes_g, tp] <= m.OperateAES[m.tp_period[tp]] * aes_size) + m.Enforce_AES_Deactivate = Constraint( + m.TIMEPOINTS, + rule=lambda m, tp: Constraint.Skip + if (aes_g, tp) not in m.GEN_TPS + else (m.DispatchGen[aes_g, tp] <= m.OperateAES[m.tp_period[tp]] * aes_size), ) - m.AESDeactivateFixedCost = Expression(m.PERIODS, rule=lambda m, per: - 0.0 if per not in m.AES_OPERABLE_PERIODS - else - m.OperateAES[per] * aes_size * m.gen_fixed_om[aes_g, aes_bld_year] + m.AESDeactivateFixedCost = Expression( + m.PERIODS, + rule=lambda m, per: 0.0 + if per not in m.AES_OPERABLE_PERIODS + else -m.OperateAES[per] * aes_size * m.gen_fixed_om[aes_g, aes_bld_year], ) - m.Cost_Components_Per_Period.append('AESDeactivateFixedCost') + m.Cost_Components_Per_Period.append("AESDeactivateFixedCost") if psip: # keep AES active until 2022 or just before; deactivate after that - m.PSIP_Retire_AES = Constraint(m.AES_OPERABLE_PERIODS, rule=lambda m, per: - (m.OperateAES[per] == 1) if per + m.period_length_years[per] <= 2022 - else (m.OperateAES[per] == 0) + m.PSIP_Retire_AES = Constraint( + m.AES_OPERABLE_PERIODS, + rule=lambda m, per: (m.OperateAES[per] == 1) + if per + m.period_length_years[per] <= 2022 + else (m.OperateAES[per] == 0), ) # before 2040: no biodiesel, and only 100-300 GWh of non-LNG fossil fuels # period including 2040-2045: <= 300 GWh of oil; unlimited biodiesel or LNG # no biodiesel before 2040 (then phased in fast enough to meet the RPS) - m.EARLY_BIODIESEL_MARKETS = Set(dimen=2, initialize=lambda m: [ - (rfm, per) - for per in m.PERIODS if per + m.period_length_years[per] <= 2040 - for rfm in m.REGIONAL_FUEL_MARKETS if m.rfm_fuel == 'Biodiesel' - ]) - m.NoEarlyBiodiesel = Constraint(m.EARLY_BIODIESEL_MARKETS, rule=lambda m, rfm, per: - m.FuelConsumptionInMarket[rfm, per] == 0 + m.EARLY_BIODIESEL_MARKETS = Set( + dimen=2, + initialize=lambda m: [ + (rfm, per) + for per in m.PERIODS + if per + m.period_length_years[per] <= 2040 + for rfm in m.REGIONAL_FUEL_MARKETS + if m.rfm_fuel[rfm] == "Biodiesel" + ], + ) + m.NoEarlyBiodiesel = Constraint( + m.EARLY_BIODIESEL_MARKETS, + rule=lambda m, rfm, per: m.FuelConsumptionInMarket[rfm, per] == 0, ) # # 100-300 GWh of non-LNG fuels in 2021-2040 (based 
on 2016-04 PSIP fig. 5-5) @@ -268,18 +322,27 @@ def adjust_psip_credit(g, target): # don't allow construction of any advanced technologies (e.g., batteries, pumped hydro, fuel cells) advanced_tech_vars = [ "BuildBattery", - "BuildPumpedHydroMW", "BuildAnyPumpedHydro", - "BuildElectrolyzerMW", "BuildLiquifierKgPerHour", "BuildLiquidHydrogenTankKg", + "BuildPumpedHydroMW", + "BuildAnyPumpedHydro", + "BuildElectrolyzerMW", + "BuildLiquifierKgPerHour", + "BuildLiquidHydrogenTankKg", "BuildFuelCellMW", ] + def no_advanced_tech_rule_factory(v): return lambda m, *k: (getattr(m, v)[k] == 0) + for v in advanced_tech_vars: try: var = getattr(m, v) - setattr(m, "PSIP_No_"+v, Constraint(var._index, rule=no_advanced_tech_rule_factory(v))) + setattr( + m, + "PSIP_No_" + v, + Constraint(var._index, rule=no_advanced_tech_rule_factory(v)), + ) except AttributeError: - pass # model doesn't have this var + pass # model doesn't have this var # # don't allow any changes to the fuel market, including bulk LNG # # not used now; use "--force-lng-tier container" instead diff --git a/switch_model/hawaii/psip_2016_12.py b/switch_model/hawaii/psip_2016_12.py index 38104884d..5333b3092 100644 --- a/switch_model/hawaii/psip_2016_12.py +++ b/switch_model/hawaii/psip_2016_12.py @@ -5,25 +5,51 @@ import os from pyomo.environ import * + def TODO(note): raise NotImplementedError(dedent(note)) + def define_arguments(argparser): - argparser.add_argument('--psip-force', action='store_true', default=True, - help="Force following of PSIP plans (retiring AES and building certain technologies).") - argparser.add_argument('--psip-relax', dest='psip_force', action='store_false', - help="Relax PSIP plans, to find a more optimal strategy.") - argparser.add_argument('--psip-minimal-renewables', action='store_true', default=False, - help="Use only the amount of renewables shown in PSIP plans, and no more (should be combined with --psip-relax).") - argparser.add_argument('--force-build', nargs=3, default=None, - help="Force construction of at least a certain quantity of a particular technology during certain years. Space-separated list of year, technology and quantity.") - argparser.add_argument('--psip-relax-after', type=float, default=None, - help="Follow the PSIP plan up to and including the specified year, then optimize construction in later years. Should be combined with --psip-force.") + argparser.add_argument( + "--psip-force", + action="store_true", + default=True, + help="Force following of PSIP plans (retiring AES and building certain technologies).", + ) + argparser.add_argument( + "--psip-relax", + dest="psip_force", + action="store_false", + help="Relax PSIP plans, to find a more optimal strategy.", + ) + argparser.add_argument( + "--psip-minimal-renewables", + action="store_true", + default=False, + help="Use only the amount of renewables shown in PSIP plans, and no more (should be combined with --psip-relax).", + ) + argparser.add_argument( + "--force-build", + nargs=3, + default=None, + help="Force construction of at least a certain quantity of a particular technology during certain years. Space-separated list of year, technology and quantity.", + ) + argparser.add_argument( + "--psip-relax-after", + type=float, + default=None, + help="Follow the PSIP plan up to and including the specified year, then optimize construction in later years. 
Should be combined with --psip-force.", + ) + def is_renewable(tech): return any(txt in tech for txt in ("PV", "Wind", "Solar")) + + def is_battery(tech): - return 'battery' in tech.lower() + return "battery" in tech.lower() + def define_components(m): ################### @@ -33,7 +59,7 @@ def define_components(m): # decide whether to enforce the PSIP preferred plan # if an environment variable is set, that takes precedence # (e.g., on a cluster to override options.txt) - psip_env_var = os.environ.get('USE_PSIP_PLAN') + psip_env_var = os.environ.get("USE_PSIP_PLAN") if psip_env_var is None: # no environment variable; use the --psip-relax flag psip = m.options.psip_force @@ -42,7 +68,11 @@ def define_components(m): elif psip_env_var.lower() in ["0", "false", "n", "no", "off"]: psip = False else: - raise ValueError('Unrecognized value for environment variable USE_PSIP_PLAN={} (should be 0 or 1)'.format(psip_env_var)) + raise ValueError( + "Unrecognized value for environment variable USE_PSIP_PLAN={} (should be 0 or 1)".format( + psip_env_var + ) + ) if m.options.verbose: if psip: @@ -108,7 +138,7 @@ def define_components(m): # add targets specified on the command line if m.options.force_build is not None: b = list(m.options.force_build) - b[0] = int(b[0]) # year + b[0] = int(b[0]) # year b[2] = float(b[2]) # quantity b = tuple(b) print("Forcing build: {}".format(b)) @@ -131,38 +161,36 @@ def define_components(m): # Not clear why it picked offshore before onshore (maybe bad resource profiles?). But # in their final plan (table 4-1), HECO changed it to 200 MW offshore in 2025 # (presumably rebuilt in 2045) and 30 MW onshore in 2045. - (2018, 'OnshoreWind', 24), # Na Pua Makani (NPM) wind - (2018, 'OnshoreWind', 10), # CBRE wind + (2018, "OnshoreWind", 24), # Na Pua Makani (NPM) wind + (2018, "OnshoreWind", 10), # CBRE wind # note: 109.6 MW SunEdison replacements are in Existing Plants workbook. - # note: RESOLVE had 53.6 MW of planned PV, which is probably Waianae (27.6), Kalaeloa (5) # and West Loch (20). Then it added these (table 3-1): 2020: 300 MW (capped, see "renewable_limits.tab"), # 2022: 48 MW, 2025: 193 MW, 2040: 541 (incl. 300 MW rebuild), 2045: 1400 MW (incl. 241 MW rebuild). # HECO converted this to 109.6 MW of replacement SunEdison waiver projects in 2018 # (we list those as "existing") and other additions shown below. 
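+ # Each (year, technology, MW) tuple below is summed into m.technology_target for the period whose window includes that year (see technology_target_init further down).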
- (2018, 'CentralTrackingPV', 15), # CBRE PV - (2020, 'CentralTrackingPV', 180), - (2022, 'CentralTrackingPV', 40), - (2022, 'IC_Barge', 100.0), # JBPHH plant + (2018, "CentralTrackingPV", 15), # CBRE PV + (2020, "CentralTrackingPV", 180), + (2022, "CentralTrackingPV", 40), + (2022, "IC_Barge", 100.0), # JBPHH plant # note: we moved IC_MCBH one year earlier than PSIP to reduce infeasibility in 2022 - (2022, 'IC_MCBH', 54.0), - (2025, 'CentralTrackingPV', 200), - (2025, 'OffshoreWind', 200), - (2040, 'CentralTrackingPV', 280), - (2045, 'CentralTrackingPV', 1180), - (2045, 'IC_MCBH', 68.0), # proxy for 68 MW of generic ICE capacity - + (2022, "IC_MCBH", 54.0), + (2025, "CentralTrackingPV", 200), + (2025, "OffshoreWind", 200), + (2040, "CentralTrackingPV", 280), + (2045, "CentralTrackingPV", 1180), + (2045, "IC_MCBH", 68.0), # proxy for 68 MW of generic ICE capacity # batteries (MW) # from PSIP 2016-12-23 Table 4-1; also see energy ("capacity") and power files in # "data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/DBEDT-IR-12/Input/Oahu/Oahu E3 Plan Input/CSV files/Battery" # (note: we mistakenly treated these as MWh quantities instead of MW before 2018-02-20) - (2019, 'Battery_Conting', 90), - (2022, 'Battery_4', 426), - (2025, 'Battery_4', 29), - (2030, 'Battery_4', 165), - (2035, 'Battery_4', 168), - (2040, 'Battery_4', 420), - (2045, 'Battery_4', 1525), + (2019, "Battery_Conting", 90), + (2022, "Battery_4", 426), + (2025, "Battery_4", 29), + (2030, "Battery_4", 165), + (2035, "Battery_4", 168), + (2040, "Battery_4", 420), + (2045, "Battery_4", 1525), # RESOLVE modeled 4-hour batteries as being capable of providing reserves, # and didn't model contingency batteries (see data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/CA-IR-1/Input # and Output Files by Case/E3 and Company Defined Cases/Market DGPV (Reference)/OA_NOLNG/technologies.tab). @@ -177,19 +205,76 @@ def define_components(m): # (for all islands). # TODO: Did HECO assume 4-hour batteries, demand response or EVs could provide reserves when running PLEXOS? # - all of these seem unlikely, but we have to ask HECO to find out; PLEXOS files are unclear. - # installations based on changes in installed capacity shown in - # data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/CA-IR-1/Input and Output Files by Case/E3 Company Defined Cases/Market DGPV (Reference)/OA_NOLNG/planned_installed_capacities.tab + # /s/data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/CA-IR-1/Input and Output Files by Case/E3 and Company Defined Cases/Market DGPV (Reference)/OA_NOLNG/planned_installed_capacities.tab # Also see Figure J-10 of 2016-12-23 PSIP (Vol. 3), which matches these levels (excluding FIT(?)). # Note: code further below adds in reconstruction of early installations - (2020, "DistPV", 606.3-444), # net of 444 installed as of 2016 (in existing generators workbook) - (2022, "DistPV", 680.3-606.3), - (2025, "DistPV", 744.9-680.3), - (2030, "DistPV", 868.7-744.9), - (2035, "DistPV", 1015.4-868.7), - (2040, "DistPV", 1163.4-1015.4), - (2045, "DistPV", 1307.9-1163.4), + ( + 2020, + "DistPV", + 606.3 - 444, + ), # net of 444 installed as of 2016 (in existing generators workbook) + (2022, "DistPV", 680.3 - 606.3), + (2025, "DistPV", 744.9 - 680.3), + (2030, "DistPV", 868.7 - 744.9), + (2035, "DistPV", 1015.4 - 868.7), + (2040, "DistPV", 1163.4 - 1015.4), + (2045, "DistPV", 1307.9 - 1163.4), ] + TODO( + """ + Need to convert DistPV target into a joint target for FlatDistPV and + SlopedDistPV. 
See switch_model.heco_outlook_2019. + """ + ) + + """ + Additional notes on distributed storage (never implemented here, but + implemented using a later forecast in heco_outlook_2019). + + # NOTE: we add together all the different distributed PV programs in + # Figure J-10, on the assumption that private systems (including those + # on self-supply tariffs) will only be curtailed at times when the whole + # system is curtailed, so there's no need to model different private + # curtailment behavior. This is equivalent to assuming that HECO + # eventually offers some program to accept power from CSS and SIA + # systems when the system can use it, instead of forcing curtailment at + # those times. + + # NOTE: It is unclear from PSIP (p. J-25) whether the forecasted "New Grid + # Export" program in Fig. J-10 corresponds to the "CGS+" tariff (can + # export during day or the "Smart Export" tariff (can only export at + # night); both were introduced in late 2017 + # https://www.hawaiianelectric.com/documents/products_and_services/customer_renewable_programs/20171020_hawaii_PUC_rooftop_solar_and_storage_press_release.pdf + # We assume this corresponds to CGS+. + + # Distributed energy storage (DESS) forecasted in PSIP Table J-27, p. + # J-65, "O'ahu Self-Supply DESS Forecast Cumulative Installed Capacity". + # PSIP p. G-12 reports that distributed batteries have two hour life, + # but that seems short for long-term system design, so we use 4 hours. + (2020, "DistBattery", ((56)/4, 4)), + (2022, "DistBattery", ((79-56)/4, 4)), + (2025, "DistBattery", ((108-79)/4, 4)), + (2030, "DistBattery", ((157-108)/4, 4)), + (2035, "DistBattery", ((213-157)/4, 4)), + (2040, "DistBattery", ((264-213)/4, 4)), + (2045, "DistBattery", ((306-264)/4, 4)), + # TODO: We could potentially model part of the DESS as being paired with + # some amount of PV from the CSS pool. (PSIP p. J-25 says distributed + # energy storage systems (DESS) were paired with DGPV for small + # customers and sized optimally, but large customers were assumed not to + # need it because they could take daytime load reductions directly.) + # However, since PSIP reports that storage sizes were optimized, we + # assume these batteries are able to serve load as effectively as + # centralized batteries, so we just model them as generic batteries. + + # NOTE: PSIP p. J-25 says "Additional stand-alone DESS, not necessarily + # paired with PV, were projected to participate in Demand Response + # programs". PSIP doesn't show these quantities and they are not in the + # RESOLVE inputs (the PV-paired DESS weren't in RESOLVE either). We + # assume these are part of the pool of bulk storage selected by Switch, + # since they participate on an economic basis. + """ # Rebuild renewable projects at retirement (20 years), as specified in the PSIP # note: this doesn't include DistPV, because those are part of a forecast, not a plan, so they already @@ -222,23 +307,33 @@ def define_components(m): ] existing_techs += technology_targets_definite existing_techs += technology_targets_psip - # rebuild all renewables at retirement (20 years for RE, 15 years for batteries) + TODO( + """ + Need to read lifetime of projects and rebuild at retirement. 
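+ (For now, the code below approximates this with fixed lifetimes: renewables are rebuilt 20 years after each installation, batteries after 15.)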
+ """ + ) + # rebuild everything at retirement + rebuild_targets = [ - (y+20, tech, cap) for y, tech, cap in existing_techs if is_renewable(tech) + (y + 20, tech, cap) for y, tech, cap in existing_techs if is_renewable(tech) ] + [ - (y+15, tech, cap) for y, tech, cap in existing_techs if is_battery(tech) - ] # note: early batteries won't quite need 2 replacements + (y + 15, tech, cap) for y, tech, cap in existing_techs if is_battery(tech) + ] # note: early batteries won't quite need 2 replacements # don't schedule rebuilding past end of study rebuild_targets = [t for t in rebuild_targets if t[0] <= 2045] technology_targets_psip += rebuild_targets # make sure LNG is turned off if psip and getattr(m.options, "force_lng_tier", []) != ["none"]: - raise RuntimeError('You must use the lng_conversion module and set "--force-lng-tier none" to match the PSIP.') + raise RuntimeError( + 'You must use the lng_conversion module and set "--force-lng-tier none" to match the PSIP.' + ) if psip: if m.options.psip_relax_after is not None: - psip_targets = [t for t in technology_targets_psip if t[0] <= m.options.psip_relax_after] + psip_targets = [ + t for t in technology_targets_psip if t[0] <= m.options.psip_relax_after + ] else: psip_targets = technology_targets_psip technology_targets = technology_targets_definite + psip_targets @@ -246,12 +341,17 @@ def define_components(m): technology_targets = technology_targets_definite # make a special list including all standard generation technologies plus "LoadShiftBattery" - m.GEN_TECHS_AND_BATTERIES = Set(initialize=lambda m: [g for g in m.GENERATION_TECHNOLOGIES] + ["LoadShiftBattery"]) + m.GEN_TECHS_AND_BATTERIES = Set( + dimen=1, + initialize=lambda m: [g for g in m.GENERATION_TECHNOLOGIES] + + ["LoadShiftBattery"], + ) # make a list of renewable technologies m.RENEWABLE_TECHNOLOGIES = Set( + dimen=1, initialize=m.GENERATION_TECHNOLOGIES, - filter=lambda m, tech: is_renewable(tech) + filter=lambda m, tech: is_renewable(tech), ) def technology_target_init(m, per, tech): @@ -260,11 +360,18 @@ def technology_target_init(m, per, tech): start = 2000 if per == m.PERIODS.first() else m.PERIODS.prev(per) end = per target = sum( - mw for (tyear, ttech, mw) in technology_targets - if ttech == tech and start < tyear and tyear <= end + mw + for (tyear, ttech, mw) in technology_targets + if ttech == tech and start < tyear and tyear <= end ) return target - m.technology_target = Param(m.PERIODS, m.GEN_TECHS_AND_BATTERIES, initialize=technology_target_init) + + m.technology_target = Param( + m.PERIODS, + m.GEN_TECHS_AND_BATTERIES, + within=NonNegativeReals, + initialize=technology_target_init, + ) def MakeGenTechDicts_rule(m): # get unit sizes of all technologies @@ -273,14 +380,17 @@ def MakeGenTechDicts_rule(m): tech = m.gen_tech[g] if tech in unit_sizes: if unit_sizes[tech] != unit_size: - raise ValueError("Generation technology {} uses different unit sizes for different projects.") + raise ValueError( + "Generation technology {} uses different unit sizes for different projects." 
+ .format(tech) + ) + else: + unit_sizes[tech] = unit_size # get predetermined capacity for all technologies predet_cap = m.gen_tech_predetermined_cap_dict = defaultdict(float) - for (g, per), cap in m.gen_predetermined_cap.items(): + for (g, per), cap in m.build_gen_predetermined.items(): tech = m.gen_tech[g] predet_cap[tech, per] += cap + m.MakeGenTechDicts = BuildAction(rule=MakeGenTechDicts_rule) # with PSIP: BuildGen is zero except for technology_targets @@ -291,18 +401,27 @@ def Enforce_Technology_Target_rule(m, per, tech): # get target, including any capacity specified in the predetermined builds, # so the target will be additional to those - target = m.technology_target[per, tech] + m.gen_tech_predetermined_cap_dict[tech, per] + target = ( + m.technology_target[per, tech] + + m.gen_tech_predetermined_cap_dict[tech, per] + ) # convert target to closest integral number of units # (some of the targets are based on nominal unit sizes rather than actual max output) if m.gen_tech_unit_size_dict[tech] > 0.0: - target = round(target / m.gen_tech_unit_size_dict[tech]) * m.gen_tech_unit_size_dict[tech] + target = ( + round(target / m.gen_tech_unit_size_dict[tech]) + * m.gen_tech_unit_size_dict[tech] + ) if tech == "LoadShiftBattery": # special treatment for batteries, which are not a standard technology - if hasattr(m, 'BuildBattery'): + if hasattr(m, "BuildBattery"): # note: BuildBattery is in MWh, so we convert to MW - build = sum(m.BuildBattery[z, per] for z in m.LOAD_ZONES) / m.battery_min_discharge_time + build = ( + sum(m.BuildBattery[z, per] for z in m.LOAD_ZONES) + / m.battery_min_discharge_time + ) else: build = 0 else: @@ -322,54 +441,69 @@ def Enforce_Technology_Target_rule(m, per, tech): "Model will be infeasible.".format(tech, per) ) return Constraint.Infeasible - elif psip and per <= m.options.psip_relax_after: - return (build == target) + elif psip and ( + m.options.psip_relax_after is None or per <= m.options.psip_relax_after + ): + return build == target elif m.options.psip_minimal_renewables and tech in m.RENEWABLE_TECHNOLOGIES: # only build the specified amount of renewables, no more - return (build == target) + return build == target else: # treat the target as a lower bound - return (build >= target) + return build >= target + m.Enforce_Technology_Target = Constraint( m.PERIODS, m.GEN_TECHS_AND_BATTERIES, rule=Enforce_Technology_Target_rule ) - aes_g = 'Oahu_AES' + aes_g = "Oahu_AES" aes_size = 180 aes_bld_year = 1992 - m.AES_OPERABLE_PERIODS = Set(initialize = lambda m: - m.PERIODS_FOR_GEN_BLD_YR[aes_g, aes_bld_year] + m.AES_OPERABLE_PERIODS = Set( + dimen=1, initialize=lambda m: m.PERIODS_FOR_GEN_BLD_YR[aes_g, aes_bld_year] ) m.OperateAES = Var(m.AES_OPERABLE_PERIODS, within=Binary) - m.Enforce_AES_Deactivate = Constraint(m.TIMEPOINTS, rule=lambda m, tp: - Constraint.Skip if (aes_g, tp) not in m.GEN_TPS - else (m.DispatchGen[aes_g, tp] <= m.OperateAES[m.tp_period[tp]] * aes_size) + m.Enforce_AES_Deactivate = Constraint( + m.TIMEPOINTS, + rule=lambda m, tp: Constraint.Skip + if (aes_g, tp) not in m.GEN_TPS + else (m.DispatchGen[aes_g, tp] <= m.OperateAES[m.tp_period[tp]] * aes_size), ) - m.AESDeactivateFixedCost = Expression(m.PERIODS, rule=lambda m, per: - 0.0 if per not in m.AES_OPERABLE_PERIODS - else - m.OperateAES[per] * aes_size * m.gen_fixed_om[aes_g, aes_bld_year] + m.AESDeactivateFixedCost = Expression( + m.PERIODS, + rule=lambda m, per: 0.0 + if per not in m.AES_OPERABLE_PERIODS + else -m.OperateAES[per] * aes_size * m.gen_fixed_om[aes_g, aes_bld_year], ) - 
m.Cost_Components_Per_Period.append('AESDeactivateFixedCost') + m.Cost_Components_Per_Period.append("AESDeactivateFixedCost") if psip: # keep AES active until 9/2022; deactivate after that # note: since a period starts in 2022, we retire before that - m.PSIP_Retire_AES = Constraint(m.AES_OPERABLE_PERIODS, rule=lambda m, per: - (m.OperateAES[per] == 1) if per + m.period_length_years[per] <= 2022 - else (m.OperateAES[per] == 0) + m.PSIP_Retire_AES = Constraint( + m.AES_OPERABLE_PERIODS, + rule=lambda m, per: (m.OperateAES[per] == 1) + if per + m.period_length_years[per] <= 2022 + else (m.OperateAES[per] == 0), ) # before 2040: no biodiesel, and only 100-300 GWh of non-LNG fossil fuels # period including 2040-2045: <= 300 GWh of oil; unlimited biodiesel or LNG # no biodiesel before 2040 (then phased in fast enough to meet the RPS) - m.EARLY_BIODIESEL_MARKETS = Set(dimen=2, initialize=lambda m: [ - (rfm, per) - for per in m.PERIODS if per + m.period_length_years[per] <= 2040 - for rfm in m.REGIONAL_FUEL_MARKETS if m.rfm_fuel == 'Biodiesel' - ]) - m.NoEarlyBiodiesel = Constraint(m.EARLY_BIODIESEL_MARKETS, rule=lambda m, rfm, per: - m.FuelConsumptionInMarket[rfm, per] == 0 + m.EARLY_BIODIESEL_MARKETS = Set( + dimen=2, + initialize=lambda m: [ + (rfm, per) + for per in m.PERIODS + if per + m.period_length_years[per] <= 2040 + for rfm in m.REGIONAL_FUEL_MARKETS + if m.rfm_fuel[rfm] == "Biodiesel" + ], + ) + m.NoEarlyBiodiesel = Constraint( + m.EARLY_BIODIESEL_MARKETS, + rule=lambda m, rfm, per: m.FuelConsumptionInMarket[rfm, per] == 0, ) # # 100-300 GWh of non-LNG fuels in 2021-2040 (based on 2016-04 PSIP fig. 5-5) @@ -427,18 +561,27 @@ def Enforce_Technology_Target_rule(m, per, tech): # don't allow construction of other technologies (e.g., pumped hydro, fuel cells) advanced_tech_vars = [ - "BuildPumpedHydroMW", "BuildAnyPumpedHydro", - "BuildElectrolyzerMW", "BuildLiquifierKgPerHour", "BuildLiquidHydrogenTankKg", + "BuildPumpedHydroMW", + "BuildAnyPumpedHydro", + "BuildElectrolyzerMW", + "BuildLiquifierKgPerHour", + "BuildLiquidHydrogenTankKg", "BuildFuelCellMW", ] + def no_advanced_tech_rule_factory(v): return lambda m, *k: (getattr(m, v)[k] == 0) + for v in advanced_tech_vars: try: var = getattr(m, v) - setattr(m, "PSIP_No_"+v, Constraint(var._index, rule=no_advanced_tech_rule_factory(v))) + setattr( + m, + "PSIP_No_" + v, + Constraint(var._index, rule=no_advanced_tech_rule_factory(v)), + ) except AttributeError: - pass # model doesn't have this var + pass # model doesn't have this var # # don't allow any changes to the fuel market, including bulk LNG # # not used now; use "--force-lng-tier container" instead diff --git a/switch_model/hawaii/pumped_hydro.py b/switch_model/hawaii/pumped_hydro.py index dc8e8373f..3948cfb06 100644 --- a/switch_model/hawaii/pumped_hydro.py +++ b/switch_model/hawaii/pumped_hydro.py @@ -3,44 +3,61 @@ from pyomo.environ import * from switch_model.financials import capital_recovery_factor as crf + def define_arguments(argparser): - argparser.add_argument("--ph-mw", type=float, default=None, - help="Force construction of a certain total capacity of pumped storage hydro during one or more periods chosen by Switch") - argparser.add_argument("--ph-year", type=int, default=None, - help="Force all pumped storage hydro to be constructed during one particular year (must be in the list of periods)") + argparser.add_argument( + "--ph-mw", + type=float, + default=None, + help="Force construction of a certain total capacity of pumped storage hydro during one or more periods chosen by 
Switch", + ) + argparser.add_argument( + "--ph-year", + type=int, + default=None, + help="Force all pumped storage hydro to be constructed during one particular year (must be in the list of periods)", + ) + def define_components(m): - m.PH_GENS = Set() + m.PH_GENS = Set(dimen=1) - m.ph_load_zone = Param(m.PH_GENS) + m.ph_load_zone = Param(m.PH_GENS, within=m.LOAD_ZONES) m.ph_capital_cost_per_mw = Param(m.PH_GENS, within=NonNegativeReals) m.ph_project_life = Param(m.PH_GENS, within=NonNegativeReals) # annual O&M cost for pumped hydro project, percent of capital cost - m.ph_fixed_om_percent = Param(m.PH_GENS, within=NonNegativeReals) + m.ph_fixed_om_percent = Param(m.PH_GENS, within=Reals) # total annual cost - m.ph_fixed_cost_per_mw_per_year = Param(m.PH_GENS, initialize=lambda m, p: - m.ph_capital_cost_per_mw[p] * - (crf(m.interest_rate, m.ph_project_life[p]) + m.ph_fixed_om_percent[p]) + m.ph_fixed_cost_per_mw_per_year = Param( + m.PH_GENS, + within=Reals, + initialize=lambda m, p: m.ph_capital_cost_per_mw[p] + * (crf(m.interest_rate, m.ph_project_life[p]) + m.ph_fixed_om_percent[p]), ) # round-trip efficiency of the pumped hydro facility - m.ph_efficiency = Param(m.PH_GENS) + m.ph_efficiency = Param(m.PH_GENS, within=PercentFraction) # average energy available from water inflow each day # (system must balance energy net of this each day) - m.ph_inflow_mw = Param(m.PH_GENS) + m.ph_inflow_mw = Param(m.PH_GENS, within=Reals) # maximum size of pumped hydro project - m.ph_max_capacity_mw = Param(m.PH_GENS) + m.ph_max_capacity_mw = Param(m.PH_GENS, within=NonNegativeReals) # How much pumped hydro to build m.BuildPumpedHydroMW = Var(m.PH_GENS, m.PERIODS, within=NonNegativeReals) - m.Pumped_Hydro_Proj_Capacity_MW = Expression(m.PH_GENS, m.PERIODS, rule=lambda m, g, pe: - sum(m.BuildPumpedHydroMW[g, pp] for pp in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[pe]) + m.Pumped_Hydro_Proj_Capacity_MW = Expression( + m.PH_GENS, + m.PERIODS, + rule=lambda m, g, pe: sum( + m.BuildPumpedHydroMW[g, pp] + for pp in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[pe] + ), ) # flag indicating whether any capacity is added to each project each year @@ -53,92 +70,144 @@ def define_components(m): # constraints on construction of pumped hydro # don't build more than the max allowed capacity - m.Pumped_Hydro_Max_Build = Constraint(m.PH_GENS, m.PERIODS, rule=lambda m, g, pe: - m.Pumped_Hydro_Proj_Capacity_MW[g, pe] <= m.ph_max_capacity_mw[g] + m.Pumped_Hydro_Max_Build = Constraint( + m.PH_GENS, + m.PERIODS, + rule=lambda m, g, pe: m.Pumped_Hydro_Proj_Capacity_MW[g, pe] + <= m.ph_max_capacity_mw[g], ) # force the build flag on for the year(s) when pumped hydro is built - m.Pumped_Hydro_Set_Build_Flag = Constraint(m.PH_GENS, m.PERIODS, rule=lambda m, g, pe: - m.BuildPumpedHydroMW[g, pe] <= m.BuildAnyPumpedHydro[g, pe] * m.ph_max_capacity_mw[g] + m.Pumped_Hydro_Set_Build_Flag = Constraint( + m.PH_GENS, + m.PERIODS, + rule=lambda m, g, pe: m.BuildPumpedHydroMW[g, pe] + <= m.BuildAnyPumpedHydro[g, pe] * m.ph_max_capacity_mw[g], ) # only build in one year (can be deactivated to allow incremental construction) - m.Pumped_Hydro_Build_Once = Constraint(m.PH_GENS, rule=lambda m, g: - sum(m.BuildAnyPumpedHydro[g, pe] for pe in m.PERIODS) <= 1) + m.Pumped_Hydro_Build_Once = Constraint( + m.PH_GENS, + rule=lambda m, g: sum(m.BuildAnyPumpedHydro[g, pe] for pe in m.PERIODS) <= 1, + ) # only build full project size (deactivated by default, to allow smaller projects) - m.Pumped_Hydro_Build_All_Or_None = Constraint(m.PH_GENS, m.PERIODS, 
rule=lambda m, g, pe: - m.BuildPumpedHydroMW[g, pe] == m.BuildAnyPumpedHydro[g, pe] * m.ph_max_capacity_mw[g] + m.Pumped_Hydro_Build_All_Or_None = Constraint( + m.PH_GENS, + m.PERIODS, + rule=lambda m, g, pe: m.BuildPumpedHydroMW[g, pe] + == m.BuildAnyPumpedHydro[g, pe] * m.ph_max_capacity_mw[g], ) # m.Deactivate_Pumped_Hydro_Build_All_Or_None = BuildAction(rule=lambda m: # m.Pumped_Hydro_Build_All_Or_None.deactivate() # ) # limits on pumping and generation - m.Pumped_Hydro_Max_Generate_Rate = Constraint(m.PH_GENS, m.TIMEPOINTS, rule=lambda m, g, t: - m.PumpedHydroProjGenerateMW[g, t] - <= - m.Pumped_Hydro_Proj_Capacity_MW[g, m.tp_period[t]] + m.Pumped_Hydro_Max_Generate_Rate = Constraint( + m.PH_GENS, + m.TIMEPOINTS, + rule=lambda m, g, t: m.PumpedHydroProjGenerateMW[g, t] + <= m.Pumped_Hydro_Proj_Capacity_MW[g, m.tp_period[t]], ) - m.Pumped_Hydro_Max_Store_Rate = Constraint(m.PH_GENS, m.TIMEPOINTS, rule=lambda m, g, t: - m.PumpedHydroProjStoreMW[g, t] - <= - m.Pumped_Hydro_Proj_Capacity_MW[g, m.tp_period[t]] + m.Pumped_Hydro_Max_Store_Rate = Constraint( + m.PH_GENS, + m.TIMEPOINTS, + rule=lambda m, g, t: m.PumpedHydroProjStoreMW[g, t] + <= m.Pumped_Hydro_Proj_Capacity_MW[g, m.tp_period[t]], ) # return reservoir to at least the starting level every day, net of any inflow # it can also go higher than starting level, which indicates spilling surplus water - m.Pumped_Hydro_Daily_Balance = Constraint(m.PH_GENS, m.TIMESERIES, rule=lambda m, g, ts: - sum( + m.Pumped_Hydro_Daily_Balance = Constraint( + m.PH_GENS, + m.TIMESERIES, + rule=lambda m, g, ts: sum( m.PumpedHydroProjStoreMW[g, tp] * m.ph_efficiency[g] + m.ph_inflow_mw[g] - m.PumpedHydroProjGenerateMW[g, tp] for tp in m.TPS_IN_TS[ts] - ) >= 0 + ) + >= 0, ) - m.GeneratePumpedHydro = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - sum(m.PumpedHydroProjGenerateMW[g, t] for g in m.PH_GENS if m.ph_load_zone[g]==z) + m.GeneratePumpedHydro = Expression( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: sum( + m.PumpedHydroProjGenerateMW[g, t] + for g in m.PH_GENS + if m.ph_load_zone[g] == z + ), ) - m.StorePumpedHydro = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - sum(m.PumpedHydroProjStoreMW[g, t] for g in m.PH_GENS if m.ph_load_zone[g]==z) + m.StorePumpedHydro = Expression( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: sum( + m.PumpedHydroProjStoreMW[g, t] for g in m.PH_GENS if m.ph_load_zone[g] == z + ), ) # calculate costs - m.Pumped_Hydro_Fixed_Cost_Annual = Expression(m.PERIODS, rule=lambda m, pe: - sum(m.ph_fixed_cost_per_mw_per_year[g] * m.Pumped_Hydro_Proj_Capacity_MW[g, pe] for g in m.PH_GENS) + m.Pumped_Hydro_Fixed_Cost_Annual = Expression( + m.PERIODS, + rule=lambda m, pe: sum( + m.ph_fixed_cost_per_mw_per_year[g] * m.Pumped_Hydro_Proj_Capacity_MW[g, pe] + for g in m.PH_GENS + ), ) - m.Cost_Components_Per_Period.append('Pumped_Hydro_Fixed_Cost_Annual') + m.Cost_Components_Per_Period.append("Pumped_Hydro_Fixed_Cost_Annual") # add pumped hydro to zonal energy balance - m.Zone_Power_Injections.append('GeneratePumpedHydro') - m.Zone_Power_Withdrawals.append('StorePumpedHydro') + m.Zone_Power_Injections.append("GeneratePumpedHydro") + m.Zone_Power_Withdrawals.append("StorePumpedHydro") # total pumped hydro capacity in each zone each period (for reporting) - m.Pumped_Hydro_Capacity_MW = Expression(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, pe: - sum(m.Pumped_Hydro_Proj_Capacity_MW[g, pe] for g in m.PH_GENS if m.ph_load_zone[g]==z) + m.Pumped_Hydro_Capacity_MW = Expression( + m.LOAD_ZONES, + 
m.PERIODS, + rule=lambda m, z, pe: sum( + m.Pumped_Hydro_Proj_Capacity_MW[g, pe] + for g in m.PH_GENS + if m.ph_load_zone[g] == z + ), ) # force construction of a fixed amount of pumped hydro if m.options.ph_mw is not None: - print("Forcing construction of {m} MW of pumped hydro.".format(m=m.options.ph_mw)) - m.Build_Pumped_Hydro_MW = Constraint(m.LOAD_ZONES, rule=lambda m, z: - m.Pumped_Hydro_Capacity_MW[z, m.PERIODS.last()] == m.options.ph_mw + print( + "Forcing construction of {m} MW of pumped hydro.".format(m=m.options.ph_mw) + ) + m.Build_Pumped_Hydro_MW = Constraint( + m.LOAD_ZONES, + rule=lambda m, z: m.Pumped_Hydro_Capacity_MW[z, m.PERIODS.last()] + == m.options.ph_mw, ) # force construction of pumped hydro only in a certain period if m.options.ph_year is not None: - print("Allowing construction of pumped hydro only in {p}.".format(p=m.options.ph_year)) + print( + "Allowing construction of pumped hydro only in {p}.".format( + p=m.options.ph_year + ) + ) m.Build_Pumped_Hydro_Year = Constraint( - m.PH_GENS, m.PERIODS, - rule=lambda m, g, pe: - m.BuildPumpedHydroMW[g, pe] == 0.0 if pe != m.options.ph_year else Constraint.Skip + m.PH_GENS, + m.PERIODS, + rule=lambda m, g, pe: m.BuildPumpedHydroMW[g, pe] == 0.0 + if pe != m.options.ph_year + else Constraint.Skip, ) def load_inputs(m, switch_data, inputs_dir): switch_data.load_aug( optional=True, - filename=os.path.join(inputs_dir, 'pumped_hydro.csv'), - autoselect=True, + filename=os.path.join(inputs_dir, "pumped_hydro.csv"), index=m.PH_GENS, param=( - m.ph_load_zone, m.ph_capital_cost_per_mw, m.ph_project_life, m.ph_fixed_om_percent, - m.ph_efficiency, m.ph_inflow_mw, m.ph_max_capacity_mw)) + m.ph_load_zone, + m.ph_capital_cost_per_mw, + m.ph_project_life, + m.ph_fixed_om_percent, + m.ph_efficiency, + m.ph_inflow_mw, + m.ph_max_capacity_mw, + ), + ) diff --git a/switch_model/hawaii/register_hi_storage_reserves.py b/switch_model/hawaii/register_hi_storage_reserves.py index 508ff28b2..5513d6bb2 100644 --- a/switch_model/hawaii/register_hi_storage_reserves.py +++ b/switch_model/hawaii/register_hi_storage_reserves.py @@ -11,129 +11,155 @@ # But eventually those modules should use the standard storage module and # extend that as needed. + def define_arguments(argparser): - argparser.add_argument('--hawaii-storage-reserve-types', nargs='+', default=['spinning'], - help= - "Type(s) of reserves to provide from " # hydrogen and/or - "pumped-hydro storage " - "(e.g., 'contingency regulation'). " - "Default is generic 'spinning'. Specify 'none' to disable." + argparser.add_argument( + "--hawaii-storage-reserve-types", + nargs="+", + default=["spinning"], + help="Type(s) of reserves to provide from " # hydrogen and/or + "pumped-hydro storage " + "(e.g., 'contingency regulation'). " + "Default is generic 'spinning'. 
Specify 'none' to disable.", ) + def define_components(m): - if [rt.lower() for rt in m.options.hawaii_storage_reserve_types] != ['none']: - if hasattr(m, 'PumpedHydroProjGenerateMW'): + if [rt.lower() for rt in m.options.hawaii_storage_reserve_types] != ["none"]: + if hasattr(m, "PumpedHydroProjGenerateMW"): m.PumpedStorageCharging = Var(m.PH_GENS, m.TIMEPOINTS, within=Binary) - m.Set_PumpedStorageCharging_Flag = Constraint(m.PH_GENS, m.TIMEPOINTS, rule=lambda m, phg, tp: - m.PumpedHydroProjGenerateMW[phg, tp] - <= - m.ph_max_capacity_mw[phg] * (1 - m.PumpedStorageCharging[phg, tp]) + m.Set_PumpedStorageCharging_Flag = Constraint( + m.PH_GENS, + m.TIMEPOINTS, + rule=lambda m, phg, tp: m.PumpedHydroProjGenerateMW[phg, tp] + <= m.ph_max_capacity_mw[phg] * (1 - m.PumpedStorageCharging[phg, tp]), ) # choose how much pumped storage reserves to provide each hour, without reversing direction - m.PumpedStorageSpinningUpReserves = Var(m.PH_GENS, m.TIMEPOINTS, within=NonNegativeReals) + m.PumpedStorageSpinningUpReserves = Var( + m.PH_GENS, m.TIMEPOINTS, within=NonNegativeReals + ) m.Limit_PumpedStorageSpinningUpReserves_When_Charging = Constraint( - m.PH_GENS, m.TIMEPOINTS, - rule=lambda m, phg, tp: - m.PumpedStorageSpinningUpReserves[phg, tp] - <= - m.PumpedHydroProjStoreMW[phg, tp] - + m.ph_max_capacity_mw[phg] * (1 - m.PumpedStorageCharging[phg, tp]) # relax when discharging + m.PH_GENS, + m.TIMEPOINTS, + rule=lambda m, phg, tp: m.PumpedStorageSpinningUpReserves[phg, tp] + <= m.PumpedHydroProjStoreMW[phg, tp] + + m.ph_max_capacity_mw[phg] + * (1 - m.PumpedStorageCharging[phg, tp]), # relax when discharging ) m.Limit_PumpedStorageSpinningUpReserves_When_Discharging = Constraint( - m.PH_GENS, m.TIMEPOINTS, - rule=lambda m, phg, tp: - m.PumpedStorageSpinningUpReserves[phg, tp] - <= - m.Pumped_Hydro_Proj_Capacity_MW[phg, m.tp_period[tp]] - m.PumpedHydroProjGenerateMW[phg, tp] - + m.ph_max_capacity_mw[phg] * m.PumpedStorageCharging[phg, tp] # relax when charging + m.PH_GENS, + m.TIMEPOINTS, + rule=lambda m, phg, tp: m.PumpedStorageSpinningUpReserves[phg, tp] + <= m.Pumped_Hydro_Proj_Capacity_MW[phg, m.tp_period[tp]] + - m.PumpedHydroProjGenerateMW[phg, tp] + + m.ph_max_capacity_mw[phg] + * m.PumpedStorageCharging[phg, tp], # relax when charging + ) + m.PumpedStorageSpinningDownReserves = Var( + m.PH_GENS, m.TIMEPOINTS, within=NonNegativeReals, bounds=(0, 0) ) - m.PumpedStorageSpinningDownReserves = Var(m.PH_GENS, m.TIMEPOINTS, within=NonNegativeReals, bounds=(0,0)) m.Limit_PumpedStorageSpinningDownReserves_When_Charging = Constraint( - m.PH_GENS, m.TIMEPOINTS, - rule=lambda m, phg, tp: - m.PumpedStorageSpinningDownReserves[phg, tp] - <= - m.Pumped_Hydro_Proj_Capacity_MW[phg, m.tp_period[tp]] - m.PumpedHydroProjStoreMW[phg, tp] - + m.ph_max_capacity_mw[phg] * (1 - m.PumpedStorageCharging[phg, tp]) # relax when discharging + m.PH_GENS, + m.TIMEPOINTS, + rule=lambda m, phg, tp: m.PumpedStorageSpinningDownReserves[phg, tp] + <= m.Pumped_Hydro_Proj_Capacity_MW[phg, m.tp_period[tp]] + - m.PumpedHydroProjStoreMW[phg, tp] + + m.ph_max_capacity_mw[phg] + * (1 - m.PumpedStorageCharging[phg, tp]), # relax when discharging ) m.Limit_PumpedStorageSpinningDownReserves_When_Discharging = Constraint( - m.PH_GENS, m.TIMEPOINTS, - rule=lambda m, phg, tp: - m.PumpedStorageSpinningDownReserves[phg, tp] - <= - m.PumpedHydroProjGenerateMW[phg, tp] - + m.ph_max_capacity_mw[phg] * m.PumpedStorageCharging[phg, tp] # relax when charging + m.PH_GENS, + m.TIMEPOINTS, + rule=lambda m, phg, tp: 
m.PumpedStorageSpinningDownReserves[phg, tp] + <= m.PumpedHydroProjGenerateMW[phg, tp] + + m.ph_max_capacity_mw[phg] + * m.PumpedStorageCharging[phg, tp], # relax when charging ) # Register with spinning reserves - if hasattr(m, 'Spinning_Reserve_Up_Provisions'): # using spinning_reserves_advanced + if hasattr( + m, "Spinning_Reserve_Up_Provisions" + ): # using spinning_reserves_advanced # calculate available slack from hawaii storage def up_expr(m, a, tp): avail = 0.0 # now handled in hydrogen module: # if hasattr(m, 'HydrogenSlackUp'): # avail += sum(m.HydrogenSlackUp[z, tp] for z in m.ZONES_IN_BALANCING_AREA[a]) - if hasattr(m, 'PumpedStorageSpinningUpReserves'): + if hasattr(m, "PumpedStorageSpinningUpReserves"): avail += sum( m.PumpedStorageSpinningUpReserves[phg, tp] for phg in m.PH_GENS if m.ph_load_zone[phg] in m.ZONES_IN_BALANCING_AREA[a] ) return avail - m.HawaiiStorageSlackUp = Expression(m.BALANCING_AREA_TIMEPOINTS, rule=up_expr) + + m.HawaiiStorageSlackUp = Expression( + m.BALANCING_AREA_TIMEPOINTS, rule=up_expr + ) + def down_expr(m, a, tp): avail = 0.0 # if hasattr(m, 'HydrogenSlackDown'): # avail += sum(m.HydrogenSlackDown[z, tp] for z in m.ZONES_IN_BALANCING_AREA[a]) - if hasattr(m, 'PumpedStorageSpinningDownReserves'): + if hasattr(m, "PumpedStorageSpinningDownReserves"): avail += sum( m.PumpedStorageSpinningDownReserves[phg, tp] for phg in m.PH_GENS if m.ph_load_zone[phg] in m.ZONES_IN_BALANCING_AREA[a] ) return avail - m.HawaiiStorageSlackDown = Expression(m.BALANCING_AREA_TIMEPOINTS, rule=down_expr) - if hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'): + m.HawaiiStorageSlackDown = Expression( + m.BALANCING_AREA_TIMEPOINTS, rule=down_expr + ) + + if hasattr(m, "GEN_SPINNING_RESERVE_TYPES"): # using advanced formulation, index by reserve type, balancing area, timepoint # define variables for each type of reserves to be provided # choose how to allocate the slack between the different reserve products m.HI_STORAGE_SPINNING_RESERVE_TYPES = Set( - initialize=m.options.hawaii_storage_reserve_types + dimen=1, initialize=m.options.hawaii_storage_reserve_types ) m.HawaiiStorageSpinningReserveUp = Var( - m.HI_STORAGE_SPINNING_RESERVE_TYPES, m.BALANCING_AREA_TIMEPOINTS, - within=NonNegativeReals + m.HI_STORAGE_SPINNING_RESERVE_TYPES, + m.BALANCING_AREA_TIMEPOINTS, + within=NonNegativeReals, ) m.HawaiiStorageSpinningReserveDown = Var( - m.HI_STORAGE_SPINNING_RESERVE_TYPES, m.BALANCING_AREA_TIMEPOINTS, - within=NonNegativeReals + m.HI_STORAGE_SPINNING_RESERVE_TYPES, + m.BALANCING_AREA_TIMEPOINTS, + within=NonNegativeReals, ) # constrain reserve provision within available slack m.Limit_HawaiiStorageSpinningReserveUp = Constraint( m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, ba, tp: - sum( - m.HawaiiStorageSpinningReserveUp[rt, ba, tp] - for rt in m.HI_STORAGE_SPINNING_RESERVE_TYPES - ) <= m.HawaiiStorageSlackUp[ba, tp] + rule=lambda m, ba, tp: sum( + m.HawaiiStorageSpinningReserveUp[rt, ba, tp] + for rt in m.HI_STORAGE_SPINNING_RESERVE_TYPES + ) + <= m.HawaiiStorageSlackUp[ba, tp], ) m.Limit_HawaiiStorageSpinningReserveDown = Constraint( m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, ba, tp: - sum( - m.HawaiiStorageSpinningReserveDown[rt, ba, tp] - for rt in m.HI_STORAGE_SPINNING_RESERVE_TYPES - ) <= m.HawaiiStorageSlackDown[ba, tp] + rule=lambda m, ba, tp: sum( + m.HawaiiStorageSpinningReserveDown[rt, ba, tp] + for rt in m.HI_STORAGE_SPINNING_RESERVE_TYPES + ) + <= m.HawaiiStorageSlackDown[ba, tp], + ) + m.Spinning_Reserve_Up_Provisions.append( + "HawaiiStorageSpinningReserveUp" + ) + 
m.Spinning_Reserve_Down_Provisions.append( + "HawaiiStorageSpinningReserveDown" ) - m.Spinning_Reserve_Up_Provisions.append('HawaiiStorageSpinningReserveUp') - m.Spinning_Reserve_Down_Provisions.append('HawaiiStorageSpinningReserveDown') else: # using older formulation, only one type of spinning reserves, indexed by balancing area, timepoint - if m.options.hawaii_storage_reserve_types != ['spinning']: + if m.options.hawaii_storage_reserve_types != ["spinning"]: raise ValueError( 'Unable to use reserve types other than "spinning" with simple spinning reserves module.' ) - m.Spinning_Reserve_Up_Provisions.append('HawaiiStorageSlackUp') - m.Spinning_Reserve_Down_Provisions.append('HawaiiStorageSlackDown') + m.Spinning_Reserve_Up_Provisions.append("HawaiiStorageSlackUp") + m.Spinning_Reserve_Down_Provisions.append("HawaiiStorageSlackDown") diff --git a/switch_model/hawaii/reserves.py b/switch_model/hawaii/reserves.py index a089561d1..284a5bfcb 100644 --- a/switch_model/hawaii/reserves.py +++ b/switch_model/hawaii/reserves.py @@ -8,17 +8,33 @@ # TODO: use standard reserves module for this + def define_arguments(argparser): - argparser.add_argument('--reserves-from-storage', action='store_true', default=True, - help="Allow storage (batteries and hydrogen) to provide up- and down-reserves.") - argparser.add_argument('--no-reserves-from-storage', dest='reserves_from_storage', - action='store_false', - help="Don't allow storage (batteries and hydrogen) to provide up- and down-reserves.") - argparser.add_argument('--reserves-from-demand-response', action='store_true', default=True, - help="Allow demand response to provide up- and down-reserves.") - argparser.add_argument('--no-reserves-from-demand-response', dest='reserves_from_demand_response', - action='store_false', - help="Don't allow demand response to provide up- and down-reserves.") + argparser.add_argument( + "--reserves-from-storage", + action="store_true", + default=True, + help="Allow storage (batteries and hydrogen) to provide up- and down-reserves.", + ) + argparser.add_argument( + "--no-reserves-from-storage", + dest="reserves_from_storage", + action="store_false", + help="Don't allow storage (batteries and hydrogen) to provide up- and down-reserves.", + ) + argparser.add_argument( + "--reserves-from-demand-response", + action="store_true", + default=True, + help="Allow demand response to provide up- and down-reserves.", + ) + argparser.add_argument( + "--no-reserves-from-demand-response", + dest="reserves_from_demand_response", + action="store_false", + help="Don't allow demand response to provide up- and down-reserves.", + ) + def define_components(m): """ @@ -32,20 +48,20 @@ def define_components(m): # projects that can provide reserves # TODO: add batteries, hydrogen and pumped storage to this m.FIRM_GENS = Set( + dimen=1, initialize=m.GENERATION_PROJECTS, - #filter=lambda m, p: m.gen_energy_source[p] not in ['Wind', 'Solar'] + # filter=lambda m, p: m.gen_energy_source[p] not in ['Wind', 'Solar'] ) m.FIRM_GEN_TPS = Set( - initialize=m.GEN_TPS, - filter=lambda m, p, tp: p in m.FIRM_GENS + dimen=2, initialize=m.GEN_TPS, filter=lambda m, p, tp: p in m.FIRM_GENS ) m.CONTINGENCY_GENS = Set( + dimen=1, initialize=m.GENERATION_PROJECTS, - filter=lambda m, p: p in m.DISCRETELY_SIZED_GENS + filter=lambda m, p: p in m.DISCRETELY_SIZED_GENS, ) m.CONTINGENCY_GEN_TPS = Set( - initialize=m.GEN_TPS, - filter=lambda m, p, tp: p in m.CONTINGENCY_GENS + initialize=m.GEN_TPS, dimen=2, filter=lambda m, p, tp: p in m.CONTINGENCY_GENS ) # Calculate 
spinning reserve requirements. @@ -57,31 +73,44 @@ def define_components(m): # TODO: supply these parameters in input files # regulating reserves required, as fraction of potential output (up to limit) - m.regulating_reserve_fraction = Param(['CentralTrackingPV', 'DistPV', 'OnshoreWind', 'OffshoreWind'], initialize={ - 'CentralTrackingPV': 1.0, - 'DistPV': 1.0, # 0.81270193, - 'OnshoreWind': 1.0, - 'OffshoreWind': 1.0, # assumed equal to OnshoreWind - }) + m.regulating_reserve_fraction = Param( + ["CentralTrackingPV", "DistPV", "OnshoreWind", "OffshoreWind"], + within=NonNegativeReals, + initialize={ + "CentralTrackingPV": 1.0, + "DistPV": 1.0, # 0.81270193, + "OnshoreWind": 1.0, + "OffshoreWind": 1.0, # assumed equal to OnshoreWind + }, + ) # maximum regulating reserves required, as fraction of installed capacity - m.regulating_reserve_limit = Param(['CentralTrackingPV', 'DistPV', 'OnshoreWind', 'OffshoreWind'], initialize={ - 'CentralTrackingPV': 0.21288916, - 'DistPV': 0.21288916, # 0.14153171, - 'OnshoreWind': 0.21624407, - 'OffshoreWind': 0.21624407, # assumed equal to OnshoreWind - }) + m.regulating_reserve_limit = Param( + ["CentralTrackingPV", "DistPV", "OnshoreWind", "OffshoreWind"], + within=NonNegativeReals, + initialize={ + "CentralTrackingPV": 0.21288916, + "DistPV": 0.21288916, # 0.14153171, + "OnshoreWind": 0.21624407, + "OffshoreWind": 0.21624407, # assumed equal to OnshoreWind + }, + ) # more conservative values (found by giving 10x weight to times when we provide less reserves than GE): # [1., 1., 1., 0.25760558, 0.18027923, 0.49123101] - m.RegulatingReserveRequirementMW = Expression(m.TIMEPOINTS, rule=lambda m, tp: sum( - m.GenCapacity[g, m.tp_period[tp]] - * min( - m.regulating_reserve_fraction[m.gen_tech[g]] * m.gen_max_capacity_factor[g, tp], - m.regulating_reserve_limit[m.gen_tech[g]] - ) + m.RegulatingReserveRequirementMW = Expression( + m.TIMEPOINTS, + rule=lambda m, tp: sum( + m.GenCapacity[g, m.tp_period[tp]] + * min( + m.regulating_reserve_fraction[m.gen_tech[g]] + * m.gen_max_capacity_factor[g, tp], + m.regulating_reserve_limit[m.gen_tech[g]], + ) for g in m.GENERATION_PROJECTS - if m.gen_tech[g] in m.regulating_reserve_fraction and (g, tp) in m.GEN_TPS - )) + if m.gen_tech[g] in m.regulating_reserve_fraction and (g, tp) in m.GEN_TPS + ), + ) + def define_dynamic_components(m): # these are defined late, so they can check whether various components have been defined by other modules @@ -98,14 +127,15 @@ def define_dynamic_components(m): m.CommitGenFlag = Var(m.CONTINGENCY_GEN_TPS, within=Binary) m.Set_CommitGenFlag = Constraint( m.CONTINGENCY_GEN_TPS, - rule = lambda m, g, tp: - m.CommitGen[g, tp] <= m.CommitGenFlag[g, tp] * m.gen_capacity_limit_mw[g] + rule=lambda m, g, tp: m.CommitGen[g, tp] + <= m.CommitGenFlag[g, tp] * m.gen_capacity_limit_mw[g], ) m.ContingencyReserveUpRequirement_Calculate = Constraint( m.CONTINGENCY_GEN_TPS, rule=lambda m, g, tp: - # m.ContingencyReserveUpRequirement[tp] >= m.CommitGen[g, tp] - m.ContingencyReserveUpRequirement[tp] >= m.CommitGenFlag[g, tp] * m.gen_unit_size[g] + # m.ContingencyReserveUpRequirement[tp] >= m.CommitGen[g, tp] + m.ContingencyReserveUpRequirement[tp] + >= m.CommitGenFlag[g, tp] * m.gen_unit_size[g], ) m.ContingencyReserveDownRequirement = Var(m.TIMEPOINTS, within=NonNegativeReals) @@ -119,23 +149,26 @@ def define_dynamic_components(m): # So we just assume we could lose 10% of all loads of any type, at any time.) 
m.ContingencyReserveDownRequirement_Calculate = Constraint( m.TIMEPOINTS, - rule=lambda m, tp: - m.ContingencyReserveDownRequirement[tp] >= - 0.1 * sum(getattr(m, x)[z, tp] for x in m.Zone_Power_Withdrawals for z in m.LOAD_ZONES) + rule=lambda m, tp: m.ContingencyReserveDownRequirement[tp] + >= 0.1 + * sum( + getattr(m, x)[z, tp] for x in m.Zone_Power_Withdrawals for z in m.LOAD_ZONES + ), ) # Calculate total spinning reserve requirements - m.SpinningReserveUpRequirement = Expression(m.TIMEPOINTS, rule=lambda m, tp: - m.RegulatingReserveRequirementMW[tp] + m.ContingencyReserveUpRequirement[tp] + m.SpinningReserveUpRequirement = Expression( + m.TIMEPOINTS, + rule=lambda m, tp: m.RegulatingReserveRequirementMW[tp] + + m.ContingencyReserveUpRequirement[tp], ) - m.SpinningReserveDownRequirement = Expression(m.TIMEPOINTS, rule=lambda m, tp: - m.ContingencyReserveDownRequirement[tp] + m.SpinningReserveDownRequirement = Expression( + m.TIMEPOINTS, rule=lambda m, tp: m.ContingencyReserveDownRequirement[tp] ) - # Available reserves def expr(m, tp): - STORAGE_GENS = getattr(m, 'STORAGE_GENS', []) + STORAGE_GENS = getattr(m, "STORAGE_GENS", []) # all regular generators; omit storage because they'll be added separately if needed avail = sum( m.DispatchSlackUp[g, tp] @@ -144,9 +177,9 @@ def expr(m, tp): ) if m.options.reserves_from_storage: # hawaii battery and hydrogen modules - if hasattr(m, 'BatterySlackUp'): + if hasattr(m, "BatterySlackUp"): avail += sum(m.BatterySlackUp[z, tp] for z in m.LOAD_ZONES) - if hasattr(m, 'HydrogenSlackUp'): + if hasattr(m, "HydrogenSlackUp"): avail += sum(m.HydrogenSlackUp[z, tp] for z in m.LOAD_ZONES) # standard storage module (can stop charging and raise output to max) avail += sum( @@ -155,21 +188,29 @@ def expr(m, tp): if (g, tp) in m.GEN_TPS ) if m.options.reserves_from_demand_response: - if hasattr(m, 'DemandUpReserves'): + if hasattr(m, "DemandUpReserves"): avail += sum(m.DemandUpReserves[z, tp] for z in m.LOAD_ZONES) - if hasattr(m, 'ShiftDemand'): - avail += sum(m.ShiftDemand[z, tp] - m.ShiftDemand[z, tp].lb for z in m.LOAD_ZONES) - if hasattr(m, 'ChargeEVs') and hasattr(m.options, 'ev_timing') and m.options.ev_timing=='optimal': + if hasattr(m, "ShiftDemand"): + avail += sum( + m.ShiftDemand[z, tp] - m.ShiftDemand[z, tp].lb for z in m.LOAD_ZONES + ) + if ( + hasattr(m, "ChargeEVs") + and hasattr(m.options, "ev_timing") + and m.options.ev_timing == "optimal" + ): avail += sum(m.ChargeEVs[z, tp] for z in m.LOAD_ZONES) - if hasattr(m, 'UnservedUpReserves'): + if hasattr(m, "UnservedUpReserves"): avail += m.UnservedUpReserves[tp] # if tp == 2045012604: # print "inspect avail to see up reserve calculation" # import pdb; pdb.set_trace() return avail + m.SpinningReservesUpAvailable = Expression(m.TIMEPOINTS, rule=expr) + def expr(m, tp): - STORAGE_GENS = getattr(m, 'STORAGE_GENS', []) + STORAGE_GENS = getattr(m, "STORAGE_GENS", []) # all regular generators; omit storage because they'll be added separately if needed avail = sum( m.DispatchSlackDown[g, tp] @@ -177,9 +218,9 @@ def expr(m, tp): if (g, tp) in m.GEN_TPS and g not in STORAGE_GENS ) if m.options.reserves_from_storage: - if hasattr(m, 'BatterySlackDown'): + if hasattr(m, "BatterySlackDown"): avail += sum(m.BatterySlackDown[z, tp] for z in m.LOAD_ZONES) - if hasattr(m, 'HydrogenSlackDown'): + if hasattr(m, "HydrogenSlackDown"): avail += sum(m.HydrogenSlackDown[z, tp] for z in m.LOAD_ZONES) # standard storage module (can stop producing power and raise charging to max) avail += sum( @@ -191,32 +232,38 @@ def 
expr(m, tp): ) if m.options.reserves_from_demand_response: - if hasattr(m, 'DemandDownReserves'): + if hasattr(m, "DemandDownReserves"): avail += sum(m.DemandDownReserves[z, tp] for z in m.LOAD_ZONES) - if hasattr(m, 'ShiftDemand'): + if hasattr(m, "ShiftDemand"): # avail += sum(m.ShiftDemand[z, tp].ub - m.ShiftDemand[z, tp] for z in m.LOAD_ZONES) avail += sum( - 24/3 * m.demand_response_max_share * m.zone_demand_mw[z, tp] + 24 / 3 * m.demand_response_max_share * m.zone_demand_mw[z, tp] - m.ShiftDemand[z, tp] for z in m.LOAD_ZONES ) # note: we currently ignore down-reserves (option of increasing consumption) # from EVs since it's not clear how high they could go; we could revisit this if # down-reserves have a positive price at equilibrium (probably won't) - if hasattr(m, 'UnservedDownReserves'): + if hasattr(m, "UnservedDownReserves"): avail += m.UnservedDownReserves[tp] return avail + m.SpinningReservesDownAvailable = Expression(m.TIMEPOINTS, rule=expr) # Meet the reserve requirements (we use zero on RHS to enforce the right sign for the duals) - m.Satisfy_Spinning_Reserve_Up_Requirement = Constraint(m.TIMEPOINTS, rule=lambda m, tp: - m.SpinningReservesUpAvailable[tp] - m.SpinningReserveUpRequirement[tp] >= 0 + m.Satisfy_Spinning_Reserve_Up_Requirement = Constraint( + m.TIMEPOINTS, + rule=lambda m, tp: m.SpinningReservesUpAvailable[tp] + - m.SpinningReserveUpRequirement[tp] + >= 0, ) - m.Satisfy_Spinning_Reserve_Down_Requirement = Constraint(m.TIMEPOINTS, rule=lambda m, tp: - m.SpinningReservesDownAvailable[tp] - m.SpinningReserveDownRequirement[tp] >= 0 + m.Satisfy_Spinning_Reserve_Down_Requirement = Constraint( + m.TIMEPOINTS, + rule=lambda m, tp: m.SpinningReservesDownAvailable[tp] + - m.SpinningReserveDownRequirement[tp] + >= 0, ) - # NOTE: the shutdown constraints below are not used, because they conflict with # the baseload status set in build_scenario_data.py. You should set the plant type # to "Off" in "source_data/Hawaii RPS Study Generator Table OCR.xlsx" instead.
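The two Satisfy_Spinning_Reserve_*_Requirement constraints above keep every model term on the left-hand side and compare against zero, so the duals imported through the m.dual suffix come back with a consistent sign. A minimal standalone sketch of the same convention (the component names and numbers below are hypothetical, not part of this patch):

from pyomo.environ import (
    ConcreteModel, Constraint, NonNegativeReals, Objective, Param, Suffix, Var, minimize,
)

m = ConcreteModel()
m.dual = Suffix(direction=Suffix.IMPORT)        # same suffix save_results.py sets up
m.reserve_requirement = Param(initialize=10.0)  # MW of reserves needed
m.ReserveMW = Var(within=NonNegativeReals)      # MW of reserves procured
# zero on the RHS, as in Satisfy_Spinning_Reserve_Up_Requirement above
m.Meet_Requirement = Constraint(expr=m.ReserveMW - m.reserve_requirement >= 0)
m.TotalCost = Objective(expr=5.0 * m.ReserveMW, sense=minimize)
# after solving with an LP solver, m.dual[m.Meet_Requirement] reports the
# shadow price of the requirement (5.0 $/MW here, up to the solver's sign
# convention)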
@@ -253,10 +300,8 @@ def expr(m, tp): # print list(m.CYCLING_PLANTS_TIMEPOINTS) # m.ShowCyclingPlants = BuildAction(rule=show_it) + # def load_inputs(m, switch_data, inputs_dir): # switch_data.load_aug( # filename=os.path.join(inputs_dir, 'reserve_requirements.csv'), -# auto_select=True, # param=(m.RegulatingReserveRequirementMW)) - - diff --git a/switch_model/hawaii/rps.py b/switch_model/hawaii/rps.py index 014e02511..ba3505ad9 100644 --- a/switch_model/hawaii/rps.py +++ b/switch_model/hawaii/rps.py @@ -7,48 +7,92 @@ import switch_model.utilities as utilities from .util import get + def define_arguments(argparser): - argparser.add_argument('--biofuel-limit', type=float, default=1.0, - help="Maximum fraction of power that can be obtained from biofuel in any period (default=1.0)") - argparser.add_argument('--biofuel-switch-threshold', type=float, default=1.0, - help="RPS level at which all thermal plants switch to biofuels (0.0-1.0, default=1.0); use with --rps-allocation fuel_switch_at_high_rps") - argparser.add_argument('--rps-activate', default='activate', - dest='rps_level', action='store_const', const='activate', - help="Activate RPS (on by default).") - argparser.add_argument('--rps-deactivate', - dest='rps_level', action='store_const', const='deactivate', - help="Deactivate RPS.") - argparser.add_argument('--rps-exact', - dest='rps_level', action='store_const', const='exact', - help="Require exact satisfaction of RPS target (no excess or shortfall).") - argparser.add_argument('--rps-no-new-renewables', - dest='rps_level', action='store_const', const='no_new_renewables', - help="Deactivate RPS and don't allow any new renewables except to replace existing capacity.") - argparser.add_argument('--rps-no-new-wind', action='store_true', default=False, - help="Don't allow any new wind capacity except to replace existing capacity.") - argparser.add_argument('--rps-no-wind', action='store_true', default=False, - help="Don't allow any new wind capacity or replacement of existing capacity.") - argparser.add_argument('--rps-prefer-dist-pv', action='store_true', default=False, - help="Don't allow any new large solar capacity unless 90%% of distributed PV ('*DistPV') capacity has been developed.") argparser.add_argument( - '--rps-allocation', default=None, + "--biofuel-limit", + type=float, + default=1.0, + help="Maximum fraction of power that can be obtained from biofuel in any period (default=1.0)", + ) + argparser.add_argument( + "--biofuel-switch-threshold", + type=float, + default=1.0, + help="RPS level at which all thermal plants switch to biofuels (0.0-1.0, default=1.0); use with --rps-allocation fuel_switch_at_high_rps", + ) + argparser.add_argument( + "--rps-activate", + default="activate", + dest="rps_level", + action="store_const", + const="activate", + help="Activate RPS (on by default).", + ) + argparser.add_argument( + "--rps-deactivate", + dest="rps_level", + action="store_const", + const="deactivate", + help="Deactivate RPS.", + ) + argparser.add_argument( + "--rps-exact", + dest="rps_level", + action="store_const", + const="exact", + help="Require exact satisfaction of RPS target (no excess or shortfall).", + ) + argparser.add_argument( + "--rps-no-new-renewables", + dest="rps_level", + action="store_const", + const="no_new_renewables", + help="Deactivate RPS and don't allow any new renewables except to replace existing capacity.", + ) + argparser.add_argument( + "--rps-no-new-wind", + action="store_true", + default=False, + help="Don't allow any new wind capacity except to 
replace existing capacity.", + ) + argparser.add_argument( + "--rps-no-wind", + action="store_true", + default=False, + help="Don't allow any new wind capacity or replacement of existing capacity.", + ) + argparser.add_argument( + "--rps-prefer-dist-pv", + action="store_true", + default=False, + help="Don't allow any new large solar capacity unless 90%% of distributed PV ('*DistPV') capacity has been developed.", + ) + argparser.add_argument( + "--rps-allocation", + default=None, choices=[ - 'quadratic', - 'fuel_switch_by_period', 'fuel_switch_by_timeseries', - 'full_load_heat_rate', - 'split_commit', - 'relaxed_split_commit', - 'fuel_switch_at_high_rps', + "quadratic", + "fuel_switch_by_period", + "fuel_switch_by_timeseries", + "full_load_heat_rate", + "split_commit", + "relaxed_split_commit", + "fuel_switch_at_high_rps", ], help="Method to use to allocate power output among fuels. Default is fuel_switch_by_period for models " - + "with unit commitment, full_load_heat_rate for models without." + + "with unit commitment, full_load_heat_rate for models without.", ) - argparser.add_argument('--rps-targets', nargs='*', default=None, + argparser.add_argument( + "--rps-targets", + nargs="*", + default=None, help="Targets to use for RPS, specified as --rps-targets year1 level1 year2 level2 ..., " "where years are transition years and levels are fractions between 0 and 1. " - "If not specified, values from rps_targets.csv will be used." + "If not specified, values from rps_targets.csv will be used.", ) + # TODO: make this work with progressive hedging as follows: # add a variable indexed over all weather scenarios and all cost scenarios, # which shows how much of the RPS will be allocated to each scenario. @@ -62,34 +106,43 @@ def define_arguments(argparser): # Could do the same with hydrogen storage: require average hydrogen stored across all scenarios # to be less than the size of the storage built. 
-def define_components(m): - """ - """ +def define_components(m): + """ """ ################### # RPS calculation ################## m.f_rps_eligible = Param(m.FUELS, within=Binary, default=False) - m.RPS_ENERGY_SOURCES = Set(initialize=lambda m: - [s for s in m.NON_FUEL_ENERGY_SOURCES if s.lower() != 'battery'] - + [f for f in m.FUELS if m.f_rps_eligible[f]] + m.RPS_ENERGY_SOURCES = Set( + dimen=1, + initialize=lambda m: [ + s for s in m.NON_FUEL_ENERGY_SOURCES if s.lower() != "battery" + ] + + [f for f in m.FUELS if m.f_rps_eligible[f]], ) - m.RPS_YEARS = Set(ordered=True) - m.rps_target = Param(m.RPS_YEARS) + m.RPS_YEARS = Set(ordered=True, dimen=1) + m.rps_target = Param(m.RPS_YEARS, within=PercentFraction) def rps_target_for_period_rule(m, p): """find the last target that is in effect before the _end_ of the period""" - latest_target = max(y for y in m.RPS_YEARS if y < m.period_start[p] + m.period_length_years[p]) + latest_target = max( + y for y in m.RPS_YEARS if y < m.period_start[p] + m.period_length_years[p] + ) return m.rps_target[latest_target] - m.rps_target_for_period = Param(m.PERIODS, initialize=rps_target_for_period_rule) + + m.rps_target_for_period = Param( + m.PERIODS, within=NonNegativeReals, initialize=rps_target_for_period_rule + ) # maximum share of (bio)fuels in rps # note: using Infinity as the upper limit causes the solution to take forever # m.rps_fuel_limit = Param(default=float("inf"), mutable=True) - m.rps_fuel_limit = Param(initialize=m.options.biofuel_limit, mutable=True) + m.rps_fuel_limit = Param( + within=NonNegativeReals, initialize=m.options.biofuel_limit, mutable=True + ) # calculate amount of pre-existing capacity in each generation project; # used when we want to restrict expansion @@ -98,7 +151,7 @@ def rps_target_for_period_rule(m, p): rule=lambda m, g: ( m.GenCapacity[g, m.PERIODS.first()] - get(m.BuildGen, (g, m.PERIODS.first()), 0) - ) + ), ) # Define DispatchGenRenewableMW, which shows the amount of power produced @@ -106,12 +159,13 @@ def rps_target_for_period_rule(m, p): define_DispatchGenRenewableMW(m) # calculate amount of power produced from renewable fuels during each period - m.RPSFuelPower = Expression(m.PERIODS, rule=lambda m, per: - sum( + m.RPSFuelPower = Expression( + m.PERIODS, + rule=lambda m, per: sum( m.DispatchGenRenewableMW[g, tp] * m.tp_weight[tp] for g in m.FUEL_BASED_GENS for tp in m.TPS_FOR_GEN_IN_PERIOD[g, per] - ) + ), ) # Note: this rule ignores pumped hydro and batteries, so it could be gamed by producing extra @@ -121,92 +175,113 @@ def rps_target_for_period_rule(m, p): # sum(getattr(m, component)[z, t] for z in m.LOAD_ZONES) for component in m.Zone_Power_Injections) # power production that can be counted toward the RPS each period - m.RPSEligiblePower = Expression(m.PERIODS, rule=lambda m, per: - m.RPSFuelPower[per] - + - sum( + m.RPSEligiblePower = Expression( + m.PERIODS, + rule=lambda m, per: m.RPSFuelPower[per] + + sum( m.DispatchGen[g, tp] * m.tp_weight[tp] - for f in m.NON_FUEL_ENERGY_SOURCES if f in m.RPS_ENERGY_SOURCES + for f in m.NON_FUEL_ENERGY_SOURCES + if f in m.RPS_ENERGY_SOURCES for g in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[f] for tp in m.TPS_FOR_GEN_IN_PERIOD[g, per] - ) + ), ) # total power production each period (against which RPS is measured) # note: we exclude production from storage - m.RPSTotalPower = Expression(m.PERIODS, rule=lambda m, per: - sum( + m.RPSTotalPower = Expression( + m.PERIODS, + rule=lambda m, per: sum( m.DispatchGen[g, tp] * m.tp_weight[tp] - for g in m.GENERATION_PROJECTS if g not 
in getattr(m, 'STORAGE_GENS', []) + for g in m.GENERATION_PROJECTS + if g not in getattr(m, "STORAGE_GENS", []) for tp in m.TPS_FOR_GEN_IN_PERIOD[g, per] - ) + ), ) # note: we completely skip creating the constraint if the RPS is not activated. # this makes it easy for other modules to check whether there's an RPS in effect # (if we deactivated the RPS after it is constructed, then other modules would # have to postpone checking until then) - if m.options.rps_level in {'activate', 'exact'}: - if m.options.rps_level == 'exact': - rule = lambda m, p: m.RPSEligiblePower[p] == m.rps_target_for_period[p] * m.RPSTotalPower[p] + if m.options.rps_level in {"activate", "exact"}: + if m.options.rps_level == "exact": + rule = ( + lambda m, p: m.RPSEligiblePower[p] + == m.rps_target_for_period[p] * m.RPSTotalPower[p] + ) else: - rule = lambda m, p: m.RPSEligiblePower[p] >= m.rps_target_for_period[p] * m.RPSTotalPower[p] + rule = ( + lambda m, p: m.RPSEligiblePower[p] + >= m.rps_target_for_period[p] * m.RPSTotalPower[p] + ) m.RPS_Enforce = Constraint(m.PERIODS, rule=rule) - elif m.options.rps_level == 'no_new_renewables': + elif m.options.rps_level == "no_new_renewables": # prevent construction of any new exclusively-renewable projects, but allow # replacement of existing ones # (doesn't ban use of biofuels in existing or multi-fuel projects, but that could # be done with --biofuel-limit 0) - m.No_New_Renewables = Constraint(m.NEW_GEN_BLD_YRS, rule=lambda m, g, bld_yr: - (m.GenCapacity[g, bld_yr] <= m.gen_pre_existing_capacity[g]) + m.No_New_Renewables = Constraint( + m.NEW_GEN_BLD_YRS, + rule=lambda m, g, bld_yr: ( + m.GenCapacity[g, bld_yr] <= m.gen_pre_existing_capacity[g] + ) if m.gen_energy_source[g] in m.RPS_ENERGY_SOURCES - else Constraint.Skip + else Constraint.Skip, ) - wind_energy_sources = {'WND'} + wind_energy_sources = {"WND"} if m.options.rps_no_new_wind: # limit wind to existing capacity - m.No_New_Wind = Constraint(m.NEW_GEN_BLD_YRS, rule=lambda m, g, bld_yr: - (m.GenCapacity[g, bld_yr] <= m.gen_pre_existing_capacity[g]) + m.No_New_Wind = Constraint( + m.NEW_GEN_BLD_YRS, + rule=lambda m, g, bld_yr: ( + m.GenCapacity[g, bld_yr] <= m.gen_pre_existing_capacity[g] + ) if m.gen_energy_source[g] in wind_energy_sources - else Constraint.Skip + else Constraint.Skip, ) if m.options.rps_no_wind: # don't build any new capacity or replace existing - m.No_Wind = Constraint(m.NEW_GEN_BLD_YRS, rule=lambda m, g, bld_yr: - (m.BuildGen[g, bld_yr] == 0.0) + m.No_Wind = Constraint( + m.NEW_GEN_BLD_YRS, + rule=lambda m, g, bld_yr: (m.BuildGen[g, bld_yr] == 0.0) if m.gen_energy_source[g] in wind_energy_sources - else Constraint.Skip + else Constraint.Skip, ) if m.options.rps_prefer_dist_pv: - m.DIST_PV_GENS = Set(initialize=lambda m: [ - g for g in m.GENS_BY_NON_FUEL_ENERGY_SOURCE['SUN'] - if 'DistPV' in m.gen_tech[g] - ]) - m.LARGE_PV_GENS = Set(initialize=lambda m: [ - g for g in m.GENS_BY_NON_FUEL_ENERGY_SOURCE['SUN'] - if g not in m.DIST_PV_GENS - ]) + m.DIST_PV_GENS = Set( + dimen=1, + initialize=lambda m: [ + g + for g in m.GENS_BY_NON_FUEL_ENERGY_SOURCE["SUN"] + if "DistPV" in m.gen_tech[g] + ], + ) + m.LARGE_PV_GENS = Set( + dimen=1, + initialize=lambda m: [ + g + for g in m.GENS_BY_NON_FUEL_ENERGY_SOURCE["SUN"] + if g not in m.DIST_PV_GENS + ], + ) # LargePVAllowed must be 1 to allow large PV to be built - m.LargePVAllowed = Var(m.PERIODS, within=Binary) # + m.LargePVAllowed = Var(m.PERIODS, within=Binary) # # LargePVAllowed can only be 1 if 90% of the available rooftop PV has been built 
m.Set_LargePVAllowed = Constraint( m.PERIODS, - rule=lambda m, p: - sum(m.GenCapacity[g, p] for g in m.DIST_PV_GENS) - >= - m.LargePVAllowed[p] - * 0.9 - * sum(m.gen_capacity_limit_mw[g] for g in m.DIST_PV_GENS) + rule=lambda m, p: sum(m.GenCapacity[g, p] for g in m.DIST_PV_GENS) + >= m.LargePVAllowed[p] + * 0.9 + * sum(m.gen_capacity_limit_mw[g] for g in m.DIST_PV_GENS), ) m.Apply_LargePVAllowed = Constraint( - m.LARGE_PV_GENS, m.PERIODS, - rule=lambda m, g, p: - m.GenCapacity[g, p] - <= - m.LargePVAllowed[p] * m.gen_capacity_limit_mw[g] - + m.gen_pre_existing_capacity[g] + m.LARGE_PV_GENS, + m.PERIODS, + rule=lambda m, g, p: m.GenCapacity[g, p] + <= m.LargePVAllowed[p] * m.gen_capacity_limit_mw[g] + + m.gen_pre_existing_capacity[g], ) # Don't allow (bio)fuels to provide more than a certain percentage of the system's energy @@ -229,10 +304,13 @@ def rps_target_for_period_rule(m, p): # transmission losses, the cycling costs for batteries are too high and pumped storage is only # adopted on a small scale. - m.RPS_Fuel_Cap = Constraint(m.PERIODS, rule = lambda m, per: - m.RPSFuelPower[per] <= m.rps_fuel_limit * m.RPSTotalPower[per] + m.RPS_Fuel_Cap = Constraint( + m.PERIODS, + rule=lambda m, per: m.RPSFuelPower[per] + <= m.rps_fuel_limit * m.RPSTotalPower[per], ) + def define_DispatchGenRenewableMW(m): # Define DispatchGenRenewableMW, which shows the amount of power produced # by each project from each fuel during each time step. @@ -240,31 +318,36 @@ def define_DispatchGenRenewableMW(m): # This can get complex when a project uses multiple fuels and incremental # heat rate curves. if m.options.rps_allocation is None: - if hasattr(m, 'FUEL_USE_SEGMENTS_FOR_GEN'): + if hasattr(m, "FUEL_USE_SEGMENTS_FOR_GEN"): # using heat rate curves and possibly startup fuel; # have to do more advanced allocation of power to fuels - m.options.rps_allocation = 'fuel_switch_by_period' + m.options.rps_allocation = "fuel_switch_by_period" else: # only using full load heat rate; use simpler allocation strategy - m.options.rps_allocation = 'full_load_heat_rate' + m.options.rps_allocation = "full_load_heat_rate" if m.options.verbose: - print("Using {} method to allocate DispatchGenRenewableMW".format(m.options.rps_allocation)) + print( + "Using {} method to allocate DispatchGenRenewableMW".format( + m.options.rps_allocation + ) + ) - if m.options.rps_allocation == 'full_load_heat_rate': + if m.options.rps_allocation == "full_load_heat_rate": simple_DispatchGenRenewableMW(m) - elif m.options.rps_allocation == 'quadratic': + elif m.options.rps_allocation == "quadratic": quadratic_DispatchGenRenewableMW(m) - elif m.options.rps_allocation == 'fuel_switch_by_period': + elif m.options.rps_allocation == "fuel_switch_by_period": binary_by_period_DispatchGenRenewableMW(m) - elif m.options.rps_allocation == 'fuel_switch_by_timeseries': + elif m.options.rps_allocation == "fuel_switch_by_timeseries": binary_by_timeseries_DispatchGenRenewableMW(m) - elif m.options.rps_allocation == 'split_commit': + elif m.options.rps_allocation == "split_commit": split_commit_DispatchGenRenewableMW(m) - elif m.options.rps_allocation == 'relaxed_split_commit': + elif m.options.rps_allocation == "relaxed_split_commit": relaxed_split_commit_DispatchGenRenewableMW(m) - elif m.options.rps_allocation == 'fuel_switch_at_high_rps': + elif m.options.rps_allocation == "fuel_switch_at_high_rps": fuel_switch_at_high_rps_DispatchGenRenewableMW(m) + def simple_DispatchGenRenewableMW(m): # Allocate the power produced during each timepoint among the fuels. 
# When not using heat rate curves, this can be calculated directly from @@ -272,13 +355,10 @@ def simple_DispatchGenRenewableMW(m): # multiple fuels in the same project at the same time. m.DispatchGenRenewableMW = Expression( m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, t: - sum( - m.GenFuelUseRate[g, t, f] - for f in m.FUELS_FOR_GEN[g] - if m.f_rps_eligible[f] - ) - / m.gen_full_load_heat_rate[g] + rule=lambda m, g, t: sum( + m.GenFuelUseRate[g, t, f] for f in m.FUELS_FOR_GEN[g] if m.f_rps_eligible[f] + ) + / m.gen_full_load_heat_rate[g], ) @@ -301,78 +381,79 @@ def split_commit_DispatchGenRenewableMW(m): # count amount of renewable power produced from project m.DispatchGenRenewableMW = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) - m.DispatchGenRenewableMW_Cap = Constraint(m.FUEL_BASED_GEN_TPS, - rule = lambda m, g, tp: - m.DispatchGenRenewableMW[g, tp] <= m.DispatchGen[g, tp] + m.DispatchGenRenewableMW_Cap = Constraint( + m.FUEL_BASED_GEN_TPS, + rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] <= m.DispatchGen[g, tp], ) # a portion of every startup and shutdown must be designated as renewable m.CommitGenRenewable = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) - m.CommitGenRenewable_Cap = Constraint(m.FUEL_BASED_GEN_TPS, - rule = lambda m, g, tp: - m.CommitGenRenewable[g, tp] <= m.CommitGen[g, tp] + m.CommitGenRenewable_Cap = Constraint( + m.FUEL_BASED_GEN_TPS, + rule=lambda m, g, tp: m.CommitGenRenewable[g, tp] <= m.CommitGen[g, tp], ) m.StartupGenCapacityRenewable = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) - m.StartupGenCapacityRenewable_Cap = Constraint(m.FUEL_BASED_GEN_TPS, - rule = lambda m, g, tp: - m.StartupGenCapacityRenewable[g, tp] <= m.StartupGenCapacity[g, tp] + m.StartupGenCapacityRenewable_Cap = Constraint( + m.FUEL_BASED_GEN_TPS, + rule=lambda m, g, tp: m.StartupGenCapacityRenewable[g, tp] + <= m.StartupGenCapacity[g, tp], ) m.ShutdownGenCapacityRenewable = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) - m.ShutdownGenCapacityRenewable_Cap = Constraint(m.FUEL_BASED_GEN_TPS, - rule = lambda m, g, tp: - m.ShutdownGenCapacityRenewable[g, tp] <= m.ShutdownGenCapacity[g, tp] + m.ShutdownGenCapacityRenewable_Cap = Constraint( + m.FUEL_BASED_GEN_TPS, + rule=lambda m, g, tp: m.ShutdownGenCapacityRenewable[g, tp] + <= m.ShutdownGenCapacity[g, tp], ) # chain commitments, startup and shutdown for renewables m.Commit_StartupGenCapacity_ShutdownGenCapacity_Consistency_Renewable = Constraint( m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: - m.CommitGenRenewable[g, m.tp_previous[tp]] - + m.StartupGenCapacityRenewable[g, tp] - - m.ShutdownGenCapacityRenewable[g, tp] - == m.CommitGenRenewable[g, tp] + rule=lambda m, g, tp: m.CommitGenRenewable[g, m.tp_previous[tp]] + + m.StartupGenCapacityRenewable[g, tp] + - m.ShutdownGenCapacityRenewable[g, tp] + == m.CommitGenRenewable[g, tp], ) # must use committed capacity for renewable production m.Enforce_Dispatch_Upper_Limit_Renewable = Constraint( m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: - m.DispatchGenRenewableMW[g, tp] <= m.CommitGenRenewable[g, tp] + rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] + <= m.CommitGenRenewable[g, tp], ) # can't dispatch non-renewable capacity below its lower limit m.Enforce_Dispatch_Lower_Limit_Non_Renewable = Constraint( m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: - (m.DispatchGen[g, tp] - m.DispatchGenRenewableMW[g, tp]) - >= - (m.CommitGen[g, tp] - m.CommitGenRenewable[g, tp]) - * m.gen_min_load_fraction_TP[g, tp] + rule=lambda m, g, tp: (m.DispatchGen[g, tp] - 
m.DispatchGenRenewableMW[g, tp]) + >= (m.CommitGen[g, tp] - m.CommitGenRenewable[g, tp]) + * m.gen_min_load_fraction_TP[g, tp], ) # use standard heat rate calculations for renewable and non-renewable parts m.ProjRenewableFuelUseRate_Calculate = Constraint( m.GEN_TPS_FUEL_PIECEWISE_CONS_SET, - rule=lambda m, g, tp, intercept, incremental_heat_rate: - sum( - m.GenFuelUseRate[g, tp, f] - for f in m.FUELS_FOR_GEN[g] - if f in m.RPS_ENERGY_SOURCES - ) - >= - m.StartupGenCapacityRenewable[g, tp] * m.gen_startup_fuel[g] / m.tp_duration_hrs[tp] - + intercept * m.CommitGenRenewable[g, tp] - + incremental_heat_rate * m.DispatchGenRenewableMW[g, tp] + rule=lambda m, g, tp, intercept, incremental_heat_rate: sum( + m.GenFuelUseRate[g, tp, f] + for f in m.FUELS_FOR_GEN[g] + if f in m.RPS_ENERGY_SOURCES + ) + >= m.StartupGenCapacityRenewable[g, tp] + * m.gen_startup_fuel[g] + / m.tp_duration_hrs[tp] + + intercept * m.CommitGenRenewable[g, tp] + + incremental_heat_rate * m.DispatchGenRenewableMW[g, tp], ) m.ProjNonRenewableFuelUseRate_Calculate = Constraint( m.GEN_TPS_FUEL_PIECEWISE_CONS_SET, - rule=lambda m, g, tp, intercept, incremental_heat_rate: - sum( - m.GenFuelUseRate[g, tp, f] - for f in m.FUELS_FOR_GEN[g] - if f not in m.RPS_ENERGY_SOURCES - ) - >= - (m.StartupGenCapacity[g, tp] - m.StartupGenCapacityRenewable[g, tp]) * m.gen_startup_fuel[g] / m.tp_duration_hrs[tp] - + intercept * (m.CommitGen[g, tp] - m.CommitGenRenewable[g, tp]) - + incremental_heat_rate * (m.DispatchGen[g, tp] - m.DispatchGenRenewableMW[g, tp]) + rule=lambda m, g, tp, intercept, incremental_heat_rate: sum( + m.GenFuelUseRate[g, tp, f] + for f in m.FUELS_FOR_GEN[g] + if f not in m.RPS_ENERGY_SOURCES + ) + >= (m.StartupGenCapacity[g, tp] - m.StartupGenCapacityRenewable[g, tp]) + * m.gen_startup_fuel[g] + / m.tp_duration_hrs[tp] + + intercept * (m.CommitGen[g, tp] - m.CommitGenRenewable[g, tp]) + + incremental_heat_rate + * (m.DispatchGen[g, tp] - m.DispatchGenRenewableMW[g, tp]), ) + def relaxed_split_commit_DispatchGenRenewableMW(m): # This is similar to the split_commit approach, but allows startup fuel # to be freely allocated between renewable and non-renewable fuels. 
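Several of the allocation variants here hinge on one big-M pattern: a binary flag either enforces or relaxes a limit, with M chosen as a valid upper bound (capacity, or capacity times full-load heat rate, in the constraints below). A self-contained sketch of the pattern, using made-up names and numbers:

from pyomo.environ import Binary, ConcreteModel, Constraint, NonNegativeReals, Var

m = ConcreteModel()
M = 100.0  # hypothetical upper bound, standing in for gen_capacity_limit_mw
m.FossilFuelUse = Var(within=NonNegativeReals, bounds=(0, M))
m.RenewableFlag = Var(within=Binary)  # 1 = run only on renewable fuels

# equivalent to FossilFuelUse <= (1 - RenewableFlag) * M, rewritten so the
# purely numeric term sits on the right-hand side (the same rewrite noted
# in Enforce_DispatchRenewableFlag further down)
m.Limit_Fossil = Constraint(expr=m.FossilFuelUse + m.RenewableFlag * M <= M)
# RenewableFlag == 1 forces FossilFuelUse to 0; RenewableFlag == 0 leaves
# it free up to M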
@@ -383,24 +464,23 @@ def relaxed_split_commit_DispatchGenRenewableMW(m): # count amount of renewable power produced from project m.DispatchGenRenewableMW = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) - m.DispatchGenRenewableMW_Cap = Constraint(m.FUEL_BASED_GEN_TPS, - rule = lambda m, g, tp: - m.DispatchGenRenewableMW[g, tp] <= m.DispatchGen[g, tp] + m.DispatchGenRenewableMW_Cap = Constraint( + m.FUEL_BASED_GEN_TPS, + rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] <= m.DispatchGen[g, tp], ) m.StartupGenCapacityRenewable = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) - m.StartupGenCapacityRenewable_Cap = Constraint(m.FUEL_BASED_GEN_TPS, - rule = lambda m, g, tp: - m.StartupGenCapacityRenewable[g, tp] <= m.StartupGenCapacity[g, tp] + m.StartupGenCapacityRenewable_Cap = Constraint( + m.FUEL_BASED_GEN_TPS, + rule=lambda m, g, tp: m.StartupGenCapacityRenewable[g, tp] + <= m.StartupGenCapacity[g, tp], ) # can't dispatch non-renewable capacity below its lower limit m.Enforce_Dispatch_Lower_Limit_Non_Renewable = Constraint( m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: - (m.DispatchGen[g, tp] - m.DispatchGenRenewableMW[g, tp]) - >= - (m.CommitGen[g, tp] - m.DispatchGenRenewableMW[g, tp]) - * m.gen_min_load_fraction_TP[g, tp] + rule=lambda m, g, tp: (m.DispatchGen[g, tp] - m.DispatchGenRenewableMW[g, tp]) + >= (m.CommitGen[g, tp] - m.DispatchGenRenewableMW[g, tp]) + * m.gen_min_load_fraction_TP[g, tp], ) # rule=lambda m, g, t, intercept, incremental_heat_rate: ( @@ -420,82 +500,93 @@ def relaxed_split_commit_DispatchGenRenewableMW(m): # for renewables and one slice for non-renewable, equal to the amount of power from each. m.ProjRenewableFuelUseRate_Calculate = Constraint( m.GEN_TPS_FUEL_PIECEWISE_CONS_SET, - rule=lambda m, g, tp, intercept, incremental_heat_rate: - sum( - m.GenFuelUseRate[g, tp, f] - for f in m.FUELS_FOR_GEN[g] - if f in m.RPS_ENERGY_SOURCES - ) - >= - m.StartupGenCapacityRenewable[g, tp] * m.gen_startup_fuel[g] / m.tp_duration_hrs[tp] - + intercept * m.DispatchGenRenewableMW[g, tp] - + incremental_heat_rate * m.DispatchGenRenewableMW[g, tp] + rule=lambda m, g, tp, intercept, incremental_heat_rate: sum( + m.GenFuelUseRate[g, tp, f] + for f in m.FUELS_FOR_GEN[g] + if f in m.RPS_ENERGY_SOURCES + ) + >= m.StartupGenCapacityRenewable[g, tp] + * m.gen_startup_fuel[g] + / m.tp_duration_hrs[tp] + + intercept * m.DispatchGenRenewableMW[g, tp] + + incremental_heat_rate * m.DispatchGenRenewableMW[g, tp], ) m.ProjNonRenewableFuelUseRate_Calculate = Constraint( m.GEN_TPS_FUEL_PIECEWISE_CONS_SET, - rule=lambda m, g, tp, intercept, incremental_heat_rate: - sum( - m.GenFuelUseRate[g, tp, f] - for f in m.FUELS_FOR_GEN[g] - if f not in m.RPS_ENERGY_SOURCES - ) - >= - (m.StartupGenCapacity[g, tp] - m.StartupGenCapacityRenewable[g, tp]) * m.gen_startup_fuel[g] / m.tp_duration_hrs[tp] - + intercept * (m.DispatchGen[g, tp] - m.DispatchGenRenewableMW[g, tp]) - + incremental_heat_rate * (m.DispatchGen[g, tp] - m.DispatchGenRenewableMW[g, tp]) + rule=lambda m, g, tp, intercept, incremental_heat_rate: sum( + m.GenFuelUseRate[g, tp, f] + for f in m.FUELS_FOR_GEN[g] + if f not in m.RPS_ENERGY_SOURCES + ) + >= (m.StartupGenCapacity[g, tp] - m.StartupGenCapacityRenewable[g, tp]) + * m.gen_startup_fuel[g] + / m.tp_duration_hrs[tp] + + intercept * (m.DispatchGen[g, tp] - m.DispatchGenRenewableMW[g, tp]) + + incremental_heat_rate + * (m.DispatchGen[g, tp] - m.DispatchGenRenewableMW[g, tp]), ) # don't allow any non-renewable fuel if RPS is 100% - if m.options.rps_level == 'activate': + if 
m.options.rps_level == "activate": # find all dispatch points for non-renewable fuels during periods with 100% RPS m.FULL_RPS_GEN_FOSSIL_FUEL_DISPATCH_POINTS = Set( dimen=3, initialize=lambda m: [ (g, tp, f) - for per in m.PERIODS if m.rps_target_for_period[per] == 1.0 - for g in m.FUEL_BASED_GENS if (g, per) in m.GEN_PERIODS - for f in m.FUELS_FOR_GEN[g] if not m.f_rps_eligible[f] + for per in m.PERIODS + if m.rps_target_for_period[per] == 1.0 + for g in m.FUEL_BASED_GENS + if (g, per) in m.GEN_PERIODS + for f in m.FUELS_FOR_GEN[g] + if not m.f_rps_eligible[f] for tp in m.TPS_IN_PERIOD[per] - ] + ], ) m.No_Fossil_Fuel_With_Full_RPS = Constraint( m.FULL_RPS_GEN_FOSSIL_FUEL_DISPATCH_POINTS, - rule=lambda m, g, tp, f: m.GenFuelUseRate[g, tp, f] == 0.0 + rule=lambda m, g, tp, f: m.GenFuelUseRate[g, tp, f] == 0.0, ) def fuel_switch_at_high_rps_DispatchGenRenewableMW(m): - """ switch all plants to biofuel (and count toward RPS) if and only if rps is above threshold """ + """switch all plants to biofuel (and count toward RPS) if and only if rps is above threshold""" - if m.options.rps_level == 'activate': + if m.options.rps_level == "activate": # find all dispatch points for non-renewable fuels during periods with 100% RPS m.HIGH_RPS_GEN_FOSSIL_FUEL_DISPATCH_POINTS = Set( dimen=3, initialize=lambda m: [ (g, tp, f) - for p in m.PERIODS if m.rps_target_for_period[p] >= m.options.biofuel_switch_threshold - for g in m.FUEL_BASED_GENS if (g, p) in m.GEN_PERIODS - for f in m.FUELS_FOR_GEN[g] if not m.f_rps_eligible[f] - for tp in m.TPS_IN_PERIOD[p] - ] + for p in m.PERIODS + if m.rps_target_for_period[p] >= m.options.biofuel_switch_threshold + for g in m.FUEL_BASED_GENS + if (g, p) in m.GEN_PERIODS + for f in m.FUELS_FOR_GEN[g] + if not m.f_rps_eligible[f] + for tp in m.TPS_IN_PERIOD[p] + ], ) m.No_Fossil_Fuel_With_High_RPS = Constraint( m.HIGH_RPS_GEN_FOSSIL_FUEL_DISPATCH_POINTS, - rule=lambda m, g, tp, f: m.GenFuelUseRate[g, tp, f] == 0.0 + rule=lambda m, g, tp, f: m.GenFuelUseRate[g, tp, f] == 0.0, ) # count full dispatch toward RPS during non-fossil periods, otherwise give no credit def rule(m, g, tp): - if m.rps_target_for_period[m.tp_period[tp]] >= m.options.biofuel_switch_threshold: + if ( + m.rps_target_for_period[m.tp_period[tp]] + >= m.options.biofuel_switch_threshold + ): return m.DispatchGen[g, tp] else: return 0.0 + m.DispatchGenRenewableMW = Expression(m.FUEL_BASED_GEN_TPS, rule=rule) else: m.DispatchGenRenewableMW = Expression( - m.FUEL_BASED_GEN_TPS, within=NonNegativeReals, - rule=lambda m, g, tp: 0.0 + m.FUEL_BASED_GEN_TPS, within=NonNegativeReals, rule=lambda m, g, tp: 0.0 ) + def binary_by_period_DispatchGenRenewableMW(m): # NOTE: this could be extended to handle fuel blends (e.g., 50% biomass/50% coal) # by assigning an RPS eligibility level to each fuel (e.g., 50%), then @@ -507,35 +598,39 @@ def binary_by_period_DispatchGenRenewableMW(m): # and choosing the amount to produce from each eligibility level (similar to the # renewable/non-renewable distinction here, but with a 50% renewable category) - m.GEN_WITH_FUEL_ACTIVE_PERIODS = Set(dimen=2, initialize=lambda m: { - (g, pe) - for g in m.FUEL_BASED_GENS for pe in m.PERIODS - if (g, m.TPS_IN_PERIOD[pe].first()) in m.FUEL_BASED_GEN_TPS - }) + m.GEN_WITH_FUEL_ACTIVE_PERIODS = Set( + dimen=2, + initialize=lambda m: { + (g, pe) + for g in m.FUEL_BASED_GENS + for pe in m.PERIODS + if (g, m.TPS_IN_PERIOD[pe].first()) in m.FUEL_BASED_GEN_TPS + }, + ) # choose whether to run (only) on renewable fuels during each period 
m.DispatchRenewableFlag = Var(m.GEN_WITH_FUEL_ACTIVE_PERIODS, within=Binary) # force flag on or off when the RPS is simple (to speed computation) def rule(m, g, p): - if m.rps_target_for_period[pe]==1.0: + if m.rps_target_for_period[p] == 1.0: # 100% RPS; use only renewable fuels - return (m.DispatchRenewableFlag[g, pe] == 1) - elif m.rps_target_for_period[pe]==0.0 or m.options.rps_level != 'activate': + return m.DispatchRenewableFlag[g, p] == 1 + elif m.rps_target_for_period[p] == 0.0 or m.options.rps_level != "activate": # no RPS, don't bother counting renewable fuels - return (m.DispatchRenewableFlag[g, pe] == 0) + return m.DispatchRenewableFlag[g, p] == 0 else: return Constraint.Skip + m.Force_DispatchRenewableFlag = Constraint( m.GEN_WITH_FUEL_ACTIVE_PERIODS, - rule=lambda m, g, pe: - (m.DispatchRenewableFlag[g, pe] == 0) - if (m.rps_target_for_period[pe]==0.0 or m.options.rps_level != 'activate') - else ( - (m.DispatchRenewableFlag[g, pe] == 1) - if m.rps_target_for_period[pe]==1.0 - else Constraint.Skip - ) + rule=lambda m, g, pe: (m.DispatchRenewableFlag[g, pe] == 0) + if (m.rps_target_for_period[pe] == 0.0 or m.options.rps_level != "activate") + else ( + (m.DispatchRenewableFlag[g, pe] == 1) + if m.rps_target_for_period[pe] == 1.0 + else Constraint.Skip + ), ) # count amount of renewable power produced from project @@ -544,16 +639,13 @@ def rule(m, g, p): # don't overcount renewable power production m.Limit_DispatchGenRenewableMW = Constraint( m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: - m.DispatchGenRenewableMW[g, tp] <= m.DispatchGen[g, tp] + rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] <= m.DispatchGen[g, tp], ) # force the flag to be set during renewable timepoints m.Set_DispatchRenewableFlag = Constraint( - m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: - m.DispatchGenRenewableMW[g, tp] - <= - m.DispatchRenewableFlag[g, m.tp_period[tp]] * m.gen_capacity_limit_mw[g] + m.FUEL_BASED_GEN_TPS, + rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] + <= m.DispatchRenewableFlag[g, m.tp_period[tp]] * m.gen_capacity_limit_mw[g], ) # prevent use of non-renewable fuels during renewable timepoints @@ -569,19 +661,24 @@ def Enforce_DispatchRenewableFlag_rule(m, g, tp, f): return ( m.GenFuelUseRate[g, tp, f] + m.DispatchRenewableFlag[g, m.tp_period[tp]] * big_fuel - <= - big_fuel + <= big_fuel ) + m.Enforce_DispatchRenewableFlag = Constraint( m.GEN_TP_FUELS, rule=Enforce_DispatchRenewableFlag_rule ) + def binary_by_timeseries_DispatchGenRenewableMW(m): - m.GEN_WITH_FUEL_ACTIVE_TIMESERIES = Set(dimen=2, initialize=lambda m: { - (g, ts) - for g in m.FUEL_BASED_GENS for ts in m.TIMESERIES - if (g, m.TPS_IN_TS[ts].first()) in m.FUEL_BASED_GEN_TPS - }) + m.GEN_WITH_FUEL_ACTIVE_TIMESERIES = Set( + dimen=2, + initialize=lambda m: { + (g, ts) + for g in m.FUEL_BASED_GENS + for ts in m.TIMESERIES + if (g, m.TPS_IN_TS[ts].first()) in m.FUEL_BASED_GEN_TPS + }, + ) # choose whether to run (only) on renewable fuels during each period m.DispatchRenewableFlag = Var(m.GEN_WITH_FUEL_ACTIVE_TIMESERIES, within=Binary) @@ -589,12 +686,13 @@ def binary_by_timeseries_DispatchGenRenewableMW(m): # force flag on or off depending on RPS status (to speed computation) m.Force_DispatchRenewableFlag = Constraint( m.GEN_WITH_FUEL_ACTIVE_TIMESERIES, - rule=lambda m, g, ts: - (m.DispatchRenewableFlag[g, ts] == 0) if m.rps_target_for_period[m.ts_period[ts]]==0.0 - else ( - (m.DispatchRenewableFlag[g, ts] == 1) if m.rps_target_for_period[m.ts_period[ts]]==1.0 - else Constraint.Skip - ) + rule=lambda m, g,
ts: (m.DispatchRenewableFlag[g, ts] == 0) + if m.rps_target_for_period[m.ts_period[ts]] == 0.0 + else ( + (m.DispatchRenewableFlag[g, ts] == 1) + if m.rps_target_for_period[m.ts_period[ts]] == 1.0 + else Constraint.Skip + ), ) # count amount of renewable power produced from project @@ -603,37 +701,34 @@ def binary_by_timeseries_DispatchGenRenewableMW(m): # don't overcount renewable power production m.Limit_DispatchGenRenewableMW = Constraint( m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: - m.DispatchGenRenewableMW[g, tp] <= m.DispatchGen[g, tp] + rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] <= m.DispatchGen[g, tp], ) # force the flag to be set during renewable timepoints m.Set_DispatchRenewableFlag = Constraint( - m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: - m.DispatchGenRenewableMW[g, tp] - <= - m.DispatchRenewableFlag[g, m.tp_ts[tp]] * m.gen_capacity_limit_mw[g] + m.FUEL_BASED_GEN_TPS, + rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] + <= m.DispatchRenewableFlag[g, m.tp_ts[tp]] * m.gen_capacity_limit_mw[g], ) # prevent use of non-renewable fuels during renewable timepoints m.Enforce_DispatchRenewableFlag = Constraint( m.GEN_TP_FUELS, - rule=lambda m, g, tp, f: - Constraint.Skip if m.f_rps_eligible[f] - else ( - # original code, rewritten to get numerical parts on rhs - # m.GenFuelUseRate[g, tp, f] - # <= - # (1-m.DispatchRenewableFlag[g, m.tp_ts[tp]]) * m.gen_capacity_limit_mw[g] * m.gen_full_load_heat_rate[g] - m.GenFuelUseRate[g, tp, f] - + m.DispatchRenewableFlag[g, m.tp_ts[tp]] * m.gen_capacity_limit_mw[g] * m.gen_full_load_heat_rate[g] - <= - m.gen_capacity_limit_mw[g] * m.gen_full_load_heat_rate[g] - ) + rule=lambda m, g, tp, f: Constraint.Skip + if m.f_rps_eligible[f] + else ( + # original code, rewritten to get numerical parts on rhs + # m.GenFuelUseRate[g, tp, f] + # <= + # (1-m.DispatchRenewableFlag[g, m.tp_ts[tp]]) * m.gen_capacity_limit_mw[g] * m.gen_full_load_heat_rate[g] + m.GenFuelUseRate[g, tp, f] + + m.DispatchRenewableFlag[g, m.tp_ts[tp]] + * m.gen_capacity_limit_mw[g] + * m.gen_full_load_heat_rate[g] + <= m.gen_capacity_limit_mw[g] * m.gen_full_load_heat_rate[g] + ), ) - def advanced2_DispatchGenRenewableMW(m): # choose whether to run (only) on renewable fuels during each timepoint m.DispatchRenewableFlag = Var(m.FUEL_BASED_GEN_TPS, within=Binary) @@ -644,27 +739,26 @@ def advanced2_DispatchGenRenewableMW(m): # don't overcount renewable power production m.Limit_DispatchGenRenewableMW = Constraint( m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] <= m.DispatchGen[g, tp] + rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] <= m.DispatchGen[g, tp], ) # force the flag to be set during renewable timepoints m.Set_DispatchRenewableFlag = Constraint( - m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: - m.DispatchGenRenewableMW[g, tp] - <= - m.DispatchRenewableFlag[g, tp] * m.gen_capacity_limit_mw[g] + m.FUEL_BASED_GEN_TPS, + rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] + <= m.DispatchRenewableFlag[g, tp] * m.gen_capacity_limit_mw[g], ) # prevent use of non-renewable fuels during renewable timepoints m.Enforce_DispatchRenewableFlag = Constraint( m.GEN_TP_FUELS, - rule=lambda m, g, tp, f: - Constraint.Skip if m.f_rps_eligible[f] - else ( - m.GenFuelUseRate[g, tp, f] - <= - (1-m.DispatchRenewableFlag[g, tp]) * m.gen_capacity_limit_mw[g] * m.gen_full_load_heat_rate[g] - ) + rule=lambda m, g, tp, f: Constraint.Skip + if m.f_rps_eligible[f] + else ( + m.GenFuelUseRate[g, tp, f] + <= (1 - m.DispatchRenewableFlag[g, tp]) + * 
m.gen_capacity_limit_mw[g] + * m.gen_full_load_heat_rate[g] + ), ) @@ -675,36 +769,34 @@ def advanced1_DispatchGenRenewableMW(m): # make sure this matches total production m.DispatchGenRenewableMW_Total = Constraint( m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: - sum(m.DispatchGenRenewableMW[g, tp, f] for f in m.FUELS_FOR_GEN[g]) - == - m.DispatchGen[g, tp] + rule=lambda m, g, tp: sum( + m.DispatchGenRenewableMW[g, tp, f] for f in m.FUELS_FOR_GEN[g] + ) + == m.DispatchGen[g, tp], ) # choose a single fuel to use during each timestep m.DispatchFuelFlag = Var(m.GEN_TP_FUELS, within=Binary) m.DispatchFuelFlag_Total = Constraint( m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: - sum(m.DispatchFuelFlag[g, tp, f] for f in m.FUELS_FOR_GEN[g]) - == - 1 + rule=lambda m, g, tp: sum( + m.DispatchFuelFlag[g, tp, f] for f in m.FUELS_FOR_GEN[g] + ) + == 1, ) # consume only the selected fuel and allocate all production to that fuel (big-M constraints) m.Allocate_Dispatch_Output = Constraint( m.GEN_TP_FUELS, - rule=lambda m, g, tp, f: - m.DispatchGenRenewableMW[g, tp, f] - <= - m.DispatchFuelFlag[g, tp, f] * m.gen_capacity_limit_mw[g] + rule=lambda m, g, tp, f: m.DispatchGenRenewableMW[g, tp, f] + <= m.DispatchFuelFlag[g, tp, f] * m.gen_capacity_limit_mw[g], ) m.Allocate_Dispatch_Fuel = Constraint( m.GEN_TP_FUELS, - rule=lambda m, g, tp, f: - m.GenFuelUseRate[g, tp, f] - <= - m.DispatchFuelFlag[g, tp, f] * m.gen_capacity_limit_mw[g] * m.gen_full_load_heat_rate[g] + rule=lambda m, g, tp, f: m.GenFuelUseRate[g, tp, f] + <= m.DispatchFuelFlag[g, tp, f] + * m.gen_capacity_limit_mw[g] + * m.gen_full_load_heat_rate[g], ) # note: in cases where a project has a single fuel, the presolver should force @@ -738,6 +830,7 @@ def advanced1_DispatchGenRenewableMW(m): # * m.GenFuelUseRate[g, t, f] # ) + def quadratic_DispatchGenRenewableMW(m): # choose how much power to obtain from renewables during each timepoint m.DispatchRenewableFraction = Var(m.FUEL_BASED_GEN_TPS, within=PercentFraction) @@ -747,28 +840,22 @@ def quadratic_DispatchGenRenewableMW(m): # don't overcount renewable power production m.Set_DispatchRenewableFraction = Constraint( - m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: - m.DispatchGenRenewableMW[g, tp] - <= - m.DispatchRenewableFraction[g, tp] * m.DispatchGen[g, tp] + m.FUEL_BASED_GEN_TPS, + rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] + <= m.DispatchRenewableFraction[g, tp] * m.DispatchGen[g, tp], ) m.Enforce_DispatchRenewableFraction = Constraint( m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: - sum( - m.GenFuelUseRate[g, tp, f] - for f in m.FUELS_FOR_GEN[g] - if m.f_rps_eligible[f] - ) - >= - m.DispatchRenewableFraction[g, tp] * - sum( - m.GenFuelUseRate[g, tp, f] - for f in m.FUELS_FOR_GEN[g] - ) + rule=lambda m, g, tp: sum( + m.GenFuelUseRate[g, tp, f] + for f in m.FUELS_FOR_GEN[g] + if m.f_rps_eligible[f] + ) + >= m.DispatchRenewableFraction[g, tp] + * sum(m.GenFuelUseRate[g, tp, f] for f in m.FUELS_FOR_GEN[g]), ) + def quadratic1_DispatchGenRenewableMW(m): # Allocate the power produced during each timepoint among the fuels. 
m.DispatchGenRenewableMW = Var(m.GEN_TP_FUELS, within=NonNegativeReals) @@ -776,38 +863,39 @@ def quadratic1_DispatchGenRenewableMW(m): # make sure this matches total production m.DispatchGenRenewableMW_Total = Constraint( m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: - sum(m.DispatchGenRenewableMW[g, tp, f] for f in m.FUELS_FOR_GEN[g]) - == - m.DispatchGen[g, tp] + rule=lambda m, g, tp: sum( + m.DispatchGenRenewableMW[g, tp, f] for f in m.FUELS_FOR_GEN[g] + ) + == m.DispatchGen[g, tp], ) m.DispatchGenRenewableMW_Allocate = Constraint( m.GEN_TP_FUELS, - rule = lambda m, g, t, f: - m.DispatchGenRenewableMW[g, t, f] - * sum(m.GenFuelUseRate[g, t, _f] for _f in m.FUELS_FOR_GEN[g]) - <= - m.DispatchGen[g, t] - * m.GenFuelUseRate[g, t, f] + rule=lambda m, g, t, f: m.DispatchGenRenewableMW[g, t, f] + * sum(m.GenFuelUseRate[g, t, _f] for _f in m.FUELS_FOR_GEN[g]) + <= m.DispatchGen[g, t] * m.GenFuelUseRate[g, t, f], ) + def load_inputs(m, switch_data, inputs_dir): switch_data.load_aug( optional=True, - filename=os.path.join(inputs_dir, 'fuels.csv'), - select=('fuel', 'rps_eligible'), - param=(m.f_rps_eligible,)) + filename=os.path.join(inputs_dir, "fuels.csv"), + select=("fuel", "rps_eligible"), + param=(m.f_rps_eligible,), + ) if m.options.rps_targets is None: switch_data.load_aug( optional=True, - filename=os.path.join(inputs_dir, 'rps_targets.csv'), - autoselect=True, + filename=os.path.join(inputs_dir, "rps_targets.csv"), index=m.RPS_YEARS, - param=(m.rps_target,)) + param=(m.rps_target,), + ) else: # construct data from a target specified as 'year1 level1 year2 level2 ...' iterator = iter(m.options.rps_targets) - rps_targets = {int(year): float(target) for year, target in zip(iterator, iterator)} - switch_data.data()['RPS_YEARS'] = {None: sorted(rps_targets.keys())} - switch_data.data()['rps_target'] = rps_targets + rps_targets = { + int(year): float(target) for year, target in zip(iterator, iterator) + } + switch_data.data()["RPS_YEARS"] = {None: sorted(rps_targets.keys())} + switch_data.data()["rps_target"] = rps_targets diff --git a/switch_model/hawaii/save_results.py b/switch_model/hawaii/save_results.py index 2c6f7d773..4f2c5bc57 100644 --- a/switch_model/hawaii/save_results.py +++ b/switch_model/hawaii/save_results.py @@ -26,26 +26,44 @@ import switch_model.hawaii.util as util import switch_model.financials as financials + def define_components(m): # Make sure the model has a dual suffix if not hasattr(m, "dual"): m.dual = Suffix(direction=Suffix.IMPORT) + def post_solve(m, outputs_dir): write_results(m, outputs_dir) + def summary_headers(m): return ( ("scenario", "max_demand_response_share", "total_cost", "cost_per_kwh") - +tuple('cost_per_kwh_'+str(p) for p in m.PERIODS) - +((("renewable_share_all_years",) + tuple('renewable_share_'+str(p) for p in m.PERIODS)) - if hasattr(m, 'RPSEligiblePower') else tuple()) - +((("biofuel_share_all_years",) + tuple('biofuel_share_'+str(p) for p in m.PERIODS)) - if hasattr(m, 'RPSEligiblePower') else tuple()) + + tuple("cost_per_kwh_" + str(p) for p in m.PERIODS) + + ( + ( + ("renewable_share_all_years",) + + tuple("renewable_share_" + str(p) for p in m.PERIODS) + ) + if hasattr(m, "RPSEligiblePower") + else tuple() + ) + + ( + ( + ("biofuel_share_all_years",) + + tuple("biofuel_share_" + str(p) for p in m.PERIODS) + ) + if hasattr(m, "RPSEligiblePower") + else tuple() + ) ) + def summary_values(m): - demand_components = [c for c in ('zone_demand_mw', 'ShiftDemand', 'ChargeEVs') if hasattr(m, c)] + demand_components = [ + c for c in 
("zone_demand_mw", "ShiftDemand", "ChargeEVs") if hasattr(m, c) + ] values = [] # Cache SystemCostPerPeriod and SystemCost to speed up saving large models @@ -59,68 +77,83 @@ def summary_values(m): SystemCost = sum(SystemCostPerPeriod[p] for p in m.PERIODS) # scenario name and looping variables - values.extend([ - str(m.options.scenario_name), - m.demand_response_max_share if hasattr(m, 'demand_response_max_share') else 0.0, - ]) + values.extend( + [ + str(m.options.scenario_name), + m.demand_response_max_share + if hasattr(m, "demand_response_max_share") + else 0.0, + ] + ) # total cost (all periods) - values.append(SystemCost) # m.SystemCost) + values.append(SystemCost) # m.SystemCost) # NPV of total cost / NPV of kWh generated (equivalent to spreading # all costs uniformly over all generation) values.append( - SystemCost # m.SystemCost + SystemCost # m.SystemCost / sum( - m.bring_timepoint_costs_to_base_year[t] * 1000.0 * - sum(getattr(m, c)[z, t] for c in demand_components for z in m.LOAD_ZONES) + m.bring_timepoint_costs_to_base_year[t] + * 1000.0 + * sum(getattr(m, c)[z, t] for c in demand_components for z in m.LOAD_ZONES) for t in m.TIMEPOINTS ) ) # total cost / kWh generated in each period # (both discounted to today, so the discounting cancels out) - values.extend([ - SystemCostPerPeriod[p] # m.SystemCostPerPeriod[p] - / sum( - m.bring_timepoint_costs_to_base_year[t] * 1000.0 * - sum(getattr(m, c)[z, t] for c in demand_components for z in m.LOAD_ZONES) - for t in m.TPS_IN_PERIOD[p] - ) - for p in m.PERIODS - ]) + values.extend( + [ + SystemCostPerPeriod[p] # m.SystemCostPerPeriod[p] + / sum( + m.bring_timepoint_costs_to_base_year[t] + * 1000.0 + * sum( + getattr(m, c)[z, t] for c in demand_components for z in m.LOAD_ZONES + ) + for t in m.TPS_IN_PERIOD[p] + ) + for p in m.PERIODS + ] + ) - if hasattr(m, 'RPSEligiblePower'): + if hasattr(m, "RPSEligiblePower"): # total renewable share over all periods values.append( sum(m.RPSEligiblePower[p] for p in m.PERIODS) - /sum(m.RPSTotalPower[p] for p in m.PERIODS) + / sum(m.RPSTotalPower[p] for p in m.PERIODS) ) # renewable share during each period - values.extend([m.RPSEligiblePower[p]/m.RPSTotalPower[p] for p in m.PERIODS]) + values.extend([m.RPSEligiblePower[p] / m.RPSTotalPower[p] for p in m.PERIODS]) # total biofuel share over all periods values.append( sum(m.RPSFuelPower[p] for p in m.PERIODS) - /sum(m.RPSTotalPower[p] for p in m.PERIODS) + / sum(m.RPSTotalPower[p] for p in m.PERIODS) ) # biofuel share during each period - values.extend([m.RPSFuelPower[p]/m.RPSTotalPower[p] for p in m.PERIODS]) + values.extend([m.RPSFuelPower[p] / m.RPSTotalPower[p] for p in m.PERIODS]) return values + def annualize_present_value_period_cost(m, period, val): # convert a discounted, total cost per-period into an annual stream of costs discount_factor = ( # this term is straight from financials.py # Conversion to lump sum at beginning of period financials.uniform_series_to_present_value( - m.discount_rate, m.period_length_years[period]) * + m.discount_rate, m.period_length_years[period] + ) + * # Conversion to base year financials.future_to_present_value( - m.discount_rate, (m.period_start[period] - m.base_financial_year)) + m.discount_rate, (m.period_start[period] - m.base_financial_year) + ) ) return val / discount_factor + def DispatchGenByFuel(m, g, tp, fuel): """This is a replacement for mod.DispatchGenByFuel, which is only defined in project.no_commit, not project.unitcommit.fuel_use. 
In the unit commitment version @@ -146,27 +179,29 @@ def DispatchGenByFuel(m, g, tp, fuel): result = value(m.GenFuelUseRate[g, tp, fuel]) * dispatch / total_fuel return result + def write_results(m, outputs_dir): tag = "_" + m.options.scenario_name if m.options.scenario_name else "" - util.write_table(m, + util.write_table( + m, output_file=os.path.join(outputs_dir, "summary{t}.csv".format(t=tag)), headings=summary_headers(m), - values=lambda m: summary_values(m) + values=lambda m: summary_values(m), ) - if hasattr(m, 'Spinning_Reserve_Up_Requirements'): + if hasattr(m, "Spinning_Reserve_Up_Requirements"): # pre-calculate amount of reserves provided and needed for each balancing area and timepoint spinning_reserve_provisions = defaultdict(float) spinning_reserve_requirements = defaultdict(float) - if hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'): # advanced module + if hasattr(m, "GEN_SPINNING_RESERVE_TYPES"): # advanced module for component in m.Spinning_Reserve_Up_Provisions: for (rt, ba, tp), val in getattr(m, component).items(): spinning_reserve_provisions[ba, tp] += val for component in m.Spinning_Reserve_Up_Requirements: for (rt, ba, tp), val in getattr(m, component).items(): spinning_reserve_requirements[ba, tp] += val - else: # basic module + else: # basic module for component in m.Spinning_Reserve_Up_Provisions: for (ba, tp), val in getattr(m, component).items(): spinning_reserve_provisions[ba, tp] += val @@ -188,150 +223,207 @@ def write_results(m, outputs_dir): non_fuel_techs = tuple(sorted(set(m.gen_tech[g] for g in m.NON_FUEL_BASED_GENS))) # get a list of ad-hoc technologies (not included in standard generation projects) ad_hoc_sources = tuple( - s for s in m.Zone_Power_Injections - if s not in {'ZoneTotalCentralDispatch', 'ZoneTotalDistributedDispatch'} + s + for s in m.Zone_Power_Injections + if s not in {"ZoneTotalCentralDispatch", "ZoneTotalDistributedDispatch"} + ) + avg_ts_scale = float(sum(m.ts_scale_to_year[ts] for ts in m.TIMESERIES)) / len( + m.TIMESERIES ) - avg_ts_scale = float(sum(m.ts_scale_to_year[ts] for ts in m.TIMESERIES))/len(m.TIMESERIES) util.write_table( - m, m.LOAD_ZONES, m.TIMEPOINTS, + m, + m.LOAD_ZONES, + m.TIMEPOINTS, output_file=os.path.join(outputs_dir, "energy_sources{t}.csv".format(t=tag)), - headings= - ("load_zone", "period", "timepoint_label") - +tuple(m.FUELS) - +tuple(m.NON_FUEL_ENERGY_SOURCES) - +non_fuel_techs - +tuple("curtail_"+s for s in m.NON_FUEL_ENERGY_SOURCES) - +tuple(m.Zone_Power_Injections) - +tuple(m.Zone_Power_Withdrawals) - +("spinning_reserve_provision", "spinning_reserve_requirement") - +("marginal_cost", "peak_day"), - values=lambda m, z, t: - (z, m.tp_period[t], m.tp_timestamp[t]) - +tuple( - sum( - DispatchGenByFuel(m, p, t, f) - for p in m.GENS_BY_FUEL[f] - if (p, t) in m.GEN_TPS and m.gen_load_zone[p] == z - ) - for f in m.FUELS + headings=("load_zone", "period", "timepoint_label") + + tuple(m.FUELS) + + tuple(m.NON_FUEL_ENERGY_SOURCES) + + non_fuel_techs + + tuple("curtail_" + s for s in m.NON_FUEL_ENERGY_SOURCES) + + tuple(m.Zone_Power_Injections) + + tuple(m.Zone_Power_Withdrawals) + + ("spinning_reserve_provision", "spinning_reserve_requirement") + + ("marginal_cost", "peak_day"), + values=lambda m, z, t: (z, m.tp_period[t], m.tp_timestamp[t]) + + tuple( + sum( + DispatchGenByFuel(m, p, t, f) + for p in m.GENS_BY_FUEL[f] + if (p, t) in m.GEN_TPS and m.gen_load_zone[p] == z ) - +tuple( - sum( - util.get(m.DispatchGen, (p, t), 0.0) - for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s] - if m.gen_load_zone[p] == z - ) - for s in 
m.NON_FUEL_ENERGY_SOURCES + for f in m.FUELS + ) + + tuple( + sum( + util.get(m.DispatchGen, (p, t), 0.0) + for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s] + if m.gen_load_zone[p] == z ) - +tuple( - sum( - util.get(m.DispatchGen, (g, t), 0.0) - for g in m.GENS_BY_TECHNOLOGY[tech] - if m.gen_load_zone[g] == z - ) - for tech in non_fuel_techs + for s in m.NON_FUEL_ENERGY_SOURCES + ) + + tuple( + sum( + util.get(m.DispatchGen, (g, t), 0.0) + for g in m.GENS_BY_TECHNOLOGY[tech] + if m.gen_load_zone[g] == z ) - +tuple( - sum( - util.get(m.DispatchUpperLimit, (p, t), 0.0) - util.get(m.DispatchGen, (p, t), 0.0) - for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s] - if m.gen_load_zone[p] == z - ) - for s in m.NON_FUEL_ENERGY_SOURCES + for tech in non_fuel_techs + ) + + tuple( + sum( + util.get(m.DispatchUpperLimit, (p, t), 0.0) + - util.get(m.DispatchGen, (p, t), 0.0) + for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s] + if m.gen_load_zone[p] == z ) - +tuple(getattr(m, component)[z, t] for component in m.Zone_Power_Injections) - +tuple(getattr(m, component)[z, t] for component in m.Zone_Power_Withdrawals) - +( # save spinning reserve requirements and provisions; note: this assumes one zone per balancing area - (spinning_reserve_provisions[m.zone_balancing_area[z], t], spinning_reserve_requirements[m.zone_balancing_area[z], t]) - if hasattr(m, 'Spinning_Reserve_Up_Requirements') - else (0.0, 0.0) + for s in m.NON_FUEL_ENERGY_SOURCES + ) + + tuple(getattr(m, component)[z, t] for component in m.Zone_Power_Injections) + + tuple(getattr(m, component)[z, t] for component in m.Zone_Power_Withdrawals) + + ( # save spinning reserve requirements and provisions; note: this assumes one zone per balancing area + ( + spinning_reserve_provisions[m.zone_balancing_area[z], t], + spinning_reserve_requirements[m.zone_balancing_area[z], t], ) - +(util.get(m.dual, m.Zone_Energy_Balance[z, t], 0.0)/m.bring_timepoint_costs_to_base_year[t], - # note: this uses 0.0 if no dual available, i.e., with glpk solver - 'peak' if m.ts_scale_to_year[m.tp_ts[t]] < avg_ts_scale else 'typical') + if hasattr(m, "Spinning_Reserve_Up_Requirements") + else (0.0, 0.0) + ) + + ( + ( + ( + m.dual[m.Zone_Energy_Balance[z, t]] + / m.bring_timepoint_costs_to_base_year[t] + ) + if m.Zone_Energy_Balance[z, t] in m.dual + else "" # no dual available, e.g., with glpk solver + ), + "peak" if m.ts_scale_to_year[m.tp_ts[t]] < avg_ts_scale else "typical", + ), ) - if hasattr(m, 'Spinning_Reserve_Up_Requirements') and hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'): # advanced module + if hasattr(m, "Spinning_Reserve_Up_Requirements") and hasattr( + m, "GEN_SPINNING_RESERVE_TYPES" + ): # advanced module # write the reserve values util.write_table( - m, m.BALANCING_AREAS, m.TIMEPOINTS, - output_file=os.path.join(outputs_dir, "up_reserve_sources{t}.csv".format(t=tag)), - headings= - ("balancing_area", "period", "timepoint_label") - +tuple(m.FUELS) - +tuple(m.NON_FUEL_ENERGY_SOURCES) - +tuple(m.Spinning_Reserve_Up_Provisions) - +tuple(m.Spinning_Reserve_Up_Requirements) - +tuple("marginal_cost_"+rt for rt in m.SPINNING_RESERVE_TYPES_FROM_GENS) - +("peak_day",), - values=lambda m, ba, t: - (ba, m.tp_period[t], m.tp_timestamp[t]) - +tuple( - ( + m, + m.BALANCING_AREAS, + m.TIMEPOINTS, + output_file=os.path.join( + outputs_dir, "up_reserve_sources{t}.csv".format(t=tag) + ), + headings=("balancing_area", "period", "timepoint_label") + + tuple(m.FUELS) + + tuple(m.NON_FUEL_ENERGY_SOURCES) + + tuple(m.Spinning_Reserve_Up_Provisions) + + tuple(m.Spinning_Reserve_Up_Requirements) + + 
tuple( + "marginal_cost_" + rt + for rt in sorted(m.SPINNING_RESERVE_TYPES_FROM_GENS) + ) + + ("peak_day",), + values=lambda m, ba, t: (ba, m.tp_period[t], m.tp_timestamp[t]) + + tuple( + ( + sum( + # total reserve production sum( - # total reserve production - sum( - m.CommitGenSpinningReservesUp[rt, p, t] - for rt in m.SPINNING_RESERVE_TYPES_FOR_GEN[p] - ) - # prorated by energy source used - * DispatchGenByFuel(m, p, t, f) / m.DispatchGen[p, t] - for p in m.GENS_BY_FUEL[f] - if (p, t) in m.GEN_TPS and m.zone_balancing_area[m.gen_load_zone[p]] == ba + m.CommitGenSpinningReservesUp[rt, p, t] + for rt in m.SPINNING_RESERVE_TYPES_FOR_GEN[p] ) + # prorated by energy source used + * DispatchGenByFuel(m, p, t, f) / m.DispatchGen[p, t] + for p in m.GENS_BY_FUEL[f] + if (p, t) in m.GEN_TPS + and m.zone_balancing_area[m.gen_load_zone[p]] == ba ) - for f in m.FUELS ) - +tuple( - sum( - m.CommitGenSpinningReservesUp[rt, p, t] - for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s] - if (p, t) in m.SPINNING_RESERVE_CAPABLE_GEN_TPS and m.zone_balancing_area[m.gen_load_zone[p]] == ba - for rt in m.SPINNING_RESERVE_TYPES_FOR_GEN[p] - ) - for s in m.NON_FUEL_ENERGY_SOURCES + for f in m.FUELS + ) + + tuple( + sum( + m.CommitGenSpinningReservesUp[rt, p, t] + for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s] + if (p, t) in m.SPINNING_RESERVE_CAPABLE_GEN_TPS + and m.zone_balancing_area[m.gen_load_zone[p]] == ba + for rt in m.SPINNING_RESERVE_TYPES_FOR_GEN[p] ) - +tuple( - sum(util.get(getattr(m, component), (rt, ba, t), 0.0) for rt in m.SPINNING_RESERVE_TYPES_FROM_GENS) - for component in m.Spinning_Reserve_Up_Provisions + for s in m.NON_FUEL_ENERGY_SOURCES + ) + + tuple( + sum( + util.get(getattr(m, component), (rt, ba, t), 0.0) + for rt in m.SPINNING_RESERVE_TYPES_FROM_GENS + ) + for component in m.Spinning_Reserve_Up_Provisions + ) + + tuple( + sum( + util.get(getattr(m, component), (rt, ba, t), 0.0) + for rt in m.SPINNING_RESERVE_TYPES_FROM_GENS ) - +tuple( - sum(util.get(getattr(m, component), (rt, ba, t), 0.0) for rt in m.SPINNING_RESERVE_TYPES_FROM_GENS) - for component in m.Spinning_Reserve_Up_Requirements + for component in m.Spinning_Reserve_Up_Requirements + ) + + tuple( + ( + "" + if val is None # no dual available, i.e., with glpk solver + else val / m.bring_timepoint_costs_to_base_year[t] ) - +tuple( + for val in [ util.get( m.dual, - util.get(m.Satisfy_Spinning_Reserve_Up_Requirement, (rt, ba, t), None), - 0.0 # note: this uses 0.0 if no dual available, i.e., with glpk solver - ) / m.bring_timepoint_costs_to_base_year[t] - for rt in m.SPINNING_RESERVE_TYPES_FROM_GENS - ) - +(('peak' if m.ts_scale_to_year[m.tp_ts[t]] < avg_ts_scale else 'typical'),) + util.get( + m.Satisfy_Spinning_Reserve_Up_Requirement, (rt, ba, t), None + ), + None, + ) + for rt in sorted(m.SPINNING_RESERVE_TYPES_FROM_GENS) + ] + ) + + ( + ( + "peak" + if m.ts_scale_to_year[m.tp_ts[t]] < avg_ts_scale + else "typical" + ), + ), ) sorted_projects = tuple(sorted(g for g in m.GENERATION_PROJECTS)) util.write_table( - m, m.TIMEPOINTS, + m, + m.TIMEPOINTS, output_file=os.path.join(outputs_dir, "gen_dispatch{t}.csv".format(t=tag)), - headings=("period", "timepoint_label")+sorted_projects, - values=lambda m, t: - (m.tp_period[t], m.tp_timestamp[t]) - + tuple(util.get(m.DispatchGen, (p, t), 0.0) for p in sorted_projects) + headings=("period", "timepoint_label") + sorted_projects, + values=lambda m, t: (m.tp_period[t], m.tp_timestamp[t]) + + tuple(util.get(m.DispatchGen, (p, t), 0.0) for p in sorted_projects), ) # installed capacity information 
def gen_energy_source(g): return ( - '/'.join(sorted(m.FUELS_FOR_GEN[g])) + "/".join(sorted(m.FUELS_FOR_GEN[g])) if m.gen_uses_fuel[g] else m.gen_energy_source[g] ) - built_gens = tuple(sorted(set( - g for pe in m.PERIODS for g in m.GENERATION_PROJECTS if value(m.GenCapacity[g, pe]) > 0.001 - ))) + + built_gens = tuple( + sorted( + set( + g + for pe in m.PERIODS + for g in m.GENERATION_PROJECTS + # starting 2021-04-29, we report all techs, whether built or not, + # so that the columns are the same across scenarios + # if value(m.GenCapacity[g, pe]) > 0.001 + ) + ) + ) active_periods_for_gen = defaultdict(set) - used_cap = getattr(m, 'CommitGen', m.DispatchGen) # use CommitGen if available, otherwise DispatchGen + used_cap = getattr( + m, "CommitGen", m.DispatchGen + ) # use CommitGen if available, otherwise DispatchGen for (g, tp) in m.GEN_TPS: if value(used_cap[g, tp]) > 0.001: active_periods_for_gen[g].add(m.tp_period[tp]) @@ -344,67 +436,171 @@ def gen_energy_source(g): if start <= p <= end and value(m.GenCapacity[g, p]) > 0: operate_gen_in_period.add((g, p)) + storage_gens = getattr(m, "STORAGE_GENS", set()) built_tech = tuple(sorted(set(m.gen_tech[g] for g in built_gens))) + build_tech_and_storage = tuple( + sorted( + set(built_tech) + | set(m.gen_tech[g] + "_MWh" for g in built_gens if g in storage_gens) + ) + + ["hydro", "fuel cells"] + ) + built_energy_source = tuple(sorted(set(gen_energy_source(g) for g in built_gens))) - battery_capacity_mw = lambda m, z, pe: ( - (m.Battery_Capacity[z, pe] / m.battery_min_discharge_time) - if hasattr(m, "Battery_Capacity") else 0.0 + tech_cap = defaultdict(float) + for (g, p), cap in m.GenCapacity.items(): + tech_cap[m.gen_load_zone[g], m.gen_tech[g], p] += cap + if hasattr(m, "StorageEnergyCapacity"): + for (g, p), cap in m.StorageEnergyCapacity.items(): + tech_cap[m.gen_load_zone[g], m.gen_tech[g] + "_MWh", p] += cap + if hasattr(m, "Pumped_Hydro_Capacity_MW"): + for (z, p), cap in m.Pumped_Hydro_Capacity_MW.items(): + tech_cap[z, "hydro", p] += cap + if hasattr(m, "FuelCellCapacityMW"): + for (z, p), cap in m.FuelCellCapacityMW.items(): + tech_cap[z, "fuel cells", p] += cap + + util.write_table( + m, + m.LOAD_ZONES, + m.PERIODS, + output_file=os.path.join( + outputs_dir, "capacity_by_technology{t}.csv".format(t=tag) + ), + headings=("load_zone", "period") + build_tech_and_storage, + values=lambda m, z, pe: ( + (z, pe) + tuple(tech_cap[z, t, pe] for t in build_tech_and_storage) + ), + ) + + util.write_table( + m, + m.LOAD_ZONES, + build_tech_and_storage, + m.PERIODS, + output_file=os.path.join( + outputs_dir, "capacity_by_technology_vertical{t}.csv".format(t=tag) + ), + headings=("load_zone", "technology", "period", "capacity"), + values=lambda m, z, t, pe: (z, t, pe, tech_cap[z, t, pe]), ) - util.write_table(m, m.LOAD_ZONES, m.PERIODS, - output_file=os.path.join(outputs_dir, "capacity_by_technology{t}.csv".format(t=tag)), - headings=("load_zone", "period") + built_tech + ("hydro", "batteries", "fuel cells"), - values=lambda m, z, pe: (z, pe,) + tuple( + util.write_table( + m, + m.LOAD_ZONES, + m.PERIODS, + output_file=os.path.join( + outputs_dir, "capacity_used_by_technology{t}.csv".format(t=tag) + ), + headings=("load_zone", "period") + built_tech + ("hydro", "fuel cells"), + values=lambda m, z, pe: ( + z, + pe, + ) + + tuple( sum( (m.GenCapacity[g, pe] if ((g, pe) in operate_gen_in_period) else 0.0) - for g in built_gens - if m.gen_tech[g] == t and m.gen_load_zone[g] == z + for g in built_gens + if m.gen_tech[g] == t and m.gen_load_zone[g] 
== z ) for t in built_tech - ) + ( - m.Pumped_Hydro_Capacity_MW[z, pe] if hasattr(m, "Pumped_Hydro_Capacity_MW") else 0, - battery_capacity_mw(m, z, pe), - m.FuelCellCapacityMW[z, pe] if hasattr(m, "FuelCellCapacityMW") else 0 ) + + ( + m.Pumped_Hydro_Capacity_MW[z, pe] + if hasattr(m, "Pumped_Hydro_Capacity_MW") + else 0, + m.FuelCellCapacityMW[z, pe] if hasattr(m, "FuelCellCapacityMW") else 0, + ), ) - util.write_table(m, m.LOAD_ZONES, m.PERIODS, - output_file=os.path.join(outputs_dir, "capacity_by_energy_source{t}.csv".format(t=tag)), - headings=("load_zone", "period") + built_energy_source + ("hydro", "batteries", "fuel cells"), - values=lambda m, z, pe: (z, pe,) + tuple( + + util.write_table( + m, + m.LOAD_ZONES, + m.PERIODS, + output_file=os.path.join( + outputs_dir, "capacity_by_energy_source{t}.csv".format(t=tag) + ), + headings=("load_zone", "period") + + built_energy_source + + ("hydro", "fuel cells"), + values=lambda m, z, pe: ( + z, + pe, + ) + + tuple( + sum( + m.GenCapacity[g, pe] + for g in built_gens + if gen_energy_source(g) == s and m.gen_load_zone[g] == z + ) + for s in built_energy_source + ) + + ( + m.Pumped_Hydro_Capacity_MW[z, pe] + if hasattr(m, "Pumped_Hydro_Capacity_MW") + else 0, + m.FuelCellCapacityMW[z, pe] if hasattr(m, "FuelCellCapacityMW") else 0, + ), + ) + util.write_table( + m, + m.LOAD_ZONES, + m.PERIODS, + output_file=os.path.join( + outputs_dir, "capacity_used_by_energy_source{t}.csv".format(t=tag) + ), + headings=("load_zone", "period") + + built_energy_source + + ("hydro", "fuel cells"), + values=lambda m, z, pe: ( + z, + pe, + ) + + tuple( sum( (m.GenCapacity[g, pe] if ((g, pe) in operate_gen_in_period) else 0.0) - for g in built_gens - if gen_energy_source(g) == s and m.gen_load_zone[g] == z + for g in built_gens + if gen_energy_source(g) == s and m.gen_load_zone[g] == z ) for s in built_energy_source - ) + ( - m.Pumped_Hydro_Capacity_MW[z, pe] if hasattr(m, "Pumped_Hydro_Capacity_MW") else 0, - battery_capacity_mw(m, z, pe), - m.FuelCellCapacityMW[z, pe] if hasattr(m, "FuelCellCapacityMW") else 0 ) + + ( + m.Pumped_Hydro_Capacity_MW[z, pe] + if hasattr(m, "Pumped_Hydro_Capacity_MW") + else 0, + m.FuelCellCapacityMW[z, pe] if hasattr(m, "FuelCellCapacityMW") else 0, + ), ) - util.write_table(m, m.LOAD_ZONES, m.PERIODS, - output_file=os.path.join(outputs_dir, "production_by_technology{t}.csv".format(t=tag)), + util.write_table( + m, + m.LOAD_ZONES, + m.PERIODS, + output_file=os.path.join( + outputs_dir, "production_by_technology{t}.csv".format(t=tag) + ), headings=("load_zone", "period") + built_tech + ad_hoc_sources, - values=lambda m, z, pe: - (z, pe,) - + tuple( - sum( - m.DispatchGen[g, tp] * m.tp_weight_in_year[tp] * 0.001 # MWh -> GWh - for g in built_gens if m.gen_tech[g] == t and m.gen_load_zone[g] == z - for tp in m.TPS_FOR_GEN_IN_PERIOD[g, pe] - ) - for t in built_tech + values=lambda m, z, pe: ( + z, + pe, + ) + + tuple( + sum( + m.DispatchGen[g, tp] * m.tp_weight_in_year[tp] * 0.001 # MWh -> GWh + for g in built_gens + if m.gen_tech[g] == t and m.gen_load_zone[g] == z + for tp in m.TPS_FOR_GEN_IN_PERIOD[g, pe] ) - + tuple( # ad hoc techs: hydrogen, pumped storage, etc. - sum( - comp[z, tp] * m.tp_weight_in_year[tp] * 0.001 - for tp in m.TPS_IN_PERIOD[pe] - ) - for comp in [getattr(m, cname) for cname in ad_hoc_sources] + for t in built_tech + ) + + tuple( # ad hoc techs: hydrogen, pumped storage, etc. 
+ sum( + comp[z, tp] * m.tp_weight_in_year[tp] * 0.001 + for tp in m.TPS_IN_PERIOD[pe] ) + for comp in [getattr(m, cname) for cname in ad_hoc_sources] + ), ) # option 1: make separate tables of production_by_technology and production_by_energy_source, @@ -415,44 +611,50 @@ def gen_energy_source(g): # use a database format rather than a table format, which will then require post-processing # by pandas or an Excel pivot table. # For now, we go with option 1. - util.write_table(m, m.LOAD_ZONES, m.PERIODS, - output_file=os.path.join(outputs_dir, "production_by_energy_source{t}.csv".format(t=tag)), - headings= - ("load_zone", "period") - + tuple(m.FUELS) - + tuple(m.NON_FUEL_ENERGY_SOURCES) - + ad_hoc_sources, - values=lambda m, z, pe: - (z, pe,) - + tuple( - sum( - DispatchGenByFuel(m, g, tp, f) * m.tp_weight_in_year[tp] * 0.001 # MWh -> GWh - for g in m.GENS_BY_FUEL[f] - if m.gen_load_zone[g] == z - for tp in m.TPS_FOR_GEN_IN_PERIOD[g, pe] - ) - for f in m.FUELS + util.write_table( + m, + m.LOAD_ZONES, + m.PERIODS, + output_file=os.path.join( + outputs_dir, "production_by_energy_source{t}.csv".format(t=tag) + ), + headings=("load_zone", "period") + + tuple(m.FUELS) + + tuple(m.NON_FUEL_ENERGY_SOURCES) + + ad_hoc_sources, + values=lambda m, z, pe: ( + z, + pe, + ) + + tuple( + sum( + DispatchGenByFuel(m, g, tp, f) + * m.tp_weight_in_year[tp] + * 0.001 # MWh -> GWh + for g in m.GENS_BY_FUEL[f] + if m.gen_load_zone[g] == z + for tp in m.TPS_FOR_GEN_IN_PERIOD[g, pe] ) - + tuple( - sum( - m.DispatchGen[g, tp] * m.tp_weight_in_year[tp] * 0.001 # MWh -> GWh - for g in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s] - if m.gen_load_zone[g] == z - for tp in m.TPS_FOR_GEN_IN_PERIOD[g, pe] - ) - for s in m.NON_FUEL_ENERGY_SOURCES + for f in m.FUELS + ) + + tuple( + sum( + m.DispatchGen[g, tp] * m.tp_weight_in_year[tp] * 0.001 # MWh -> GWh + for g in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s] + if m.gen_load_zone[g] == z + for tp in m.TPS_FOR_GEN_IN_PERIOD[g, pe] ) - + tuple( # ad hoc techs: hydrogen, pumped storage, etc. - sum( - comp[z, tp] * m.tp_weight_in_year[tp] * 0.001 - for tp in m.TPS_IN_PERIOD[pe] - ) - for comp in [getattr(m, cname) for cname in ad_hoc_sources] + for s in m.NON_FUEL_ENERGY_SOURCES + ) + + tuple( # ad hoc techs: hydrogen, pumped storage, etc. 
+ sum( + comp[z, tp] * m.tp_weight_in_year[tp] * 0.001 + for tp in m.TPS_IN_PERIOD[pe] ) + for comp in [getattr(m, cname) for cname in ad_hoc_sources] + ), ) - - # def cost_breakdown_details(m, z, pe): # values = [z, pe] # # capacity built, conventional plants @@ -583,15 +785,15 @@ def gen_energy_source(g): # values=lambda m, pe: (pe,) + tuple(m.GenCapacity[g, pe] for g in built_gens) # ) - - if hasattr(m, 'RFMSupplyTierActivate'): - util.write_table(m, m.RFM_SUPPLY_TIERS, + if hasattr(m, "RFMSupplyTierActivate"): + util.write_table( + m, + m.RFM_SUPPLY_TIERS, output_file=os.path.join(outputs_dir, "rfm_activate{t}.csv".format(t=tag)), headings=("market", "period", "tier", "activate"), - values=lambda m, r, p, st: (r, p, st, m.RFMSupplyTierActivate[r, p, st]) + values=lambda m, r, p, st: (r, p, st, m.RFMSupplyTierActivate[r, p, st]), ) - # import pprint # b=[(g, pe, value(m.BuildGen[g, pe]), m.gen_tech[g], m.gen_overnight_cost[g, pe]) for (g, pe) in m.BuildGen if value(m.BuildGen[g, pe]) > 0] # bt=set(x[3] for x in b) # technologies diff --git a/switch_model/hawaii/scenario_data.py b/switch_model/hawaii/scenario_data.py index c0da6d92b..8272f4b39 100644 --- a/switch_model/hawaii/scenario_data.py +++ b/switch_model/hawaii/scenario_data.py @@ -1,4 +1,19 @@ from __future__ import print_function + +import time, sys, collections, os, itertools +from textwrap import dedent +from switch_model import __version__ as switch_version +from switch_model.utilities import iteritems + +# use database settings from operating environment +# (this code isn't really needed at all, but gives us a chance to override later) +# note: password for this user should be specified in ~/.pgpass +pghost = os.getenv("PGHOST", "") +pgdatabase = os.getenv("PGDATABASE", "") +pguser = os.getenv("PGUSER", "") + +# TODO: switch over to Google BigQuery database + # TODO: make this get data from the redr server via an HTTP api instead of psycopg2, as follows: # create a .rpy script on the redr server that can accept form data (the args dict) via POST @@ -25,11 +40,6 @@ # https://bytes.com/topic/python/answers/692751-module-zipfile-writestr-line-endings-issue # https://stackoverflow.com/questions/2613800/how-to-convert-dos-windows-newline-crlf-to-unix-newline-n-in-a-bash-script -import time, sys, collections, os -from textwrap import dedent -from switch_model import __version__ as switch_version -from switch_model.utilities import iteritems - # NOTE: instead of using the python csv writer, this directly writes tables to # file in a customized, pyomo-friendly .csv format. This uses commas between columns # and the standard line break for the system it is run on. This does the following @@ -46,39 +56,202 @@ # NOTE: this does not use the python csv writer because it doesn't support the quoting # or null behaviors described above. - # NOTE: ANSI SQL specifies single quotes for literal strings, and postgres conforms # to this, so all the queries below should use single quotes around strings. # NOTE: write_table() will automatically convert null values to '.', # so pyomo will recognize them as missing data -# NOTE: the code below could be made more generic, e.g., a list of -# table names and queries, which are then processed at the end. 
-# But that would be harder to debug, and wouldn't allow for ad hoc -# calculations or writing .dat files (which are used for a few parameters) -def write_tables(**args): +def write_tables(*pos_args, **kw_args): + if pos_args or "args" in kw_args: + # pass arguments through + return write_tables_implementation(*pos_args, **kw_args) + else: + # called with **args (obsolete) + print( + "WARNING: write_tables should now be called with a dict of " + "arguments, not key-value pairs." + ) + # gather the arguments back into a dictionary and call correctly + return write_tables_implementation(kw_args) + + +def write_tables_implementation(args, alt_args={}, scenarios=[]): + """ + Save base tables, alternative tables and scenarios.txt + + Settings in `args` (dict) are used to define the base tables. + + Settings in `alt_args` (list of dicts) are used to define alternative + tables. Each dict in `alt_args` is used to update `args`. Then, any queries + that are altered by this adjustment are re-run, and the resulting csv files + are saved with modified names. The names are modified by adding the `tag` + specified in the alt_args dict before the .csv extension, e.g., + fuels_low_cost.csv instead of fuels.csv. + + Finally, scenarios.txt is written, based on the `scenarios` list. Each item + in `scenarios` should be a tuple of command line arguments (as a string) and + a list or tuple of tags to apply to this scenario. The tags must match tags + specified in `alt_args`. For each scenario in `scenarios`, a line is written + in scenarios.txt, consisting of the command line string followed by + '--input-alias[es]' and the file substitutions corresponding to the + specified data tags. A warning is issued if two tags cause conflicting file + substitutions. + """ + # Note: this works by comparing the final queries used to generate each + # table, so it will reuse tables from the base model if they are identical + # to an alternative scenario. This may be confusing, but it is simple and + # effective and avoids problems that would crop up if we tried to identify + # which tables are affected by each argument by analyzing the placeholders + # in the query. That method would fail to catch situations where the + # get_query() code uses arguments directly, either to inject values into the + # query or to control which tables are defined or add or remove clauses from + # the query. + + # The code below could be streamlined a little by retrieving the query list + # from write_base_tables and then passing it to write_alternative_tables, + # but it works pretty well as is. 
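+    # A minimal sketch of the calling pattern (argument names are from this
+    # module; the specific values are hypothetical, for illustration only):
+    #
+    #   write_tables(
+    #       {"time_sample": "rps", "load_zones": ("Oahu",), "tech_scenario": "base"},
+    #       alt_args=[{"tag": "fuel_low", "fuel_scenario": "low"}],
+    #       scenarios=[("--scenario-name fuel_low", ["fuel_low"])],
+    #   )
+    #
+    # This would rewrite only the tables whose final queries change under the
+    # "fuel_low" settings and add one line to scenarios.txt with the matching
+    # --input-alias substitutions.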
+
+    # write version marker file
+    with open(make_file_path("switch_inputs_version.txt", args), "w") as f:
+        f.write(switch_version)
+
+    # write base tables
+    write_base_tables(args)
+
+    # write alternative tables
+    data_aliases = {}
+    for a in alt_args:
+        data_aliases[a["tag"]] = write_alternative_tables(args, a)
+
+    # write scenarios.txt
+    scenario_args = []
+    for cmds, data_tags in scenarios:
+        active_aliases = []
+        for t in data_tags:
+            for orig, alias in data_aliases.get(t, []):
+                if any(a[0] == orig for a in active_aliases):
+                    print(
+                        "WARNING: multiple aliases specified for {} in scenario {}.".format(
+                            orig, cmds.split(" ")[1]
+                        )
+                    )
+                active_aliases.append((orig, alias))
+
+        if active_aliases:
+            cmds += (
+                " --input-alias " if len(active_aliases) == 1 else " --input-aliases "
+            )
+            cmds += " ".join("=".join(pair) for pair in active_aliases)
+        scenario_args.append(cmds)
+
+    if scenarios:
+        with open("scenarios.txt", "w") as f:
+            f.writelines(s + "\n" for s in scenario_args)
+
+
+def write_base_tables(args):
+    queries = get_queries(args)
+    for table, query in queries:
+        write_table(make_file_path(table, args), query)
+
+
+def write_alternative_tables(base_args, alt_args):
+    # add alt_args to base_args, then check for queries that are
+    # added, modified or dropped
+    base_queries = dict(get_queries(base_args))
+    full_alt_args = dict(itertools.chain(base_args.items(), alt_args.items()))
+    alt_queries = dict(get_queries(full_alt_args))
+    # get location for files created by alt_args, relative to files created by base_args
+    alt_relative_path = os.path.relpath(
+        make_file_path(".", full_alt_args), start=make_file_path(".", base_args)
+    )
+    # find differences and run alt queries
+    aliases = []
+    for table, query in alt_queries.items():
+        if table not in base_queries or query != base_queries[table]:
+            # new or altered table
+            # specify name and location relative to base_args inputs directory
+            if alt_relative_path == ".":
+                # file going in same directory as base file; give it a new name
+                table_base, table_ext = os.path.splitext(table)
+                new_table = table_base + "." + full_alt_args["tag"] + table_ext
+            else:
+                new_table = os.path.join(alt_relative_path, table)
+            write_table(make_file_path(new_table, base_args), query)
+            # note: if regular files are in inputs and alternative files are in
+            # inputs_alt, then this will set file.csv=../inputs_alt/file.csv,
+            # and then --input-alias will just do a simple translation of
+            # file.csv, resulting in inputs/../inputs_alt/file.csv
+            aliases.append((table, new_table))
+    # exclude tables that are omitted in the alternative case
+    aliases.extend((t, "none") for t, q in base_queries.items() if t not in alt_queries)
+    return aliases
+
+
+def get_queries(args):
+    """
+    Return a list of queries based on `args`.
+
+    Each entry in the list is a tuple of (table name, sql query code).
+    """
+
+    # print(
+    #     "WARNING: need a more general way to identify non-fuel energy sources "
+    #     "in scenario_data. See references to MSW, WND, etc."
+    # )
+
+    queries = []
     # TODO: any arguments that are defined with default values below (args.get()) could
     # have those default values assigned here. Then they can be used directly in queries
     # instead of using them to create snippets that are used in the queries. This would
     # also document the available arguments a little better.
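+    # For reference, the result takes the form [(file_name, query), ...], e.g.
+    # (abbreviated):
+    #   [("periods.csv", "WITH period_length_raw AS (...) SELECT ..."),
+    #    ("timeseries.csv", "SELECT timeseries AS ..."), ...]
+    # add_query() is not shown in this hunk, but it evidently binds the values
+    # from args into the %(...)s placeholders, since write_table() is later
+    # called with the query text alone and write_alternative_tables() compares
+    # final query text to detect changes.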
+ args = args.copy() + # catch obsolete arguments (otherwise they would be silently ignored) - if 'ev_scen_id' in args: - raise ValueError("ev_scen_id argument is no longer supported; use ev_scenario instead.") + updated_args = [ + ("fuel_scen_id", "fuel_scenario"), + ("cap_cost_scen_id", "tech_scenario"), + ("tech_scen_id", "tech_scenario"), + ("load_scen_id", "load_scenario"), + ("ev_scen_id", "ev_scenario"), + ] + for old, new in updated_args: + if old in args: + if new in args: + raise ValueError( + "{} and {} arguments are redundant and ambiguous.".format(old, new) + ) + else: + print( + 'DEPRECATION WARNING: The "{}" argument has been ' + 'renamed to "{}". Please update your code.'.format(old, new) + ) + args[new] = args.pop(old) - if 'cap_cost_scen_id' in args and 'tech_scen_id' not in args: - print( - 'DEPRECATION WARNING: The "cap_cost_scen_id" argument has been ' - 'renamed to "tech_scen_id". Please update your code.' + if "ev_charge_profile" not in args: + print("No ev_charge_profile specified; using das_2015") + args["ev_charge_profile"] = "das_2015" + + if "enable_must_run" in args and "enable_must_run_before" in args: + raise ValueError( + "You may specify enable_must_run or enable_must_run_before, but not both." ) - args['tech_scen_id'] = args['cap_cost_scen_id'] + elif "enable_must_run" in args: + args["enable_must_run_before"] = 999999 if args["enable_must_run"] else 0 + else: + args["enable_must_run_before"] = args.get("enable_must_run_before", 0) - # write version marker file - with open(make_file_path('switch_inputs_version.txt', args), 'w') as f: - f.write(switch_version) + # fill in default arguments (use a dummy entry '-' if none supplied) + args["exclude_technologies"] = args.get("exclude_technologies", ("-",)) + args["exclude_land_classes"] = args.get("exclude_land_classes", ("-",)) + args["exclude_slope_classes"] = args.get("exclude_slope_classes", ("-",)) + for a in ["exclude_technologies", "exclude_land_classes", "exclude_slope_classes"]: + if not isinstance(args[a], tuple): + raise ValueError(f"Argument {a} must be a tuple or omitted.") ######################### # timescales @@ -86,21 +259,24 @@ def write_tables(**args): # reusable clause to calculate the length of each period # If this is within 1% of an integer number of years, it rounds to the integer, # to allow for weights that add up to 365 or 365.25 days per year - with_period_length = """ - WITH period_length as ( + period_length = """ + period_length_raw AS ( SELECT period, - -- note: for some reason modulo doesn't work on real values in postgresql - CASE WHEN mod((sum(ts_scale_to_period)/365.25)::numeric, 1) BETWEEN -0.01 and 0.01 - THEN - -- integer number of years - round(sum(ts_scale_to_period)/365.25) - ELSE - -- make a decent guess about number of years - sum(ts_scale_to_period)/365.25 - END as period_length - FROM study_date WHERE time_sample = %(time_sample)s + -- make a decent guess about number of years + SUM(ts_scale_to_period)/365.25 AS period_length + FROM timeseries WHERE time_sample = %(time_sample)s GROUP BY 1 + ), + period_length AS ( + SELECT + period, + CASE WHEN + period_length > 0.9 AND + period_length - ROUND(period_length) BETWEEN -0.01 and 0.01 + THEN ROUND(period_length) ELSE period_length + END as period_length + FROM period_length_raw ) """ @@ -112,77 +288,81 @@ def write_tables(**args): # note: despite the comments above, this rounded period_end to # the nearest whole number until 2018-02-17. 
This was removed to # support fractional years for monthly batches in production-cost models. - write_table('periods.csv', - with_period_length + """ + + add_query( + queries, + "periods.csv", + "WITH " + + period_length + + """ SELECT p.period AS "INVESTMENT_PERIOD", p.period as period_start, p.period + period_length as period_end - FROM study_periods p JOIN period_length l USING (period) + FROM periods p JOIN period_length l USING (period) WHERE time_sample = %(time_sample)s ORDER by 1; - """, args) + """, + args, + ) - write_table('timeseries.csv', """ - SELECT study_date as "TIMESERIES", period as ts_period, + add_query( + queries, + "timeseries.csv", + """ + SELECT timeseries as "TIMESERIES", period as ts_period, ts_duration_of_tp, ts_num_tps, ts_scale_to_period - FROM study_date + FROM timeseries WHERE time_sample = %(time_sample)s ORDER BY 1; - """, args) + """, + args, + ) - write_table('timepoints.csv', """ - SELECT h.study_hour as timepoint_id, + # note: query below needs dash instead of space in date format if creating + # .tab files, but this works well for .csv files (8/2019 and later) + add_query( + queries, + "timepoints.csv", + """ + SELECT h.timepoint as timepoint_id, to_char(date_time + (period - extract(year from date_time)) * interval '1 year', - 'YYYY-MM-DD-HH24:MI') as timestamp, - h.study_date as timeseries - FROM study_hour h JOIN study_date d USING (study_date, time_sample) + 'YYYY-MM-DD HH24:MI') as timestamp, + h.timeseries + FROM timepoints h JOIN timeseries d USING (timeseries, time_sample) WHERE h.time_sample = %(time_sample)s - ORDER BY period, extract(doy from date), study_hour; - """, args) - - # double-check that arguments are valid - cur = db_cursor() - for table in ['generator_costs_by_year', 'generator_info']: - cur.execute( - 'select * from {} where tech_scen_id = %(tech_scen_id)s limit 1'.format(table), - args - ) - if len(list(cur)) == 0: - print("================================================================") - print("WARNING: no records found in {} for tech_scen_id='{}'".format(table, args['tech_scen_id'])) - print("================================================================") - time.sleep(2) - del cur + ORDER BY period, extract(doy from date), timepoint; + """, + args, + ) ######################### - # create temporary tables that can be referenced by other queries - # to identify available projects and technologies - db_cursor().execute(""" - DROP TABLE IF EXISTS study_length; - CREATE TEMPORARY TABLE study_length AS - {} + # clauses that identify available projects and technologies + study_info = ( + period_length + + "," + + """ + study_length AS ( SELECT min(period)::real as study_start, max(period+period_length)::real AS study_end - FROM period_length; - - DROP TABLE IF EXISTS study_projects; - CREATE TEMPORARY TABLE study_projects AS + FROM period_length + ), + study_projects AS ( SELECT DISTINCT CONCAT_WS('_', load_zone, p.technology, nullif(site, 'na'), nullif(orientation, 'na')) AS "GENERATION_PROJECT", p.*, - g.tech_scen_id - FROM project p + g.tech_scenario + FROM projects p JOIN generator_info g USING (technology) CROSS JOIN study_length -- existing projects still in use during the study - LEFT JOIN proj_existing_builds e ON ( + LEFT JOIN gen_build_predetermined e ON ( e.project_id = p.project_id - AND e.build_year + g.max_age_years > study_start + AND e.build_year + g.gen_max_age > study_start AND e.build_year < study_end ) -- projects that could be built during the study - LEFT JOIN generator_costs_by_year c ON ( - 
c.tech_scen_id = g.tech_scen_id + LEFT JOIN gen_build_costs c ON ( + c.tech_scenario = g.tech_scenario AND c.technology = g.technology AND (g.min_vintage_year IS NULL OR c.year >= g.min_vintage_year) AND c.year >= study_start @@ -190,25 +370,26 @@ def write_tables(**args): ) WHERE (e.project_id IS NOT NULL OR c.technology IS NOT NULL) AND p.load_zone in %(load_zones)s - AND g.tech_scen_id IN ('all', %(tech_scen_id)s) - AND g.technology NOT IN %(exclude_technologies)s; - - DROP TABLE IF EXISTS study_generator_info; - CREATE TEMPORARY TABLE study_generator_info AS + AND p.technology NOT IN %(exclude_technologies)s + -- exclude some land_class and slope_class values, but allow nulls through + AND (p.land_class IS NULL OR p.land_class NOT IN %(exclude_land_classes)s) + AND (p.slope_class IS NULL OR p.slope_class NOT IN %(exclude_slope_classes)s) + AND g.tech_scenario IN ('all', %(tech_scenario)s) + ), + study_generator_info AS ( SELECT DISTINCT g.* - FROM generator_info g JOIN study_projects p USING (tech_scen_id, technology); - """.format(with_period_length), args) - - # import pdb; pdb.set_trace() + FROM generator_info g JOIN study_projects p USING (tech_scenario, technology) + )""" + ) ######################### # financials - - # this just uses a dat file, not a table (and the values are not in a database for now) - write_simple_csv( - 'financials.csv', - ['base_financial_year', 'interest_rate', 'discount_rate'], - args + # (values are not in a database for now) + add_one_row_literal( + queries, + "financials.csv", + ["base_financial_year", "interest_rate", "discount_rate"], + args, ) ######################### @@ -217,59 +398,89 @@ def write_tables(**args): # note: we don't provide the following fields in this version: # zone_cost_multipliers, zone_ccs_distance_km, zone_dbid, # existing_local_td, local_td_annual_cost_per_mw - write_table('load_zones.csv', """ + add_query( + queries, + "load_zones.csv", + """ SELECT load_zone as "LOAD_ZONE" - FROM load_zone + FROM load_zones WHERE load_zone in %(load_zones)s - """, args) + ORDER BY 1; + """, + args, + ) # NOTE: we don't provide zone_peak_loads.csv (sometimes used by local_td.py) in this version. 
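+    # (A worked example of the scaling in the loads.csv query below, with
+    # hypothetical values: scale = 1.05 and "offset" = -10 map a 600 MW
+    # historical load to GREATEST(0, 600 * 1.05 - 10) = 620 MW in the
+    # forecast year.)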
# get system loads, scaled from the historical years to the model years # note: 'offset' is a keyword in postgresql, so we use double-quotes to specify the column name - write_table('loads.csv', """ + add_query( + queries, + "loads.csv", + """ SELECT l.load_zone AS "LOAD_ZONE", - study_hour AS "TIMEPOINT", + timepoint AS "TIMEPOINT", GREATEST(0, system_load * scale + "offset") AS zone_demand_mw - FROM study_date d - JOIN study_hour h USING (time_sample, study_date) - JOIN system_load l USING (date_time) - JOIN system_load_scale s ON ( + FROM timeseries d + JOIN timepoints h USING (time_sample, timeseries) + JOIN loads l USING (date_time) + JOIN load_scale s ON ( s.load_zone = l.load_zone AND s.year_hist = extract(year from l.date_time) AND s.year_fore = d.period) WHERE l.load_zone in %(load_zones)s AND d.time_sample = %(time_sample)s - AND load_scen_id = %(load_scen_id)s; - """, args) - + AND load_scenario = %(load_scenario)s + ORDER BY 1, 2; + """, + args, + ) ######################### # fuels - write_table('non_fuel_energy_sources.csv', """ - SELECT DISTINCT fuel AS "NON_FUEL_ENERGY_SOURCES" + add_query( + queries, + "non_fuel_energy_sources.csv", + "WITH " + + study_info + + """ + SELECT DISTINCT gen_energy_source AS "NON_FUEL_ENERGY_SOURCES" FROM study_generator_info - WHERE fuel NOT IN (SELECT fuel_type FROM fuel_costs); - """, args) + WHERE gen_energy_source NOT IN (SELECT fuel FROM fuel_costs) + ORDER by 1; + """, + args, + ) # gather info on fuels - write_table('fuels.csv', """ - SELECT DISTINCT replace(c.fuel_type, ' ', '_') AS fuel, co2_intensity, 0.0 AS upstream_co2_intensity, rps_eligible - FROM fuel_costs c JOIN energy_source_properties p on (p.energy_source = c.fuel_type) - WHERE load_zone in %(load_zones)s AND fuel_scen_id=%(fuel_scen_id)s + add_query( + queries, + "fuels.csv", + """ + SELECT DISTINCT + replace(c.fuel, ' ', '_') AS fuel, + co2_intensity, 0.0 AS upstream_co2_intensity, + rps_eligible + FROM fuel_costs c + JOIN energy_sources p on (p.energy_source = c.fuel) + WHERE load_zone in %(load_zones)s + AND fuel_scenario=%(fuel_scenario)s ORDER BY 1; - """, args) + """, + args, + ) ######################### # rps targets - write_csv_file( - 'rps_targets.csv', - headers=('year', 'rps_target'), - data=[(y, args['rps_targets'][y]) for y in sorted(args['rps_targets'].keys())], - arguments=args + add_literal_table( + queries, + "rps_targets.csv", + headers=("year", "rps_target"), + data=[(y, args["rps_targets"][y]) for y in sorted(args["rps_targets"].keys())], + arguments=args, ) ######################### @@ -279,12 +490,23 @@ def write_tables(**args): # from 2013 (forecast base year) to model base year. (ugh) # TODO: add a flag to fuel_costs indicating whether forecasts are real or nominal, # and base year, and possibly inflation rate. - if args['fuel_scen_id'] in ('1', '2', '3'): - raise ValueError("fuel_scen_ids '1', '2' and '3' (specified in nominal dollars) are no longer supported.") + if args["fuel_scenario"] in ("1", "2", "3"): + raise ValueError( + "fuel_scenarios '1', '2' and '3' (specified in nominal dollars) are " + "no longer supported." + ) + + per_timepoint_fuel_costs = args.get("use_per_timepoint_fuel_costs", False) + simple_fuel_costs = args.get("use_simple_fuel_costs", False) - if args.get("use_simple_fuel_costs", False): + if per_timepoint_fuel_costs and not simple_fuel_costs: + raise NotImplementedError( + "The use_per_timepoint_fuel_costs flag currently can only be used " + "with the use_simple_fuel_costs option." 
+ ) + + if simple_fuel_costs: # simple fuel markets with no bulk LNG expansion option (use fuel_cost module) - # TODO: get monthly fuel costs from Karl Jandoc spreadsheet if "use_bulk_lng_for_simple_fuel_costs" in args: raise ValueError( "use_bulk_lng_for_simple_fuel_costs argument is no longer supported; " @@ -298,62 +520,126 @@ def write_tables(**args): else: lng_selector = "false" - write_table('fuel_cost.csv', - with_period_length + """ - SELECT load_zone, replace(fuel_type, ' ', '_') as fuel, p.period, - avg( - price_mmbtu - * power(1.0+%(inflation_rate)s, %(base_financial_year)s-c.base_year) - + COALESCE(fixed_cost, 0.00) - ) as fuel_cost - FROM fuel_costs c, study_periods p JOIN period_length l USING (period) - WHERE load_zone in %(load_zones)s - AND fuel_scen_id = %(fuel_scen_id)s - AND p.time_sample = %(time_sample)s - AND (fuel_type != 'LNG' OR {lng_selector}) - AND c.year >= p.period AND c.year < p.period + l.period_length - GROUP BY 1, 2, 3 - ORDER BY 1, 2, 3; - """.format(lng_selector=lng_selector), args) - else: + if per_timepoint_fuel_costs: + # month column in fuel_cost table can optionally be filled in; + # when getting per-timepoint costs, we link this month to + # timeseries.month_of_year and average across all the years in each + # period. + add_query( + queries, + "fuel_cost_per_timepoint.csv", + "WITH " + + period_length + + """ + SELECT load_zone, replace(fuel, ' ', '_') as fuel, h.timepoint, + avg(price_mmbtu + * power(1.0+%(inflation_rate)s, %(base_financial_year)s-c.base_year) + + COALESCE(fixed_cost, 0.00) + ) as timepoint_fuel_cost + FROM fuel_costs c + CROSS JOIN timepoints h + JOIN timeseries d USING (timeseries, time_sample) + JOIN periods p USING (period, time_sample) + JOIN period_length l USING (period) + WHERE load_zone in %(load_zones)s + AND fuel_scenario = %(fuel_scenario)s + AND p.time_sample = %(time_sample)s + AND c.month = d.month_of_year + AND (fuel != 'LNG' OR {lng_selector}) + AND c.year >= p.period AND c.year < p.period + l.period_length + GROUP BY 1, 2, 3 + ORDER BY 1, 2, 3; + """.format( + lng_selector=lng_selector + ), + args, + ) + else: + # Note: if monthly prices have been specified for this fuel_scenario + # in the fuel_cost table, they get averaged together with equal + # weight as part of the general averaging across all the years of + # the period (which is good). 
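+            # Illustrative numbers: if a five-year period matches annual rows
+            # at $5.00/MMBtu for each of its five years plus one monthly row
+            # at $6.00/MMBtu, the six rows average to $5.17/MMBtu (before the
+            # inflation adjustment in the query below).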
+ add_query( + queries, + "fuel_cost.csv", + "WITH " + + period_length + + """ + SELECT load_zone, replace(fuel, ' ', '_') as fuel, p.period, + avg( + price_mmbtu + * power(1.0+%(inflation_rate)s, %(base_financial_year)s-c.base_year) + + COALESCE(fixed_cost, 0.00) + ) as fuel_cost + FROM fuel_costs c, periods p JOIN period_length l USING (period) + WHERE load_zone in %(load_zones)s + AND fuel_scenario = %(fuel_scenario)s + AND p.time_sample = %(time_sample)s + AND (fuel != 'LNG' OR {lng_selector}) + AND c.year >= p.period AND c.year < p.period + l.period_length + GROUP BY 1, 2, 3 + ORDER BY 1, 2, 3; + """.format( + lng_selector=lng_selector + ), + args, + ) + else: # not simple_fuel_costs # advanced fuel markets with LNG expansion options (used by forward-looking models) # (use fuel_markets module) - write_table('regional_fuel_markets.csv', """ + add_query( + queries, + "regional_fuel_markets.csv", + """ SELECT DISTINCT - concat('Hawaii_', replace(fuel_type, ' ', '_')) AS regional_fuel_market, - replace(fuel_type, ' ', '_') AS fuel + concat('Hawaii_', replace(fuel, ' ', '_')) AS regional_fuel_market, + replace(fuel, ' ', '_') AS fuel FROM fuel_costs - WHERE load_zone in %(load_zones)s AND fuel_scen_id = %(fuel_scen_id)s; - """, args) + WHERE load_zone in %(load_zones)s AND fuel_scenario = %(fuel_scenario)s + ORDER BY 1, 2; + """, + args, + ) - write_table('fuel_supply_curves.csv', - with_period_length + """ - SELECT concat('Hawaii_', replace(fuel_type, ' ', '_')) as regional_fuel_market, - replace(fuel_type, ' ', '_') as fuel, + add_query( + queries, + "fuel_supply_curves.csv", + "WITH " + + period_length + + """ + SELECT concat('Hawaii_', replace(fuel, ' ', '_')) as regional_fuel_market, + replace(fuel, ' ', '_') as fuel, tier, p.period, avg(price_mmbtu * power(1.0+%(inflation_rate)s, %(base_financial_year)s-c.base_year)) as unit_cost, avg(max_avail_at_cost) as max_avail_at_cost, avg(fixed_cost) as fixed_cost, avg(max_age) as max_age - FROM fuel_costs c, study_periods p JOIN period_length l USING (period) + FROM fuel_costs c, periods p JOIN period_length l USING (period) WHERE load_zone in %(load_zones)s - AND fuel_scen_id = %(fuel_scen_id)s + AND fuel_scenario = %(fuel_scenario)s AND p.time_sample = %(time_sample)s AND (c.year >= p.period AND c.year < p.period + l.period_length) GROUP BY 1, 2, 3, 4 ORDER BY 1, 2, 3, 4; - """, args) + """, + args, + ) - write_table('zone_to_regional_fuel_market.csv', """ - SELECT DISTINCT load_zone, concat('Hawaii_', replace(fuel_type, ' ', '_')) AS regional_fuel_market + add_query( + queries, + "zone_to_regional_fuel_market.csv", + """ + SELECT DISTINCT load_zone, concat('Hawaii_', replace(fuel, ' ', '_')) AS regional_fuel_market FROM fuel_costs - WHERE load_zone in %(load_zones)s AND fuel_scen_id = %(fuel_scen_id)s; - """, args) + WHERE load_zone in %(load_zones)s AND fuel_scenario = %(fuel_scenario)s + ORDER BY 1, 2; + """, + args, + ) # TODO: (when multi-island) add fuel_cost_adders for each zone - ######################### # investment.gen_build and part of operation.unitcommit.commit @@ -365,21 +651,22 @@ def write_tables(**args): # Some of these are actually single-fuel, but this approach is simpler than sorting # them out within each query, and it doesn't add any complexity to the model. 
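+    # For example, in the gen_info.csv query below, a fossil-fired plant is
+    # reported with gen_energy_source = 'multiple', while SUN, WND, MSW,
+    # Battery and Hydro projects keep their literal energy source (see the
+    # CASE expression in that query).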
- if args.get('wind_capital_cost_escalator', 0.0) or args.get('pv_capital_cost_escalator', 0.0): + if args.get("wind_capital_cost_escalator", 0.0) or args.get( + "pv_capital_cost_escalator", 0.0 + ): # user supplied a non-zero escalator raise ValueError( - 'wind_capital_cost_escalator and pv_capital_cost_escalator arguments are ' - 'no longer supported by scenario_data.write_tables(); ' - 'assign time-varying costs in the generator_costs_by_year table instead.' + "wind_capital_cost_escalator and pv_capital_cost_escalator arguments are " + "no longer supported by scenario_data.write_tables(); " + "assign time-varying costs in the gen_build_costs table instead." ) - if args.get('generator_costs_base_year', 0): + if args.get("generator_costs_base_year", 0): # user supplied a generator_costs_base_year raise ValueError( - 'generator_costs_base_year is no longer supported by scenario_data.write_tables(); ' - 'assign base_year in the generator_costs_by_year table instead.' + "generator_costs_base_year is no longer supported by scenario_data.write_tables(); " + "assign base_year in the gen_build_costs table instead." ) - # TODO: make sure the heat rates are null for non-fuel projects in the upstream database, # and remove the correction code from here @@ -387,138 +674,209 @@ def write_tables(**args): # TODO: convert 'MSW' to a proper fuel, possibly with a negative cost, instead of ignoring it # Omit full load heat rates if we are providing heat rate curves instead - if args.get('use_incremental_heat_rates', False): - full_load_heat_rate = 'null' + if args.get("use_incremental_heat_rates", False): + full_load_heat_rate = "null" else: - full_load_heat_rate = '0.001*heat_rate' + full_load_heat_rate = "0.001*gen_full_load_heat_rate" - if args.get('report_forced_outage_rates', False): - forced_outage_rate = 'forced_outage_rate' + if args.get("report_forced_outage_rates", False): + forced_outage_rate = "gen_forced_outage_rate" else: - forced_outage_rate = '0' + forced_outage_rate = "0" # if needed, follow the query below with another one that specifies # COALESCE(gen_connect_cost_per_mw, 0.0) AS gen_connect_cost_per_mw - write_table('generation_projects_info.csv', """ + add_query( + queries, + "gen_info.csv", + "WITH " + + study_info + + """ SELECT "GENERATION_PROJECT", load_zone AS gen_load_zone, technology AS gen_tech, - spur_line_cost_per_mw + 1000 * substation_cost_per_kw AS gen_connect_cost_per_mw, - max_capacity AS gen_capacity_limit_mw, - unit_size as gen_unit_size, - max_age_years as gen_max_age, - scheduled_outage_rate as gen_scheduled_outage_rate, + (spur_line_cost_per_mw + 1000.0 * substation_cost_per_kw) + * power(1.0+%(inflation_rate)s, %(base_financial_year)s-base_year) + AS gen_connect_cost_per_mw, + gen_capacity_limit_mw, + gen_unit_size, + gen_min_build_capacity, + gen_max_age, + gen_scheduled_outage_rate, {fo} as gen_forced_outage_rate, - intermittent as gen_is_variable, - baseload as gen_is_baseload, + gen_is_variable, + gen_is_baseload, -- 0 as gen_is_flexible_baseload, - cogen as gen_is_cogen, + gen_is_cogen, -- non_cycling as gen_non_cycling, - variable_o_m * 1000.0 AS gen_variable_om, - CASE WHEN fuel IN ('SUN', 'WND', 'MSW', 'Battery', 'Hydro') THEN fuel ELSE 'multiple' END AS gen_energy_source, - CASE WHEN fuel IN ('SUN', 'WND', 'MSW', 'Battery', 'Hydro') THEN null ELSE {flhr} END AS gen_full_load_heat_rate, - min_uptime as gen_min_uptime, - min_downtime as gen_min_downtime, - startup_energy / unit_size as gen_startup_fuel, + (1000.0 * variable_o_m) + * 
power(1.0+%(inflation_rate)s, %(base_financial_year)s-base_year) + AS gen_variable_om, + CASE + WHEN gen_energy_source IN ('SUN', 'WND', 'MSW', 'Battery', 'Hydro') + THEN gen_energy_source + ELSE 'multiple' + END AS gen_energy_source, + CASE + WHEN gen_energy_source IN ('SUN', 'WND', 'MSW', 'Battery', 'Hydro') + THEN null + ELSE {flhr} + END AS gen_full_load_heat_rate, + gen_min_uptime, + gen_min_downtime, + gen_startup_fuel, gen_storage_efficiency, gen_storage_energy_to_power_ratio, - gen_storage_max_cycles_per_year + gen_storage_max_cycles_per_year, + land_class as gen_land_class, + land_area as gen_land_area, + slope_class as gen_slope_class FROM study_projects JOIN study_generator_info USING (technology) ORDER BY 2, 3, 1; - """.format(fo=forced_outage_rate, flhr=full_load_heat_rate), args) + """.format( + fo=forced_outage_rate, flhr=full_load_heat_rate + ), + args, + ) - write_table('gen_build_predetermined.csv', """ + add_query( + queries, + "gen_build_predetermined.csv", + "WITH " + + study_info + + """ SELECT "GENERATION_PROJECT", build_year, - SUM(proj_existing_cap) as gen_predetermined_cap - FROM study_projects JOIN proj_existing_builds USING (project_id) + SUM(gen_predetermined_cap) as build_gen_predetermined, + SUM(gen_predetermined_storage_energy_mwh) as build_gen_energy_predetermined + FROM study_projects JOIN gen_build_predetermined USING (project_id) GROUP BY 1, 2 ORDER BY 1, 2; - """, args) - - # NOTE: these costs must be expressed in $/MW, $/MWh or $/MW-year, - # not $/kW, $/kWh or $/kW-year. - # NOTE: for now, we only specify storage costs per unit of power, not - # on per unit of energy, so we insert $0 as the energy cost here. - # NOTE: projects should have NULL for overnight cost and fixed O&M in - # proj_existing_builds if they have an entry for the same year in - # generator_costs_by_year. If they have costs in both, they will both - # get passed through to the data table, and Switch will raise an error - # (as it should, because costs are ambiguous in this case). - write_table('gen_build_costs.csv', """ - WITH gen_build_costs AS ( - SELECT - i.technology, - c.year AS build_year, - c.capital_cost_per_kw * 1000.0 - * power(1.0+%(inflation_rate)s, %(base_financial_year)s-c.base_year) - AS gen_overnight_cost, - c.capital_cost_per_kwh * 1000.0 AS gen_storage_energy_overnight_cost, - c.fixed_o_m * 1000.0 * power(1.0+%(inflation_rate)s, %(base_financial_year)s-i.base_year) - AS gen_fixed_o_m, - i.min_vintage_year -- used for build_year filter below - FROM study_generator_info i - JOIN generator_costs_by_year c USING (technology, tech_scen_id) - ORDER BY 1, 2 + """, + args, + ) + + def adjust_cost(cost_term, cost_adjustment_table=None): + """ + Generate cost expression including the following adjustments: + - multiply by 1000.0 to convert from cost per kW or kWh to cost per MW or MWh + - adjust for inflation between project base year and study base year + - optionally apply project-specific cost adjustment terms + + Assumes base_year exists in the table referenced by cost_term. + Applies cost_multiplier and cost_offset terms from cost_adjustment_table if + specified. + """ + cost_table = cost_term.split(".")[0] + "." if "." 
in cost_term else "" + + if cost_adjustment_table: + cat = cost_adjustment_table + cost_term = f"({cost_term}*{cat}.cost_multiplier + {cat}.cost_offset)" + + return ( + f"(" + f"{cost_term}" + f" * 1000.0" + f" * power(1.0+%(inflation_rate)s, %(base_financial_year)s-{cost_table}base_year)" + f")" ) - SELECT -- costs specified in proj_existing_builds + + add_query( + queries, + "gen_build_costs.csv", + "WITH " + + study_info + + """ + -- For projects in gen_build_predetermined, apply average cost of all + -- projects built in the same year (looking up generic costs if needed) + SELECT "GENERATION_PROJECT", b.build_year, SUM( - power(1.0+%(inflation_rate)s, %(base_financial_year)s-b.base_year) - * b.proj_overnight_cost * 1000.0 * proj_existing_cap - ) / SUM(proj_existing_cap) + COALESCE({b_capital_cost_per_mw}, {c_capital_cost_per_mw}) + * gen_predetermined_cap + ) / SUM(gen_predetermined_cap) AS gen_overnight_cost, - null AS gen_storage_energy_overnight_cost, SUM( - power(1.0+%(inflation_rate)s, %(base_financial_year)s-b.base_year) - * b.proj_fixed_om * 1000.0 * proj_existing_cap - ) / SUM(proj_existing_cap) + COALESCE({b_capital_cost_per_mwh}, {c_capital_cost_per_mwh}) + * gen_predetermined_storage_energy_mwh + ) / SUM(gen_predetermined_storage_energy_mwh) + AS gen_storage_energy_overnight_cost, + SUM( + COALESCE({b_fixed_o_m}, {c_fixed_o_m}) + * gen_predetermined_cap + ) / SUM(gen_predetermined_cap) AS gen_fixed_om - FROM study_projects p - JOIN proj_existing_builds b USING (project_id) - WHERE (b.proj_overnight_cost IS NOT NULL OR b.proj_fixed_om IS NOT NULL) + FROM gen_build_predetermined b + JOIN study_projects p USING (project_id) + JOIN study_generator_info i USING (technology) + LEFT JOIN gen_build_costs c + ON c.technology=i.technology AND c.year=b.build_year AND c.tech_scenario=i.tech_scenario GROUP BY 1, 2 UNION - SELECT -- costs specified in generator_costs_by_year - "GENERATION_PROJECT", c.build_year, gen_overnight_cost, - gen_storage_energy_overnight_cost, gen_fixed_o_m - FROM study_projects proj - JOIN gen_build_costs c USING (technology) - LEFT JOIN study_periods per ON (per.time_sample = %(time_sample)s AND c.build_year = per.period) - LEFT JOIN proj_existing_builds e ON (e.project_id = proj.project_id AND e.build_year = c.build_year) + -- For each project in each period after the min vintage year, if no + -- predetermined build is specified (above), use generic prices if + -- available. If no prices are found, it means the project can't + -- be expanded. 
+ SELECT + "GENERATION_PROJECT", + c.year AS build_year, + {c_capital_cost_per_mw} AS gen_overnight_cost, + {c_capital_cost_per_mwh} AS gen_storage_energy_overnight_cost, + {c_fixed_o_m} AS gen_fixed_o_m + FROM study_projects p + JOIN study_generator_info i USING (technology) + JOIN gen_build_costs c ON c.technology=i.technology AND c.tech_scenario=i.tech_scenario + JOIN periods per ON (per.time_sample = %(time_sample)s AND c.year = per.period) + LEFT JOIN gen_build_predetermined e + ON e.project_id = p.project_id AND e.build_year = c.year WHERE - -- note: this allows users to have build_year < min_vintage_year for predetermined projects - -- that have entries in the cost table, e.g., if they want to prespecify some, but postpone - -- additional construction until some later year (unlikely) - (per.period IS NOT NULL AND (c.min_vintage_year IS NULL OR c.build_year >= c.min_vintage_year)) - OR e.project_id IS NOT NULL + e.project_id IS NULL -- no existing projects + AND (i.min_vintage_year IS NULL OR c.year >= i.min_vintage_year) ORDER BY 1, 2; - """, args) + """.format( + b_capital_cost_per_mw=adjust_cost("b.capital_cost_per_kw"), + b_capital_cost_per_mwh=adjust_cost("b.capital_cost_per_kwh"), + c_capital_cost_per_mw=adjust_cost( + "c.capital_cost_per_kw", cost_adjustment_table="p" + ), + c_capital_cost_per_mwh=adjust_cost("c.capital_cost_per_kwh"), + b_fixed_o_m=adjust_cost("b.fixed_o_m"), + c_fixed_o_m=adjust_cost("c.fixed_o_m"), + ), + args, + ) ######################### # spinning_reserves_advanced (if wanted; otherwise defaults to just "spinning" - if 'max_reserve_capability' in args or args.get('write_generation_projects_reserve_capability', False): - + if "max_reserve_capability" in args or args.get( + "write_generation_projects_reserve_capability", False + ): # args['max_reserve_capability'] is a list of tuples of (technology, # reserve_type) (assumed equivalent to 'regulation' if not specified) # We unzip it to use with the unnest function (psycopg2 passes lists of # tuples as arrays of tuples, and unnest would keep those as tuples) try: - reserve_technologies = [r[0] for r in args['max_reserve_capability']] - reserve_types = [r[1] for r in args['max_reserve_capability']] + reserve_technologies = [r[0] for r in args["max_reserve_capability"]] + reserve_types = [r[1] for r in args["max_reserve_capability"]] except KeyError: reserve_technologies = [] reserve_types = [] res_args = args.copy() - res_args['reserve_technologies']=reserve_technologies - res_args['reserve_types']=reserve_types + res_args["reserve_technologies"] = reserve_technologies + res_args["reserve_types"] = reserve_types # note: casting is needed if the lists are empty; see https://stackoverflow.com/a/41893576/3830997 - write_table('generation_projects_reserve_capability.csv', """ - WITH reserve_capability (technology, reserve_type) as ( + add_query( + queries, + "generation_projects_reserve_capability.csv", + "WITH " + + study_info + + ", " + + """ + reserve_capability (technology, reserve_type) as ( SELECT UNNEST(%(reserve_technologies)s::varchar(40)[]) AS technology, UNNEST(%(reserve_types)s::varchar(20)[]) AS reserve_type @@ -539,8 +897,9 @@ def write_tables(**args): JOIN reserve_types t2 on t2.rank <= COALESCE(t1.rank, 100) WHERE t2.rank > 0 ORDER BY 1, t2.rank; - """, res_args) - + """, + res_args, + ) ######################### # operation.unitcommit.fuel_use @@ -553,15 +912,21 @@ def write_tables(**args): # note: for sqlite, you could use "CONCAT(technology, ' ', output_mw, ' ', fuel_consumption_mmbtu_per_h) 
AS key" # TODO: rename fuel_consumption_mmbtu_per_h to fuel_use_mmbtu_per_h here and in import_data.py - if args.get('use_incremental_heat_rates', False): - write_table('gen_inc_heat_rates.csv', """ - WITH part_load AS ( + if args.get("use_incremental_heat_rates", False): + add_query( + queries, + "gen_inc_heat_rates.csv", + "WITH " + + study_info + + ", " + + """ + part_load AS ( SELECT row_number() OVER (ORDER BY technology, output_mw, fuel_consumption_mmbtu_per_h) AS key, technology, output_mw, fuel_consumption_mmbtu_per_h - FROM part_load_fuel_consumption JOIN study_generator_info USING (technology) + FROM gen_part_load_fuel JOIN study_generator_info USING (technology) ), prior AS ( SELECT a.key, MAX(b.key) AS prior_key FROM part_load a JOIN part_load b ON b.technology=a.technology AND b.key < a.key @@ -591,38 +956,47 @@ def write_tables(**args): incremental_heat_rate_mbtu_per_mwhr, fuel_use_rate_mmbtu_per_h FROM curves c JOIN study_projects p using (technology) ORDER BY c.technology, c.key, p."GENERATION_PROJECT"; - """, args) + """, + args, + ) # This gets a list of all the fueled projects (listed as "multiple" energy sources above), - # and lists them as accepting any equivalent or lighter fuel. (However, cogen plants and plants + # and lists them as accepting any equivalent or lighter fuel. (However, plants # using fuels with rank 0 are not changed.) Fuels are also filtered against the list of fuels with # costs reported for the current scenario, so this can end up re-mapping one fuel in the database # (e.g., LSFO) to a similar fuel in the scenario (e.g., LSFO-Diesel-Blend), even if the original fuel # doesn't exist in the fuel_costs table. This can also be used to remap different names for the same # fuel (e.g., "COL" in the plant definition and "Coal" in the fuel_costs table, both with the same # fuel_rank). 
- write_indexed_set_dat_file('gen_multiple_fuels.dat', 'FUELS_FOR_MULTIFUEL_GEN', """ - WITH all_techs AS ( + add_query( + queries, + "gen_multiple_fuels.csv", + "WITH " + + study_info + + ", " + + """ + all_techs AS ( SELECT technology, - fuel as orig_fuel, - cogen + gen_energy_source as orig_fuel FROM study_generator_info ), all_fueled_techs AS ( SELECT * from all_techs WHERE orig_fuel NOT IN ('SUN', 'WND', 'MSW', 'Battery', 'Hydro') ), gen_multiple_fuels AS ( SELECT DISTINCT technology, b.energy_source as fuel FROM all_fueled_techs t - JOIN energy_source_properties a ON a.energy_source = t.orig_fuel - JOIN energy_source_properties b ON b.fuel_rank >= a.fuel_rank AND + JOIN energy_sources a ON a.energy_source = t.orig_fuel + JOIN energy_sources b ON b.fuel_rank >= a.fuel_rank AND (a.fuel_rank > 0 OR a.energy_source = b.energy_source) -- 0-rank can't change fuels - WHERE b.energy_source IN (SELECT fuel_type FROM fuel_costs WHERE fuel_scen_id = %(fuel_scen_id)s) + AND (b.rps_eligible >= a.rps_eligible) -- if rps-eligible fuel specified, only use rps-eligible fuels + WHERE b.energy_source IN (SELECT fuel FROM fuel_costs WHERE fuel_scenario = %(fuel_scenario)s) ) SELECT "GENERATION_PROJECT", fuel FROM gen_multiple_fuels g JOIN study_projects p USING (technology) ORDER BY p.technology, p."GENERATION_PROJECT", g.fuel - """, args) - + """, + args, + ) ######################### # operation.gen_dispatch @@ -631,26 +1005,31 @@ def write_tables(**args): if args.get("skip_cf", False): print("SKIPPING variable_capacity_factors.csv") else: - write_table('variable_capacity_factors.csv', """ + add_query( + queries, + "variable_capacity_factors.csv", + "WITH " + + study_info + + """ SELECT "GENERATION_PROJECT", - study_hour as timepoint, + timepoint, cap_factor as gen_max_capacity_factor FROM study_generator_info g JOIN study_projects p USING (technology) - JOIN cap_factor c USING (project_id) - JOIN study_hour h using (date_time) + JOIN variable_capacity_factors c USING (project_id) + JOIN timepoints h using (date_time) WHERE time_sample = %(time_sample)s ORDER BY 1, 2 - """, args) - + """, + args, + ) ######################### # project.discrete_build # include this module, but it doesn't need any additional data. - ######################### # operation.unitcommit.commit @@ -661,42 +1040,49 @@ def write_tables(**args): # TODO: create data files showing reserve rules - write_table('gen_timepoint_commit_bounds.csv', """ + add_query( + queries, + "gen_timepoint_commit_bounds.csv", + "WITH " + + study_info + + """ SELECT * FROM ( SELECT "GENERATION_PROJECT", - study_hour AS "TIMEPOINT", - CASE WHEN %(enable_must_run)s = 1 AND must_run = 1 THEN 1.0 ELSE null END + timepoint AS "TIMEPOINT", + CASE WHEN period < %(enable_must_run_before)s AND must_run = 1 THEN 1.0 ELSE null END AS gen_min_commit_fraction, null AS gen_max_commit_fraction, null AS gen_min_load_fraction_TP FROM study_projects JOIN study_generator_info USING (technology) - CROSS JOIN study_hour + CROSS JOIN timepoints NATURAL JOIN timeseries NATURAL JOIN periods WHERE time_sample = %(time_sample)s ) AS the_data WHERE gen_min_commit_fraction IS NOT NULL OR gen_max_commit_fraction IS NOT NULL - OR gen_min_load_fraction_TP IS NOT NULL; - """, args) - + OR gen_min_load_fraction_TP IS NOT NULL + ORDER BY 1, 2; + """, + args, + ) ######################### # project.unitcommit.discrete # include this module, but it doesn't need any additional data. 
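The gen_timepoint_commit_bounds query above wraps its SELECT in an outer filter so that a row is emitted only when at least one bound is non-null; in effect, must-run plants get gen_min_commit_fraction = 1.0 for every timepoint in periods before the year passed as enable_must_run_before. The same filter in miniature (project and timepoint data are made up):

    # Miniature version of the commit-bounds filter above; the project and
    # timepoint data are invented, and only the min-commit bound is modeled.
    enable_must_run_before = 2030

    projects = [("Oahu_CC", 1), ("Oahu_Wind", 0)]  # (project, must_run flag)
    timepoints = [(101, 2020), (201, 2045)]  # (timepoint, period)

    rows = [
        (g, tp, 1.0)  # gen_min_commit_fraction
        for g, must_run in projects
        for tp, period in timepoints
        if must_run == 1 and period < enable_must_run_before
    ]
    print(rows)  # [('Oahu_CC', 101, 1.0)] -- only the must-run plant, pre-2030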
- ######################### # trans_build # --- Not used --- # - # write_table('trans_lines.csv', """ + # add_query(queries, 'trans_lines.csv', """ # SELECT load_area_start AS load_zone_start, load_area_end AS load_zone_end, # tid, length_km AS transmission_length_km, efficiency AS transmission_efficiency, # existing_mw_from AS existing_transmission_from, # existing_mw_to AS existing_transmission_to # FROM trans_line # WHERE load_area_start IN %(load_zones)s OR load_area_end IN %(load_zones)s + # ORDER BY 1, 2; # """, args) # # @@ -710,70 +1096,109 @@ def write_tables(**args): # batteries # (now included as standard storage projects, but kept here # to support older projects that haven't upgraded yet) - bat_years = 'BATTERY_CAPITAL_COST_YEARS' - bat_cost = 'battery_capital_cost_per_mwh_capacity_by_year' - non_cost_bat_vars = sorted([k for k in args if k.startswith('battery_') and k not in [bat_years, bat_cost]]) + bat_years = "BATTERY_CAPITAL_COST_YEARS" + bat_cost = "battery_capital_cost_per_mwh_capacity_by_year" + non_cost_bat_vars = sorted( + [k for k in args if k.startswith("battery_") and k not in [bat_years, bat_cost]] + ) if non_cost_bat_vars: - write_simple_csv( - 'batteries.csv', - non_cost_bat_vars, - args - ) + add_one_row_literal(queries, "batteries.csv", non_cost_bat_vars, args) if bat_years in args and bat_cost in args: # annual costs were provided -- write those to a tab file - write_csv_file( - 'battery_capital_cost.csv', + add_literal_table( + queries, + "battery_capital_cost.csv", headers=[bat_years, bat_cost], data=list(zip(args[bat_years], args[bat_cost])), - arguments=args + arguments=args, ) + ######################### + # Total land in each class in each load zone + add_query( + queries, + "load_zone_land_class_area.csv", + """ + SELECT load_zone, land_class, area as load_zone_land_class_area + FROM load_zone_land_class_area + WHERE load_zone in %(load_zones)s; + """, + args, + ) + ######################### # EV annual energy consumption (original, basic version) # print "ev_scenario:", args.get('ev_scenario', None) - if args.get('ev_scenario', None) is not None: - write_table('ev_fleet_info.csv', """ + if args.get("ev_scenario", None) is not None: + add_query( + queries, + "ev_fleet_info.csv", + """ SELECT load_zone as "LOAD_ZONE", period as "PERIOD", ev_share, ice_miles_per_gallon, ev_miles_per_kwh, ev_extra_cost_per_vehicle_year, n_all_vehicles, vmt_per_vehicle - FROM ev_adoption a JOIN study_periods p on a.year = p.period + FROM ev_adoption a JOIN periods p on a.year = p.period WHERE load_zone in %(load_zones)s AND time_sample = %(time_sample)s AND ev_scenario = %(ev_scenario)s ORDER BY 1, 2; - """, args) + """, + args, + ) # power consumption for each hour of the day under business-as-usual charging # note: the charge weights have a mean value of 1.0, but go up and down in different hours - write_table('ev_bau_load.csv', """ + # NOTE: This may not average out to exactly 1.0 if time sampling is not every hour. + # We could get mean charging during each time sample by calculating avg(ev_bau_mw) + # and changing the hour_of_day join to + # (p.hour_of_day - h.hour_of_day + 24) % 24 < ts_duration_of_tp + # but that would be inconsistent with how we handle loads and weather + # (generally point sample at start of timepoint or avg. 
value over first + # hour of timepoint; not whole timepoint) + add_query( + queries, + "ev_bau_load.csv", + """ SELECT load_zone AS "LOAD_ZONE", - study_hour AS "TIMEPOINT", - charge_weight * ev_share * n_all_vehicles * vmt_per_vehicle / (1000.0 * ev_miles_per_kwh) / 8760 as ev_bau_mw + timepoint AS "TIMEPOINT", + charge_weight * ev_share * n_all_vehicles * vmt_per_vehicle + / (1000.0 * ev_miles_per_kwh) / 8760 as ev_bau_mw FROM ev_adoption e - JOIN study_date d ON d.period = e.year - JOIN study_hour h USING (study_date, time_sample) - JOIN ev_hourly_charge_profile p - ON p.hour_of_day = h.hour_of_day + JOIN timeseries d ON d.period = e.year + JOIN timepoints h USING (timeseries, time_sample) + JOIN ev_hourly_charge_profiles p + ON p.charge_profile = %(ev_charge_profile)s + AND p.hour_of_day = h.hour_of_day WHERE load_zone in %(load_zones)s AND time_sample = %(time_sample)s AND ev_scenario = %(ev_scenario)s ORDER BY 1, 2; - """, args) + """, + args, + ) ######################### # EV annual energy consumption (advanced, frozen Dantzig-Wolfe version) - if args.get('ev_scenario', None) is not None: - write_table('ev_share.csv', """ + if args.get("ev_scenario", None) is not None: + add_query( + queries, + "ev_share.csv", + """ SELECT load_zone as "LOAD_ZONE", period as "PERIOD", ev_share - FROM ev_adoption a JOIN study_periods p on a.year = p.period + FROM ev_adoption a JOIN periods p on a.year = p.period WHERE load_zone in %(load_zones)s AND time_sample = %(time_sample)s AND ev_scenario = %(ev_scenario)s ORDER BY 1, 2; - """, args) - write_table('ev_fleet_info_advanced.csv', """ + """, + args, + ) + add_query( + queries, + "ev_fleet_info_advanced.csv", + """ WITH detailed_fleet AS ( SELECT a.load_zone AS "LOAD_ZONE", @@ -797,7 +1222,7 @@ def write_tables(**args): + (2045-period)/25.0 * "EV extra capital cost per year 2020" END AS "ev_extra_cost_per_vehicle_year" FROM ev_adoption a - JOIN study_periods p ON a.year = p.period + JOIN periods p ON a.year = p.period JOIN ev_fleet f ON f.load_zone = a.load_zone WHERE a.load_zone in %(load_zones)s AND time_sample = %(time_sample)s @@ -815,7 +1240,9 @@ def write_tables(**args): FROM detailed_fleet GROUP BY 1, 2, 3, 6 ORDER BY 1, 2, 3; - """, args) + """, + args, + ) # power consumption bids for each hour of the day # (consolidate to one vehicle class to accelerate data retrieval and # reduce model memory requirements) (note that there are 6 classes of @@ -825,15 +1252,18 @@ def write_tables(**args): if args.get("skip_ev_bids", False): print("SKIPPING ev_charging_bids.csv") else: - write_table('ev_charging_bids.csv', """ + add_query( + queries, + "ev_charging_bids.csv", + """ SELECT b.load_zone AS "LOAD_ZONE", CONCAT_WS('_', 'All', "ICE fuel", 'Vehicles') AS "VEHICLE_TYPE", bid_number AS "BID_NUM", - study_hour AS "TIMEPOINT", + timepoint AS "TIMEPOINT", sum(charge_mw) AS ev_bid_by_type - FROM study_date d - JOIN study_hour h USING (study_date, time_sample) + FROM timeseries d + JOIN timepoints h USING (timeseries, time_sample) JOIN ev_charging_bids b ON b.hour = h.hour_of_day AND b.hours_per_step = d.ts_duration_of_tp JOIN ev_fleet f ON b.vehicle_type=f."vehicle type" AND b.load_zone=f.load_zone @@ -841,246 +1271,179 @@ def write_tables(**args): AND d.time_sample = %(time_sample)s GROUP BY 1, 2, 3, 4 ORDER BY 1, 2, 3, 4; - """, args) + """, + args, + ) ######################### # pumped hydro - # TODO: put these data in a database with hydro_scen_id's and pull them from there + # TODO: put these data in a database with hydro_scenario's and pull them 
from there if "pumped_hydro_headers" in args: - write_csv_file( - 'pumped_hydro.csv', + add_literal_table( + queries, + "pumped_hydro.csv", headers=args["pumped_hydro_headers"], data=args["pumped_hydro_projects"], - arguments=args + arguments=args, ) - # write_simple_csv( - # 'pumped_hydro.csv', - # [k for k in args if k.startswith('pumped_hydro_')], - # args - # ) - ######################### # hydrogen - # TODO: put these data in a database and write a .csv file instead - write_simple_csv( - 'hydrogen.csv', - sorted([k for k in args if k.startswith('hydrogen_') or k.startswith('liquid_hydrogen_')]), - args + # TODO: put these data in a database and pull from there + add_one_row_literal( + queries, + "hydrogen.csv", + sorted( + [ + k + for k in args + if k.startswith("hydrogen_") or k.startswith("liquid_hydrogen_") + ] + ), + args, ) - ######################### # PHA data - pha_params = sorted([k for k in args if k.startswith('pha_')]) + pha_params = sorted([k for k in args if k.startswith("pha_")]) if pha_params: - write_dat_file( - 'pha.dat', - pha_params, - args - ) + add_one_row_literal(queries, "pha.csv", pha_params, args) + + return queries -# the two functions below could be used as the start of a system -# to write placeholder files for any files in the current scenario -# that match the base files. This could be used to avoid creating large -# files (variable_cap_factor.csv) for alternative scenarios that are -# otherwise very similar. i.e., placeholder .csv or .dat files could -# be written with just the line 'include ../variable_cap_factor.csv' or -# 'include ../financial.dat'. - -def any_alt_args_in_list(args, l): - """Report whether any arguments in the args list appear in the list l.""" - for a in args.get('alt_args', {}): - if a in l: - return True - return False - -def any_alt_args_in_query(args, query): - """Report whether any arguments in the args list appear in the list l.""" - for a in args.get('alt_args', {}): - if '%(' + a + ')s' in query: - return True - return False def make_file_path(file, args): """Create any directories and subdirectories needed to store data in the specified file, based on inputs_dir and inputs_subdir arguments. Return a pathname to the file.""" # extract extra path information from args (if available) # and build a path to the specified file. - path = os.path.join(args.get('inputs_dir', ''), args.get('inputs_subdir', '')) - if path != '' and not os.path.exists(path): + path = os.path.join(args.get("inputs_dir", ""), args.get("inputs_subdir", "")) + if path != "" and not os.path.exists(path): os.makedirs(path) path = os.path.join(path, file) return path + con = None + + def db_cursor(): global con if con is None: try: # note: we don't import until here to avoid interfering with unit tests on systems that don't have # (or need) psycopg2 - global psycopg2 - import psycopg2 + global psycopg2, sql + import psycopg2, psycopg2.sql as sql except ImportError: - print(dedent(""" + print( + dedent( + """ ############################################################################################ Unable to import psycopg2 module to access database server. Please install this module via 'conda install psycopg2' or 'pip install psycopg2'. 
            ############################################################################################
-            """))
+                """
+                )
+            )
             raise
         try:
-            pghost='redr.eng.hawaii.edu'
             # note: the connection gets created when the module loads and never gets closed (until presumably python exits)
-            con = psycopg2.connect(database='switch', host=pghost) #, user='switch_user')
+            con = psycopg2.connect(database=pgdatabase, host=pghost, user=pguser)
+            # use read-only session, because that's enough for this script and it's possible something
+            # weird could come through in the configuration info that gets passed to postgresql
+            con.set_session(readonly=True, autocommit=True)
+            print(
+                "Reading data from database {} on server {}".format(pgdatabase, pghost)
+            )
         except psycopg2.OperationalError:
-            print(dedent("""
+            print(
+                dedent(
+                    """
             ############################################################################################
-            Error while connecting to switch database on postgres server {server}.
-            Please ensure that the PGUSER environment variable is set with your postgres username
-            and there is a line like "*:*:*:<username>:<password>" in ~/.pgpass (which should be chmod 0600)
+            Error while connecting to database '{db}' on postgres server '{server}' as user '{user}'.
+            Please ensure that the following environment variables are set:
+            PGUSER = your postgres username
+            PGHOST = hostname or IP address of postgres server
+            PGDATABASE = name of switch database on this server.
+            There should also be a line like "*:*:*:<username>:<password>" in ~/.pgpass (which should be chmod 0600)
             or in %APPDATA%\postgresql\pgpass.conf (Windows).
             See http://www.postgresql.org/docs/9.1/static/libpq-pgpass.html for more details.
             ############################################################################################
-            """.format(server=pghost)))
+                    """.format(
+                        server=pghost, db=pgdatabase, user=pguser
+                    )
+                )
+            )
             raise
     return con.cursor()


-def write_simple_csv(output_file, args_to_write, arguments):
-    """ write a simple .csv file with the arguments specified in args_to_write,
-    drawn from the arguments dictionary. This includes one row with all the
-    parameter names and a second row with their values.
- (previously write_dat_file())""" - - start=time.time() - - # collect data for the two rows (if any) - headers = [] - values = [] - for name in args_to_write: - if name in arguments: - headers.append(name) - values.append(str(arguments[name])) - if headers: - output_file = make_file_path(output_file, arguments) - print("Writing {file} ...".format(file=output_file), end=' ') - sys.stdout.flush() # display the part line to the user +def prepare_query(query, arguments): + return db_cursor().mogrify(query, arguments) - with open(output_file, 'w') as f: - f.write(','.join(headers) + '\n') - f.write(','.join(values) + '\n') - - print("time taken: {dur:.2f}s".format(dur=time.time()-start)) - -def write_table(output_file, query, arguments): - output_file = make_file_path(output_file, arguments) - cur = db_cursor() - - print("Writing {file} ...".format(file=output_file), end=' ') - sys.stdout.flush() # display the part line to the user - start=time.time() - cur.execute(dedent(query), arguments) +def add_query(queries, file, query, arguments): + queries.append((file, prepare_query(query, arguments))) - with open(output_file, 'w') as f: - # write header row - writerow(f, [d[0] for d in cur.description]) - # write the query results (cur is used as an iterator here to get all the rows one by one) - writerows(f, cur) - print("time taken: {dur:.2f}s".format(dur=time.time()-start)) - -def write_csv_file(output_file, headers, data, arguments={}): - "Write a tab file using the headers and data supplied." - output_file = make_file_path(output_file, arguments) - - print("Writing {file} ...".format(file=output_file), end=' ') - sys.stdout.flush() # display the part line to the user - - start=time.time() - - with open(output_file, 'w') as f: - writerow(f, headers) - writerows(f, data) - - print("time taken: {dur:.2f}s".format(dur=time.time()-start)) - -def write_dat_file(output_file, args_to_write, arguments): - """ write a simple .dat file with the arguments specified in args_to_write, - drawn from the arguments dictionary""" - - if any(arg in arguments for arg in args_to_write): - output_file = make_file_path(output_file, arguments) - print("Writing {file} ...".format(file=output_file), end=' ') - sys.stdout.flush() # display the part line to the user - start=time.time() - - with open(output_file, 'w') as f: - f.writelines([ - 'param ' + name + ' := ' + str(arguments[name]) + ';\n' - for name in args_to_write if name in arguments - ]) +def add_literal_table(queries, table, headers, data, arguments={}): + # Create an SQL query that returns the values defined by the headers and data + if data: + query = sql.SQL("SELECT * FROM (VALUES {}) AS t ({})").format( + sql.SQL(", ").join(sql.Literal(tuple(row)) for row in data), + sql.SQL(", ").join(sql.Identifier(h) for h in headers), + ) + else: + # create a zero-row table with the right headers + query = sql.SQL("SELECT * FROM (VALUES {}) AS t ({}) WHERE FALSE").format( + sql.Literal(tuple("" for h in headers)), + sql.SQL(", ").join(sql.Identifier(h) for h in headers), + ) + add_query(queries, table, query, arguments) - print("time taken: {dur:.2f}s".format(dur=time.time()-start)) -def write_indexed_set_dat_file(output_file, set_name, query, arguments): - """Write a .dat file defining an indexed set, based on the query provided. 
+def add_one_row_literal(queries, table, arg_names, args): + add_literal_table( + queries, table, arg_names, [tuple(args[n] for n in arg_names)], args + ) - Note: the query should produce a table with index values in all columns except - the last, and then set members for each index in the last column. (There should - be multiple rows with the same values in the index columns.)""" - output_file = make_file_path(output_file, arguments) - print("Writing {file} ...".format(file=output_file), end=' ') +def write_table(output_file, query): + print("Writing {file} ...".format(file=output_file), end=" ") sys.stdout.flush() # display the part line to the user - start=time.time() - + start = time.time() cur = db_cursor() - cur.execute(dedent(query), arguments) - - # build a dictionary grouping all values (last column) according to their index keys (earlier columns) - data_dict = collections.defaultdict(list) - for r in cur: - # note: data_dict[(index vals)] is created as an empty list on first reference, - # then gets data from all matching rows appended to it - data_dict[tuple(r[:-1])].append(r[-1]) - - # .dat file format based on p. 161 of http://ampl.com/BOOK/CHAPTERS/12-data.pdf - with open(output_file, 'w') as f: - f.writelines([ - 'set {sn}[{idx}] := {items} ;\n'.format( - sn=set_name, - idx=', '.join(k), - items=' '.join(v)) - for k, v in iteritems(data_dict) - ]) - - print("time taken: {dur:.2f}s".format(dur=time.time()-start)) + try: + cur.execute(query) + except: + print("\nError running the following query:\n{}\n".format(query.decode())) + raise + with open(output_file, "w") as f: + writerow(f, [d[0] for d in cur.description]) # header + writerows(f, cur) # data + print("time taken: {dur:.2f}s".format(dur=time.time() - start)) def stringify(val): if val is None: - out = '.' + out = "." elif type(val) is str: out = val.replace('"', '""') - if any(char in out for char in [' ', '\t', '"', "'", ',']): + if any(char in out for char in [" ", "\t", '"', "'", ","]): out = '"' + out + '"' else: out = str(val) return out + def writerow(f, row): - f.write(','.join(stringify(c) for c in row) + '\n') + f.write(",".join(stringify(c) for c in row) + "\n") + def writerows(f, rows): for r in rows: writerow(f, r) - -def tuple_dict(keys, vals): - "Create a tuple of dictionaries, one for each row in vals, using the specified keys." 
- return tuple(list(zip(keys, row)) for row in vals) diff --git a/switch_model/hawaii/scenarios.py b/switch_model/hawaii/scenarios.py index 1e1568c35..612652b32 100644 --- a/switch_model/hawaii/scenarios.py +++ b/switch_model/hawaii/scenarios.py @@ -3,10 +3,13 @@ try: import fcntl + def flock(f): fcntl.flock(f, fcntl.LOCK_EX) + def funlock(f): fcntl.flock(f, fcntl.LOCK_UN) + except ImportError: # probably using windows # rely on opportunistic file writing (hope that scenarios aren't @@ -15,9 +18,11 @@ def funlock(f): # https://www.safaribooksonline.com/library/view/python-cookbook/0596001673/ch04s25.html def flock(f): pass + def funlock(f): pass + def iterify(item): """Return an iterable for the one or more items passed.""" if isinstance(item, string_types): @@ -30,49 +35,64 @@ def iterify(item): i = iter([item]) return i + class AddModuleAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): for m in iterify(values): setattr(namespace, m, True) + class RemoveModuleAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): for m in iterify(values): setattr(namespace, m, False) + class AddListAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): if getattr(namespace, self.dest) is None: setattr(namespace, self.dest, list()) getattr(namespace, self.dest).extend(iterify(values)) + # define a standard argument parser, which can be used to setup scenarios # NOTE: you can't safely use default values here, because those end up being # assigned to cmd_line_args(), and then they override any values set for the # standard scenarios. -parser = argparse.ArgumentParser(description='Solve one or more Switch-Hawaii scenarios.') -parser.add_argument('--inputs', dest='inputs_dir') -parser.add_argument('--inputs-subdir') -parser.add_argument('--outputs', dest='outputs_dir') -parser.add_argument('--scenario', action=AddListAction, dest='scenario_to_run') -parser.add_argument('--scenarios', action=AddListAction, nargs='+', dest='scenario_to_run') -parser.add_argument('--scenario-name') -parser.add_argument('--exclude', action=AddModuleAction, dest='exclude_module', nargs='+') -parser.add_argument('-n', action=RemoveModuleAction, dest='exclude_module') -parser.add_argument('--include', action=AddModuleAction, dest='include_module', nargs='+') -parser.add_argument('-y', action=AddModuleAction, dest='include_module') -parser.add_argument(action=AddModuleAction, dest='include_module', nargs='*') +parser = argparse.ArgumentParser( + description="Solve one or more Switch-Hawaii scenarios." 
+) +parser.add_argument("--inputs", dest="inputs_dir") +parser.add_argument("--inputs-subdir") +parser.add_argument("--outputs", dest="outputs_dir") +parser.add_argument("--scenario", action=AddListAction, dest="scenario_to_run") +parser.add_argument( + "--scenarios", action=AddListAction, nargs="+", dest="scenario_to_run" +) +parser.add_argument("--scenario-name") +parser.add_argument( + "--exclude", action=AddModuleAction, dest="exclude_module", nargs="+" +) +parser.add_argument("-n", action=RemoveModuleAction, dest="exclude_module") +parser.add_argument( + "--include", action=AddModuleAction, dest="include_module", nargs="+" +) +parser.add_argument("-y", action=AddModuleAction, dest="include_module") +parser.add_argument(action=AddModuleAction, dest="include_module", nargs="*") + def args_dict(*a): """call the parser to get the args, then return them as a dictionary, omitting None's'""" return {k: v for k, v in vars(parser.parse_args(*a)).items() if v is not None} + # report current command line arguments for use by various functions # This is a function instead of a constant, so users can call # scenarios.parser.add_argument() to add arguments of their own before evaluation def cmd_line_args(): return args_dict() + def get_required_scenario_names(): """Return list of names of scenario(s) that were requested or defined from the command line via --scenario[s] or --scenario-name. @@ -80,11 +100,11 @@ def get_required_scenario_names(): a = cmd_line_args() if "scenario_to_run" in a: return a["scenario_to_run"] - elif "scenario_name" in a or not os.path.isfile('scenarios_to_run.txt'): + elif "scenario_name" in a or not os.path.isfile("scenarios_to_run.txt"): # They have defined one specific scenario on the command line, which is not based on any standard scenario, # or there are no standard scenarios. # Return a no-name scenario, which indicates to build the scenario without referring to any standard scenario. - return [''] + return [""] else: # no specific scenarios were requested on the command line; run the standard scenarios instead return [] @@ -101,12 +121,13 @@ def start_next_standard_scenario(): continue else: return merge_scenarios(args, cmd_line_args()) - return None # no more scenarios to run + return None # no more scenarios to run + def get_scenario_args(scenario): """Return the arguments for the specified standard scenario, amended with any command-line arguments. This may also be called with an empty scenario name ('') to define a scenario using only command-line arguments.""" - if scenario == '': + if scenario == "": return merge_scenarios(cmd_line_args()) else: scenario_list = get_standard_scenarios_dict() @@ -115,49 +136,55 @@ def get_scenario_args(scenario): else: return merge_scenarios(scenario_list[scenario], cmd_line_args()) + def get_standard_scenarios_dict(): """Return collection of standard scenarios, as defined in scenarios_to_run.txt. They are returned as an OrderedDict with keys equal to the scenario names and values that are each a dictionary of arguments for that scenario.""" # note: we read the list from the disk each time so that we get a fresher version # if the standard list is changed during a long solution effort. 
- with open('scenarios_to_run.txt', 'r') as f: + with open("scenarios_to_run.txt", "r") as f: # wait for exclusive access to the file (to avoid reading while the file is being changed) flock(f) - scenarios_list = list(f.read().splitlines()) # note: ignores presence/absence of \n at end of file + scenarios_list = list( + f.read().splitlines() + ) # note: ignores presence/absence of \n at end of file funlock(f) - args_list = [args_dict(s.split(' ')) for s in scenarios_list] + args_list = [args_dict(s.split(" ")) for s in scenarios_list] return collections.OrderedDict([(s["scenario_name"], s) for s in args_list]) + def merge_scenarios(*scenarios): # combine scenarios: start with the first and then apply most settings from later ones # but concatenate "tag" entries and remove "scenario_to_run" entries - d = dict(tag='') + d = dict(tag="") for s in scenarios: t1 = d["tag"] t2 = s.get("tag", "") s["tag"] = t1 + ("" if t1 == "" or t2 == "" else "_") + t2 d.update(s) - if 'scenario_to_run' in d: - del d['scenario_to_run'] + if "scenario_to_run" in d: + del d["scenario_to_run"] return d + def report_completed_scenario(scenario): scenario_already_run(scenario) + def scenario_already_run(scenario): """Add the specified scenario to the list in completed_scenarios.txt. Return False if it wasn't there already.""" - with open('completed_scenarios.txt', 'a+') as f: + with open("completed_scenarios.txt", "a+") as f: # wait for exclusive access to the list (to avoid writing the same scenario twice in a race condition) flock(f) # file starts with pointer at end; move to start f.seek(0, 0) - if scenario + '\n' in f: + if scenario + "\n" in f: already_run = True else: already_run = False # append name to the list (will always go at end, because file was opened in 'a' mode) - f.write(scenario + '\n') + f.write(scenario + "\n") funlock(f) return already_run diff --git a/switch_model/hawaii/smooth_dispatch.py b/switch_model/hawaii/smooth_dispatch.py index 3ac7778e3..a97eae6b5 100644 --- a/switch_model/hawaii/smooth_dispatch.py +++ b/switch_model/hawaii/smooth_dispatch.py @@ -1,33 +1,59 @@ -"""Minimize excess renewable production (dissipated in transmission and battery -losses) and smooth out demand response and EV charging as much as possible.""" +"""Minimizes excess renewable production (dissipated in transmission and battery +losses) and smoothes out demand response and EV charging as much as possible. +Also avoids excess allocation of surplus reserves. + +Simple use: add this to modules.txt, below most modules but before reporting. + +Advanced use: add this to modules.txt and also to iterate.txt (should +automatically improve all reporting) +""" + from __future__ import print_function +from __future__ import division from pyomo.environ import * from pyomo.core.base.numvalue import native_numeric_types import switch_model.solve from switch_model.utilities import iteritems -def define_components(m): - if m.options.solver in ('cplex', 'cplexamp', 'gurobi', 'gurobi_ampl'): +# This uses define_dynamic_components instead of define_components, to ensure +# that whatever components it needs to access will already be constructed. This +# should be placed high in the module list so that the post-solve smoothing code +# will run before the post-solve reporting code in other modules. 
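Concretely, "below most modules but before reporting" means an ordering along these lines in modules.txt; the other entries shown are standard Switch module names, included only to illustrate the placement:

    # modules.txt -- an illustrative ordering
    switch_model.timescales
    switch_model.financials
    switch_model.balancing.load_zones
    switch_model.energy_sources.properties
    switch_model.generators.core.build
    switch_model.generators.core.dispatch
    # smoothing goes after the components it adjusts...
    switch_model.hawaii.smooth_dispatch
    # ...but before post-solve reporting
    switch_model.reporting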
+def define_dynamic_components(m): + if m.options.solver in ("cplex", "cplexamp", "gurobi", "gurobi_ampl"): m.options.smooth_dispatch = True else: # glpk and cbc can't handle quadratic problem used for smoothing m.options.smooth_dispatch = False if m.options.verbose: - print("Not smoothing dispatch because {} cannot solve a quadratic model.".format(m.options.solver)) - print("Remove hawaii.smooth_dispatch from modules.txt and iterate.txt to avoid this message.") + print( + "Not smoothing dispatch because {} cannot solve a quadratic model.".format( + m.options.solver + ) + ) + print( + "Remove hawaii.smooth_dispatch from modules.txt and iterate.txt to avoid this message." + ) # add an alternative objective function that smoothes out time-shiftable energy sources and sinks if m.options.smooth_dispatch: # minimize the range of variation of various slack responses; # these should each have timepoint as their final index component + # They should also be in order from most-smoothed to least-smoothed components_to_smooth = [ - 'ShiftDemand', 'ChargeBattery', 'DischargeBattery', 'ChargeEVs', - 'RunElectrolyzerMW', 'LiquifyHydrogenMW', 'DispatchFuelCellMW', - 'DispatchGen', 'ChargeStorage', + "ShiftDemand", + "ChargeEVs", + "RunElectrolyzerMW", + "LiquifyHydrogenMW", + "DispatchFuelCellMW", + "ChargeBattery", + "ChargeStorage", + "DischargeBattery", + "DispatchGen", ] - def add_smoothing_entry(m, d, component, key): + def add_smoothing_entry(m, d, component, key, weight=1.0): """ Add an entry to the dictionary d of elements to smooth. The entry's key is based on component name and specified key, and its value is @@ -39,21 +65,24 @@ def add_smoothing_entry(m, d, component, key): tp = key[-1] prev_tp = m.TPS_IN_TS[m.tp_ts[tp]].prevw(tp) entry_key = str((component.name,) + key) - entry_val = component[key] - component[key[:-1]+(prev_tp,)] - d[entry_key] = entry_val + entry_val = component[key] - component[key[:-1] + (prev_tp,)] + d[entry_key] = weight * entry_val def rule(m): m.component_smoothing_dict = dict() """Find all components to be smoothed""" # smooth named components - for c in components_to_smooth: + for i, c in enumerate(components_to_smooth): + weight = 0.9 - 0.4 * (i / len(components_to_smooth)) try: comp = getattr(m, c) except AttributeError: continue - print("Will smooth {}.".format(c)) + print("Will smooth {} with weight {}.".format(c, weight)) for key in comp: - add_smoothing_entry(m, m.component_smoothing_dict, comp, key) + add_smoothing_entry( + m, m.component_smoothing_dict, comp, key, weight + ) # # smooth standard storage generators # if hasattr(m, 'STORAGE_GEN_TPS'): # print "Will smooth charging and discharging of standard storage." 
@@ -61,41 +90,68 @@ def rule(m): # comp = getattr(m, c) # for key in m.STORAGE_GEN_TPS: # add_smoothing_entry(m, m.component_smoothing_dict, comp, key) + m.make_component_smoothing_dict = BuildAction(rule=rule) # Force IncreaseSmoothedValue to equal any step-up in a smoothed value - m.ISV_INDEX = Set(initialize=lambda m: list(m.component_smoothing_dict.keys())) + m.ISV_INDEX = Set( + dimen=1, initialize=lambda m: list(m.component_smoothing_dict.keys()) + ) m.IncreaseSmoothedValue = Var(m.ISV_INDEX, within=NonNegativeReals) m.Calculate_IncreaseSmoothedValue = Constraint( m.ISV_INDEX, - rule=lambda m, k: m.IncreaseSmoothedValue[k] >= m.component_smoothing_dict[k] + rule=lambda m, k: m.IncreaseSmoothedValue[k] + >= m.component_smoothing_dict[k], ) def Smooth_Free_Variables_obj_rule(m): # minimize production (i.e., maximize curtailment / minimize losses) obj = sum( getattr(m, component)[z, t] - for z in m.LOAD_ZONES - for t in m.TIMEPOINTS - for component in m.Zone_Power_Injections) + for z in m.LOAD_ZONES + for t in m.TIMEPOINTS + for component in m.Zone_Power_Injections + ) # also maximize up reserves, which will (a) minimize arbitrary burning off of renewables # (e.g., via storage) and (b) give better representation of the amount of reserves actually available - if hasattr(m, 'Spinning_Reserve_Up_Provisions') and hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'): # advanced module + if hasattr(m, "Spinning_Reserve_Up_Provisions") and hasattr( + m, "GEN_SPINNING_RESERVE_TYPES" + ): # advanced module print("Will maximize provision of up reserves.") - reserve_weight = {'contingency': 0.9, 'regulation': 1.1} + reserve_weight = { + m.options.contingency_reserve_type: 0.9, + m.options.regulating_reserve_type: 1.1, + } for comp_name in m.Spinning_Reserve_Up_Provisions: component = getattr(m, comp_name) obj += -0.1 * sum( reserve_weight.get(rt, 1.0) * component[rt, ba, tp] for rt, ba, tp in component ) + # also minimize contingency up reserve requirements to avoid + # spuriously high contingency requirements (they can be + # any feasible value above the largest contingency) + if hasattr(m, "Spinning_Reserve_Up_Requirements") and hasattr( + m, "GEN_SPINNING_RESERVE_TYPES" + ): # advanced module + print("Will minimize requirement for contingency up reserves.") + for comp_name in m.Spinning_Reserve_Up_Requirements: + component = getattr(m, comp_name) + obj += sum( + component[rt, ba, tp] + for rt, ba, tp in component + if rt == m.options.contingency_reserve_type + ) # minimize absolute value of changes in the smoothed variables obj += sum(v for v in m.IncreaseSmoothedValue.values()) return obj - m.Smooth_Free_Variables = Objective(rule=Smooth_Free_Variables_obj_rule, sense=minimize) + + m.Smooth_Free_Variables = Objective( + rule=Smooth_Free_Variables_obj_rule, sense=minimize + ) # constrain smoothing objective to find unbounded ray - m.Bound_Obj = Constraint(rule=lambda m: Smooth_Free_Variables_obj_rule(m) <= 1e9) + # m.Bound_Obj = Constraint(rule=lambda m: Smooth_Free_Variables_obj_rule(m) <= 1e12) # leave standard objective in effect for now m.Smooth_Free_Variables.deactivate() @@ -109,79 +165,92 @@ def pre_iterate(m): elif m.iteration_number == 1: pre_smooth_solve(m) else: - raise RuntimeError("Reached unexpected iteration number {} in module {}.".format(m.iteration_number, __name__)) + raise RuntimeError( + "Reached unexpected iteration number {} in module {}.".format( + m.iteration_number, __name__ + ) + ) return None # no comment on convergence + def post_iterate(m): if hasattr(m, "ChargeBattery"): 
double_charge = [ - ( - z, t, - m.ChargeBattery[z, t].value, - m.DischargeBattery[z, t].value - ) - for z in m.LOAD_ZONES - for t in m.TIMEPOINTS - if m.ChargeBattery[z, t].value > 0 - and m.DischargeBattery[z, t].value > 0 + (z, t, m.ChargeBattery[z, t].value, m.DischargeBattery[z, t].value) + for z in m.LOAD_ZONES + for t in m.TIMEPOINTS + if m.ChargeBattery[z, t].value > 0 and m.DischargeBattery[z, t].value > 0 ] if len(double_charge) > 0: print("") - print("WARNING: batteries are simultaneously charged and discharged in some hours.") + print( + "WARNING: batteries are simultaneously charged and discharged in some hours." + ) print("This is usually done to relax the biofuel limit.") for (z, t, c, d) in double_charge: - print('ChargeBattery[{z}, {t}]={c}, DischargeBattery[{z}, {t}]={d}'.format( - z=z, t=m.tp_timestamp[t], - c=c, d=d - )) + print( + "ChargeBattery[{z}, {t}]={c}, DischargeBattery[{z}, {t}]={d}".format( + z=z, t=m.tp_timestamp[t], c=c, d=d + ) + ) if m.options.smooth_dispatch: # setup model for next iteration if m.iteration_number == 0: - done = False # we'll have to run again to do the smoothing + done = False # we'll have to run again to do the smoothing elif m.iteration_number == 1: # finished smoothing the model post_smooth_solve(m) # now we're done done = True else: - raise RuntimeError("Reached unexpected iteration number {} in module {}.".format(m.iteration_number, __name__)) + raise RuntimeError( + "Reached unexpected iteration number {} in module {}.".format( + m.iteration_number, __name__ + ) + ) else: # not smoothing the dispatch done = True return done + def post_solve(m, outputs_dir): - """ Smooth dispatch if it wasn't already done during an iterative solution. """ - if m.options.smooth_dispatch and not getattr(m, 'iterated_smooth_dispatch', False): + """Smooth dispatch if it wasn't already done during an iterative solution.""" + if m.options.smooth_dispatch and not getattr(m, "iterated_smooth_dispatch", False): pre_smooth_solve(m) # re-solve and load results m.preprocess() solve(m) post_smooth_solve(m) + def pre_smooth_solve(m): - """ store model state and prepare for smoothing """ + """store model state and prepare for smoothing""" save_duals(m) fix_obj_expression(m.Minimize_System_Cost) m.Minimize_System_Cost.deactivate() m.Smooth_Free_Variables.activate() print("smoothing free variables...") + def solve(m): try: switch_model.solve.solve(m) except RuntimeError as e: - if e.message.lower() == 'infeasible model': + if str(e).lower() == "infeasible model": # show a warning, but don't abort the overall post_solve process - print('WARNING: model became infeasible when smoothing; reverting to original solution.') + print( + "WARNING: model became infeasible when smoothing; reverting to original solution." 
+ ) else: raise + def post_smooth_solve(m): - """ restore original model state """ + """restore original model state""" # restore the standard objective m.Smooth_Free_Variables.deactivate() m.Minimize_System_Cost.activate() @@ -192,47 +261,51 @@ def post_smooth_solve(m): def save_duals(m): - if hasattr(m, 'dual'): + if hasattr(m, "dual"): m.old_dual_dict = m.dual._dict.copy() - if hasattr(m, 'rc'): + if hasattr(m, "rc"): m.old_rc_dict = m.rc._dict.copy() + def restore_duals(m): - if hasattr(m, 'dual'): + if hasattr(m, "dual"): m.dual._dict = m.old_dual_dict - if hasattr(m, 'rc'): + if hasattr(m, "rc"): m.rc._dict = m.old_rc_dict + def fix_obj_expression(e, status=True): """Recursively fix all variables included in an objective expression.""" # note: this contains code to work with various versions of Pyomo, # e.g., _potentially_variable in 5.1, is_potentially_variable in 5.6 - if hasattr(e, 'fixed'): - e.fixed = status # see p. 171 of the Pyomo book - elif hasattr(e, '_numerator'): + if hasattr(e, "fixed"): + e.fixed = status # see p. 171 of the Pyomo book + elif hasattr(e, "_numerator"): for e2 in e._numerator: fix_obj_expression(e2, status) for e2 in e._denominator: fix_obj_expression(e2, status) - elif hasattr(e, 'args'): # SumExpression; can't actually see where this is defined in Pyomo though + elif hasattr( + e, "args" + ): # SumExpression; can't actually see where this is defined in Pyomo though for e2 in e.args: fix_obj_expression(e2, status) - elif hasattr(e, '_args'): # switched to 'args' and/or '_args_' in Pyomo 5 + elif hasattr(e, "_args"): # switched to 'args' and/or '_args_' in Pyomo 5 for e2 in e._args: fix_obj_expression(e2, status) - elif hasattr(e, 'expr'): + elif hasattr(e, "expr"): fix_obj_expression(e.expr, status) # below here are parameters or constants, no need to fix - elif hasattr(e, 'is_potentially_variable') and not e.is_potentially_variable(): + elif hasattr(e, "is_potentially_variable") and not e.is_potentially_variable(): pass - elif hasattr(e, '_potentially_variable') and not e._potentially_variable(): + elif hasattr(e, "_potentially_variable") and not e._potentially_variable(): pass - elif hasattr(e, 'is_constant') and e.is_constant(): + elif hasattr(e, "is_constant") and e.is_constant(): pass elif type(e) in native_numeric_types: pass else: raise ValueError( - 'Expression {} does not have an expr, fixed or args property, ' - 'so it cannot be fixed.'.format(e) + "Expression {} does not have an expr, fixed or args property, " + "so it cannot be fixed.".format(e) ) diff --git a/switch_model/hawaii/smooth_dispatch_quadratic.py b/switch_model/hawaii/smooth_dispatch_quadratic.py index b855c028c..a745afbfd 100644 --- a/switch_model/hawaii/smooth_dispatch_quadratic.py +++ b/switch_model/hawaii/smooth_dispatch_quadratic.py @@ -5,62 +5,86 @@ from pyomo.environ import * import switch_model.solve + def define_components(m): - if m.options.solver in ('cplex', 'cplexamp', 'gurobi', 'gurobi_ampl'): + if m.options.solver in ("cplex", "cplexamp", "gurobi", "gurobi_ampl"): m.options.smooth_dispatch = True else: # glpk and cbc can't handle quadratic problem used for smoothing m.options.smooth_dispatch = False if m.options.verbose: - print("Not smoothing dispatch because {} cannot solve a quadratic model.".format(m.options.solver)) - print("Remove hawaii.smooth_dispatch from modules.txt and iterate.txt to avoid this message.") + print( + "Not smoothing dispatch because {} cannot solve a quadratic model.".format( + m.options.solver + ) + ) + print( + "Remove 
hawaii.smooth_dispatch from modules.txt and iterate.txt to avoid this message." + ) # add an alternative objective function that smoothes out time-shiftable energy sources and sinks if m.options.smooth_dispatch: - if hasattr(m, 'ChargeEVs') and isinstance(m.ChargeEVs, Expression): + if hasattr(m, "ChargeEVs") and isinstance(m.ChargeEVs, Expression): # Create a variable bound to the ChargeEVs expression # that can be squared in the objective function without creating # a non-positive-definite problem. m.ChargeEVsVar = Var(m.ChargeEVs.index_set()) m.ChargeEVsVar_fix = Constraint( m.ChargeEVs.index_set(), - rule=lambda m, *key: m.ChargeEVsVar[key] == m.ChargeEVs[key] + rule=lambda m, *key: m.ChargeEVsVar[key] == m.ChargeEVs[key], ) def Smooth_Free_Variables_obj_rule(m): # minimize production (i.e., maximize curtailment / minimize losses) obj = sum( getattr(m, component)[z, t] - for z in m.LOAD_ZONES - for t in m.TIMEPOINTS - for component in m.Zone_Power_Injections) + for z in m.LOAD_ZONES + for t in m.TIMEPOINTS + for component in m.Zone_Power_Injections + ) # minimize the variability of various slack responses components_to_smooth = [ - 'ShiftDemand', 'ChargeBattery', 'DischargeBattery', - 'RunElectrolyzerMW', 'LiquifyHydrogenMW', 'DispatchFuelCellMW', + "ShiftDemand", + "ChargeBattery", + "DischargeBattery", + "RunElectrolyzerMW", + "LiquifyHydrogenMW", + "DispatchFuelCellMW", ] - if hasattr(m, 'ChargeEVsVar'): - components_to_smooth.append('ChargeEVsVar') + if hasattr(m, "ChargeEVsVar"): + components_to_smooth.append("ChargeEVsVar") else: - components_to_smooth.append('ChargeEVs') + components_to_smooth.append("ChargeEVs") for var in components_to_smooth: if hasattr(m, var): if m.options.verbose: print("Will smooth {}.".format(var)) comp = getattr(m, var) - obj += sum(comp[z, t]*comp[z, t] for z in m.LOAD_ZONES for t in m.TIMEPOINTS) + obj += sum( + comp[z, t] * comp[z, t] + for z in m.LOAD_ZONES + for t in m.TIMEPOINTS + ) # include standard storage generators too - if hasattr(m, 'STORAGE_GEN_TPS'): + if hasattr(m, "STORAGE_GEN_TPS"): print("Will smooth charging and discharging of standard storage.") - obj += sum(m.ChargeStorage[g, tp]*m.ChargeStorage[g, tp] for g, tp in m.STORAGE_GEN_TPS) - obj += sum(m.DispatchGen[g, tp]*m.DispatchGen[g, tp] for g, tp in m.STORAGE_GEN_TPS) + obj += sum( + m.ChargeStorage[g, tp] * m.ChargeStorage[g, tp] + for g, tp in m.STORAGE_GEN_TPS + ) + obj += sum( + m.DispatchGen[g, tp] * m.DispatchGen[g, tp] + for g, tp in m.STORAGE_GEN_TPS + ) # also maximize up reserves, which will (a) minimize arbitrary burning off of renewables # (e.g., via storage) and (b) give better representation of the amount of reserves actually available - if hasattr(m, 'Spinning_Reserve_Up_Provisions') and hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'): # advanced module + if hasattr(m, "Spinning_Reserve_Up_Provisions") and hasattr( + m, "GEN_SPINNING_RESERVE_TYPES" + ): # advanced module print("Will maximize provision of up reserves.") - reserve_weight = {'contingency': 0.9, 'regulation': 1.1} + reserve_weight = {"contingency": 0.9, "regulation": 1.1} for comp_name in m.Spinning_Reserve_Up_Provisions: component = getattr(m, comp_name) obj += -0.1 * sum( @@ -68,7 +92,10 @@ def Smooth_Free_Variables_obj_rule(m): for rt, ba, tp in component ) return obj - m.Smooth_Free_Variables = Objective(rule=Smooth_Free_Variables_obj_rule, sense=minimize) + + m.Smooth_Free_Variables = Objective( + rule=Smooth_Free_Variables_obj_rule, sense=minimize + ) # leave standard objective in effect for now 
m.Smooth_Free_Variables.deactivate() @@ -80,6 +107,7 @@ def Smooth_Free_Variables_obj_rule(m): # m.Minimize_System_Cost.activate() # m.Fix_Obj = BuildAction(rule=Fix_Obj_rule) + def pre_iterate(m): if m.options.smooth_dispatch: if m.iteration_number == 0: @@ -88,69 +116,79 @@ def pre_iterate(m): elif m.iteration_number == 1: pre_smooth_solve(m) else: - raise RuntimeError("Reached unexpected iteration number {} in module {}.".format(m.iteration_number, __name__)) + raise RuntimeError( + "Reached unexpected iteration number {} in module {}.".format( + m.iteration_number, __name__ + ) + ) return None # no comment on convergence + def post_iterate(m): if hasattr(m, "ChargeBattery"): double_charge = [ - ( - z, t, - m.ChargeBattery[z, t].value, - m.DischargeBattery[z, t].value - ) - for z in m.LOAD_ZONES - for t in m.TIMEPOINTS - if m.ChargeBattery[z, t].value > 0 - and m.DischargeBattery[z, t].value > 0 + (z, t, m.ChargeBattery[z, t].value, m.DischargeBattery[z, t].value) + for z in m.LOAD_ZONES + for t in m.TIMEPOINTS + if m.ChargeBattery[z, t].value > 0 and m.DischargeBattery[z, t].value > 0 ] if len(double_charge) > 0: print("") - print("WARNING: batteries are simultaneously charged and discharged in some hours.") + print( + "WARNING: batteries are simultaneously charged and discharged in some hours." + ) print("This is usually done to relax the biofuel limit.") for (z, t, c, d) in double_charge: - print('ChargeBattery[{z}, {t}]={c}, DischargeBattery[{z}, {t}]={d}'.format( - z=z, t=m.tp_timestamp[t], - c=c, d=d - )) + print( + "ChargeBattery[{z}, {t}]={c}, DischargeBattery[{z}, {t}]={d}".format( + z=z, t=m.tp_timestamp[t], c=c, d=d + ) + ) if m.options.smooth_dispatch: # setup model for next iteration if m.iteration_number == 0: - done = False # we'll have to run again to do the smoothing + done = False # we'll have to run again to do the smoothing elif m.iteration_number == 1: # finished smoothing the model post_smooth_solve(m) # now we're done done = True else: - raise RuntimeError("Reached unexpected iteration number {} in module {}.".format(m.iteration_number, __name__)) + raise RuntimeError( + "Reached unexpected iteration number {} in module {}.".format( + m.iteration_number, __name__ + ) + ) else: # not smoothing the dispatch done = True return done + def post_solve(m, outputs_dir): - """ Smooth dispatch if it wasn't already done during an iterative solution. 
""" - if m.options.smooth_dispatch and not getattr(m, 'iterated_smooth_dispatch', False): + """Smooth dispatch if it wasn't already done during an iterative solution.""" + if m.options.smooth_dispatch and not getattr(m, "iterated_smooth_dispatch", False): pre_smooth_solve(m) # re-solve and load results m.preprocess() switch_model.solve.solve(m) post_smooth_solve(m) + def pre_smooth_solve(m): - """ store model state and prepare for smoothing """ + """store model state and prepare for smoothing""" save_duals(m) fix_obj_expression(m.Minimize_System_Cost) m.Minimize_System_Cost.deactivate() m.Smooth_Free_Variables.activate() print("smoothing free variables...") + def post_smooth_solve(m): - """ restore original model state """ + """restore original model state""" # restore the standard objective m.Smooth_Free_Variables.deactivate() m.Minimize_System_Cost.activate() @@ -161,36 +199,38 @@ def post_smooth_solve(m): def save_duals(m): - if hasattr(m, 'dual'): + if hasattr(m, "dual"): m.old_dual_dict = m.dual._dict.copy() - if hasattr(m, 'rc'): + if hasattr(m, "rc"): m.old_rc_dict = m.rc._dict.copy() + def restore_duals(m): - if hasattr(m, 'dual'): + if hasattr(m, "dual"): m.dual._dict = m.old_dual_dict - if hasattr(m, 'rc'): + if hasattr(m, "rc"): m.rc._dict = m.old_rc_dict + def fix_obj_expression(e, status=True): """Recursively fix all variables included in an objective expression.""" - if hasattr(e, 'fixed'): - e.fixed = status # see p. 171 of the Pyomo book - elif hasattr(e, '_numerator'): + if hasattr(e, "fixed"): + e.fixed = status # see p. 171 of the Pyomo book + elif hasattr(e, "_numerator"): for e2 in e._numerator: fix_obj_expression(e2, status) for e2 in e._denominator: fix_obj_expression(e2, status) - elif hasattr(e, '_args'): + elif hasattr(e, "_args"): for e2 in e._args: fix_obj_expression(e2, status) - elif hasattr(e, 'expr'): + elif hasattr(e, "expr"): fix_obj_expression(e.expr, status) - elif hasattr(e, 'is_constant'): + elif hasattr(e, "is_constant"): # parameter; we don't actually care if it's mutable or not pass else: raise ValueError( - 'Expression {e} does not have an exg, fixed or _args property, ' + - 'so it cannot be fixed.'.format(e=e) + "Expression {e} does not have an exg, fixed or _args property, " + + "so it cannot be fixed.".format(e=e) ) diff --git a/switch_model/hawaii/switch_patch.py b/switch_model/hawaii/switch_patch.py index 88f5a84e5..d0a85aa1f 100644 --- a/switch_model/hawaii/switch_patch.py +++ b/switch_model/hawaii/switch_patch.py @@ -1,8 +1,10 @@ from pyomo.environ import * + def define_components(m): """Make various changes to the model to support hawaii-specific modules.""" + # # TODO: combine the following changes into a pull request for Pyomo # # patch Pyomo's table-reading function to allow .csv files with headers but no data # import os, re diff --git a/switch_model/hawaii/unserved_load.py b/switch_model/hawaii/unserved_load.py index 786aa892a..d2cb8409c 100644 --- a/switch_model/hawaii/unserved_load.py +++ b/switch_model/hawaii/unserved_load.py @@ -4,9 +4,15 @@ spurious reports of infeasibility.""" from pyomo.environ import * + def define_arguments(argparser): - argparser.add_argument("--unserved-load-penalty", type=float, default=None, - help="Penalty to charge per MWh of unserved load. Usually set high enough to force unserved load to zero (default is $10,000/MWh).") + argparser.add_argument( + "--unserved-load-penalty", + type=float, + default=None, + help="Penalty to charge per MWh of unserved load. 
Usually set high enough to force unserved load to zero (default is $10,000/MWh).", + ) + def define_components(m): # create an unserved load variable with a high penalty cost, @@ -15,33 +21,40 @@ def define_components(m): # cost per MWh for unserved load (high) if m.options.unserved_load_penalty is not None: # always use penalty factor supplied on the command line, if any - m.unserved_load_penalty_per_mwh = Param(initialize=m.options.unserved_load_penalty) + m.unserved_load_penalty_per_mwh = Param( + within=NonNegativeReals, initialize=m.options.unserved_load_penalty + ) else: # no penalty on the command line, use whatever is in the parameter files, or 10000 - m.unserved_load_penalty_per_mwh = Param(default=10000) + m.unserved_load_penalty_per_mwh = Param(within=NonNegativeReals, default=10000) # amount of unserved load during each timepoint m.UnservedLoad = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=NonNegativeReals) # total cost for unserved load - m.UnservedLoadPenalty = Expression(m.TIMEPOINTS, rule=lambda m, tp: - m.tp_duration_hrs[tp] - * sum(m.UnservedLoad[z, tp] * m.unserved_load_penalty_per_mwh for z in m.LOAD_ZONES) + m.UnservedLoadPenalty = Expression( + m.TIMEPOINTS, + rule=lambda m, tp: m.tp_duration_hrs[tp] + * sum( + m.UnservedLoad[z, tp] * m.unserved_load_penalty_per_mwh + for z in m.LOAD_ZONES + ), ) # add the unserved load to the model's energy balance - m.Zone_Power_Injections.append('UnservedLoad') + m.Zone_Power_Injections.append("UnservedLoad") # add the unserved load penalty to the model's objective function - m.Cost_Components_Per_TP.append('UnservedLoadPenalty') + m.Cost_Components_Per_TP.append("UnservedLoadPenalty") # amount of unserved reserves during each timepoint m.UnservedUpReserves = Var(m.TIMEPOINTS, within=NonNegativeReals) m.UnservedDownReserves = Var(m.TIMEPOINTS, within=NonNegativeReals) # total cost for unserved reserves (90% as high as cost of unserved load, # to make the model prefer to serve load when possible) - m.UnservedReservePenalty = Expression(m.TIMEPOINTS, rule=lambda m, tp: - m.tp_duration_hrs[tp] + m.UnservedReservePenalty = Expression( + m.TIMEPOINTS, + rule=lambda m, tp: m.tp_duration_hrs[tp] * 0.9 * m.unserved_load_penalty_per_mwh - * (m.UnservedUpReserves[tp] + m.UnservedDownReserves[tp]) + * (m.UnservedUpReserves[tp] + m.UnservedDownReserves[tp]), ) # add the unserved load penalty to the model's objective function - m.Cost_Components_Per_TP.append('UnservedReservePenalty') + m.Cost_Components_Per_TP.append("UnservedReservePenalty") diff --git a/switch_model/hawaii/util.py b/switch_model/hawaii/util.py index 7ba759132..aaa52148f 100644 --- a/switch_model/hawaii/util.py +++ b/switch_model/hawaii/util.py @@ -6,26 +6,31 @@ # check whether this is an interactive session # (if not, there will be no __main__.__file__) -interactive_session = not hasattr(main, '__file__') +interactive_session = not hasattr(main, "__file__") -csv.register_dialect("switch-csv", +csv.register_dialect( + "switch-csv", delimiter=",", lineterminator="\n", - doublequote=False, escapechar="\\", - quotechar='"', quoting=csv.QUOTE_MINIMAL, - skipinitialspace = False + doublequote=False, + escapechar="\\", + quotechar='"', + quoting=csv.QUOTE_MINIMAL, + skipinitialspace=False, ) + def create_table(**kwargs): """Create an empty output table and write the headings.""" output_file = kwargs["output_file"] headings = kwargs["headings"] - with open(output_file, 'w') as f: + with open(output_file, "w") as f: w = csv.writer(f, dialect="switch-csv") # write header row 
w.writerow(list(headings)) + def append_table(model, *indexes, **kwargs): """Add rows to an output table, iterating over the indexes specified, and getting row data from the values function specified.""" @@ -35,22 +40,22 @@ def append_table(model, *indexes, **kwargs): # create a master indexing set # this is a list of lists, even if only one list was specified idx = itertools.product(*indexes) - with open(output_file, 'a') as f: + with open(output_file, "a") as f: w = csv.writer(f, dialect="switch-csv") # write the data # import pdb # if 'rfm' in output_file: # pdb.set_trace() w.writerows( - tuple(value(v) for v in values(model, *unpack_elements(x))) - for x in idx + tuple(value(v) for v in values(model, *unpack_elements(x))) for x in idx ) + def unpack_elements(tup): """Unpack any multi-element objects within tup, to make a single flat tuple. Note: this is not recursive. This is used to flatten the product of a multi-dimensional index with anything else.""" - l=[] + l = [] for t in tup: if isinstance(t, string_types): l.append(t) @@ -64,29 +69,34 @@ def unpack_elements(tup): l.append(t) return tuple(l) + def write_table(model, *indexes, **kwargs): """Write an output table in one shot - headers and body.""" output_file = kwargs["output_file"] - print("Writing {file} ...".format(file=output_file), end=' ') + print("Writing {file} ...".format(file=output_file), end=" ") sys.stdout.flush() # display the part line to the user - start=time.time() + start = time.time() create_table(**kwargs) append_table(model, *indexes, **kwargs) - print("time taken: {dur:.2f}s".format(dur=time.time()-start)) + print("time taken: {dur:.2f}s".format(dur=time.time() - start)) + def get(component, index, default=None): """Return an element from an indexed component, or the default value if the index is invalid.""" return component[index] if index in component else default + def log(msg): sys.stdout.write(msg) sys.stdout.flush() # display output to the user, even a partial line + def tic(): tic.start_time = time.time() + def toc(): - log("time taken: {dur:.2f}s\n".format(dur=time.time()-tic.start_time)) + log("time taken: {dur:.2f}s\n".format(dur=time.time() - tic.start_time)) diff --git a/switch_model/main.py b/switch_model/main.py index fbd3daa48..1d987a163 100644 --- a/switch_model/main.py +++ b/switch_model/main.py @@ -1,4 +1,4 @@ -# Copyright (c) 2015-2019 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. """Script to handle switch calls from the command line.""" @@ -9,6 +9,7 @@ # print "running {} as {}.".format(__file__, __name__) + def main(): cmds = ["solve", "solve-scenarios", "test", "upgrade", "--version"] if len(sys.argv) >= 2 and sys.argv[1] in cmds: @@ -38,8 +39,13 @@ def main(): from switch_model.upgrade import main main() else: - print("Usage: {} {{{}}} ...".format(os.path.basename(sys.argv[0]), ", ".join(cmds))) + print( + "Usage: {} {{{}}} ...".format( + os.path.basename(sys.argv[0]), ", ".join(cmds) + ) + ) print("Use one of these commands with --help for more information.") + if __name__ == "__main__": main() diff --git a/switch_model/policies/carbon_policies.py b/switch_model/policies/carbon_policies.py index 8820d526e..f75493037 100644 --- a/switch_model/policies/carbon_policies.py +++ b/switch_model/policies/carbon_policies.py @@ -1,4 +1,4 @@ -# Copyright (c) 2015-2019 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. 
All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. """ Add emission policies to the model, either in the form of an added cost, or of @@ -19,29 +19,44 @@ """ from __future__ import division import os -from pyomo.environ import Set, Param, Expression, Constraint, Suffix +from pyomo.environ import Set, Param, Expression, Constraint, Suffix, NonNegativeReals import switch_model.reporting as reporting + def define_components(model): - model.carbon_cap_tco2_per_yr = Param(model.PERIODS, default=float('inf'), doc=( - "Emissions from this model must be less than this cap. " - "This is specified in metric tonnes of CO2 per year.")) - model.Enforce_Carbon_Cap = Constraint(model.PERIODS, - rule=lambda m, p: - Constraint.Skip if m.carbon_cap_tco2_per_yr[p] == float('inf') - else m.AnnualEmissions[p] <= m.carbon_cap_tco2_per_yr[p], - doc=("Enforces the carbon cap for generation-related emissions.")) + model.carbon_cap_tco2_per_yr = Param( + model.PERIODS, + within=NonNegativeReals, + default=float("inf"), + doc=( + "Emissions from this model must be less than this cap. " + "This is specified in metric tonnes of CO2 per year." + ), + ) + model.Enforce_Carbon_Cap = Constraint( + model.PERIODS, + rule=lambda m, p: Constraint.Skip + if m.carbon_cap_tco2_per_yr[p] == float("inf") + else m.AnnualEmissions[p] <= m.carbon_cap_tco2_per_yr[p], + doc=("Enforces the carbon cap for generation-related emissions."), + ) # Make sure the model has a dual suffix for determining implicit carbon costs if not hasattr(model, "dual"): model.dual = Suffix(direction=Suffix.IMPORT) - model.carbon_cost_dollar_per_tco2 = Param(model.PERIODS, default=0.0, - doc="The cost adder applied to emissions, in future dollars per metric tonne of CO2.") - model.EmissionsCosts = Expression(model.PERIODS, - rule=lambda model, period: \ - model.AnnualEmissions[period] * model.carbon_cost_dollar_per_tco2[period], - doc=("Enforces the carbon cap for generation-related emissions.")) - model.Cost_Components_Per_Period.append('EmissionsCosts') + model.carbon_cost_dollar_per_tco2 = Param( + model.PERIODS, + within=NonNegativeReals, + default=0.0, + doc="The cost adder applied to emissions, in future dollars per metric tonne of CO2.", + ) + model.EmissionsCosts = Expression( + model.PERIODS, + rule=lambda model, period: model.AnnualEmissions[period] + * model.carbon_cost_dollar_per_tco2[period], + doc=("Annual cost of carbon emissions, in future dollars per year."), + ) + model.Cost_Components_Per_Period.append("EmissionsCosts") def load_inputs(model, switch_data, inputs_dir): @@ -56,11 +71,14 @@ def load_inputs(model, switch_data, inputs_dir): """ switch_data.load_aug( - filename=os.path.join(inputs_dir, 'carbon_policies.csv'), + filename=os.path.join(inputs_dir, "carbon_policies.csv"), optional=True, - optional_params=(model.carbon_cap_tco2_per_yr, model.carbon_cost_dollar_per_tco2), - auto_select=True, - param=(model.carbon_cap_tco2_per_yr, model.carbon_cost_dollar_per_tco2)) + optional_params=( + model.carbon_cap_tco2_per_yr, + model.carbon_cost_dollar_per_tco2, + ), + param=(model.carbon_cap_tco2_per_yr, model.carbon_cost_dollar_per_tco2), + ) def post_solve(model, outdir): @@ -74,25 +92,47 @@ def post_solve(model, outdir): discrete unit commitment, or other integer decision variables, the dual values will not be exported.
""" + def get_row(model, period): - row = [period, model.AnnualEmissions[period], - model.carbon_cap_tco2_per_yr[period]] - # Only print the carbon cap dual value if it exists and if the problem - # is purely linear. - if not model.has_discrete_variables() and model.Enforce_Carbon_Cap[period] in model.dual: - row.append(model.dual[model.Enforce_Carbon_Cap[period]] / - model.bring_annual_costs_to_base_year[period]) + row = [ + period, + model.AnnualEmissions[period], + model.carbon_cap_tco2_per_yr[period], + ] + # Only print the carbon cap dual value if it exists + # Note: we previously only reported it if the model was also strictly + # continuous, but now we let the user worry about that (some solvers + # can report duals for integer models by fixing the variables to their + # integer values, which is often a reasonable approach and should give + # meaningful duals for the carbon cost, which occurs on a much higher + # level). + if ( + period in model.Enforce_Carbon_Cap + and model.Enforce_Carbon_Cap[period] in model.dual + ): + row.append( + model.dual[model.Enforce_Carbon_Cap[period]] + / model.bring_annual_costs_to_base_year[period] + ) else: - row.append('.') + row.append(".") row.append(model.carbon_cost_dollar_per_tco2[period]) - row.append(model.carbon_cost_dollar_per_tco2[period] * \ - model.AnnualEmissions[period]) + row.append( + model.carbon_cost_dollar_per_tco2[period] * model.AnnualEmissions[period] + ) return row reporting.write_table( - model, model.PERIODS, + model, + model.PERIODS, output_file=os.path.join(outdir, "emissions.csv"), - headings=("PERIOD", "AnnualEmissions_tCO2_per_yr", - "carbon_cap_tco2_per_yr", "carbon_cap_dual_future_dollar_per_tco2", - "carbon_cost_dollar_per_tco2", "carbon_cost_annual_total"), - values=get_row) + headings=( + "PERIOD", + "AnnualEmissions_tCO2_per_yr", + "carbon_cap_tco2_per_yr", + "carbon_cap_dual_future_dollar_per_tco2", + "carbon_cost_dollar_per_tco2", + "carbon_cost_annual_total", + ), + values=get_row, + ) diff --git a/switch_model/policies/rps_simple.py b/switch_model/policies/rps_simple.py index 78a6d0a3e..44f5e74ae 100644 --- a/switch_model/policies/rps_simple.py +++ b/switch_model/policies/rps_simple.py @@ -1,5 +1,6 @@ from __future__ import division -# Copyright 2017 The Switch Authors. All rights reserved. + +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2, which is in the LICENSE file. 
import os @@ -23,6 +24,7 @@ """ + def define_components(mod): """ @@ -62,54 +64,60 @@ def define_components(mod): """ - mod.f_rps_eligible = Param( - mod.FUELS, - within=Boolean, - default=False) + mod.f_rps_eligible = Param(mod.FUELS, within=Boolean, default=False) mod.RPS_ENERGY_SOURCES = Set( - initialize=lambda m: set(m.NON_FUEL_ENERGY_SOURCES) | \ - set(f for f in m.FUELS if m.f_rps_eligible[f])) + dimen=1, + initialize=lambda m: list(m.NON_FUEL_ENERGY_SOURCES) + + [f for f in m.FUELS if m.f_rps_eligible[f]], + ) - mod.RPS_PERIODS = Set( - validate=lambda m, p: p in m.PERIODS) - mod.rps_target = Param( - mod.RPS_PERIODS, - within=PercentFraction) + mod.RPS_PERIODS = Set(dimen=1, validate=lambda m, p: p in m.PERIODS) + mod.rps_target = Param(mod.RPS_PERIODS, within=PercentFraction) mod.RPSFuelEnergy = Expression( mod.RPS_PERIODS, rule=lambda m, p: sum( - m.tp_weight[t] * - sum( + m.tp_weight[t] + * sum( m.GenFuelUseRate[g, t, f] for f in m.FUELS_FOR_GEN[g] if m.f_rps_eligible[f] - ) / m.gen_full_load_heat_rate[g] + ) + / m.gen_full_load_heat_rate[g] for g in m.FUEL_BASED_GENS - for t in m.TPS_FOR_GEN_IN_PERIOD[g, p]) - ) + for t in m.TPS_FOR_GEN_IN_PERIOD[g, p] + ), + ) mod.RPSNonFuelEnergy = Expression( mod.RPS_PERIODS, - rule=lambda m, p: sum(m.DispatchGen[g, t] * m.tp_weight[t] + rule=lambda m, p: sum( + m.DispatchGen[g, t] * m.tp_weight[t] for g in m.NON_FUEL_BASED_GENS - for t in m.TPS_FOR_GEN_IN_PERIOD[g, p])) + for t in m.TPS_FOR_GEN_IN_PERIOD[g, p] + ), + ) mod.RPS_Enforce_Target = Constraint( mod.RPS_PERIODS, - rule=lambda m, p: (m.RPSFuelEnergy[p] + m.RPSNonFuelEnergy[p] >= - m.rps_target[p] * total_demand_in_period(m, p))) + rule=lambda m, p: ( + m.RPSFuelEnergy[p] + m.RPSNonFuelEnergy[p] + >= m.rps_target[p] * total_demand_in_period(m, p) + ), + ) def total_generation_in_period(model, period): return sum( model.DispatchGen[g, t] * model.tp_weight[t] for g in model.GENERATION_PROJECTS - for t in model.TPS_FOR_GEN_IN_PERIOD[g, period]) + for t in model.TPS_FOR_GEN_IN_PERIOD[g, period] + ) def total_demand_in_period(model, period): - return sum(model.zone_total_demand_in_period_mwh[zone, period] - for zone in model.LOAD_ZONES) + return sum( + model.zone_total_demand_in_period_mwh[zone, period] for zone in model.LOAD_ZONES + ) def load_inputs(mod, switch_data, inputs_dir): @@ -130,15 +138,16 @@ def load_inputs(mod, switch_data, inputs_dir): """ switch_data.load_aug( - filename=os.path.join(inputs_dir, 'fuels.csv'), - select=('fuel','f_rps_eligible'), - optional_params=['f_rps_eligible'], - param=(mod.f_rps_eligible,)) + filename=os.path.join(inputs_dir, "fuels.csv"), + select=("fuel", "f_rps_eligible"), + optional_params=["f_rps_eligible"], + param=(mod.f_rps_eligible,), + ) switch_data.load_aug( - filename=os.path.join(inputs_dir, 'rps_targets.csv'), - autoselect=True, + filename=os.path.join(inputs_dir, "rps_targets.csv"), index=mod.RPS_PERIODS, - param=(mod.rps_target,)) + param=(mod.rps_target,), + ) def post_solve(instance, outdir): @@ -148,21 +157,34 @@ def post_solve(instance, outdir): """ import switch_model.reporting as reporting + def get_row(m, p): row = (p,) row += (m.RPSFuelEnergy[p] / 1000,) row += (m.RPSNonFuelEnergy[p] / 1000,) - row += (total_generation_in_period(m,p) / 1000,) - row += ((m.RPSFuelEnergy[p] + m.RPSNonFuelEnergy[p]) / - total_generation_in_period(m,p),) + row += (total_generation_in_period(m, p) / 1000,) + row += ( + (m.RPSFuelEnergy[p] + m.RPSNonFuelEnergy[p]) + / total_generation_in_period(m, p), + ) row += (total_demand_in_period(m, p),) - row 
+= ((m.RPSFuelEnergy[p] + m.RPSNonFuelEnergy[p]) / - total_demand_in_period(m, p),) + row += ( + (m.RPSFuelEnergy[p] + m.RPSNonFuelEnergy[p]) / total_demand_in_period(m, p), + ) return row + reporting.write_table( - instance, instance.RPS_PERIODS, + instance, + instance.RPS_PERIODS, output_file=os.path.join(outdir, "rps_energy.csv"), - headings=("PERIOD", "RPSFuelEnergyGWh", "RPSNonFuelEnergyGWh", - "TotalGenerationInPeriodGWh", "RPSGenFraction", - "TotalSalesInPeriodGWh", "RPSSalesFraction"), - values=get_row) + headings=( + "PERIOD", + "RPSFuelEnergyGWh", + "RPSNonFuelEnergyGWh", + "TotalGenerationInPeriodGWh", + "RPSGenFraction", + "TotalSalesInPeriodGWh", + "RPSSalesFraction", + ), + values=get_row, + ) diff --git a/switch_model/reporting/__init__.py b/switch_model/reporting/__init__.py index f9017ae6a..df174b444 100644 --- a/switch_model/reporting/__init__.py +++ b/switch_model/reporting/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2015-2019 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. """ @@ -19,13 +19,15 @@ """ from __future__ import print_function -from switch_model.utilities import string_types -dependencies = 'switch_model.financials' +from switch_model.utilities import string_types, UnknownSetDimen + +dependencies = "switch_model.financials" import os import csv import itertools + try: # Python 2 import cPickle as pickle @@ -38,22 +40,33 @@ "switch-csv", delimiter=",", lineterminator="\n", - doublequote=False, escapechar="\\", - quotechar='"', quoting=csv.QUOTE_MINIMAL, - skipinitialspace=False + doublequote=False, + escapechar="\\", + quotechar='"', + quoting=csv.QUOTE_MINIMAL, + skipinitialspace=False, ) + def define_arguments(argparser): argparser.add_argument( - "--sorted-output", default=False, action='store_true', - dest='sorted_output', - help='Write generic variable result values in sorted order') + "--skip-generic-output", + default=False, + action="store_true", + dest="skip_generic_output", + help="Skip exporting generic variable results", + ) argparser.add_argument( - "--save-expressions", "--save-expression", dest="save_expressions", nargs='+', - default=[], action='extend', - help="List of expressions to save in addition to variables; can also be 'all' or 'none'." 
+ "--save-expressions", + "--save-expression", + dest="save_expressions", + nargs="+", + default=[], + action="extend", + help="List of expressions to save in addition to variables; can also be 'all' or 'none'.", ) + def write_table(instance, *indexes, **kwargs): # there must be a way to accept specific named keyword arguments and # also an open-ended list of positional arguments (*indexes), but I @@ -61,9 +74,9 @@ def write_table(instance, *indexes, **kwargs): output_file = kwargs["output_file"] headings = kwargs["headings"] values = kwargs["values"] - digits = kwargs.get('digits', 6) + digits = kwargs.get("digits", 6) - with open(output_file, 'w') as f: + with open(output_file, "w") as f: w = csv.writer(f, dialect="switch-csv") # write header row w.writerow(list(headings)) @@ -79,36 +92,46 @@ def format_row(row): row[i] = sig_digits.format(v) return tuple(row) + idx = list(itertools.product(*indexes)) + if instance.options.sorted_output: + idx.sort() + try: w.writerows( - format_row(row=values(instance, *unpack_elements(x))) - for x in itertools.product(*indexes) + format_row(row=values(instance, *unpack_elements(x))) for x in idx ) - except TypeError: # lambda got wrong number of arguments + except TypeError: # lambda got wrong number of arguments # use old code, which doesn't unpack the indices w.writerows( # TODO: flatten x (unpack tuples) like Pyomo before calling values() # That may cause problems elsewhere though... format_row(row=values(instance, *x)) - for x in itertools.product(*indexes) + for x in idx + ) + print( + "DEPRECATION WARNING: switch_model.reporting.write_table() was called with a function" + ) + print( + "that expects multidimensional index values to be stored in tuples, but Switch now unpacks" + ) + print( + "these tuples automatically. Please update your code to work with unpacked index values." ) - print("DEPRECATION WARNING: switch_model.reporting.write_table() was called with a function") - print("that expects multidimensional index values to be stored in tuples, but Switch now unpacks") - print("these tuples automatically. Please update your code to work with unpacked index values.") print("Problem occured with {}.".format(values.__code__)) + def unpack_elements(items): """Unpack any multi-element objects within items, to make a single flat list. Note: this is not recursive. This is used to flatten the product of a multi-dimensional index with anything else.""" - l=[] + l = [] for x in items: if isinstance(x, string_types): l.append(x) else: try: l.extend(x) - except TypeError: # x isn't iterable + except TypeError: # x isn't iterable l.append(x) return l @@ -117,7 +140,8 @@ def post_solve(instance, outdir): """ Minimum output generation for all model runs. 
""" - save_generic_results(instance, outdir, instance.options.sorted_output) + if not instance.options.skip_generic_output: + save_generic_results(instance, outdir, instance.options.sorted_output) save_total_cost_value(instance, outdir) save_cost_components(instance, outdir) @@ -125,31 +149,44 @@ def post_solve(instance, outdir): def save_generic_results(instance, outdir, sorted_output): components = list(instance.component_objects(Var)) # add Expression objects that should be saved, if any - if 'none' in instance.options.save_expressions: + if "none" in instance.options.save_expressions: # drop everything up till the last 'none' (users may have added more after that) - last_none = ( - len(instance.options.save_expressions) - - instance.options.save_expressions[::-1].index('none') - ) - instance.options.save_expressions = instance.options.save_expressions[last_none:] + last_none = len( + instance.options.save_expressions + ) - instance.options.save_expressions[::-1].index("none") + instance.options.save_expressions = instance.options.save_expressions[ + last_none: + ] - if 'all' in instance.options.save_expressions: + if "all" in instance.options.save_expressions: components += list(instance.component_objects(Expression)) else: components += [getattr(instance, c) for c in instance.options.save_expressions] + missing_val_list = [] for var in components: - output_file = os.path.join(outdir, '%s.csv' % var.name) - with open(output_file, 'w') as fh: - writer = csv.writer(fh, dialect='switch-csv') + output_file = os.path.join(outdir, "%s.csv" % var.name) + with open(output_file, "w") as fh: + writer = csv.writer(fh, dialect="switch-csv") if var.is_indexed(): index_name = var.index_set().name + index_dimen = var.index_set().dimen + if index_dimen is UnknownSetDimen: + # Need to specify dimen even if it's 1 in Pyomo 5.7+. We + # could potentially use + # pyomo.dataportal.process_data._guess_set_dimen() but it is + # undocumented and not needed if all the sets have dimen + # specified, which they do now. + raise ValueError( + f"Set {index_name} has unknown dimen; unable to infer " + f"number of index columns to write to {var.name}.csv." + ) # Write column headings - writer.writerow(['%s_%d' % (index_name, i + 1) - for i in range(var.index_set().dimen)] + - [var.name]) - # Results are saved in a random order by default for - # increased speed. Sorting is available if wanted. + writer.writerow( + [f"{index_name}_{i+1}" for i in range(index_dimen)] + [var.name] + ) + # Results are saved in the order of the index set by default. + # Lexicographic sorting is available if wanted. items = sorted(var.items()) if sorted_output else list(var.items()) for key, obj in items: writer.writerow(tuple(make_iterable(key)) + (get_value(obj),)) @@ -157,43 +194,57 @@ def save_generic_results(instance, outdir, sorted_output): # single-valued variable writer.writerow([var.name]) writer.writerow([get_value(obj)]) + if missing_val_list: + msg = ( + "WARNING: {} {}. This " + "usually indicates a coding error: either the variable is " + "not needed or it has accidentally been omitted from all " + "constraints and the objective function. 
These variables include " + "{}.".format( + len(missing_val_list), + ( + "variable has not been assigned a value" + if len(missing_val_list) == 1 + else "variables have not been assigned values" + ), + missing_val_list[:10], + ) + ) + try: + logger = obj.model().logger + logger.warn(msg) + except AttributeError: + print(msg) + -def get_value(obj): +def get_value(obj, missing_val_list=[]): """ Retrieve value of one element of a Variable or Expression, converting division-by-zero to nan and uninitialized values to None. """ - try: - val = value(obj) - except ZeroDivisionError: - # diagnostic expressions sometimes have 0 denominator, - # e.g., AverageFuelCosts for unused fuels; - val = float("nan") - except ValueError: - # If variables are not used in constraints or the - # objective function, they will never get values, and - # give a ValueError at this point. - # Note: for variables this could instead use 0 if allowed, or - # otherwise the closest bound. - if getattr(obj, 'value', 0) is None: - val = None - # Pyomo will print an error before it raises the ValueError, - # but we say more here to help users figure out what's going on. - print ( - "WARNING: variable {} has not been assigned a value. This " - "usually indicates a coding error: either the variable is " - "not needed or it has accidentally been omitted from all " - "constraints and the objective function.".format(obj.name) - ) - else: - # Caught some other ValueError - raise + if not hasattr(obj, "expr") and getattr(obj, "value", 0) is None: + # If variables are not used in constraints or the objective function, + # they will never get values, and give a ValueError if accessed. + # Accessing obj.value may be undocumented, but avoids using value(obj), + # which emits a lot of unsuppressable text if the value is unassigned. + # Note: for variables we could use 0 if allowed or otherwise the closest + # bound. But using None makes it more clear that something weird + # happened.
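+        # (Editor's note, an assumption about intent: the mutable default
+        # `missing_val_list=[]` persists across calls, so all calls that
+        # omit the argument share one accumulating list; callers that want
+        # per-run tracking should pass their own list explicitly.)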
+ val = None + missing_val_list.append(obj.name) + else: + try: + val = value(obj) + except ZeroDivisionError: + # diagnostic expressions sometimes have 0 denominator, + # e.g., AverageFuelCosts for unused fuels; + val = float("nan") return val def save_total_cost_value(instance, outdir): - with open(os.path.join(outdir, 'total_cost.txt'), 'w') as fh: - fh.write('{}\n'.format(value(instance.SystemCost))) + with open(os.path.join(outdir, "total_cost.txt"), "w") as fh: + fh.write("{}\n".format(value(instance.SystemCost))) def save_cost_components(m, outdir): @@ -205,22 +256,25 @@ def save_cost_components(m, outdir): cost = getattr(m, annual_cost) # note: storing value() instead of the expression may save # some memory while this function runs - cost_dict[annual_cost] = value(sum( - cost[p] * m.bring_annual_costs_to_base_year[p] - for p in m.PERIODS - )) + cost_dict[annual_cost] = value( + sum(cost[p] * m.bring_annual_costs_to_base_year[p] for p in m.PERIODS) + ) for tp_cost in m.Cost_Components_Per_TP: cost = getattr(m, tp_cost) - cost_dict[tp_cost] = value(sum( - cost[t] * m.tp_weight_in_year[t] - * m.bring_annual_costs_to_base_year[m.tp_period[t]] - for t in m.TIMEPOINTS - )) + cost_dict[tp_cost] = value( + sum( + cost[t] + * m.tp_weight_in_year[t] + * m.bring_annual_costs_to_base_year[m.tp_period[t]] + for t in m.TIMEPOINTS + ) + ) write_table( m, list(cost_dict.keys()), output_file=os.path.join(outdir, "cost_components.csv"), - headings=('component', 'npv_cost'), + headings=("component", "npv_cost"), values=lambda m, c: (c, cost_dict[c]), - digits=16 + digits=16, ) + diff --git a/switch_model/reporting/basic_exports.py b/switch_model/reporting/basic_exports.py index e2a184496..6b90eb5b7 100644 --- a/switch_model/reporting/basic_exports.py +++ b/switch_model/reporting/basic_exports.py @@ -1,4 +1,4 @@ -# Copyright (c) 2017 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. """ @@ -13,7 +13,11 @@ from csv import reader from itertools import cycle from pyomo.environ import Var -from switch_model.financials import uniform_series_to_present_value, future_to_present_value +from switch_model.financials import ( + uniform_series_to_present_value, + future_to_present_value, +) + def define_arguments(argparser): # argparser.add_argument( @@ -21,32 +25,44 @@ def define_arguments(argparser): # help="Exports energy marginal costs in US$/MWh per load zone and timepoint, calculated as dual variable values from the energy balance constraint." # ) argparser.add_argument( - "--export-capacities", action='store_true', default=False, - help="Exports cummulative installed generating capacity in MW per \ - technology per period." + "--export-capacities", + action="store_true", + default=False, + help="Exports cumulative installed generating capacity in MW per \ + technology per period.", ) argparser.add_argument( - "--export-transmission", action='store_true', default=False, - help="Exports cummulative installed transmission capacity in MW per \ - path per period." + "--export-transmission", + action="store_true", + default=False, + help="Exports cumulative installed transmission capacity in MW per \ + path per period.", ) argparser.add_argument( - "--export-tech-dispatch", action='store_true', default=False, + "--export-tech-dispatch", + action="store_true", + default=False, help="Exports dispatched capacity per generator technology in MW per \ - timepoint." 
+ timepoint.", ) argparser.add_argument( - "--export-reservoirs", action='store_true', default=False, - help="Exports final reservoir volumes in cubic meters per timepoint." + "--export-reservoirs", + action="store_true", + default=False, + help="Exports final reservoir volumes in cubic meters per timepoint.", ) argparser.add_argument( - "--export-all", action='store_true', default=False, + "--export-all", + action="store_true", + default=False, help="Exports all tables and plots. Sets all other export options to \ - True." + True.", ) argparser.add_argument( - "--export-load-blocks", action='store_true', default=False, - help="Exports tables and plots for load block formulation." + "--export-load-blocks", + action="store_true", + default=False, + help="Exports tables and plots for load block formulation.", ) @@ -65,9 +81,10 @@ def post_solve(mod, outdir): import matplotlib.pyplot as plt from cycler import cycler from matplotlib.backends.backend_pdf import PdfPages + nan = float("nan") - summaries_dir = os.path.join(outdir,"Summaries") + summaries_dir = os.path.join(outdir, "Summaries") if not os.path.exists(summaries_dir): os.makedirs(summaries_dir) else: @@ -75,12 +92,12 @@ def post_solve(mod, outdir): for f in os.listdir(summaries_dir): os.unlink(os.path.join(summaries_dir, f)) - color_map = plt.get_cmap('gist_rainbow') - styles = cycle(['-','--','-.',':']) + color_map = plt.get_cmap("gist_rainbow") + styles = cycle(["-", "--", "-.", ":"]) ##### # Round doubles to the first decimal - #for var in mod.component_objects(): + # for var in mod.component_objects(): # if not isinstance(var, Var): # continue # for key, obj in var.items(): @@ -106,40 +123,44 @@ def plot_inv_decision(name, tab, n_data, ind, by_period): table into a Pandas Dataframe. Usually represents time. by_period: A boolean indicating whether the plot should be stacked - by period (False) or if values should be cummulative (True). In the + by period (False) or if values should be cumulative (True). In the former, x axis represents the investment alternatives and in the latter, it represents periods (hence he boolean values required). """ if by_period: - df = pd.DataFrame(tab[1:], - columns = tab[0]).set_index(ind).transpose() + df = pd.DataFrame(tab[1:], columns=tab[0]).set_index(ind).transpose() stack = False - num_col = int(n_data)/10 + num_col = int(n_data) / 10 else: - df = pd.DataFrame(tab[1:], columns = tab[0]).set_index(ind) + df = pd.DataFrame(tab[1:], columns=tab[0]).set_index(ind) stack = True - num_col = int(n_data)/2 + num_col = int(n_data) / 2 fig = plt.figure() inv_ax = fig.add_subplot(111) inv_ax.grid(b=False) # You have to play with the color map and the line style list to # get enough combinations for your particular plot - inv_ax.set_prop_cycle(cycler('color', - [color_map(i/n_data) for i in range(0, n_data+1)])) + inv_ax.set_prop_cycle( + cycler("color", [color_map(i / n_data) for i in range(0, n_data + 1)]) + ) # To locate the legend: "loc" is the point of the legend for which you # will specify coordinates. 
These coords are specified in # bbox_to_anchor (can be only 1 point or couple) - inv_plot = df.plot(kind='bar', ax=inv_ax, - stacked=stack).legend(loc='lower left', fontsize=8, - bbox_to_anchor=(0.,1.015,1.,1.015), ncol=num_col, mode="expand") + inv_plot = df.plot(kind="bar", ax=inv_ax, stacked=stack).legend( + loc="lower left", + fontsize=8, + bbox_to_anchor=(0.0, 1.015, 1.0, 1.015), + ncol=num_col, + mode="expand", + ) if by_period: plt.xticks(rotation=0, fontsize=10) - fname = summaries_dir+'/'+name+'.pdf' + fname = summaries_dir + "/" + name + ".pdf" else: plt.xticks(rotation=90, fontsize=9) - fname = summaries_dir+'/'+name+'_stacked_by_p.pdf' - plt.savefig(fname, bbox_extra_artists=(inv_plot,), bbox_inches='tight') + fname = summaries_dir + "/" + name + "_stacked_by_p.pdf" + plt.savefig(fname, bbox_extra_artists=(inv_plot,), bbox_inches="tight") plt.close() def plot_dis_decision(name, tab, n_data, ind): @@ -163,68 +184,86 @@ def plot_dis_decision(name, tab, n_data, ind): """ - plots = PdfPages(os.path.join(outdir,"Summaries",name)+'.pdf') + plots = PdfPages(os.path.join(outdir, "Summaries", name) + ".pdf") - df = pd.DataFrame(tab[1:], columns = tab[0]) + df = pd.DataFrame(tab[1:], columns=tab[0]) n_scen = mod.SCENARIOS.__len__() - #num_col = int(n_data * n_scen)/8 + # num_col = int(n_data * n_scen)/8 num_col = 6 - for p in ['all']+[p for p in mod.PERIODS]: - fig = plt.figure(figsize=(17,8), dpi=100) + for p in ["all"] + [p for p in mod.PERIODS]: + fig = plt.figure(figsize=(17, 8), dpi=100) dis_ax = fig.add_subplot(111) dis_ax.grid(b=False) # You have to play with the color map and the line style list to # get enough combinations for your particular plot. # Set up different x axis labels if all periods are being plotted - if p == 'all': - dis_ax.set_xticks([ - i*24 for i in range(int(len(mod.TIMEPOINTS)/24) + 1) - ]) - dis_ax.set_xticklabels([ - mod.tp_timestamp[mod.TIMEPOINTS[i*24+1]] - for i in range(int(len(mod.TIMEPOINTS)/24)) - ]) + if p == "all": + dis_ax.set_xticks( + [i * 24 for i in range(int(len(mod.TIMEPOINTS) / 24) + 1)] + ) + dis_ax.set_xticklabels( + [ + mod.tp_timestamp[mod.TIMEPOINTS[i * 24 + 1]] + for i in range(int(len(mod.TIMEPOINTS) / 24)) + ] + ) # Technologies have different linestyles and scenarios have # different colors - dis_ax.set_prop_cycle(cycler('color', - [color_map(i/float(n_data-1)) for i in range(n_data)]) * - cycler('linestyle',[next(styles) for i in range(n_scen)])) - df_to_plot = df.drop([ind], axis=1).replace('', nan) + dis_ax.set_prop_cycle( + cycler( + "color", + [color_map(i / float(n_data - 1)) for i in range(n_data)], + ) + * cycler("linestyle", [next(styles) for i in range(n_scen)]) + ) + df_to_plot = df.drop([ind], axis=1).replace("", nan) else: n_scen = mod.PERIOD_SCENARIOS[p].__len__() - dis_ax.set_xticks([ - i*6 for i in range(int(len(mod.PERIOD_TPS[p])/6) + 1) - ]) - dis_ax.set_xticklabels([ - mod.tp_timestamp[mod.PERIOD_TPS[p][t*6+1]] - for t in range(int(len(mod.PERIOD_TPS[p])/6)) - ]) + dis_ax.set_xticks( + [i * 6 for i in range(int(len(mod.PERIOD_TPS[p]) / 6) + 1)] + ) + dis_ax.set_xticklabels( + [ + mod.tp_timestamp[mod.PERIOD_TPS[p][t * 6 + 1]] + for t in range(int(len(mod.PERIOD_TPS[p]) / 6)) + ] + ) # Technologies have different colors and scenarios have # different line styles - dis_ax.set_prop_cycle(cycler('color', - [color_map(i/float(n_data-1)) for i in range(n_data)]) * - cycler('linestyle', [next(styles) for i in range(n_scen)])) + dis_ax.set_prop_cycle( + cycler( + "color", + [color_map(i / float(n_data - 1)) for i in 
range(n_data)], + ) + * cycler("linestyle", [next(styles) for i in range(n_scen)]) + ) # Before plotting, data must be filtered by period - period_tps = [mod.tp_timestamp[tp] - for tp in mod.PERIOD_TPS[p].value] - df_to_plot = df.loc[df[ind].isin(period_tps)].drop([ind], - axis=1).reset_index(drop=True).dropna(axis=1, how='all') + period_tps = [mod.tp_timestamp[tp] for tp in mod.PERIOD_TPS[p].value] + df_to_plot = ( + df.loc[df[ind].isin(period_tps)] + .drop([ind], axis=1) + .reset_index(drop=True) + .dropna(axis=1, how="all") + ) # To locate the legend: "loc" is the point of the legend for which # you will specify coordinates. These coords are specified in # bbox_to_anchor (can be only 1 point or couple) - dis_plot = df_to_plot.plot(ax=dis_ax, - linewidth=1.6).legend(loc='lower left', fontsize=8, - bbox_to_anchor=(0., 1.015, 1., 1.015), ncol=num_col, - mode="expand") + dis_plot = df_to_plot.plot(ax=dis_ax, linewidth=1.6).legend( + loc="lower left", + fontsize=8, + bbox_to_anchor=(0.0, 1.015, 1.0, 1.015), + ncol=num_col, + mode="expand", + ) plt.xticks(rotation=90, fontsize=9) - plots.savefig(bbox_extra_artists=(dis_plot,), bbox_inches='tight') + plots.savefig(bbox_extra_artists=(dis_plot,), bbox_inches="tight") plt.close() plots.close() print("Printing summaries:\n===================") - start=time.time() + start = time.time() # print "renewable energy production" # rpsenergy = {s:0.0 for s in mod.SCENARIOS} @@ -267,93 +306,137 @@ def plot_dis_decision(name, tab, n_data, ind): if mod.options.export_capacities: n_elements = mod.GENERATION_TECHNOLOGIES.__len__() - index = 'gentech' + index = "gentech" - table_name = "cummulative_capacity_by_tech_periods" - print(table_name+" ...") + table_name = "cumulative_capacity_by_tech_periods" + print(table_name + " ...") table = export.write_table( - mod, mod.GENERATION_TECHNOLOGIES, - output_file=os.path.join(summaries_dir, table_name+".csv"), - headings=(index, 'legacy') + tuple(p - for p in mod.PERIODS), - values=lambda m, gt: (gt, sum(m.BuildGen[g, bldyr] - for (g, bldyr) in m.GEN_BLD_YRS - if m.gen_tech[g] == gt and bldyr not in m.PERIODS)) + - tuple( sum(m.GenCapacity[g, p] for g in m.GENERATION_PROJECTS - if m.gen_tech[g] == gt) for p in m.PERIODS)) + mod, + mod.GENERATION_TECHNOLOGIES, + output_file=os.path.join(summaries_dir, table_name + ".csv"), + headings=(index, "legacy") + tuple(p for p in mod.PERIODS), + values=lambda m, gt: ( + gt, + sum( + m.BuildGen[g, bldyr] + for (g, bldyr) in m.GEN_BLD_YRS + if m.gen_tech[g] == gt and bldyr not in m.PERIODS + ), + ) + + tuple( + sum( + m.GenCapacity[g, p] + for g in m.GENERATION_PROJECTS + if m.gen_tech[g] == gt + ) + for p in m.PERIODS + ), + ) plot_inv_decision(table_name, table, n_elements, index, True) table_name = "capacity_installed_by_tech_periods" - print(table_name+" ...") + print(table_name + " ...") table = export.write_table( - mod, mod.GENERATION_TECHNOLOGIES, - output_file=os.path.join(summaries_dir, table_name+".csv"), - headings=(index, 'legacy') + tuple(p - for p in mod.PERIODS), - values=lambda m, gt: (gt, sum(m.BuildGen[g, bldyr] - for (g, bldyr) in m.GEN_BLD_YRS - if m.gen_tech[g] == gt and bldyr not in m.PERIODS)) + - tuple( sum(m.BuildGen[g, p] for g in m.GENERATION_PROJECTS - if m.gen_tech[g] == gt) for p in m.PERIODS)) + mod, + mod.GENERATION_TECHNOLOGIES, + output_file=os.path.join(summaries_dir, table_name + ".csv"), + headings=(index, "legacy") + tuple(p for p in mod.PERIODS), + values=lambda m, gt: ( + gt, + sum( + m.BuildGen[g, bldyr] + for (g, bldyr) in m.GEN_BLD_YRS 
+ if m.gen_tech[g] == gt and bldyr not in m.PERIODS + ), + ) + + tuple( + sum( + m.BuildGen[g, p] + for g in m.GENERATION_PROJECTS + if m.gen_tech[g] == gt + ) + for p in m.PERIODS + ), + ) plot_inv_decision(table_name, table, n_elements, index, False) if mod.options.export_transmission: n_elements = mod.TRANSMISSION_LINES.__len__() - index = 'path' + index = "path" - table_name = "cummulative_transmission_by_path_periods" - print(table_name+" ...") + table_name = "cumulative_transmission_by_path_periods" + print(table_name + " ...") table = export.write_table( - mod, True, mod.TRANSMISSION_LINES, - output_file=os.path.join(summaries_dir, table_name+".csv"), - headings=(index, 'legacy') + tuple(p for p in mod.PERIODS), - values=lambda m, tx: (tx, m.existing_trans_cap[tx]) + - tuple(m.TransCapacity[tx, p] for p in m.PERIODS)) - #plot_inv_decision(table_name, table, n_elements, index, True) + mod, + True, + mod.TRANSMISSION_LINES, + output_file=os.path.join(summaries_dir, table_name + ".csv"), + headings=(index, "legacy") + tuple(p for p in mod.PERIODS), + values=lambda m, tx: (tx, m.existing_trans_cap[tx]) + + tuple(m.TransCapacity[tx, p] for p in m.PERIODS), + ) + # plot_inv_decision(table_name, table, n_elements, index, True) table_name = "transmission_installation_by_path_periods" - print(table_name+" ...") + print(table_name + " ...") table = export.write_table( - mod, True, mod.TRANSMISSION_LINES, - output_file=os.path.join(summaries_dir, table_name+".csv"), - headings=(index, 'legacy') + tuple(p for p in mod.PERIODS), - values=lambda m, tx: (tx, m.existing_trans_cap[tx]) + - tuple(m.BuildTrans[tx, p] for p in m.PERIODS)) + mod, + True, + mod.TRANSMISSION_LINES, + output_file=os.path.join(summaries_dir, table_name + ".csv"), + headings=(index, "legacy") + tuple(p for p in mod.PERIODS), + values=lambda m, tx: (tx, m.existing_trans_cap[tx]) + + tuple(m.BuildTrans[tx, p] for p in m.PERIODS), + ) plot_inv_decision(table_name, table, n_elements, index, False) - if mod.options.export_tech_dispatch: n_elements = mod.GENERATION_TECHNOLOGIES.__len__() - index = 'timepoints' + index = "timepoints" gen_projects = {} for g in mod.GENERATION_TECHNOLOGIES: gen_projects[g] = [] for prj in mod.PROJECTS: - if mod.proj_gen_tech[prj]==g: + if mod.proj_gen_tech[prj] == g: gen_projects[g].append(prj) + def print_dis(m, tp): tup = (m.tp_timestamp[tp],) for g in m.GENERATION_TECHNOLOGIES: for s in m.SCENARIOS: if s in m.PERIOD_SCENARIOS[m.tp_period[tp]]: - tup += (sum(m.DispatchProj[proj, tp, s] for proj in gen_projects[g] if (proj,tp,s) in m.PROJ_DISPATCH_POINTS),) + tup += ( + sum( + m.DispatchProj[proj, tp, s] + for proj in gen_projects[g] + if (proj, tp, s) in m.PROJ_DISPATCH_POINTS + ), + ) else: - tup += ('',) + tup += ("",) return tup table_name = "dispatch_proj_by_tech_tps" - print(table_name+" ...") + print(table_name + " ...") table = export.write_table( - mod, True, mod.TIMEPOINTS, - output_file=os.path.join(summaries_dir, table_name+".csv"), - headings=("timepoints",) + tuple(str(g)+"-"+str(mod.scenario_stamp[s]) for g in mod.GENERATION_TECHNOLOGIES for s in mod.SCENARIOS), - values=print_dis) + mod, + True, + mod.TIMEPOINTS, + output_file=os.path.join(summaries_dir, table_name + ".csv"), + headings=("timepoints",) + + tuple( + str(g) + "-" + str(mod.scenario_stamp[s]) + for g in mod.GENERATION_TECHNOLOGIES + for s in mod.SCENARIOS + ), + values=print_dis, + ) plot_dis_decision(table_name, table, n_elements, index) if mod.options.export_reservoirs: n_elements = mod.RESERVOIRS.__len__() - index = 
'timepoints' + index = "timepoints" def print_res(m, tp): tup = (m.tp_timestamp[tp],) @@ -362,28 +445,34 @@ def print_res(m, tp): if s in m.PERIOD_SCENARIOS[m.tp_period[tp]]: tup += (m.ReservoirVol[r, tp, s] - m.initial_res_vol[r],) else: - tup += ('',) + tup += ("",) return tup table_name = "reservoir_final_vols_tp" - print(table_name+" ...") + print(table_name + " ...") table = export.write_table( - mod, True, mod.TIMEPOINTS, - output_file=os.path.join(summaries_dir, table_name+".csv"), - headings=("timepoints",) + tuple(str(r)+"-"+ - str(mod.scenario_stamp[s]) for r in mod.RESERVOIRS - for s in mod.SCENARIOS), - values=print_res) + mod, + True, + mod.TIMEPOINTS, + output_file=os.path.join(summaries_dir, table_name + ".csv"), + headings=("timepoints",) + + tuple( + str(r) + "-" + str(mod.scenario_stamp[s]) + for r in mod.RESERVOIRS + for s in mod.SCENARIOS + ), + values=print_res, + ) plot_dis_decision(table_name, table, n_elements, index) ############################################################## # The following is a custom export to get dispatch for certain # Chile load zones - lzs_to_print = ['charrua','ancoa'] + lzs_to_print = ["charrua", "ancoa"] lz_hprojs = {} for lz in lzs_to_print: - lz_hprojs[lz]=[] + lz_hprojs[lz] = [] for proj in mod.LZ_PROJECTS[lz]: if proj in mod.HYDRO_PROJECTS: lz_hprojs[lz].append(proj) @@ -393,135 +482,192 @@ def print_hgen(m, tp): for lz in lzs_to_print: for s in m.SCENARIOS: if s in m.PERIOD_SCENARIOS[m.tp_period[tp]]: - tup += (sum(m.DispatchProj[proj, tp, s] for proj in lz_hprojs[lz] if (proj,tp,s) in m.HYDRO_PROJ_DISPATCH_POINTS),) + tup += ( + sum( + m.DispatchProj[proj, tp, s] + for proj in lz_hprojs[lz] + if (proj, tp, s) in m.HYDRO_PROJ_DISPATCH_POINTS + ), + ) else: - tup += ('',) + tup += ("",) return tup table_name = "hydro_dispatch_special_nodes_tp" - print(table_name+" ...") + print(table_name + " ...") table = export.write_table( - mod, True, mod.TIMEPOINTS, - output_file=os.path.join(summaries_dir, table_name+".csv"), - headings=("timepoints",) + tuple(str(lz)+"-"+str( - mod.scenario_stamp[s]) for lz in lzs_to_print - for s in mod.SCENARIOS), - values=print_hgen) - #plot_dis_decision(table_name, table, n_elements, index) + mod, + True, + mod.TIMEPOINTS, + output_file=os.path.join(summaries_dir, table_name + ".csv"), + headings=("timepoints",) + + tuple( + str(lz) + "-" + str(mod.scenario_stamp[s]) + for lz in lzs_to_print + for s in mod.SCENARIOS + ), + values=print_hgen, + ) + # plot_dis_decision(table_name, table, n_elements, index) if mod.options.export_load_blocks: + def print_res(m, ym): tup = (ym,) for r in m.RESERVOIRS: for s in m.SCENARIOS: - if s in m.PERIOD_SCENARIOS[m.tp_period[next(iter(m.ym_timepoints[ym]))]]: + if ( + s + in m.PERIOD_SCENARIOS[ + m.tp_period[next(iter(m.ym_timepoints[ym]))] + ] + ): tup += (m.ReservoirVol[r, ym, s] - m.initial_res_vol[r],) else: - tup += ('',) + tup += ("",) return tup + table_name = "reservoir_vols_load_block" - print(table_name+" ...") + print(table_name + " ...") tab = export.write_table( - mod, True, mod.YEARMONTHS, - output_file=os.path.join(summaries_dir, table_name+".csv"), - headings=("yearmonth",) + tuple(str(r)+"-"+ - str(mod.scenario_stamp[s]) for r in mod.RESERVOIRS - for s in mod.SCENARIOS), - values=print_res) + mod, + True, + mod.YEARMONTHS, + output_file=os.path.join(summaries_dir, table_name + ".csv"), + headings=("yearmonth",) + + tuple( + str(r) + "-" + str(mod.scenario_stamp[s]) + for r in mod.RESERVOIRS + for s in mod.SCENARIOS + ), + values=print_res, + ) n_data = 
mod.RESERVOIRS.__len__() - ind = 'yearmonth' - plots = PdfPages(os.path.join(outdir,"Summaries",table_name)+'.pdf') + ind = "yearmonth" + plots = PdfPages(os.path.join(outdir, "Summaries", table_name) + ".pdf") - df = pd.DataFrame(tab[1:], columns = tab[0]) + df = pd.DataFrame(tab[1:], columns=tab[0]) n_scen = mod.SCENARIOS.__len__() - #num_col = int(n_data * n_scen)/8 + # num_col = int(n_data * n_scen)/8 num_col = 6 - for p in ['all']+[p for p in mod.PERIODS]: - fig = plt.figure(figsize=(17,8), dpi=100) + for p in ["all"] + [p for p in mod.PERIODS]: + fig = plt.figure(figsize=(17, 8), dpi=100) dis_ax = fig.add_subplot(111) dis_ax.grid(b=False) # You have to play with the color map and the line style list to # get enough combinations for your particular plot. # Set up different x axis labels if all periods are being plotted - if p == 'all': - dis_ax.set_xticks([ - i*5 - for i in range(int(len(mod.YEARMONTHS)/5) + 1) - ]) - dis_ax.set_xticklabels([ - mod.YEARMONTHS[i*5+1] - for i in range(int(len(mod.YEARMONTHS)/5)) - ]) + if p == "all": + dis_ax.set_xticks( + [i * 5 for i in range(int(len(mod.YEARMONTHS) / 5) + 1)] + ) + dis_ax.set_xticklabels( + [ + mod.YEARMONTHS[i * 5 + 1] + for i in range(int(len(mod.YEARMONTHS) / 5)) + ] + ) # Technologies have different linestyles and scenarios have # different colors - dis_ax.set_prop_cycle(cycler('color', - [color_map(i/float(n_data-1)) for i in range(n_data)]) * - cycler('linestyle',[next(styles) for i in range(n_scen)])) - df_to_plot = df.drop([ind], axis=1).replace('', nan) + dis_ax.set_prop_cycle( + cycler( + "color", + [color_map(i / float(n_data - 1)) for i in range(n_data)], + ) + * cycler("linestyle", [next(styles) for i in range(n_scen)]) + ) + df_to_plot = df.drop([ind], axis=1).replace("", nan) else: n_scen = mod.PERIOD_SCENARIOS[p].__len__() - dis_ax.set_xticks([i*5 for i in range(0,24)]) - dis_ax.set_xticklabels([mod.YEARMONTHS[i] - for i in range(1,25)]) + dis_ax.set_xticks([i * 5 for i in range(0, 24)]) + dis_ax.set_xticklabels([mod.YEARMONTHS[i] for i in range(1, 25)]) # Technologies have different colors and scenarios have # different line styles - dis_ax.set_prop_cycle(cycler('color', - [color_map(i/float(n_data-1)) for i in range(n_data)]) * - cycler('linestyle', [next(styles) for i in range(n_scen)])) + dis_ax.set_prop_cycle( + cycler( + "color", + [color_map(i / float(n_data - 1)) for i in range(n_data)], + ) + * cycler("linestyle", [next(styles) for i in range(n_scen)]) + ) # Before plotting, data must be filtered by period - period_yms = [(p+y)*100+i for y in [0,1] for i in range(1,13)] - df_to_plot = df.loc[df[ind].isin(period_yms)].drop([ind], - axis=1).reset_index(drop=True).dropna(axis=1, how='all') + period_yms = [(p + y) * 100 + i for y in [0, 1] for i in range(1, 13)] + df_to_plot = ( + df.loc[df[ind].isin(period_yms)] + .drop([ind], axis=1) + .reset_index(drop=True) + .dropna(axis=1, how="all") + ) # To locate the legend: "loc" is the point of the legend for which # you will specify coordinates. 
These coords are specified in # bbox_to_anchor (can be only 1 point or couple) - dis_plot = df_to_plot.plot(ax=dis_ax, - linewidth=1.6).legend(loc='lower left', fontsize=8, - bbox_to_anchor=(0., 1.015, 1., 1.015), ncol=num_col, - mode="expand") + dis_plot = df_to_plot.plot(ax=dis_ax, linewidth=1.6).legend( + loc="lower left", + fontsize=8, + bbox_to_anchor=(0.0, 1.015, 1.0, 1.015), + ncol=num_col, + mode="expand", + ) plt.xticks(rotation=90, fontsize=9) - plots.savefig(bbox_extra_artists=(dis_plot,), bbox_inches='tight') + plots.savefig(bbox_extra_artists=(dis_plot,), bbox_inches="tight") plt.close() plots.close() ############################################################## def calc_tp_costs_in_period_one_scenario(m, p, s): - return (sum(sum( - # This are total costs in each tp for a scenario - getattr(m, tp_cost)[t, s].expr() * m.tp_weight_in_year[t] - for tp_cost in m.cost_components_tp) - # Now, summation over timepoints - for t in m.PERIOD_TPS[p]) * + return ( + sum( + sum( + # This are total costs in each tp for a scenario + getattr(m, tp_cost)[t, s].expr() * m.tp_weight_in_year[t] + for tp_cost in m.cost_components_tp + ) + # Now, summation over timepoints + for t in m.PERIOD_TPS[p] + ) + * # Conversion to lump sum at beginning of period - uniform_series_to_present_value( - 0, m.period_length_years[p]) * + uniform_series_to_present_value(0, m.period_length_years[p]) + * # Conversion to base year future_to_present_value( - m.discount_rate, (m.period_start[p] - m.base_financial_year))) + m.discount_rate, (m.period_start[p] - m.base_financial_year) + ) + ) """ Writing Objective Function value. """ print("total_system_costs.txt...") - with open(os.path.join(summaries_dir, "total_system_costs.txt"),'w+') as f: + with open(os.path.join(summaries_dir, "total_system_costs.txt"), "w+") as f: f.write("Total Expected System Costs: %.2f \n" % mod.SystemCost()) - f.write("Total Investment Costs: %.2f \n" % sum( - mod.AnnualCostPerPeriod[p].expr() for p in mod.PERIODS)) - f.write("Total Expected Operations Costs: %.2f \n" % sum( - mod.TpCostPerPeriod[p].expr() for p in mod.PERIODS)) + f.write( + "Total Investment Costs: %.2f \n" + % sum(mod.AnnualCostPerPeriod[p].expr() for p in mod.PERIODS) + ) + f.write( + "Total Expected Operations Costs: %.2f \n" + % sum(mod.TpCostPerPeriod[p].expr() for p in mod.PERIODS) + ) for p in mod.PERIODS: f.write("PERIOD %s\n" % p) f.write(" Investment Costs: %.2f \n" % mod.AnnualCostPerPeriod[p].expr()) - f.write(" Expected Operations Costs: %.2f \n" % mod.TpCostPerPeriod[p].expr()) + f.write( + " Expected Operations Costs: %.2f \n" % mod.TpCostPerPeriod[p].expr() + ) for s in mod.PERIOD_SCENARIOS[p]: - f.write(" Operational Costs of scenario %s with probability %s: %.2f\n" % (s, mod.scenario_probability[s], calc_tp_costs_in_period_one_scenario(mod, p, s))) - - - print("\nTime taken writing summaries: %.2f s." % (time.time()-start)) - - + f.write( + " Operational Costs of scenario %s with probability %s: %.2f\n" + % ( + s, + mod.scenario_probability[s], + calc_tp_costs_in_period_one_scenario(mod, p, s), + ) + ) + + print("\nTime taken writing summaries: %.2f s." % (time.time() - start)) # if mod.options.export_marginal_costs: # """ diff --git a/switch_model/reporting/dump.py b/switch_model/reporting/dump.py index a6fb22335..e7bf62fcd 100644 --- a/switch_model/reporting/dump.py +++ b/switch_model/reporting/dump.py @@ -1,4 +1,4 @@ -# Copyright (c) 2015-2019 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. 
# Licensed under the Apache License, Version 2.0, which is in the LICENSE file. """ @@ -10,12 +10,21 @@ """ import os, sys + def define_arguments(argparser): - argparser.add_argument("--dump-level", type=int, default=2, - help="Use 1 for an abbreviated dump via instance.display(), or 2 " + - "for a complete dump via instance.pprint().") - argparser.add_argument("--dump-to-screen", action='store_true', default=False, - help="Print the model dump to screen as well as an export file.") + argparser.add_argument( + "--dump-level", + type=int, + default=2, + help="Use 1 for an abbreviated dump via instance.display(), or 2 " + + "for a complete dump via instance.pprint().", + ) + argparser.add_argument( + "--dump-to-screen", + action="store_true", + default=False, + help="Print the model dump to screen as well as an export file.", + ) def _print_output(instance): @@ -33,7 +42,9 @@ def post_solve(instance, outdir): instance.display() or instance.pprint(), depending on the value of dump-level. Default is pprint(). """ - stdout_copy = sys.stdout # make a copy of current sys.stdout to return to eventually + stdout_copy = ( + sys.stdout + ) # make a copy of current sys.stdout to return to eventually out_path = os.path.join(outdir, "model_dump.txt") out_file = open(out_path, "w", buffering=1) sys.stdout = out_file diff --git a/switch_model/reporting/example_export.py b/switch_model/reporting/example_export.py index 400f72602..2ef2bf252 100644 --- a/switch_model/reporting/example_export.py +++ b/switch_model/reporting/example_export.py @@ -1,4 +1,4 @@ -# Copyright (c) 2015-2019 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. """ @@ -13,7 +13,8 @@ import os from switch_model.reporting import write_table -dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones' +dependencies = "switch_model.timescales", "switch_model.balancing.load_zones" + def post_solve(instance, outdir): """ @@ -21,13 +22,15 @@ def post_solve(instance, outdir): a different file name (load_balance2.csv). """ write_table( - instance, instance.LOAD_ZONES, instance.TIMEPOINTS, + instance, + instance.LOAD_ZONES, + instance.TIMEPOINTS, output_file=os.path.join(outdir, "load_balance2.csv"), - headings=("load_zone", "timestamp",) + tuple( - instance.Zone_Power_Injections + - instance.Zone_Power_Withdrawals), - values=lambda m, z, t: (z, m.tp_timestamp[t],) + tuple( + headings=("load_zone", "timestamp") + + tuple(instance.Zone_Power_Injections + instance.Zone_Power_Withdrawals), + values=lambda m, z, t: (z, m.tp_timestamp[t]) + + tuple( getattr(m, component)[z, t] - for component in ( - m.Zone_Power_Injections + - m.Zone_Power_Withdrawals))) + for component in (m.Zone_Power_Injections + m.Zone_Power_Withdrawals) + ), + ) diff --git a/switch_model/solve.py b/switch_model/solve.py index cac25b0bc..1ce633bcc 100755 --- a/switch_model/solve.py +++ b/switch_model/solve.py @@ -1,13 +1,18 @@ #!/usr/bin/env python -# Copyright (c) 2015-2019 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. 
from __future__ import print_function -from pyomo.environ import * -from pyomo.opt import SolverFactory, SolverStatus, TerminationCondition -import pyomo.version +import logging +import sys, os, time, shlex, re, inspect, textwrap, types, threading, json + +try: + import IPython + + has_ipython = True +except ImportError: + has_ipython = False -import sys, os, time, shlex, re, inspect, textwrap, types try: # Python 2 import cPickle as pickle @@ -20,9 +25,19 @@ except NameError: pass +from pyomo.environ import * +from pyomo.opt import SolverFactory, SolverStatus, TerminationCondition +import pyomo.version + import switch_model from switch_model.utilities import ( - create_model, _ArgumentParser, StepTimer, make_iterable, LogOutput, warn + create_model, + _ArgumentParser, + StepTimer, + make_iterable, + LogOutput, + warn, + unwrap, ) from switch_model.upgrade import do_inputs_need_upgrade, upgrade_inputs @@ -41,41 +56,117 @@ def main(args=None, return_model=False, return_instance=False): # turn on post-mortem debugging mode if requested # (from http://stackoverflow.com/a/1237407 ; more options available there) if pre_module_options.debug: + def debug(type, value, tb): import traceback + try: - from ipdb import pm + from ipdb import post_mortem except ImportError: - from pdb import pm + from pdb import post_mortem traceback.print_exception(type, value, tb) - pm() + report_model_in_traceback(tb) + # explicitly use _this_ tb, so debug can be called from an + # exception handler if needed (see https://stackoverflow.com/a/242514) + post_mortem(tb) + sys.excepthook = debug # Write output to a log file if logging option is specified + # TODO: change all our non-interactive output to report via the logger, then + # use logger.addHandler(logging.FileHandler(log_file_path)) in make_logger() + # and drop the LogOutput context manager. (That will also enable logging of + # messages from solve_scenarios.py to the default log file.) This may + # require context code anyway to copy all stdout and stderr to the logger + # while also emitting it on stdout and stderr, e.g., to correctly log + # tracebacks or messages from Pyomo code or tracebacks from our code. if pre_module_options.log_run_to_file: logs_dir = pre_module_options.logs_dir else: - logs_dir = None # disables logging + logs_dir = None # disables logging + + # set root logger to an appropriate level + # we never use pyomo's DEBUG level, because it produces an overwhelming + # amount of output + pyomo_levels = { + 'DEBUG': 'INFO', + 'INFO': 'WARNING', + 'WARNING': 'WARNING', + 'ERROR': 'ERROR' + } + pyomo_log_level = pyomo_levels[pre_module_options.log_level.upper()] + logging.getLogger("root").setLevel(pyomo_log_level) with LogOutput(logs_dir): + # Create a unique logger for this model (other models may have different + # logging settings and may exist at the same time as this one). This has + # to be done after we start LogOutput, because the logger gets a + # reference to the current sys.stdout, which should be the tee to the + # log file. + logger = make_logger(pre_module_options) + + logger.info( + textwrap.dedent( + f""" + ======================================================================= + Switch {switch_model.__version__}, https://switch-model.org + ======================================================================= + """ + ) + ) + + # Warn users about deprecated flags; we know this earlier but don't have + # a working logger to report it until here. 
+ # if '--verbose' in args or '--quiet' in args: + # logger.warn(unwrap(""" + # The --verbose and --quiet flags will be removed in a future + # version of Switch. Please use --log-level instead. + # """)) - # Look out for outdated inputs. This has to happen before modules.txt is + # Check for outdated inputs. This has to happen before modules.txt is # parsed to avoid errors from incompatible files. parser = _ArgumentParser(allow_abbrev=False, add_help=False) add_module_args(parser) module_options = parser.parse_known_args(args=args)[0] - if(os.path.exists(module_options.inputs_dir) and - do_inputs_need_upgrade(module_options.inputs_dir)): + + if os.path.exists(module_options.inputs_dir) and do_inputs_need_upgrade( + module_options.inputs_dir + ): + if "--help" in args or "-h" in args: + # don't prompt to upgrade if they're looking for help + print( + unwrap( + """ + Limited help is available because the inputs directory + needs to be upgraded. Module-specific help will be + available after upgrading the inputs directory via "switch + solve" or "switch upgrade". + """ + ) + ) + parser.print_help() + return 0 + do_upgrade = query_yes_no( - "Warning! Your inputs directory needs to be upgraded. " - "Do you want to auto-upgrade now? We'll keep a backup of " - "this current version." + unwrap( + """ + Warning! Your inputs directory needs to be upgraded. Do you + want to auto-upgrade now? We'll keep a backup of this current + version. + """ + ) ) if do_upgrade: upgrade_inputs(module_options.inputs_dir) else: - print("Inputs need upgrade. Consider `switch upgrade --help`. Exiting.") - stop_logging_output() + print( + unwrap( + """ + Inputs need to be upgraded. Consider using "switch upgrade + --help". Exiting. + """ + ) + ) return -1 # build a module list based on configuration options, and add @@ -87,45 +178,37 @@ def debug(type, value, tb): patch_pyomo() # Define the model - model = create_model(modules, args=args) + model = create_model(modules, args=args, logger=logger) # Add any suffixes specified on the command line (usually only iis) add_extra_suffixes(model) + logger.info("Model created in {:.2f} s.".format(timer.step_time())) + # return the model as-is if requested if return_model and not return_instance: return model if model.options.reload_prior_solution: + # Fail quickly if the prior solution file is not available. # TODO: allow a directory to be specified after --reload-prior-solution, # otherwise use outputs_dir. 
- if not os.path.isdir(model.options.outputs_dir): - raise IOError("Directory specified for prior solution does not exist.") - - # get a list of modules to iterate through - iterate_modules = get_iteration_list(model) - - if model.options.verbose: - print("\n=======================================================================") - print("Switch {}, http://switch-model.org".format(switch_model.__version__)) - print("=======================================================================") - print("Arguments:") - print(", ".join(k+"="+repr(v) for k, v in model.options.__dict__.items() if v)) - print("Modules:\n"+", ".join(m for m in modules)) - if iterate_modules: - print("Iteration modules:", iterate_modules) - print("=======================================================================\n") - print("Model created in {:.2f} s.".format(timer.step_time())) - print("Loading inputs...") + prior_solution_file = os.path.join( + model.options.outputs_dir, "results.pickle" + ) + if not os.path.exists(prior_solution_file): + raise IOError( + "Prior solution {} does not exist.".format(prior_solution_file) + ) # create an instance (also reports time spent reading data and loading into model) + logger.info("Loading inputs...") instance = model.load_inputs() #### Below here, we refer to instance instead of model #### instance.pre_solve() - if instance.options.verbose: - print("Total time spent constructing model: {:.2f} s.\n".format(timer.step_time())) + logger.info(f"Total time spent constructing model: {timer.step_time():.2f} s.\n") # return the instance as-is if requested if return_instance: @@ -144,110 +227,123 @@ def debug(type, value, tb): raise if instance.options.reload_prior_solution: - print('Loading prior solution...') - reload_prior_solution_from_pickle(instance, instance.options.outputs_dir) - if instance.options.verbose: - print( - 'Loaded previous results into model instance in {:.2f} s.' - .format(timer.step_time()) - ) + logger.info("Loading prior solution...") + reload_prior_solution_from_pickle(instance, prior_solution_file) + logger.info( + f"Loaded previous results into model instance in {timer.step_time():.2f} s." + ) else: # solve the model (reports time for each step as it goes) - if iterate_modules: - if instance.options.verbose: - print("Iterating model...") - iterate(instance, iterate_modules) + if instance.iterate_modules: + logger.info("Iterating model...") + iterate(instance) else: results = solve(instance) - if instance.options.verbose: - print("") - print("Optimization termination condition was {}.".format( - results.solver.termination_condition)) - if str(results.solver.message) != '': - print('Solver message: {}'.format(results.solver.message)) - print("") - - if instance.options.verbose: - timer.step_time() # restart counter for next step - - if not instance.options.no_save_solution: - save_results(instance, instance.options.outputs_dir) - if instance.options.verbose: - print('Saved results in {:.2f} s.'.format(timer.step_time())) + logger.info("") + logger.info( + f"Optimization termination condition was " + f"{results.solver.termination_condition}." 
+ ) + if str(results.solver.message) != "": + logger.info(f"Solver message: {results.solver.message}") + logger.info("") + timer.step_time() # restart counter for next step + + # save model configuration for future reference + file = os.path.join(instance.options.outputs_dir, "model_config.json") + with open(file, "w") as f: + json.dump( + { + "options": vars(instance.options), + "modules": modules, + "iterate_modules": instance.iterate_modules, + }, + f, + indent=4, + ) + + if not instance.options.no_save_solution: + save_results(instance, instance.options.outputs_dir) + logger.info(f"Saved results in {timer.step_time():.2f} s.") # report results # (repeated if model is reloaded, to automatically run any new export code) if not instance.options.no_post_solve: - if instance.options.verbose: - print("Executing post solve functions...") + logger.info("Executing post solve functions...") instance.post_solve() - if instance.options.verbose: - print("Post solve processing completed in {:.2f} s.".format(timer.step_time())) + logger.info(f"Post solve processing completed in {timer.step_time():.2f} s.") # end of LogOutput block if instance.options.interact: m = instance # present the solved model as 'm' for convenience - banner = ( - "\n" - "=======================================================================\n" - "Entering interactive Python shell.\n" - "Abstract model is in 'model' variable; \n" - "Solved instance is in 'instance' and 'm' variables.\n" - "Type ctrl-d or exit() to exit shell.\n" - "=======================================================================\n" + banner = "\n".join( + [ + "", + "=" * 60, + "Entering interactive {} shell.".format( + "IPython" if has_ipython else "Python" + ), + "Abstract model is in 'model' variable;", + "Solved instance is in 'instance' and 'm' variables.", + "Type ctrl-d or exit() to exit shell.", + "=" * 60, + "", + ] ) - import code - code.interact(banner=banner, local=dict(list(globals().items()) + list(locals().items()))) + # IPython support is disabled until they fix + # https://github.com/ipython/ipython/issues/12199 + if has_ipython and False: + banner += "\nUse tab to auto-complete" + IPython.embed( + banner1=banner, + exit_msg="Leaving interactive interpreter, returning to program.", + colors=instance.options.interact_color, + ) + else: + import code + code.interact( + banner=banner, + local=dict(list(globals().items()) + list(locals().items())), + ) -def reload_prior_solution_from_pickle(instance, outdir): - with open(os.path.join(outdir, 'results.pickle'), 'rb') as fh: - results = pickle.load(fh) + # return solved model for users who want to do other things with it + return instance + + +def reload_prior_solution_from_pickle(instance, pickle_file): + with open(pickle_file, "rb") as fh: + results = pickle.load(fh) instance.solutions.load_from(results) return instance patched_pyomo = False + + def patch_pyomo(): + # patch Pyomo if needed global patched_pyomo - if not patched_pyomo: - patched_pyomo = True - # patch Pyomo if needed - - # Pyomo 4.2 and 4.3 mistakenly discard the original rule during - # Expression.construct. This makes it impossible to reconstruct - # expressions (e.g., for iterated models). So we patch it. 
- if (4, 2) <= pyomo.version.version_info[:2] <= (4, 3): - # test whether patch is needed: - m = ConcreteModel() - m.e = Expression(rule=lambda m: 0) - if hasattr(m.e, "_init_rule") and m.e._init_rule is None: - # add a deprecation warning here when we stop supporting Pyomo 4.2 or 4.3 - old_construct = pyomo.environ.Expression.construct - def new_construct(self, *args, **kwargs): - # save rule, call the function, then restore it - _init_rule = self._init_rule - old_construct(self, *args, **kwargs) - self._init_rule = _init_rule - pyomo.environ.Expression.construct = new_construct - del m - - # Pyomo 5.1.1 (and maybe others) is very slow to load prior solutions because - # it does a full-component search for each component name as it assigns the - # data. This ends up taking longer than solving the model. So we micro- - # patch pyomo.core.base.PyomoModel.ModelSolutions.add_solution to use - # Pyomo's built-in caching system for component names. - # TODO: create a pull request for Pyomo to do this - # NOTE: space inside the long quotes is significant; must match the Pyomo code - old_code = """ + if patched_pyomo: + return + patched_pyomo = True + + # Pyomo 5.1.1 (and maybe others) is very slow to load prior solutions + # because it does a full-component search for each component name as it + # assigns the data. This ends up taking longer than solving the model. So we + # micro- patch pyomo.core.base.PyomoModel.ModelSolutions.add_solution to use + # Pyomo's built-in caching system for component names. + # TODO: create a pull request for Pyomo to do this + # NOTE: space inside the long quotes is significant; must match the Pyomo code + old_code = """ for obj in instance.component_data_objects(Var): cache[obj.name] = obj for obj in instance.component_data_objects(Objective, active=True): cache[obj.name] = obj for obj in instance.component_data_objects(Constraint, active=True): cache[obj.name] = obj""" - new_code = """ + new_code = """ # use buffer to avoid full search of component for data object # which introduces a delay that is quadratic in model size buf=dict() @@ -258,19 +354,24 @@ def new_construct(self, *args, **kwargs): for obj in instance.component_data_objects(Constraint, active=True): cache[obj.getname(fully_qualified=True, name_buffer=buf)] = obj""" - from pyomo.core.base.PyomoModel import ModelSolutions - add_solution_code = inspect.getsource(ModelSolutions.add_solution) - if old_code in add_solution_code: - # create and inject a new version of the method - add_solution_code = add_solution_code.replace(old_code, new_code) - replace_method(ModelSolutions, 'add_solution', add_solution_code) - elif pyomo.version.version_info[:2] >= (5, 0): - print( - "NOTE: The patch to pyomo.core.base.PyomoModel.ModelSolutions.add_solution " - "has been deactivated because the Pyomo source code has changed. " - "Check whether this patch is still needed and edit {} accordingly." - .format(__file__) - ) + from pyomo.core.base.PyomoModel import ModelSolutions + + add_solution_code = inspect.getsource(ModelSolutions.add_solution) + if old_code in add_solution_code: + # create and inject a new version of the method + add_solution_code = add_solution_code.replace(old_code, new_code) + replace_method(ModelSolutions, "add_solution", add_solution_code) + elif pyomo.version.version_info[:2] >= (5, 0): + # We don't allow later versions of Pyomo than we've tested with, so + # this should only show up during testing when preparing to release a + # new version. 
+ print( + "NOTE: The patch to pyomo.core.base.PyomoModel.ModelSolutions.add_solution " + "has been deactivated because the Pyomo source code has changed. " + f"Check whether this patch is still needed and edit {__file__} " + "accordingly." + ) + def replace_method(class_ref, method_name, new_source_code): """ @@ -287,7 +388,7 @@ def replace_method(class_ref, method_name, new_source_code): orig_method.__globals__, orig_method.__name__, orig_method.__defaults__, - orig_method.__closure__ + orig_method.__closure__, ) # note: this normal function will be automatically converted to an unbound # method when it is assigned as an attribute of a class @@ -300,9 +401,10 @@ def reload_prior_solution_from_csvs(instance): previous solution. (Not currently used.) """ import csv + var_objects = instance.component_objects(Var) for var in var_objects: - var_file = os.path.join(instance.options.outputs_dir, '{}.csv'.format(var.name)) + var_file = os.path.join(instance.options.outputs_dir, "{}.csv".format(var.name)) if not os.path.isfile(var_file): raise RuntimeError( "Tab output file for variable {} cannot be found in outputs " @@ -313,19 +415,20 @@ def reload_prior_solution_from_csvs(instance): key_types = [type(i) for i in make_iterable(next(var.iterkeys()))] except StopIteration: key_types = [] # no keys - with open(var_file,'r') as f: - reader = csv.reader(f, delimiter=',') - next(reader) # skip headers + with open(var_file, "r") as f: + reader = csv.reader(f, delimiter=",") + next(reader) # skip headers for row in reader: index = tuple(t(k) for t, k in zip(key_types, row[:-1])) try: v = var[index] except KeyError: raise KeyError( - "Unable to set value for {}[{}]; index is invalid." - .format(var.name, keys) + "Unable to set value for {}[{}]; index is invalid.".format( + var.name, index + ) ) - if row[-1] == '': + if row[-1] == "": # Variables that are not used in the model end up with no # value after the solve and get saved as blanks; we skip those. continue @@ -333,11 +436,10 @@ def reload_prior_solution_from_csvs(instance): if v.is_integer() or v.is_binary(): val = int(val) v.value = val - if instance.options.verbose: - print('Loaded variable {} values into instance.'.format(var.name)) + instance.logger.info(f"Loaded variable {var.name} values into instance.") -def iterate(m, iterate_modules, depth=0): +def iterate(m, depth=0): """Iterate through all modules listed in the iterate_list (usually iterate.txt), if any. If there is no iterate_list, then this will just solve the model once. @@ -358,7 +460,7 @@ if depth == 0: m.iteration_node = tuple() - if depth == len(iterate_modules): + if depth == len(m.iterate_modules): # asked to converge at the deepest level # just preprocess to reflect all changes and then solve m.preprocess() @@ -366,12 +468,17 @@ else: # iterate until converged at the current level - # note: the modules in iterate_modules were also specified in the model's + # note: the modules in m.iterate_modules were also specified in the model's # module list, and have already been loaded, so they are accessible via sys.modules - # This prepends 'switch_model.' if needed, to be consistent with modules.txt. - current_modules = [ - sys.modules[module_name if module_name in sys.modules else 'switch_model.'
+ module_name] - for module_name in iterate_modules[depth]] + current_modules = [] + for module_name in m.iterate_modules[depth]: + try: + current_modules.append(sys.modules[module_name]) + except KeyError: + raise ValueError( + "Module {} specified in iterate.txt has not been loaded. " + "It should be added to modules.txt as well.".format(module_name) + ) j = 0 converged = False @@ -386,24 +493,33 @@ def iterate(m, iterate_modules, depth=0): m.iteration_number = j m.iteration_node = m.iteration_node[:depth] + (j,) for module in current_modules: - converged = iterate_module_func(m, module, 'pre_iterate', converged) + converged = iterate_module_func(m, module, "pre_iterate", converged) # converge the deeper-level modules, if any (inner loop) - iterate(m, iterate_modules, depth=depth+1) + iterate(m, depth=depth + 1) # post-iterate modules at this level - m.iteration_number = j # may have been changed during iterate() + m.iteration_number = j # may have been changed during iterate() m.iteration_node = m.iteration_node[:depth] + (j,) for module in current_modules: - converged = iterate_module_func(m, module, 'post_iterate', converged) + converged = iterate_module_func(m, module, "post_iterate", converged) j += 1 if converged: - print("Iteration of {ms} was completed after {j} rounds.".format(ms=iterate_modules[depth], j=j)) + m.logger.info( + "Iteration of {ms} was completed after {j} rounds.".format( + ms=m.iterate_modules[depth], j=j + ) + ) else: - print("Iteration of {ms} was stopped after {j} iterations without convergence.".format(ms=iterate_modules[depth], j=j)) + m.logger.info( + "Iteration of {ms} was stopped after {j} iterations without convergence.".format( + ms=m.iterate_modules[depth], j=j + ) + ) return + def iterate_module_func(m, module, func, converged): """Call function func() in specified module (if available) and use the result to adjust model convergence status. If func doesn't exist or returns None, convergence @@ -431,128 +547,314 @@ def define_arguments(argparser): # iteration options argparser.add_argument( - "--iterate-list", default=None, - help="Text file with a list of modules to iterate until converged (default is iterate.txt); " - "each row is one level of iteration, and there can be multiple modules on each row" + "--iterate-list", + dest="iterate_list", + default=None, + help=""" + Text file with a list of modules to iterate until converged + (default is iterate.txt). Each row is one level of iteration, and + there can be multiple modules on each row. + """, ) argparser.add_argument( - "--max-iter", type=int, default=None, - help="Maximum number of iterations to complete at each level for iterated models" + "--max-iter", + dest="max_iter", + type=int, + default=None, + help=""" + Maximum number of iterations to complete at each level for iterated + models + """, ) # scenario information argparser.add_argument( - "--scenario-name", default="", help="Name of research scenario represented by this model" + "--scenario-name", + dest="scenario_name", + default="", + help="Name of research scenario represented by this model", + ) + + # flag for output; used by many modules so we define it here + argparser.add_argument( + "--sorted-output", + default=False, + action="store_true", + dest="sorted_output", + help=( + "Sort result files lexicographically. Otherwise results are " + "written in the same order as the input data (with Pyomo 5.7+) or " + "in random order (with earlier versions of Pyomo)." 
+ ), ) # note: pyomo has a --solver-suffix option but it is not clear # whether that does the same thing as --suffix defined here, # so we don't reuse the same name. - argparser.add_argument("--suffixes", "--suffix", nargs="+", action='extend', default=[], - help="Extra suffixes to add to the model and exchange with the solver (e.g., iis, rc, dual, or slack)") + argparser.add_argument( + "--suffixes", + "--suffix", + dest="suffixes", + nargs="+", + action="extend", + default=[], + help=""" + Extra suffixes to add to the model and exchange with the solver + (e.g., iis, rc, dual, or slack) + """, + ) # Define solver-related arguments # These are a subset of the arguments offered by "pyomo solve --solver=cplex --help" - argparser.add_argument("--solver", default="glpk", - help='Name of Pyomo solver to use for the model (default is "glpk")') - argparser.add_argument("--solver-manager", default="serial", - help='Name of Pyomo solver manager to use for the model ("neos" to use remote NEOS server)') - argparser.add_argument("--solver-io", default=None, help="Method for Pyomo to use to communicate with solver") + argparser.add_argument( + "--solver", + default="glpk", + help='Name of Pyomo solver to use for the model (default is "glpk")', + ) + argparser.add_argument( + "--solver-manager", + dest="solver_manager", + default="serial", + help=""" + Name of Pyomo solver manager to use for the model ("neos" to use + remote NEOS server) + """, + ) + argparser.add_argument( + "--solver-io", + dest="solver_io", + default=None, + help="Method for Pyomo to use to communicate with solver", + ) # note: pyomo has a --solver-options option but it is not clear # whether that does the same thing as --solver-options-string so we don't reuse the same name. - argparser.add_argument("--solver-options-string", default=None, - help='A quoted string of options to pass to the model solver. Each option must be of the form option=value. ' - '(e.g., --solver-options-string "mipgap=0.001 primalopt=\'\' advance=2 threads=1")') - argparser.add_argument("--keepfiles", action='store_true', default=None, - help="Keep temporary files produced by the solver (may be useful with --symbolic-solver-labels)") argparser.add_argument( - "--stream-output", "--stream-solver", action='store_true', dest="tee", default=None, - help="Display information from the solver about its progress (usually combined with a suitable --solver-options-string)") + "--solver-options-string", + dest="solver_options_string", + default=None, + help=""" + A quoted string of options to pass to the model solver. Each option + must be of the form option=value. (e.g., --solver-options-string + "mipgap=0.001 primalopt='' advance=2 threads=1") + """, + ) argparser.add_argument( - "--no-stream-output", "--no-stream-solver", action='store_false', dest="tee", default=None, - help="Don't display information from the solver about its progress") + "--keepfiles", + action="store_true", + default=None, + help=""" + Keep temporary files produced by the solver (may be useful with + --symbolic-solver-labels) + """, + ) argparser.add_argument( - "--symbolic-solver-labels", action='store_true', default=None, - help='Use symbol names derived from the model when interfacing with the solver. ' - 'See "pyomo solve --solver=x --help" for more details.') - argparser.add_argument("--tempdir", default=None, - help='The name of a directory to hold temporary files produced by the solver. 
' - 'This is usually paired with --keepfiles and --symbolic-solver-labels.') + "--stream-output", + "--stream-solver", + action="store_true", + dest="tee", + default=None, + help=""" + Display information from the solver about its progress (usually + combined with a suitable --solver-options-string) + """, + ) argparser.add_argument( - '--retrieve-cplex-mip-duals', default=False, action='store_true', - help=( - "Patch Pyomo's solver script for cplex to re-solve and retrieve " - "dual values for mixed-integer programs." - ) + "--no-stream-output", + "--no-stream-solver", + action="store_false", + dest="tee", + default=None, + help="Don't display information from the solver about its progress", + ) + argparser.add_argument( + "--symbolic-solver-labels", + action="store_true", + dest="symbolic_solver_labels", + default=None, + help=""" + Use symbol names derived from the model when interfacing with the + solver. See "pyomo solve --solver=x --help" for more details. + """, + ) + argparser.add_argument( + "--tempdir", + default=None, + help=""" + The name of a directory to hold temporary files produced by the + solver. This is usually paired with --keepfiles and + --symbolic-solver-labels. + """, + ) + argparser.add_argument( + "--retrieve-cplex-mip-duals", + dest="retrieve_cplex_mip_duals", + default=False, + action="store_true", + help=""" + Patch Pyomo's solver script for cplex to re-solve and retrieve dual + values for mixed-integer programs. + """, ) + # General purpose arguments # NOTE: the following could potentially be made into standard arguments for all models, # e.g. by defining them in a define_standard_arguments() function in switch.utilities.py # Define input/output options # note: --inputs-dir is defined in add_module_args, because it may specify the # location of the module list (deprecated) - # argparser.add_argument("--inputs-dir", default="inputs", - # help='Directory containing input files (default is "inputs")') - argparser.add_argument( - "--input-alias", "--input-aliases", dest="input_aliases", nargs='+', default=[], - help='List of input file substitutions, in form of standard_file.csv=alternative_file.csv, ' - 'useful for sensitivity studies with different inputs.') - argparser.add_argument("--outputs-dir", default="outputs", - help='Directory to write output files (default is "outputs")') - - # General purpose arguments argparser.add_argument( - '--verbose', '-v', dest='verbose', default=False, action='store_true', - help='Show information about model preparation and solution') + "--input-alias", + "--input-aliases", + dest="input_aliases", + nargs="+", + default=[], + action="extend", + help=""" + List of input file substitutions, in form of + standard_file.csv=alternative_file.csv, useful for sensitivity + studies with alternative inputs. + """, + ) argparser.add_argument( - '--quiet', '-q', dest='verbose', action='store_false', - help="Don't show information about model preparation and solution (cancels --verbose setting)") + "--outputs-dir", + default="outputs", + help='Directory to write output files (default is "outputs")', + ) argparser.add_argument( - '--no-post-solve', default=False, action='store_true', - help="Don't run post-solve code on the completed model (i.e., reporting functions).") + "--no-post-solve", + default=False, + action="store_true", + help=""" + Don't run post-solve code on the completed model (i.e., reporting + functions). 
+ """, + ) argparser.add_argument( - '--reload-prior-solution', default=False, action='store_true', - help='Load a previously saved solution; useful for re-running post-solve code or interactively exploring the model (via --interact).') + "--reload-prior-solution", + default=False, + action="store_true", + help=""" + Load a previously saved solution; useful for re-running + post-solve code or interactively exploring the model (with + --interact). + """, + ) argparser.add_argument( - '--no-save-solution', default=False, action='store_true', - help="Don't save solution after model is solved.") + "--no-save-solution", + default=False, + action="store_true", + help="Don't save solution after model is solved.", + ) argparser.add_argument( - '--interact', default=False, action='store_true', - help='Enter interactive shell after solving the instance to enable inspection of the solved model.') + "--interact", + default=False, + action="store_true", + help=""" + Enter interactive shell after solving the instance to enable + inspection of the solved model. + """, + ) + if has_ipython: + argparser.add_argument( + "--interact-color", + dest="interact_color", + default="NoColor", + choices=["NoColor", "LightBG", "Linux"], + help="Color scheme to use with the IPython interactive shell.", + ) def add_module_args(parser): parser.add_argument( - "--module-list", default=None, - help='Text file with a list of modules to include in the model (default is "modules.txt")' + "--module-list", + default=None, + help='Text file with a list of modules to include in the model (default is "modules.txt")', ) parser.add_argument( - "--include-modules", "--include-module", dest="include_exclude_modules", nargs='+', - action='include', default=[], - help="Module(s) to add to the model in addition to any specified with --module-list file" + "--include-modules", + "--include-module", + dest="include_exclude_modules", + nargs="+", + action="include", + default=[], + help="Module(s) to add to the model in addition to any specified with --module-list file", ) parser.add_argument( - "--exclude-modules", "--exclude-module", dest="include_exclude_modules", nargs='+', - action='exclude', default=[], - help="Module(s) to remove from the model after processing --module-list file and prior --include-modules arguments" + "--exclude-modules", + "--exclude-module", + dest="include_exclude_modules", + nargs="+", + action="exclude", + default=[], + help="Module(s) to remove from the model after processing " + "--module-list file and prior --include-modules arguments", ) # note: we define --inputs-dir here because it may be used to specify the location of # the module list, which is needed before it is loaded. - parser.add_argument("--inputs-dir", default="inputs", - help='Directory containing input files (default is "inputs")') + parser.add_argument( + "--inputs-dir", + default="inputs", + help='Directory containing input files (default is "inputs")', + ) def add_pre_module_args(parser): """ Add arguments needed before any modules are loaded. 
""" - parser.add_argument("--log-run", dest="log_run_to_file", default=False, action="store_true", - help="Log output to a file.") - parser.add_argument("--logs-dir", dest="logs_dir", default="logs", - help='Directory containing log files (default is "logs"') - parser.add_argument("--debug", action="store_true", default=False, - help='Automatically start pdb debugger on exceptions') + parser.add_argument( + "--log-run", + dest="log_run_to_file", + default=False, + action="store_true", + help="Log output to a file.", + ) + parser.add_argument( + "--logs-dir", + dest="logs_dir", + default="logs", + help='Directory containing log files (default is "logs"', + ) + + # Standard logging levels from + # https://docs.python.org/3/library/logging.html#levels + # Code should use logger.warn() for errors that can be recovered from, + # logger.info() for high-level sequence-of-events reporting and + # logger.debug() for detailed diagnostic information. + # logger.error() should be used to explain an error in more detail if + # needed at the same time as the code raises an exception. + parser.add_argument( + "--log-level", + dest="log_level", + default="warning", + choices=["error", "warning", "info", "debug"], + help="Amount of detail to include in on-screen logging and log files. " + 'Default is "warning".', + ) + # Older logging flags are retained for now to avoid disruption. They may be + # deprecated later. + parser.add_argument( + "--verbose", + dest="log_level", + action="store_const", + const="info", + help="Older logging flag; equivalent to --log-level info", + ) + parser.add_argument( + "--quiet", + dest="log_level", + action="store_const", + const="warning", + help="Older logging flag; equivalent to --log-level warning", + ) + + parser.add_argument( + "--debug", + action="store_true", + default=False, + help="Automatically start pdb debugger on exceptions", + ) def parse_pre_module_options(args): @@ -566,6 +868,15 @@ def parse_pre_module_options(args): return pre_module_args +def parse_list_file(file): + """Read all items from `file` into a list, removing white space at either + end of line, blank lines and anything after "#" """ + with open(file) as f: + items = [r.split("#", 1)[0].strip() for r in f.read().splitlines()] + items = [i for i in items if i] + return items + + def get_module_list(args): # parse module options parser = _ArgumentParser(allow_abbrev=False, add_help=False) @@ -586,75 +897,88 @@ def get_module_list(args): if module_list_file is None: # note: this could be a RuntimeError, but then users can't do "switch solve --help" in a random directory # (alternatively, we could provide no warning at all, since the user can specify --include-modules in the arguments) - print("WARNING: No module list found. Please create a modules.txt file with a list of modules to use for the model.") + print( + "WARNING: No module list found. Please create a modules.txt file with a list of modules to use for the model." + ) modules = [] else: # if it exists, the module list contains one module name per row (no .py extension) # we strip whitespace from either end (because those errors can be annoyingly hard to debug). - # We also omit blank lines and lines that start with "#" + # We also omit blank lines and anything after "#". # Otherwise take the module names as given. 
- with open(module_list_file) as f: - modules = [r.strip() for r in f.read().splitlines()] - modules = [m for m in modules if m and not m.startswith("#")] + modules = parse_list_file(module_list_file) # adjust modules as requested by the user # include_exclude_modules format: [('include', [mod1, mod2]), ('exclude', [mod3])] for action, mods in module_options.include_exclude_modules: - if action == 'include': + if action == "include": for module_name in mods: - if module_name not in modules: # maybe we should raise an error if already present? + if ( + module_name not in modules + ): # maybe we should raise an error if already present? modules.append(module_name) - if action == 'exclude': + if action == "exclude": for module_name in mods: try: modules.remove(module_name) except ValueError: - raise ValueError( # maybe we should just pass? - 'Unable to exclude module {} because it was not ' - 'previously included.'.format(module_name) + raise ValueError( # maybe we should just pass? + "Unable to exclude module {} because it was not " + "previously included.".format(module_name) ) - # add this module, since it has callbacks, e.g. define_arguments for iteration and suffixes - modules.append("switch_model.solve") + # add this module, since it has callbacks, e.g. define_arguments for + # iteration and suffixes + modules.append(__name__) return modules def get_iteration_list(m): # Identify modules to iterate until convergence (if any) - iterate_list_file = m.options.iterate_list + try: + iterate_list_file = m.options.iterate_list + except AttributeError as e: + # the --iterate-list option is defined in this module, but sometimes + # this module will not be in the module list (e.g., for small test + # models) so it will not be defined. In those cases, we assume no + # iteration should be done, rather than trying to read the default + # iteration file. (We could change this to use the default file later + # if needed.) + return [] if iterate_list_file is None and os.path.exists("iterate.txt"): iterate_list_file = "iterate.txt" if iterate_list_file is None: iterate_modules = [] else: - with open(iterate_list_file) as f: - iterate_rows = f.read().splitlines() - iterate_rows = [r.strip() for r in iterate_rows] - iterate_rows = [r for r in iterate_rows if r and not r.startswith("#")] + iterate_rows = parse_list_file(iterate_list_file) # delimit modules at the same level with space(s), tab(s) or comma(s) iterate_modules = [re.sub("[ \t,]+", " ", r).split(" ") for r in iterate_rows] return iterate_modules -def get_option_file_args(dir='.', extra_args=[]): +def get_option_file_args(dir=".", extra_args=[]): + """ + Retrieve base arguments from options.txt (if present). These can be on + multiple lines to ease editing, and comments starting with "#" (possibly + mid-line) will be ignored. + """ args = [] - # retrieve base arguments from options.txt (if present) - # note: these can be on multiple lines to ease editing, - # and lines can be commented out with # options_path = os.path.join(dir, "options.txt") if os.path.exists(options_path): with open(options_path) as f: base_options = f.read().splitlines() for r in base_options: - if not r.lstrip().startswith("#"): - args.extend(shlex.split(r)) + args.extend(shlex.split(r, comments=True)) + args.extend(extra_args) return args + # Generic argument-related code; could potentially be moved to utilities.py # if we want to make these standard parts of Switch. + def add_extra_suffixes(model): """ Add any suffix objects requested in the configuration options. 
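Since get_option_file_args() above now relies on shlex.split(r, comments=True), lines in options.txt can carry trailing comments as well as whole-line comments, and shell-style quoting still works. A quick sketch of the behavior (the option values are made up):

    import shlex

    line = '--solver cplex --solver-options-string "threads=1"  # one thread'
    # comments=True makes shlex drop everything from "#" onward
    assert shlex.split(line, comments=True) == [
        "--solver", "cplex", "--solver-options-string", "threads=1"
    ]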
@@ -675,17 +999,22 @@ def solve(model): # with its own solver object (e.g., with runph or a parallel solver server). # In those cases, we don't want to go through the expense of creating an # unused solver object, or get errors if the solver options are invalid. - model.solver = SolverFactory(model.options.solver, solver_io=model.options.solver_io) + model.solver = SolverFactory( + model.options.solver, solver_io=model.options.solver_io + ) # patch for Pyomo < 4.2 # note: Pyomo added an options_string argument to solver.solve() in Pyomo 4.2 rev 10587. # (See https://software.sandia.gov/trac/pyomo/browser/pyomo/trunk/pyomo/opt/base/solvers.py?rev=10587 ) # This is misreported in the documentation as options=, but options= actually accepts a dictionary. - if model.options.solver_options_string and not hasattr(model.solver, "_options_string_to_dict"): - for k, v in _options_string_to_dict(model.options.solver_options_string).items(): + if model.options.solver_options_string and not hasattr( + model.solver, "_options_string_to_dict" + ): + for k, v in _options_string_to_dict( + model.options.solver_options_string + ).items(): model.solver.options[k] = v - # import pdb; pdb.set_trace() model.solver_manager = SolverManagerFactory(model.options.solver_manager) # get solver arguments @@ -693,15 +1022,13 @@ options_string=model.options.solver_options_string, keepfiles=model.options.keepfiles, tee=model.options.tee, - symbolic_solver_labels=model.options.symbolic_solver_labels + symbolic_solver_labels=model.options.symbolic_solver_labels, ) # drop all the unspecified options solver_args = {k: v for (k, v) in solver_args.items() if v is not None} # Automatically send all defined suffixes to the solver - solver_args["suffixes"] = [ - c.name for c in model.component_objects(ctype=Suffix) - ] + solver_args["suffixes"] = [c.name for c in model.component_objects(ctype=Suffix)] # note: the next few lines are faster than the line above, but seem risky: # i = m._ctypes.get(Suffix, [None])[0] @@ -716,69 +1043,110 @@ # patch Pyomo to retrieve MIP duals from cplex if needed if model.options.retrieve_cplex_mip_duals: - retrieve_cplex_mip_duals() + retrieve_cplex_mip_duals(model) # solve the model - if model.options.verbose: - timer = StepTimer() - print("Solving model...") + timer = StepTimer() + model.logger.info("Solving model...") if model.options.tempdir is not None: # from https://software.sandia.gov/downloads/pub/pyomo/PyomoOnlineDocs.html#_changing_the_temporary_directory from pyutilib.services import TempfileManager + TempfileManager.tempdir = model.options.tempdir - results = model.solver_manager.solve(model, opt=model.solver, **solver_args) - #import pdb; pdb.set_trace() + try: + results = model.solver_manager.solve(model, opt=model.solver, **solver_args) + except ValueError as err: + # show the solver status for obscure errors if possible + model.logger.error("\nError during solve:\n") + try: + model.logger.error(err.__traceback__.tb_frame.f_locals["results"]) + except: + pass + raise - if model.options.verbose: - print("Solved model. Total time spent in solver: {:2f} s.".format(timer.step_time())) + model.logger.info( + f"Solved model. Total time spent in solver: {timer.step_time():.2f} s."
+ ) # Treat infeasibility as an error, rather than trying to load and save the results # (note: in this case, results.solver.status may be SolverStatus.warning instead of # SolverStatus.error) - if (results.solver.termination_condition == TerminationCondition.infeasible): + infeasibility_message = ( + "You can identify infeasible constraints by adding " + "switch_model.balancing.diagnose_infeasibility to the module list and " + "solving again." + "\n\nAlternatively, if the solver can generate an irreducibly " + "inconsistent set (IIS), more information may be available by setting " + "the appropriate flags in the --solver-options-string and then calling " + 'this script with "--suffixes iis".\n' + ) + + if results.solver.termination_condition == TerminationCondition.infeasible: if hasattr(model, "iis"): - print("Model was infeasible; irreducibly inconsistent set (IIS) returned by solver:") - print("\n".join(sorted(c.name for c in model.iis))) + model.logger.error( + "Model was infeasible; irreducibly inconsistent set (IIS) returned by solver:" + ) + model.logger.error("\n".join(sorted(c.name for c in model.iis))) else: - print("Model was infeasible; if the solver can generate an irreducibly inconsistent set (IIS),") - print("more information may be available by setting the appropriate flags in the ") - print('solver_options_string and calling this script with "--suffixes iis".') + model.logger.error("Model was infeasible. " + infeasibility_message) + + # This infeasibility logging module could be nice, but it doesn't work + # for my solvers and produces extraneous messages. + # import pyomo.util.infeasible + # pyomo.util.infeasible.log_infeasible_constraints(model) raise RuntimeError("Infeasible model") # Raise an error if the solver failed to produce a solution # Note that checking for results.solver.status in {SolverStatus.ok, # SolverStatus.warning} is not enough because with a warning there will # sometimes be a solution and sometimes not. - # Note: the results object originally contains values for model components - # in results.solution.variable, etc., but pyomo.solvers.solve erases it via - # result.solution.clear() after calling model.solutions.load_from() with it. - # load_from() loads values into the model.solutions._entry, so we check there. - # (See pyomo.PyomoModel.ModelSolutions.add_solution() for the code that - # actually creates _entry). - # Another option might be to check that model.solutions[-1].status (previously - # result.solution.status, but also cleared) is in - # pyomo.opt.SolutionStatus.['optimal', 'bestSoFar', 'feasible', 'globallyOptimal', 'locallyOptimal'], - # but this seems pretty foolproof (if undocumented). - if len(model.solutions[-1]._entry['variable']) == 0: + + try: + # pyomo 5.2, maybe earlier or later + no_solution = len(model.solutions.solutions) == 0 + solution_status = "unavailable" + except AttributeError: + # other pyomo version (4.4 or 5.6.8?) + # Note: the results object originally contains values for model components + # in results.solution.variable, etc., but pyomo.solvers.solve erases it via + # result.solution.clear() after calling model.solutions.load_from() with it. + # load_from() loads values into the model.solutions._entry, so we check there. + # (See pyomo.PyomoModel.ModelSolutions.add_solution() for the code that + # actually creates _entry). 
+ # Another option might be to check that model.solutions[-1].status (previously + # result.solution.status, but also cleared) is in + # pyomo.opt.SolutionStatus.['optimal', 'bestSoFar', 'feasible', 'globallyOptimal', 'locallyOptimal'], + # but this seems pretty foolproof (if undocumented). + no_solution = len(model.solutions[-1]._entry["variable"]) == 0 + solution_status = model.solutions[-1].status + + if no_solution: # no solution returned - print("Solver terminated without a solution.") - print(" Solver Status: ", results.solver.status) - print(" Solution Status: ", model.solutions[-1].status) - print(" Termination Condition: ", results.solver.termination_condition) - if model.options.solver == 'glpk' and results.solver.termination_condition == TerminationCondition.other: - print("Hint: glpk has been known to classify infeasible problems as 'other'.") + model.logger.error("Solver terminated without a solution.") + model.logger.error(f" Solver Status: {results.solver.status}") + model.logger.error(f" Solution Status: {solution_status}") + model.logger.error(f" Termination Condition: {results.solver.termination_condition}") + if ( + model.options.solver == "glpk" + and results.solver.termination_condition == TerminationCondition.other + ): + model.logger.error( + "Hint: glpk has been known to classify infeasible problems as 'other'." + ) + model.logger.error(infeasibility_message) raise RuntimeError("Solver failed to find an optimal solution.") # Report any warnings; these are written to stderr so users can find them in # error logs (e.g. on HPC systems). These can occur, e.g., if solver reaches # time limit or iteration limit but still returns a valid solution if results.solver.status == SolverStatus.warning: - warn( + model.logger.warning( "Solver terminated with warning.\n" - + " Solution Status: {}\n".format(model.solutions[-1].status) - + " Termination Condition: {}".format(results.solver.termination_condition) + f" Solver Status: {results.solver.status}\n" + f" Solution Status: {model.solutions[-1].status}\n" + f" Termination Condition: {results.solver.termination_condition}" ) ### process and return solution ### @@ -788,39 +1156,84 @@ model.last_results = results return results -def retrieve_cplex_mip_duals(): + +instance_number = 0 +instance_number_lock = threading.Lock() + + +def make_logger(parsed_args): + """ + Create a unique logger to attach to a model instance. + + This module may be kept in memory and used to create multiple instances with + different logging settings (e.g., via switch solve-scenarios), so we need to + create a unique logger for each model. This is also used by solve_scenarios + to create a logger for its own output. + """ + global instance_number + # Create a unique name to avoid reloading a logger created in a previous + # call to logging.getLogger. This name only needs to be unique within this + # process because if users call this function in separate processes they + # will not see the loggers each other have created (logging module is not + # multiprocessing-aware). So process-level locking is adequate.
+ with instance_number_lock: + instance_number += 1 + if instance_number == 1: + # typical case, solving one model and quitting + instance_name = "Switch" + else: + instance_name = "Switch instance {}".format(instance_number) + logger = logging.getLogger(instance_name) + # Follow user-specified logging level (converted to standard key) + logger.setLevel(parsed_args.log_level.upper()) + # Always log to stdout (not stderr) + logger.addHandler(logging.StreamHandler(sys.stdout)) + return logger + + +def retrieve_cplex_mip_duals(model): """patch Pyomo's solver to retrieve duals and reduced costs for MIPs from cplex lp solver. (This could be made permanent in pyomo.solvers.plugins.solvers.CPLEX.create_command_line).""" from pyomo.solvers.plugins.solvers.CPLEX import CPLEXSHELL + old_create_command_line = CPLEXSHELL.create_command_line + logger = model.logger + def new_create_command_line(*args, **kwargs): # call original command command = old_create_command_line(*args, **kwargs) # alter script - if hasattr(command, 'script') and 'optimize\n' in command.script: + if ( + hasattr(command, "script") + and "optimize\n" in command.script + and not "change problem fix\n" in command.script + ): command.script = command.script.replace( - 'optimize\n', - 'optimize\nchange problem fix\noptimize\n' + "optimize\n", + "optimize\nchange problem fix\noptimize\n" # see http://www-01.ibm.com/support/docview.wss?uid=swg21399941 # and http://www-01.ibm.com/support/docview.wss?uid=swg21400009 ) - print("changed CPLEX solve script to the following:") - print(command.script) + logger.info("changed CPLEX solve script to the following:") + logger.info(command.script) else: - print ( + logger.warning( "Unable to patch CPLEX solver script to retrieve duals " "for MIP problems" ) return command + new_create_command_line.is_patched = True - if not getattr(CPLEXSHELL.create_command_line, 'is_patched', False): + if not getattr(CPLEXSHELL.create_command_line, "is_patched", False): CPLEXSHELL.create_command_line = new_create_command_line # taken from https://software.sandia.gov/trac/pyomo/browser/pyomo/trunk/pyomo/opt/base/solvers.py?rev=10784 # This can be removed when all users are on Pyomo 4.2 import pyutilib + + def _options_string_to_dict(istr): ans = {} istr = istr.strip() @@ -828,19 +1241,21 @@ def _options_string_to_dict(istr): return ans if istr[0] == "'" or istr[0] == '"': istr = eval(istr) - tokens = pyutilib.misc.quote_split('[ ]+',istr) + tokens = pyutilib.misc.quote_split("[ ]+", istr) for token in tokens: - index = token.find('=') + index = token.find("=") if index == -1: raise ValueError( - "Solver options must have the form option=value: '{}'".format(istr)) + "Solver options must have the form option=value: '{}'".format(istr) + ) try: - val = eval(token[(index+1):]) + val = eval(token[(index + 1) :]) except: - val = token[(index+1):] + val = token[(index + 1) :] ans[token[:index]] = val return ans + def save_results(instance, outdir): """ Save model solution for later reuse. @@ -852,13 +1267,12 @@ def save_results(instance, outdir): # First, save the full solution data to the results object, because recent # versions of Pyomo only store execution metadata there by default. 
instance.solutions.store_to(instance.last_results) - with open(os.path.join(outdir, 'results.pickle'), 'wb') as fh: + with open(os.path.join(outdir, "results.pickle"), "wb") as fh: pickle.dump(instance.last_results, fh, protocol=-1) # remove the solution from the results object, to minimize long-term memory use instance.last_results.solution.clear() - def query_yes_no(question, default="yes"): """Ask a yes/no question via input() and return their answer. @@ -869,8 +1283,7 @@ def query_yes_no(question, default="yes"): The "answer" return value is True for "yes" or False for "no". """ - valid = {"yes": True, "y": True, "ye": True, - "no": False, "n": False} + valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False} if default is None: prompt = " [y/n] " elif default == "yes": @@ -883,13 +1296,42 @@ def query_yes_no(question, default="yes"): while True: sys.stdout.write(question + prompt) choice = input().lower() - if default is not None and choice == '': + if default is not None and choice == "": return valid[default] elif choice in valid: return valid[choice] else: - sys.stdout.write("Please respond with 'yes' or 'no' " - "(or 'y' or 'n').\n") + sys.stdout.write("Please respond with 'yes' or 'no' " "(or 'y' or 'n').\n") + + +def report_model_in_traceback(tb): + """ + Report on location of model in current traceback, if one can be found easily. + """ + import traceback + + for level, (frame, line) in enumerate(reversed(list(traceback.walk_tb(tb)))): + file_loc = "{}, line {}".format(frame.f_code.co_filename, line) + if level == 0: + location = "in the current frame" + elif level == 1: + location = "in\n{}\n(1 level up)".format(file_loc) + else: + location = "in\n{}\n({} levels up)".format(file_loc, level) + vars = frame.f_locals + for name, v in vars.items(): + if isinstance(v, Model): + print( + "\nA model can be found in variable '{}' {}".format(name, location) + ) + return + for name, v in vars.items(): + if isinstance(v, Component) and hasattr(v, "model"): + print( + "\nA model can be found in '{}.model()' {}".format(name, location) + ) + return + print("\nNo Pyomo model was found in the current stack trace.") ############### diff --git a/switch_model/solve_scenarios.py b/switch_model/solve_scenarios.py index fb4a0dcc2..f7b3f3029 100755 --- a/switch_model/solve_scenarios.py +++ b/switch_model/solve_scenarios.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -# Copyright (c) 2015-2019 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. """Scenario management module. @@ -20,7 +20,7 @@ from __future__ import print_function, absolute_import import sys, os, time -import argparse, shlex, socket, io, glob +import argparse, shlex, socket, io, glob, multiprocessing from collections import OrderedDict from .utilities import _ArgumentParser @@ -33,26 +33,37 @@ cmd_line_args = sys.argv[1:] # Parse scenario-manager-related command-line arguments. -# Other command-line arguments will be passed through to solve.py via scenario_cmd_line_args +# Other command-line arguments will be passed through to solve.py via +# scenario_cmd_line_args parser = _ArgumentParser( - allow_abbrev=False, description='Solve one or more Switch scenarios.' + allow_abbrev=False, description="Solve one or more Switch scenarios." 
) parser.add_argument( - '--scenario', '--scenarios', nargs='+', dest='scenarios', - default=[], action='extend' + "--scenario", + "--scenarios", + nargs="+", + dest="scenarios", + default=[], + action="extend", ) -#parser.add_argument('--scenarios', nargs='+', default=[]) parser.add_argument("--scenario-list", default="scenarios.txt") parser.add_argument("--scenario-queue", default="scenario_queue") parser.add_argument("--job-id", default=None) # import pdb; pdb.set_trace() # get a namespace object with successfully parsed scenario manager arguments -scenario_manager_args = parser.parse_known_args(args=option_file_args + cmd_line_args)[0] +scenario_manager_args = parser.parse_known_args(args=option_file_args + cmd_line_args)[ + 0 +] # get lists of other arguments to pass through to standard solve routine scenario_option_file_args = parser.parse_known_args(args=option_file_args)[1] scenario_cmd_line_args = parser.parse_known_args(args=cmd_line_args)[1] +# create a logger for this module's output based on default arguments +logger = solve.make_logger( + solve.parse_pre_module_options(scenario_option_file_args + scenario_cmd_line_args) +) + requested_scenarios = scenario_manager_args.scenarios scenario_list_file = scenario_manager_args.scenario_list scenario_queue_dir = scenario_manager_args.scenario_queue @@ -72,11 +83,11 @@ # If a job id is not specified, interrupted jobs will not be restarted. job_id = scenario_manager_args.job_id if job_id is None: - job_id = os.environ.get('SWITCH_JOB_ID') + job_id = os.environ.get("SWITCH_JOB_ID") if job_id is None: # this cannot be running in parallel with another task with the same pid on # the same host, so it's safe to requeue any jobs with this id - job_id = socket.gethostname() + '_' + str(os.getpid()) + job_id = socket.gethostname() + "_" + str(os.getpid()) # TODO: other options for requeueing jobs: # - use file locks on lockfiles: lock a @@ -127,12 +138,13 @@ # the DB), so users can restart scenarios by deleting the 'done' file. # But this requires synchronized clocks across workers... 
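# Editorial aside (not part of this patch): checkout() below relies on the
# atomicity of os.mkdir to act as a cross-process, cross-host lock on a shared
# filesystem. A minimal sketch of the pattern:
#     try:
#         os.mkdir(os.path.join(scenario_queue_dir, scenario_name))
#         # directory created: this worker now owns the scenario
#     except OSError:
#         pass  # directory already exists: another worker claimed it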
-running_scenarios_file = os.path.join(scenario_queue_dir, job_id+"_running.txt") +running_scenarios_file = os.path.join(scenario_queue_dir, job_id + "_running.txt") # list of scenarios currently being run by this job (always just one with the current code) running_scenarios = [] -#import pdb; pdb.set_trace() +# import pdb; pdb.set_trace() + def main(args=None): # make sure the scenario_queue_dir exists (marginally better to do this once @@ -140,14 +152,14 @@ def main(args=None): try: os.makedirs(scenario_queue_dir) except OSError: - pass # directory probably exists already + pass # directory probably exists already # remove lock directories for any scenarios that were # previously being solved by this job but were interrupted unlock_running_scenarios() for (scenario_name, args) in scenarios_to_run(): - print( + logger.warn( # not strictly a warning, but often nice to see in the log "\n\n=======================================================================\n" + "running scenario {s}\n".format(s=scenario_name) + "arguments: {}\n".format(args) @@ -155,15 +167,38 @@ def main(args=None): ) # call the standard solve module with the arguments for this particular scenario - solve.main(args=args) - - # another option: + # We run this in its own process to avoid sharing module state info between + # model instances (e.g., a logger created in Pyomo may grab the current sys.stdout + # while Switch has temporarily replaced it with a timing counter stream, then + # keep using that for subsequent instances) + process = multiprocessing.Process(target=run_scenario, args=(args,)) + process.start() + process.join() + + # other options: + # solve.main(args) + # or # subprocess.call(shlex.split("python -m solve") + args) <- omit args from options.txt # it should also be possible to use a solver server, but that's not really needed # since this script has built-in queue management. mark_completed(scenario_name) +def run_scenario(args): + # reactivate stdin in subprocess + # from https://stackoverflow.com/questions/30134297/python-multiprocessing-stdin-input + # also see refs to stdin in https://docs.python.org/3/library/multiprocessing.html + sys.stdin = os.fdopen(0) + try: + solve.main(args) + except: + # code run in a subprocess never has an uncaught exception, so the + # excepthook never gets run, which makes it impossible to use the + # --debug flag. So we call the excepthook (possibly set by solve.main) + # directly. + sys.excepthook(*sys.exc_info()) + + def scenarios_to_run(): """Generator function which returns argument lists for each scenario that should be run. @@ -180,13 +215,17 @@ def scenarios_to_run(): # just run them in the order specified, with no queue-management for scenario_name in requested_scenarios: completed = False - scenario_args = scenario_option_file_args + get_scenario_dict()[scenario_name] + scenario_cmd_line_args + scenario_args = ( + scenario_option_file_args + + get_scenario_dict()[scenario_name] + + scenario_cmd_line_args + ) # flag the scenario as being run; then run it whether or not it was previously run checkout(scenario_name, force=True) yield (scenario_name, scenario_args) # no more scenarios to run return - else: # no specific scenarios requested + else: # no specific scenarios requested # Run every scenario in the list, with queue management # This is done by repeatedly scanning the scenario list and choosing # the first scenario that hasn't been run. 
This way, users can edit the @@ -199,7 +238,9 @@ def scenarios_to_run(): # This list is found by retrieving the names of the lock-directories. already_run = {f for f in os.listdir(".") if os.path.isdir(f)} for scenario_name, base_args in get_scenario_dict().items(): - scenario_args = scenario_option_file_args + base_args + scenario_cmd_line_args + scenario_args = ( + scenario_option_file_args + base_args + scenario_cmd_line_args + ) if scenario_name not in already_run and checkout(scenario_name): # run this scenario, then start again at the top of the list ran.append(scenario_name) @@ -209,16 +250,20 @@ def scenarios_to_run(): else: if scenario_name not in skipped and scenario_name not in ran: skipped.append(scenario_name) - if is_verbose(scenario_args): - print("Skipping {} because it was already run.".format(scenario_name)) + logger.info( + "Skipping {} because it was already run.".format( + scenario_name + ) + ) # move on to the next candidate # no more scenarios to run if skipped and not ran: - print( + logger.warn( "Skipping all scenarios because they have already been solved. " "If you would like to run these scenarios again, " - "please remove the {sq} directory or its contents. (rm -rf {sq})" - .format(sq=scenario_queue_dir) + "please remove the {sq} directory or its contents. (rm -rf {sq})".format( + sq=scenario_queue_dir + ) ) return @@ -231,39 +276,35 @@ def parse_arg(arg, args=sys.argv[1:], **parse_kw): # (They have no reason to set the destination anyway.) # note: we use the term "option" so that parsing errors will make a little more # sense, e.g., if users call with "--suffixes " (instead of just omitting it) - parse_kw["dest"]="option" + parse_kw["dest"] = "option" parser.add_argument(arg, **parse_kw) return parser.parse_known_args(args)[0].option + def get_scenario_name(scenario_args): # use ad-hoc parsing to extract the scenario name from a scenario-definition string return parse_arg("--scenario-name", default=None, args=scenario_args) + def last_index(lst, val): try: return len(lst) - lst[::-1].index(val) - 1 except ValueError: return -1 -def is_verbose(scenario_args): - # check options settings for --verbose flag - # we can't use parse_arg, because we need to process both --verbose and --quiet - # note: this duplicates settings in switch_model.solve, so it may fall out of date - return last_index(scenario_args, '--verbose') >= last_index(scenario_args, '--quiet') - # return parse_arg("--verbose", action='store_true', default=False, args=scenario_args) def get_scenario_dict(): - # note: we read the list from the disk each time so that we get a fresher version - # if the standard list is changed during a long solution effort. - with open(scenario_list_file, 'r') as f: - scenario_list_text = [r.strip() for r in f.read().splitlines()] - scenario_list_text = [r for r in scenario_list_text if r and not r.startswith("#")] - - # note: text.splitlines() omits newlines and ignores presence/absence of \n at end of the text - # shlex.split() breaks an command-line-style argument string into a list like sys.argv - scenario_list = [shlex.split(r) for r in scenario_list_text] + # note: we read the list from the disk each time so that we get a fresher + # version if the standard list is changed during a long solution effort. + # This ignores comments in the scenario list (possibly starting mid-line), + # just like switch solve does in options.txt. 
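    # Editorial aside (not part of this patch), with hypothetical values: a
    # scenario list line such as
    #     --scenario-name high_load --inputs-dir inputs_hi  # sensitivity case
    # is split by shlex.split(line, comments=True) into
    #     ['--scenario-name', 'high_load', '--inputs-dir', 'inputs_hi']
    # with the trailing comment discarded.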
+ with open(scenario_list_file, "r") as f: + scenario_list = [shlex.split(r, comments=True) for r in f.read().splitlines()] + # drop any empty lines + scenario_list = [s for s in scenario_list if s] return OrderedDict((get_scenario_name(s), s) for s in scenario_list) + def checkout(scenario_name, force=False): # write a flag that we are solving this scenario, before actually trying to lock it # this way, if the job gets interrupted in the middle of this function, the @@ -277,7 +318,7 @@ def checkout(scenario_name, force=False): os.mkdir(os.path.join(scenario_queue_dir, scenario_name)) locked = True except OSError as e: - if e.errno != 17: # File exists + if e.errno != 17: # File exists raise locked = False if locked or force: @@ -288,6 +329,7 @@ def checkout(scenario_name, force=False): write_running_scenarios_file() return False + def mark_completed(scenario_name): # remove the scenario from the list of running scenarios (since it's been completed now) running_scenarios.remove(scenario_name) @@ -295,6 +337,7 @@ def mark_completed(scenario_name): # note: the scenario lock directory is left in place so the scenario won't get checked # out again + def write_running_scenarios_file(): # write the list of scenarios currently being run by this job to disk # so they can be released back to the queue if the job is interrupted and restarted @@ -307,16 +350,17 @@ def write_running_scenarios_file(): # done that actually haven't.) flags = "r+" if os.path.exists(running_scenarios_file) else "w" with open(running_scenarios_file, flags) as f: - f.write("\n".join(running_scenarios)+"\n") + f.write("\n".join(running_scenarios) + "\n") f.truncate() else: # remove the running_scenarios_file entirely if it would be empty try: os.remove(running_scenarios_file) except OSError as e: - if e.errno != 2: # no such file + if e.errno != 2: # no such file raise + def unlock_running_scenarios(): # called during startup to remove lockfiles for any scenarios that were still running # when this job was interrupted @@ -327,9 +371,10 @@ def unlock_running_scenarios(): try: os.rmdir(os.path.join(scenario_queue_dir, scenario_name)) except OSError as e: - if e.errno != 2: # no such file + if e.errno != 2: # no such file raise + # run the main function if called as a script if __name__ == "__main__": main() diff --git a/switch_model/test.py b/switch_model/test.py index b42692929..d679ac5a2 100644 --- a/switch_model/test.py +++ b/switch_model/test.py @@ -1,13 +1,15 @@ from __future__ import print_function -# Copyright (c) 2015-2019 The Switch Authors. All rights reserved. + +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. import sys + def main(): print("running {} as {}.".format(__file__, __name__)) print("system path:") print("\n".join(sys.path)) + if __name__ == "__main__": main() - diff --git a/switch_model/timescales.py b/switch_model/timescales.py index 82a41b944..ea4eac157 100644 --- a/switch_model/timescales.py +++ b/switch_model/timescales.py @@ -1,4 +1,4 @@ -# Copyright (c) 2015-2019 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. 
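# Editorial aside (not part of this patch), with hypothetical values: the
# components below nest TIMEPOINTS within TIMESERIES within PERIODS. For
# example, a timeseries.csv row
#     TIMESERIES,ts_period,ts_duration_of_tp,ts_num_tps,ts_scale_to_period
#     2025_winter,2020,6,4,3652.5
# defines one representative day of four 6-hour timepoints; scaled by 3652.5,
# it covers 24 h x 3652.5 = 87,660 h, i.e., a full 10-year period at 8766
# hours per year.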
""" @@ -227,24 +227,28 @@ def define_components(mod): """ - mod.PERIODS = Set(ordered=True) + mod.PERIODS = Set(dimen=1, ordered=True) mod.period_start = Param(mod.PERIODS, within=NonNegativeReals) mod.period_end = Param(mod.PERIODS, within=NonNegativeReals) - mod.min_data_check('PERIODS', 'period_start', 'period_end') + mod.min_data_check("PERIODS", "period_start", "period_end") - mod.TIMESERIES = Set(ordered=True) + mod.TIMESERIES = Set(ordered=True, dimen=1) mod.ts_period = Param(mod.TIMESERIES, within=mod.PERIODS) - mod.ts_duration_of_tp = Param(mod.TIMESERIES, within=PositiveReals) - mod.ts_num_tps = Param(mod.TIMESERIES, within=PositiveIntegers) - mod.ts_scale_to_period = Param(mod.TIMESERIES, within=PositiveReals) + mod.ts_duration_of_tp = Param(mod.TIMESERIES, within=NonNegativeReals) + mod.ts_num_tps = Param(mod.TIMESERIES, within=NonNegativeIntegers) + mod.ts_scale_to_period = Param(mod.TIMESERIES, within=NonNegativeReals) mod.min_data_check( - 'TIMESERIES', 'ts_period', 'ts_duration_of_tp', 'ts_num_tps', - 'ts_scale_to_period') + "TIMESERIES", + "ts_period", + "ts_duration_of_tp", + "ts_num_tps", + "ts_scale_to_period", + ) - mod.TIMEPOINTS = Set(ordered=True) + mod.TIMEPOINTS = Set(ordered=True, dimen=1) mod.tp_ts = Param(mod.TIMEPOINTS, within=mod.TIMESERIES) - mod.min_data_check('TIMEPOINTS', 'tp_ts') - mod.tp_timestamp = Param(mod.TIMEPOINTS, default=lambda m, t: t) + mod.min_data_check("TIMEPOINTS", "tp_ts") + mod.tp_timestamp = Param(mod.TIMEPOINTS, default=lambda m, t: t, within=Any) # Derived sets and parameters # note: the first five are calculated early so they @@ -252,34 +256,43 @@ def define_components(mod): mod.tp_duration_hrs = Param( mod.TIMEPOINTS, - initialize=lambda m, t: m.ts_duration_of_tp[m.tp_ts[t]]) + within=NonNegativeReals, + initialize=lambda m, t: m.ts_duration_of_tp[m.tp_ts[t]], + ) mod.tp_weight = Param( mod.TIMEPOINTS, - within=PositiveReals, + within=NonNegativeReals, initialize=lambda m, t: ( - m.tp_duration_hrs[t] * m.ts_scale_to_period[m.tp_ts[t]])) + m.tp_duration_hrs[t] * m.ts_scale_to_period[m.tp_ts[t]] + ), + ) + # TODO: build this in one pass, not multiple scans mod.TPS_IN_TS = Set( mod.TIMESERIES, + dimen=1, ordered=True, within=mod.TIMEPOINTS, - initialize=lambda m, ts: [ - t for t in m.TIMEPOINTS if m.tp_ts[t] == ts]) + initialize=lambda m, ts: [t for t in m.TIMEPOINTS if m.tp_ts[t] == ts], + ) mod.tp_period = Param( mod.TIMEPOINTS, within=mod.PERIODS, - initialize=lambda m, t: m.ts_period[m.tp_ts[t]]) + initialize=lambda m, t: m.ts_period[m.tp_ts[t]], + ) mod.TS_IN_PERIOD = Set( mod.PERIODS, + dimen=1, ordered=True, within=mod.TIMESERIES, - initialize=lambda m, p: [ - ts for ts in m.TIMESERIES if m.ts_period[ts] == p]) + initialize=lambda m, p: [ts for ts in m.TIMESERIES if m.ts_period[ts] == p], + ) mod.TPS_IN_PERIOD = Set( mod.PERIODS, + dimen=1, ordered=True, within=mod.TIMEPOINTS, - initialize=lambda m, p: [ - t for t in m.TIMEPOINTS if m.tp_period[t] == p]) + initialize=lambda m, p: [t for t in m.TIMEPOINTS if m.tp_period[t] == p], + ) # Decide whether period_end values have been given as exact points in time # (e.g., 2020.0 means 2020-01-01 00:00:00), or as a label for a full @@ -288,45 +301,67 @@ def define_components(mod): # NOTE: we can't just check whether period_end[p] + 1 = period_start[p+1], # because that is undefined for single-period models. 
def add_one_to_period_end_rule(m): - hours_in_period = {p: sum(m.tp_weight[t] for t in m.TPS_IN_PERIOD[p]) for p in m.PERIODS} + hours_in_period = { + p: sum(m.tp_weight[t] for t in m.TPS_IN_PERIOD[p]) for p in m.PERIODS + } err_plain = sum( (m.period_end[p] - m.period_start[p]) * hours_per_year - hours_in_period[p] - for p in m.PERIODS) + for p in m.PERIODS + ) err_add_one = sum( - (m.period_end[p] + 1 - m.period_start[p]) * hours_per_year - hours_in_period[p] - for p in m.PERIODS) - add_one = (abs(err_add_one) < abs(err_plain)) + (m.period_end[p] + 1 - m.period_start[p]) * hours_per_year + - hours_in_period[p] + for p in m.PERIODS + ) + add_one = abs(err_add_one) < abs(err_plain) # print "add_one: {}".format(add_one) return add_one - mod.add_one_to_period_end = Param(within=Boolean, initialize=add_one_to_period_end_rule) + + mod.add_one_to_period_end = Param( + within=Boolean, initialize=add_one_to_period_end_rule + ) mod.period_length_years = Param( mod.PERIODS, - initialize=lambda m, p: m.period_end[p] - m.period_start[p] + (1 if m.add_one_to_period_end else 0)) + within=NonNegativeReals, + initialize=lambda m, p: m.period_end[p] + - m.period_start[p] + + (1 if m.add_one_to_period_end else 0), + ) mod.period_length_hours = Param( mod.PERIODS, - initialize=lambda m, p: m.period_length_years[p] * hours_per_year) + within=NonNegativeReals, + initialize=lambda m, p: m.period_length_years[p] * hours_per_year, + ) mod.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD = Set( - mod.PERIODS, ordered=True, - initialize=lambda m, p: - [p2 for p2 in m.PERIODS if m.PERIODS.ord(p2) <= m.PERIODS.ord(p)] + mod.PERIODS, + dimen=1, + ordered=True, + initialize=lambda m, p: [ + p2 for p2 in m.PERIODS if m.PERIODS.ord(p2) <= m.PERIODS.ord(p) + ], ) mod.ts_scale_to_year = Param( mod.TIMESERIES, + within=NonNegativeReals, initialize=lambda m, ts: ( - m.ts_scale_to_period[ts] / m.period_length_years[m.ts_period[ts]])) + m.ts_scale_to_period[ts] / m.period_length_years[m.ts_period[ts]] + ), + ) mod.ts_duration_hrs = Param( mod.TIMESERIES, - initialize=lambda m, ts: ( - m.ts_num_tps[ts] * m.ts_duration_of_tp[ts])) + within=NonNegativeReals, + initialize=lambda m, ts: (m.ts_num_tps[ts] * m.ts_duration_of_tp[ts]), + ) mod.tp_weight_in_year = Param( mod.TIMEPOINTS, - within=PositiveReals, - initialize=lambda m, t: ( - m.tp_weight[t] / m.period_length_years[m.tp_period[t]])) + within=NonNegativeReals, + initialize=lambda m, t: m.tp_weight[t] / m.period_length_years[m.tp_period[t]], + doc="This weight scales a timepoint to an annual average.", + ) # Identify previous step for each timepoint, for use in tracking # unit commitment or storage. We use circular indexing (.prevw() method) # for the timepoints within a timeseries to give consistency between the @@ -335,24 +370,30 @@ def add_one_to_period_end_rule(m): mod.tp_previous = Param( mod.TIMEPOINTS, within=mod.TIMEPOINTS, - initialize=lambda m, t: m.TPS_IN_TS[m.tp_ts[t]].prevw(t)) + initialize=lambda m, t: m.TPS_IN_TS[m.tp_ts[t]].prevw(t), + ) def validate_time_weights_rule(m, p): hours_in_period = sum(m.tp_weight[t] for t in m.TPS_IN_PERIOD[p]) tol = 0.01 - if(hours_in_period > (1 + tol) * m.period_length_hours[p] or - hours_in_period < (1 - tol) * m.period_length_hours[p]): - print(("validate_time_weights_rule failed for period " + - "'{period:.0f}'. 
Expected {period_h:0.2f}, based on " +
-                   "length in years, but the sum of timepoint weights " +
-                   "is {ds_h:0.2f}.\n"
-                ).format(period=p, period_h=m.period_length_hours[p],
-                    ds_h=hours_in_period))
+        if (
+            hours_in_period > (1 + tol) * m.period_length_hours[p]
+            or hours_in_period < (1 - tol) * m.period_length_hours[p]
+        ):
+            print(
+                (
+                    "validate_time_weights_rule failed for period "
+                    + "'{period:.0f}'. Expected {period_h:0.2f}, based on "
+                    + "length in years, but the sum of timepoint weights "
+                    + "is {ds_h:0.2f}.\n"
+                ).format(
+                    period=p, period_h=m.period_length_hours[p], ds_h=hours_in_period
+                )
+            )
             return 0
         return 1
-    mod.validate_time_weights = BuildCheck(
-        mod.PERIODS,
-        rule=validate_time_weights_rule)
+
+    mod.validate_time_weights = BuildCheck(mod.PERIODS, rule=validate_time_weights_rule)
 
     def validate_period_lengths_rule(m, p):
         tol = 0.01
@@ -360,16 +401,19 @@ def validate_period_lengths_rule(m, p):
         p_end = m.period_start[p] + m.period_length_years[p]
         p_next = m.period_start[m.PERIODS.next(p)]
         if abs(p_next - p_end) > tol:
-            print((
-                "validate_period_lengths_rule failed for period"
-                + "'{p:.0f}'. Period ends at {p_end}, but next period"
-                + "begins at {p_next}."
-            ).format(p=p, p_end=p_end, p_next=p_next))
+            print(
+                (
+                    "validate_period_lengths_rule failed for period "
+                    + "'{p:.0f}'. Period ends at {p_end}, but next period "
+                    + "begins at {p_next}."
+                ).format(p=p, p_end=p_end, p_next=p_next)
+            )
             return False
         return True
+
     mod.validate_period_lengths = BuildCheck(
-        mod.PERIODS,
-        rule=validate_period_lengths_rule)
+        mod.PERIODS, rule=validate_period_lengths_rule
+    )
 
 def load_inputs(mod, switch_data, inputs_dir):
     """
@@ -399,19 +443,23 @@ def load_inputs(mod, switch_data, inputs_dir):
     # names, be indifferent to column order, and throw an error message if
     # some columns are not found.
     switch_data.load_aug(
-        filename=os.path.join(inputs_dir, 'periods.csv'),
-        select=('INVESTMENT_PERIOD', 'period_start', 'period_end'),
+        filename=os.path.join(inputs_dir, "periods.csv"),
         index=mod.PERIODS,
-        param=(mod.period_start, mod.period_end))
+        param=(mod.period_start, mod.period_end),
+    )
     switch_data.load_aug(
-        filename=os.path.join(inputs_dir, 'timeseries.csv'),
-        select=('TIMESERIES', 'ts_period', 'ts_duration_of_tp',
-            'ts_num_tps', 'ts_scale_to_period'),
+        filename=os.path.join(inputs_dir, "timeseries.csv"),
         index=mod.TIMESERIES,
-        param=(mod.ts_period, mod.ts_duration_of_tp,
-            mod.ts_num_tps, mod.ts_scale_to_period))
+        param=(
+            mod.ts_period,
+            mod.ts_duration_of_tp,
+            mod.ts_num_tps,
+            mod.ts_scale_to_period,
+        ),
+    )
     switch_data.load_aug(
-        filename=os.path.join(inputs_dir, 'timepoints.csv'),
-        select=('timepoint_id', 'timestamp', 'timeseries'),
+        filename=os.path.join(inputs_dir, "timepoints.csv"),
+        select=("timepoint_id", "timestamp", "timeseries"),
         index=mod.TIMEPOINTS,
-        param=(mod.tp_timestamp, mod.tp_ts))
+        param=(mod.tp_timestamp, mod.tp_ts),
+    )
diff --git a/switch_model/transmission/copperplate.py b/switch_model/transmission/copperplate.py
new file mode 100644
index 000000000..539137f4c
--- /dev/null
+++ b/switch_model/transmission/copperplate.py
@@ -0,0 +1,14 @@
+"""
+Allow unlimited transfer of power between zones at no cost.
+""" +from pyomo.environ import * + + +def define_components(m): + m.TXPowerNet = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=Reals) + # net imports into each zone from all other zones + m.TX_Energy_Balance = Constraint( + m.TIMEPOINTS, + rule=lambda m, t: sum(m.TXPowerNet[z, t] for z in m.LOAD_ZONES) == 0.0, + ) + m.Zone_Power_Injections.append("TXPowerNet") diff --git a/switch_model/transmission/local_td.py b/switch_model/transmission/local_td.py index 26e451e8d..ae4808df7 100644 --- a/switch_model/transmission/local_td.py +++ b/switch_model/transmission/local_td.py @@ -1,4 +1,4 @@ -# Copyright (c) 2015-2019 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. """ @@ -12,10 +12,16 @@ from __future__ import division import os + +import pandas as pd from pyomo.environ import * -dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones',\ - 'switch_model.financials' +dependencies = ( + "switch_model.timescales", + "switch_model.balancing.load_zones", + "switch_model.financials", +) + def define_dynamic_lists(mod): """ @@ -35,21 +41,21 @@ def define_components(mod): """ Define local transmission and distribution portions of an electric grid. - This models load zones as two nodes: the central grid node described in - the load_zones module, and a distributed (virtual) node that is connected - to the central bus via a local_td pathway with losses described by - distribution_loss_rate. Distributed Energy Resources (DER) such as - distributed solar, demand response, efficiency programs, etc will need to - register with the Distributed_Power_Withdrawals and Distributed_Power_Injections lists - which are used for power balance equations. This module is divided into - two sections: the distribution node and the local_td pathway that connects - it to the central grid. + This models load zones as two nodes: the central grid node described in the + load_zones module, and a distributed (virtual) node that is connected to the + central bus via a local_td pathway with losses described by + local_td_loss_rate. Distributed Energy Resources (DER) such as distributed + solar, demand response, efficiency programs, etc., will need to register + with the Distributed_Power_Withdrawals and Distributed_Power_Injections + lists which are used for power balance equations. This module is divided + into two sections: the distribution node and the local_td pathway that + connects it to the central grid. Note: This module interprets the parameter zone_demand_mw[z,t] as the end- use sales rather than the withdrawals from the central grid, and moves zone_demand_mw from the Zone_Power_Withdrawals list to the - Distributed_Power_Withdrawals list so that distribution losses can be accounted - for. + Distributed_Power_Withdrawals list so that distribution losses can be + accounted for. Unless otherwise stated, all power capacity is specified in units of MW and all sets and parameters are mandatory. @@ -61,12 +67,12 @@ def define_components(mod): the perspective of the central grid. We currently prohibit injections into the central grid because it would create a mathematical loophole for "spilling power" and we currently lack use cases that need this. We cannot - use a single unsigned variable for this without introducing errrors in + use a single unsigned variable for this without introducing errors in calculating Local T&D line losses. 
WithdrawFromCentralGrid is added to the load_zone power balance, and has a corresponding expression from the perspective of the distributed node: - InjectIntoDistributedGrid[z,t] = WithdrawFromCentralGrid[z,t] * (1-distribution_loss_rate) + InjectIntoDistributedGrid[z,t] = WithdrawFromCentralGrid[z,t] * (1-local_td_loss_rate) The Distributed_Energy_Balance constraint is defined in define_dynamic_components. @@ -74,7 +80,10 @@ def define_components(mod): existing_local_td[z in LOAD_ZONES] is the amount of local transmission and distribution capacity in MW that is in place prior to the start of the - study. This is assumed to remain in service throughout the study. + study. This is assumed to remain in service throughout the study. It can be + omitted, in which case Switch constructs enough to meet loads (possibly less + than is in place already in cases where local T&D costs and losses have a + strong effect on results). BuildLocalTD[load_zone, period] is a decision variable describing how much local transmission and distribution to add in each load @@ -89,27 +98,29 @@ def define_components(mod): increasing local T&D requirements, or adding more distributed solar, potentially decreasing local T&D requirements. - distribution_loss_rate is the ratio of average losses for local T&D. This - value is relative to delivered energy, so the total energy needed is load - * (1 + distribution_loss_rate). This optional value defaults to 0.053 - based on ReEDS Solar Vision documentation: + local_td_loss_rate[z in LOAD_ZONES] is the ratio of average losses for local + T&D in zone z. This value is relative to delivered energy, so the total + energy needed is load * (1 + local_td_loss_rate). This optional value + defaults to 0.053 based on ReEDS Solar Vision documentation: http://www1.eere.energy.gov/solar/pdfs/svs_appendix_a_model_descriptions_data.pdf Meet_Local_TD[z, period] is a constraint that enforces minimal local T&D requirements. LocalTDCapacity >= max_local_demand - local_td_annual_cost_per_mw[z in LOAD_ZONES] describes the total - annual costs for each MW of local transmission & distribution. This - value should include the annualized capital costs as well as fixed - operations & maintenance costs. These costs will be applied to - existing and new infrastructure. We assume that existing capacity - will be replaced at the end of its life, so these costs will - continue indefinitely. + local_td_annual_cost_per_mw[z in LOAD_ZONES] describes the total annual + costs for each MW of local transmission & distribution. This value should + include the annualized capital costs as well as fixed operations & + maintenance costs. These costs will be applied to existing and new + infrastructure. We assume that existing capacity will be replaced at the end + of its life, so these costs will continue indefinitely. This can be omitted, + in which case it is assumed to be zero. (In that case, the main effect of + the local_td module would be to calculate losses between the central node + and the distribution node.) --- NOTES --- - Switch 2 treats all transmission and distribution (long- distance or local) + Switch 2 treats all transmission and distribution (long-distance or local) the same. Any capacity that is built will be kept online indefinitely. 
At the end of its financial lifetime, existing capacity will be retired and rebuilt, so the annual cost of a line upgrade will remain constant in every @@ -119,60 +130,65 @@ def define_components(mod): """ # Local T&D - mod.existing_local_td = Param(mod.LOAD_ZONES, within=NonNegativeReals) - mod.min_data_check('existing_local_td') + mod.existing_local_td = Param(mod.LOAD_ZONES, within=NonNegativeReals, default=0.0) - mod.BuildLocalTD = Var( - mod.LOAD_ZONES, mod.PERIODS, - within=NonNegativeReals) + mod.BuildLocalTD = Var(mod.LOAD_ZONES, mod.PERIODS, within=NonNegativeReals) mod.LocalTDCapacity = Expression( - mod.LOAD_ZONES, mod.PERIODS, - rule=lambda m, z, period: - m.existing_local_td[z] - + sum( - m.BuildLocalTD[z, bld_yr] - for bld_yr in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[period] - ) + mod.LOAD_ZONES, + mod.PERIODS, + rule=lambda m, z, period: m.existing_local_td[z] + + sum( + m.BuildLocalTD[z, bld_yr] + for bld_yr in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[period] + ), + ) + mod.local_td_loss_rate = Param( + mod.LOAD_ZONES, within=NonNegativeReals, default=0.053 ) - mod.distribution_loss_rate = Param(default=0.053) mod.Meet_Local_TD = Constraint( mod.EXTERNAL_COINCIDENT_PEAK_DEMAND_ZONE_PERIODS, - rule=lambda m, z, period: - m.LocalTDCapacity[z, period] * (1-m.distribution_loss_rate) - >= - m.zone_expected_coincident_peak_demand[z, period] + rule=lambda m, z, period: ( + m.LocalTDCapacity[z, period] * (1 - m.local_td_loss_rate[z]) + >= m.zone_expected_coincident_peak_demand[z, period] + ), ) mod.local_td_annual_cost_per_mw = Param( - mod.LOAD_ZONES, - within=NonNegativeReals) - mod.min_data_check('local_td_annual_cost_per_mw') + mod.LOAD_ZONES, within=NonNegativeReals, default=0.0 + ) + mod.min_data_check("local_td_annual_cost_per_mw") mod.LocalTDFixedCosts = Expression( mod.PERIODS, doc="Summarize annual local T&D costs for the objective function.", rule=lambda m, p: sum( m.LocalTDCapacity[z, p] * m.local_td_annual_cost_per_mw[z] - for z in m.LOAD_ZONES)) - mod.Cost_Components_Per_Period.append('LocalTDFixedCosts') - + for z in m.LOAD_ZONES + ), + ) + mod.Cost_Components_Per_Period.append("LocalTDFixedCosts") # DISTRIBUTED NODE mod.WithdrawFromCentralGrid = Var( mod.ZONE_TIMEPOINTS, within=NonNegativeReals, - doc="Power withdrawn from a zone's central node sent over local T&D.") + doc="Power withdrawn from a zone's central node sent over local T&D.", + ) mod.Enforce_Local_TD_Capacity_Limit = Constraint( mod.ZONE_TIMEPOINTS, - rule=lambda m, z, t: - m.WithdrawFromCentralGrid[z,t] <= m.LocalTDCapacity[z,m.tp_period[t]]) + rule=lambda m, z, t: m.WithdrawFromCentralGrid[z, t] + <= m.LocalTDCapacity[z, m.tp_period[t]], + ) mod.InjectIntoDistributedGrid = Expression( mod.ZONE_TIMEPOINTS, doc="Describes WithdrawFromCentralGrid after line losses.", - rule=lambda m, z, t: m.WithdrawFromCentralGrid[z,t] * (1-m.distribution_loss_rate)) + rule=lambda m, z, t: ( + m.WithdrawFromCentralGrid[z, t] * (1 - m.local_td_loss_rate[z]) + ), + ) # Register energy injections & withdrawals - mod.Zone_Power_Withdrawals.append('WithdrawFromCentralGrid') - mod.Distributed_Power_Injections.append('InjectIntoDistributedGrid') + mod.Zone_Power_Withdrawals.append("WithdrawFromCentralGrid") + mod.Distributed_Power_Injections.append("InjectIntoDistributedGrid") def define_dynamic_components(mod): @@ -196,24 +212,90 @@ def define_dynamic_components(mod): sum( getattr(m, component)[z, t] for component in m.Distributed_Power_Injections - ) == sum( + ) + == sum( getattr(m, component)[z, t] - for component in 
m.Distributed_Power_Withdrawals))) + for component in m.Distributed_Power_Withdrawals + ) + ), + ) def load_inputs(mod, switch_data, inputs_dir): """ - Import local transmission & distribution data. The following files - are expected in the input directory. load_zones.csv will - contain additional columns that are used by the load_zones module. + Import local transmission & distribution data. The following file is + expected in the input directory. Optional columns are marked with *. + load_zones.csv will contain additional columns that are used by the + load_zones module. load_zones.csv - load_zone, existing_local_td, local_td_annual_cost_per_mw + load_zone, ..., existing_local_td, local_td_annual_cost_per_mw, local_td_loss_rate* """ switch_data.load_aug( - filename=os.path.join(inputs_dir, 'load_zones.csv'), - auto_select=True, - param=(mod.existing_local_td, mod.local_td_annual_cost_per_mw)) + filename=os.path.join(inputs_dir, "load_zones.csv"), + optional_params=["local_td_loss_rate"], + param=( + mod.existing_local_td, + mod.local_td_annual_cost_per_mw, + mod.local_td_loss_rate, + ), + ) + + +def post_solve(instance, outdir): + """ + Export results. + + local_td_energy_balance_wide.csv is a wide table of energy balance + components for every zone and timepoint. Each component registered with + Distributed_Power_Injections and Distributed_Power_Withdrawals will become + a column. Values of Distributed_Power_Withdrawals will be multiplied by -1 + during export. The columns in this file can vary based on which modules + are included in your model. + + local_td_energy_balance.csv is the same data in "tidy" form with a constant + number of columns. + + """ + wide_dat = [] + for z, t in instance.ZONE_TIMEPOINTS: + record = {"load_zone": z, "timestamp": t} + for component in instance.Distributed_Power_Injections: + record[component] = value(getattr(instance, component)[z, t]) + for component in instance.Distributed_Power_Withdrawals: + record[component] = value(-1.0 * getattr(instance, component)[z, t]) + wide_dat.append(record) + wide_df = pd.DataFrame(wide_dat) + wide_df.set_index(["load_zone", "timestamp"], inplace=True) + if instance.options.sorted_output: + wide_df.sort_index(inplace=True) + wide_df.to_csv(os.path.join(outdir, "local_td_energy_balance_wide.csv")) + + normalized_dat = [] + for z, t in instance.ZONE_TIMEPOINTS: + for component in instance.Distributed_Power_Injections: + record = { + "load_zone": z, + "timestamp": t, + "component": component, + "injects_or_withdraws": "injects", + "value": value(getattr(instance, component)[z, t]), + } + normalized_dat.append(record) + for component in instance.Distributed_Power_Withdrawals: + record = { + "load_zone": z, + "timestamp": t, + "component": component, + "injects_or_withdraws": "withdraws", + "value": value(-1.0 * getattr(instance, component)[z, t]), + } + normalized_dat.append(record) + df = pd.DataFrame(normalized_dat) + df.set_index(["load_zone", "timestamp", "component"], inplace=True) + if instance.options.sorted_output: + df.sort_index(inplace=True) + df.to_csv(os.path.join(outdir, "local_td_energy_balance.csv")) diff --git a/switch_model/transmission/transport/__init__.py b/switch_model/transmission/transport/__init__.py index 4b66b6fa6..0457fcabd 100644 --- a/switch_model/transmission/transport/__init__.py +++ b/switch_model/transmission/transport/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2017 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. 
# Licensed under the Apache License, Version 2, which is in the LICENSE file.
 
 """
@@ -9,5 +9,6 @@
 """
 core_modules = [
-    'switch_model.transmission.transport.build',
-    'switch_model.transmission.transport.dispatch']
+    "switch_model.transmission.transport.build",
+    "switch_model.transmission.transport.dispatch",
+]
diff --git a/switch_model/transmission/transport/build.py b/switch_model/transmission/transport/build.py
index 11e20adb6..df50f55b5 100644
--- a/switch_model/transmission/transport/build.py
+++ b/switch_model/transmission/transport/build.py
@@ -1,26 +1,32 @@
-# Copyright (c) 2015-2019 The Switch Authors. All rights reserved.
+# Copyright (c) 2015-2022 The Switch Authors. All rights reserved.
 # Licensed under the Apache License, Version 2.0, which is in the LICENSE file.
 
 """
 Defines transmission build-outs.
 
 """
+import logging
 import os
+
+import pandas as pd
 from pyomo.environ import *
+
 from switch_model.financials import capital_recovery_factor as crf
-import pandas as pd
+from switch_model.utilities import unique_list
+
+dependencies = (
+    "switch_model.timescales",
+    "switch_model.balancing.load_zones",
+    "switch_model.financials",
+)
 
-dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones',\
-    'switch_model.financials'
 
 def define_components(mod):
     """
+    Defines a transport model for inter-zone transmission. Unless otherwise
+    stated, all power capacity is specified in units of MW and all sets and
+    parameters are mandatory.
 
-    Adds components to a Pyomo abstract model object to describe bulk
-    transmission of an electric grid. This includes parameters, build
-    decisions and constraints. Unless otherwise stated, all power
-    capacity is specified in units of MW and all sets and parameters are
-    mandatory.
 
     TRANSMISSION_LINES is the complete set of transmission pathways
     connecting load zones. Each member of this set is a one dimensional
@@ -51,30 +57,22 @@ def define_components(mod):
     indicating whether new transmission build-outs are allowed along a
     transmission line. This optional parameter defaults to True.
 
-    BLD_YRS_FOR_TX is the set of transmission lines and years in
-    which they have been or could be built. This set includes past and
-    potential future builds. All future builds must come online in the
-    first year of an investment period. This set is composed of two
-    elements with members: (tx, build_year). For existing transmission
-    where the build years are not known, build_year is set to 'Legacy'.
-
-    BLD_YRS_FOR_EXISTING_TX is a subset of BLD_YRS_FOR_TX that lists
-    builds that happened before the first investment period. For most
-    datasets the build year is unknown, so is it always set to 'Legacy'.
+    TRANS_BLD_YRS is the set of transmission lines and future years in
+    which they could be built. This set is composed of two
+    elements with members: (tx, build_year). In a prior implementation,
+    this set also contained existing transmission (with build_year typically
+    set to 'Legacy'), but this changed in commit 868ca08 on June 13, 2019.
 
     existing_trans_cap[tx in TRANSMISSION_LINES] is a parameter that
-    describes how many MW of capacity has been installed before the
+    describes how many MW of capacity was installed before the
     start of the study.
 
-    NEW_TRANS_BLD_YRS is a subset of BLD_YRS_FOR_TX that describes
-    potential builds.
-
-    BuildTx[(tx, bld_yr) in BLD_YRS_FOR_TX] is a decision variable
+    BuildTx[(tx, bld_yr) in TRANS_BLD_YRS] is a decision variable
     that describes the transfer capacity in MW installed on a corridor
     in a given build year. For existing builds, this variable is locked
     to the existing capacity.
 
-    TxCapacityNameplate[(tx, bld_yr) in BLD_YRS_FOR_TX] is an expression
+    TxCapacityNameplate[(tx, bld_yr) in TRANS_BLD_YRS] is an expression
     that returns the total nameplate transfer capacity of a transmission
     line in a given period. This is the sum of existing and newly-built
     capacity.
@@ -83,9 +81,9 @@ def define_components(mod):
     derating factor for each transmission line that can reflect forced
     outage rates, stability or contingency limitations. This parameter
     is optional and defaults to 1. This parameter should be in the
-    range of 0 to 1, being 0 a value that disables the line completely.
+    range of 0 to 1. A value of 0 disables the line completely.
 
-    TxCapacityNameplateAvailable[(tx, bld_yr) in BLD_YRS_FOR_TX] is an
+    TxCapacityNameplateAvailable[(tx, bld_yr) in TRANS_BLD_YRS] is an
     expression that returns the available transfer capacity of a
     transmission line in a given period, taking into account the
     nameplate capacity and derating factor.
@@ -130,18 +128,6 @@ def define_components(mod):
     trans_d_line[trans_d] is the transmission line associated with this
     directional path.
 
-    TX_BUILDS_IN_PERIOD[p in PERIODS] is an indexed set that
-    describes which transmission builds will be operational in a given
-    period. Currently, transmission lines are kept online indefinitely,
-    with parts being replaced as they wear out.
-
-    TX_BUILDS_IN_PERIOD[p] will return a subset of (tx, bld_yr)
-    in BLD_YRS_FOR_TX.
-
-    --- Delayed implementation ---
-
-    is_dc_line ... Do I even need to implement this?
-
     --- NOTES ---
 
     The cost stream over time for transmission lines differs from the
@@ -172,59 +158,77 @@ def define_components(mod):
 
     """
-    mod.TRANSMISSION_LINES = Set()
+    mod.TRANSMISSION_LINES = Set(dimen=1)
     mod.trans_lz1 = Param(mod.TRANSMISSION_LINES, within=mod.LOAD_ZONES)
     mod.trans_lz2 = Param(mod.TRANSMISSION_LINES, within=mod.LOAD_ZONES)
     # we don't do a min_data_check for TRANSMISSION_LINES, because it may be empty for model
     # configurations that are sometimes run with interzonal transmission and sometimes not
     # (e.g., island interconnect scenarios). However, presence of this column will still be
     # checked by load_data_aug.
-    mod.min_data_check('trans_lz1', 'trans_lz2')
-    mod.trans_dbid = Param(mod.TRANSMISSION_LINES, default=lambda m, tx: tx)
+    mod.min_data_check("trans_lz1", "trans_lz2")
+
+    def _check_tx_duplicate_paths(m):
+        forward_paths = set(
+            [(m.trans_lz1[tx], m.trans_lz2[tx]) for tx in m.TRANSMISSION_LINES]
+        )
+        reverse_paths = set(
+            [(m.trans_lz2[tx], m.trans_lz1[tx]) for tx in m.TRANSMISSION_LINES]
+        )
+        overlap = forward_paths.intersection(reverse_paths)
+        if overlap:
+            logging.error(
+                "Transmission lines have bi-directional paths specified "
+                "in input files. They are expected to specify a single path "
+                "per pair of connected load zones. "
+                "(Ex: either A->B or B->A, but not both). 
" + "Over-specified lines: {}".format(overlap) + ) + return False + else: + return True + + mod.check_tx_duplicate_paths = BuildCheck(rule=_check_tx_duplicate_paths) + + mod.trans_dbid = Param(mod.TRANSMISSION_LINES, default=lambda m, tx: tx, within=Any) mod.trans_length_km = Param(mod.TRANSMISSION_LINES, within=NonNegativeReals) - mod.trans_efficiency = Param( - mod.TRANSMISSION_LINES, - within=PercentFraction) - mod.existing_trans_cap = Param( - mod.TRANSMISSION_LINES, - within=NonNegativeReals) - mod.min_data_check( - 'trans_length_km', 'trans_efficiency', 'existing_trans_cap') + mod.trans_efficiency = Param(mod.TRANSMISSION_LINES, within=PercentFraction) + mod.existing_trans_cap = Param(mod.TRANSMISSION_LINES, within=NonNegativeReals) + mod.min_data_check("trans_length_km", "trans_efficiency", "existing_trans_cap") mod.trans_new_build_allowed = Param( - mod.TRANSMISSION_LINES, within=Boolean, default=True) + mod.TRANSMISSION_LINES, within=Boolean, default=True + ) mod.TRANS_BLD_YRS = Set( dimen=2, initialize=mod.TRANSMISSION_LINES * mod.PERIODS, - filter=lambda m, tx, p: m.trans_new_build_allowed[tx]) + filter=lambda m, tx, p: m.trans_new_build_allowed[tx], + ) mod.BuildTx = Var(mod.TRANS_BLD_YRS, within=NonNegativeReals) mod.TxCapacityNameplate = Expression( - mod.TRANSMISSION_LINES, mod.PERIODS, + mod.TRANSMISSION_LINES, + mod.PERIODS, rule=lambda m, tx, period: sum( m.BuildTx[tx, bld_yr] for bld_yr in m.PERIODS if bld_yr <= period and (tx, bld_yr) in m.TRANS_BLD_YRS - ) + m.existing_trans_cap[tx]) + ) + + m.existing_trans_cap[tx], + ) mod.trans_derating_factor = Param( - mod.TRANSMISSION_LINES, - within=PercentFraction, - default=1) + mod.TRANSMISSION_LINES, within=PercentFraction, default=1 + ) mod.TxCapacityNameplateAvailable = Expression( - mod.TRANSMISSION_LINES, mod.PERIODS, + mod.TRANSMISSION_LINES, + mod.PERIODS, rule=lambda m, tx, period: ( - m.TxCapacityNameplate[tx, period] * m.trans_derating_factor[tx])) + m.TxCapacityNameplate[tx, period] * m.trans_derating_factor[tx] + ), + ) mod.trans_terrain_multiplier = Param( - mod.TRANSMISSION_LINES, - within=NonNegativeReals, - default=1) - mod.trans_capital_cost_per_mw_km = Param( - within=NonNegativeReals, - default=1000) - mod.trans_lifetime_yrs = Param( - within=NonNegativeReals, - default=20) - mod.trans_fixed_om_fraction = Param( - within=NonNegativeReals, - default=0.03) + mod.TRANSMISSION_LINES, within=NonNegativeReals, default=1 + ) + mod.trans_capital_cost_per_mw_km = Param(within=NonNegativeReals, default=1000) + mod.trans_lifetime_yrs = Param(within=NonNegativeReals, default=20) + mod.trans_fixed_om_fraction = Param(within=NonNegativeReals, default=0.03) # Total annual fixed costs for building new transmission lines... # Multiply capital costs by capital recover factor to get annual # payments. Add annual fixed O&M that are expressed as a fraction of @@ -233,9 +237,12 @@ def define_components(mod): mod.TRANSMISSION_LINES, within=NonNegativeReals, initialize=lambda m, tx: ( - m.trans_capital_cost_per_mw_km * m.trans_terrain_multiplier[tx] * - m.trans_length_km[tx] * (crf(m.interest_rate, m.trans_lifetime_yrs) + - m.trans_fixed_om_fraction))) + m.trans_capital_cost_per_mw_km + * m.trans_terrain_multiplier[tx] + * m.trans_length_km[tx] + * (crf(m.interest_rate, m.trans_lifetime_yrs) + m.trans_fixed_om_fraction) + ), + ) # An expression to summarize annual costs for the objective # function. Units should be total annual future costs in $base_year # real dollars. 
The objective function will convert these to @@ -245,89 +252,89 @@ def define_components(mod): rule=lambda m, p: sum( m.TxCapacityNameplate[tx, p] * m.trans_cost_annual[tx] for tx in m.TRANSMISSION_LINES - ) + ), ) - mod.Cost_Components_Per_Period.append('TxFixedCosts') + mod.Cost_Components_Per_Period.append("TxFixedCosts") def init_DIRECTIONAL_TX(model): - tx_dir = set() + tx_dir = [] for tx in model.TRANSMISSION_LINES: - tx_dir.add((model.trans_lz1[tx], model.trans_lz2[tx])) - tx_dir.add((model.trans_lz2[tx], model.trans_lz1[tx])) + tx_dir.append((model.trans_lz1[tx], model.trans_lz2[tx])) + tx_dir.append((model.trans_lz2[tx], model.trans_lz1[tx])) return tx_dir - mod.DIRECTIONAL_TX = Set( - dimen=2, - initialize=init_DIRECTIONAL_TX) + + mod.DIRECTIONAL_TX = Set(dimen=2, initialize=init_DIRECTIONAL_TX) mod.TX_CONNECTIONS_TO_ZONE = Set( mod.LOAD_ZONES, - initialize=lambda m, lz: set( - z for z in m.LOAD_ZONES if (z,lz) in m.DIRECTIONAL_TX)) + dimen=1, + initialize=lambda m, lz: [ + z for z in m.LOAD_ZONES if (z, lz) in m.DIRECTIONAL_TX + ], + ) def init_trans_d_line(m, zone_from, zone_to): for tx in m.TRANSMISSION_LINES: - if((m.trans_lz1[tx] == zone_from and m.trans_lz2[tx] == zone_to) or - (m.trans_lz2[tx] == zone_from and m.trans_lz1[tx] == zone_to)): + if (m.trans_lz1[tx] == zone_from and m.trans_lz2[tx] == zone_to) or ( + m.trans_lz2[tx] == zone_from and m.trans_lz1[tx] == zone_to + ): return tx + mod.trans_d_line = Param( - mod.DIRECTIONAL_TX, - within=mod.TRANSMISSION_LINES, - initialize=init_trans_d_line) + mod.DIRECTIONAL_TX, within=mod.TRANSMISSION_LINES, initialize=init_trans_d_line + ) def load_inputs(mod, switch_data, inputs_dir): """ - Import data related to transmission builds. The following files are - expected in the input directory: + expected in the input directory. Optional files & columns are marked with + a *. transmission_lines.csv TRANSMISSION_LINE, trans_lz1, trans_lz2, trans_length_km, - trans_efficiency, existing_trans_cap, trans_dbid, - trans_derating_factor, trans_terrain_multiplier, - trans_new_build_allowed - The last 4 columns of transmission_lines.csv are optional. If the - columns are missing or if cells contain a dot (.), those parameters - will be set to default values as described in documentation. + trans_efficiency, existing_trans_cap, trans_dbid*, + trans_derating_factor*, trans_terrain_multiplier*, + trans_new_build_allowed* Note that in the next file, parameter names are written on the first row (as usual), and the single value for each parameter is written in - the second row. The distribution_loss_rate parameter is read by the - local_td module (if used). + the second row. 
- trans_params.csv - trans_capital_cost_per_mw_km, trans_lifetime_yrs, - trans_fixed_om_fraction, distribution_loss_rate + trans_params.csv* + trans_capital_cost_per_mw_km*, trans_lifetime_yrs*, + trans_fixed_om_fraction* """ - # TODO: send issue / pull request to Pyomo to allow .csv files with # no rows after header (fix bugs in pyomo.core.plugins.data.text) switch_data.load_aug( - filename=os.path.join(inputs_dir, 'transmission_lines.csv'), - select=( - 'TRANSMISSION_LINE', 'trans_lz1', 'trans_lz2', - 'trans_length_km', 'trans_efficiency', 'existing_trans_cap', - 'trans_dbid', 'trans_derating_factor', - 'trans_terrain_multiplier', 'trans_new_build_allowed' - ), + filename=os.path.join(inputs_dir, "transmission_lines.csv"), index=mod.TRANSMISSION_LINES, optional_params=( - 'trans_dbid', 'trans_derating_factor', - 'trans_terrain_multiplier', 'trans_new_build_allowed' + "trans_dbid", + "trans_derating_factor", + "trans_terrain_multiplier", + "trans_new_build_allowed", ), param=( - mod.trans_lz1, mod.trans_lz2, - mod.trans_length_km, mod.trans_efficiency, mod.existing_trans_cap, - mod.trans_dbid, mod.trans_derating_factor, - mod.trans_terrain_multiplier, mod.trans_new_build_allowed - ) + mod.trans_lz1, + mod.trans_lz2, + mod.trans_length_km, + mod.trans_efficiency, + mod.existing_trans_cap, + mod.trans_dbid, + mod.trans_derating_factor, + mod.trans_terrain_multiplier, + mod.trans_new_build_allowed, + ), ) switch_data.load_aug( - filename=os.path.join(inputs_dir, 'trans_params.csv'), - optional=True, auto_select=True, + filename=os.path.join(inputs_dir, "trans_params.csv"), + optional=True, param=( - mod.trans_capital_cost_per_mw_km, mod.trans_lifetime_yrs, - mod.trans_fixed_om_fraction, mod.distribution_loss_rate - ) + mod.trans_capital_cost_per_mw_km, + mod.trans_lifetime_yrs, + mod.trans_fixed_om_fraction, + ), ) @@ -335,19 +342,26 @@ def post_solve(instance, outdir): mod = instance normalized_dat = [ { - "TRANSMISSION_LINE": tx, - "PERIOD": p, - "trans_lz1": mod.trans_lz1[tx], - "trans_lz2": mod.trans_lz2[tx], - "trans_dbid": mod.trans_dbid[tx], - "trans_length_km": mod.trans_length_km[tx], - "trans_efficiency": mod.trans_efficiency[tx], - "trans_derating_factor": mod.trans_derating_factor[tx], - "TxCapacityNameplate": value(mod.TxCapacityNameplate[tx,p]), - "TxCapacityNameplateAvailable": value(mod.TxCapacityNameplateAvailable[tx,p]), - "TotalAnnualCost": value(mod.TxCapacityNameplate[tx,p] * mod.trans_cost_annual[tx]) - } for tx, p in mod.TRANSMISSION_LINES * mod.PERIODS + "TRANSMISSION_LINE": tx, + "PERIOD": p, + "trans_lz1": mod.trans_lz1[tx], + "trans_lz2": mod.trans_lz2[tx], + "trans_dbid": mod.trans_dbid[tx], + "trans_length_km": mod.trans_length_km[tx], + "trans_efficiency": mod.trans_efficiency[tx], + "trans_derating_factor": mod.trans_derating_factor[tx], + "TxCapacityNameplate": value(mod.TxCapacityNameplate[tx, p]), + "TxCapacityNameplateAvailable": value( + mod.TxCapacityNameplateAvailable[tx, p] + ), + "TotalAnnualCost": value( + mod.TxCapacityNameplate[tx, p] * mod.trans_cost_annual[tx] + ), + } + for tx, p in mod.TRANSMISSION_LINES * mod.PERIODS ] tx_build_df = pd.DataFrame(normalized_dat) tx_build_df.set_index(["TRANSMISSION_LINE", "PERIOD"], inplace=True) + if instance.options.sorted_output: + tx_build_df.sort_index(inplace=True) tx_build_df.to_csv(os.path.join(outdir, "transmission.csv")) diff --git a/switch_model/transmission/transport/dispatch.py b/switch_model/transmission/transport/dispatch.py index 031afb24c..8f630b6b4 100644 --- 
a/switch_model/transmission/transport/dispatch.py +++ b/switch_model/transmission/transport/dispatch.py @@ -1,4 +1,4 @@ -# Copyright (c) 2015-2019 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. """ @@ -8,8 +8,13 @@ from pyomo.environ import * -dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones',\ - 'switch_model.financials', 'switch_model.transmission.transport.build' +dependencies = ( + "switch_model.timescales", + "switch_model.balancing.load_zones", + "switch_model.financials", + "switch_model.transmission.transport.build", +) + def define_components(mod): """ @@ -49,36 +54,42 @@ def define_components(mod): """ mod.TRANS_TIMEPOINTS = Set( - dimen=3, - initialize=lambda m: m.DIRECTIONAL_TX * m.TIMEPOINTS + dimen=3, initialize=lambda m: m.DIRECTIONAL_TX * m.TIMEPOINTS ) mod.DispatchTx = Var(mod.TRANS_TIMEPOINTS, within=NonNegativeReals) mod.Maximum_DispatchTx = Constraint( mod.TRANS_TIMEPOINTS, rule=lambda m, zone_from, zone_to, tp: ( - m.DispatchTx[zone_from, zone_to, tp] <= - m.TxCapacityNameplateAvailable[m.trans_d_line[zone_from, zone_to], - m.tp_period[tp]])) + m.DispatchTx[zone_from, zone_to, tp] + <= m.TxCapacityNameplateAvailable[ + m.trans_d_line[zone_from, zone_to], m.tp_period[tp] + ] + ), + ) mod.TxPowerSent = Expression( mod.TRANS_TIMEPOINTS, - rule=lambda m, zone_from, zone_to, tp: ( - m.DispatchTx[zone_from, zone_to, tp])) + rule=lambda m, zone_from, zone_to, tp: (m.DispatchTx[zone_from, zone_to, tp]), + ) mod.TxPowerReceived = Expression( mod.TRANS_TIMEPOINTS, rule=lambda m, zone_from, zone_to, tp: ( - m.DispatchTx[zone_from, zone_to, tp] * - m.trans_efficiency[m.trans_d_line[zone_from, zone_to]])) + m.DispatchTx[zone_from, zone_to, tp] + * m.trans_efficiency[m.trans_d_line[zone_from, zone_to]] + ), + ) def TXPowerNet_calculation(m, z, tp): - return ( - sum(m.TxPowerReceived[zone_from, z, tp] - for zone_from in m.TX_CONNECTIONS_TO_ZONE[z]) - - sum(m.TxPowerSent[z, zone_to, tp] - for zone_to in m.TX_CONNECTIONS_TO_ZONE[z])) + return sum( + m.TxPowerReceived[zone_from, z, tp] + for zone_from in m.TX_CONNECTIONS_TO_ZONE[z] + ) - sum( + m.TxPowerSent[z, zone_to, tp] for zone_to in m.TX_CONNECTIONS_TO_ZONE[z] + ) + mod.TXPowerNet = Expression( - mod.LOAD_ZONES, mod.TIMEPOINTS, - rule=TXPowerNet_calculation) + mod.LOAD_ZONES, mod.TIMEPOINTS, rule=TXPowerNet_calculation + ) # Register net transmission as contributing to zonal energy balance - mod.Zone_Power_Injections.append('TXPowerNet') + mod.Zone_Power_Injections.append("TXPowerNet") diff --git a/switch_model/upgrade/__init__.py b/switch_model/upgrade/__init__.py index d2ff07c52..db2c1f4b8 100644 --- a/switch_model/upgrade/__init__.py +++ b/switch_model/upgrade/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2015-2019 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. 
""" @@ -23,8 +23,12 @@ """ # Public interface from .manager import ( - main, upgrade_inputs, scan_and_upgrade, - get_input_version, do_inputs_need_upgrade + main, + upgrade_inputs, + scan_and_upgrade, + get_input_version, + do_inputs_need_upgrade, ) + # Private utility functions for this upgrade sub-package from .manager import _backup, _write_input_version, print_verbose diff --git a/switch_model/upgrade/manager.py b/switch_model/upgrade/manager.py index 9c71b2169..c19709831 100644 --- a/switch_model/upgrade/manager.py +++ b/switch_model/upgrade/manager.py @@ -1,4 +1,4 @@ -# Copyright (c) 2015-2019 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. from __future__ import print_function @@ -7,7 +7,7 @@ import argparse import os import shutil -from distutils.version import StrictVersion +from pkg_resources import parse_version import switch_model @@ -17,6 +17,8 @@ from . import upgrade_2_0_1 from . import upgrade_2_0_4 from . import upgrade_2_0_5 +from . import upgrade_2_0_6 +from . import upgrade_2_0_7 # Available upgrade code. This needs to be in consecutive order so # upgrade_inputs can incrementally apply the upgrades. @@ -28,25 +30,31 @@ upgrade_2_0_0b4, upgrade_2_0_1, upgrade_2_0_4, - upgrade_2_0_5 + upgrade_2_0_5, + upgrade_2_0_6, + upgrade_2_0_7, ] ] # Not every code revision requires an update; this is the last revision that did. last_required_update = upgrade_plugins[-1][-1] -code_version = StrictVersion(switch_model.__version__) -version_file = 'switch_inputs_version.txt' -#verbose = False +code_version = parse_version(switch_model.__version__) +version_file = "switch_inputs_version.txt" +# verbose = False verbose = True -def scan_and_upgrade(top_dir, inputs_dir_name='inputs', backup=True, assign_current_version=False): + +def scan_and_upgrade( + top_dir, inputs_dir_name="inputs", backup=True, assign_current_version=False +): for dirpath, dirnames, filenames in os.walk(top_dir): for dirname in dirnames: path = os.path.join(dirpath, dirname) - if os.path.exists(os.path.join(path, inputs_dir_name, 'modules.txt')): - # print_verbose('upgrading {}'.format(os.path.join(path, inputs_dir_name))) - upgrade_inputs(os.path.join(path, inputs_dir_name), backup, assign_current_version) + if os.path.exists(os.path.join(path, inputs_dir_name, "modules.txt")): + upgrade_inputs( + os.path.join(path, inputs_dir_name), backup, assign_current_version + ) def get_input_version(inputs_dir): @@ -61,27 +69,30 @@ def get_input_version(inputs_dir): """ version_path = os.path.join(inputs_dir, version_file) if os.path.isfile(version_path): - with open(version_path, 'r') as f: + with open(version_path, "r") as f: version = f.readline().strip() # Before we started storing version numbers in the inputs directory, we # had an input file named generator_info.tab. If that file exists, we are # dealing with version 2.0.0b0. - elif os.path.isfile(os.path.join(inputs_dir, 'generator_info.tab')): - version = '2.0.0b0' + elif os.path.isfile(os.path.join(inputs_dir, "generator_info.tab")): + version = "2.0.0b0" else: - raise ValueError(( - "Input directory {} is not recognized as a valid Switch input folder. " - "An input directory needs to contain a file named '{}' that stores the " - "version number of Switch that it was intended for. ").format( - inputs_dir, version_file)) + raise ValueError( + ( + "Input directory {} is not recognized as a valid Switch input folder. 
" + "An input directory needs to contain a file named '{}' that stores the " + "version number of Switch that it was intended for. " + ).format(inputs_dir, version_file) + ) return version def _write_input_version(inputs_dir, new_version): version_path = os.path.join(inputs_dir, version_file) - with open(version_path, 'w') as f: + with open(version_path, "w") as f: f.write(new_version + "\n") + def do_inputs_need_upgrade(inputs_dir): """ Determine if input directory can be upgraded with this script. @@ -93,54 +104,59 @@ def do_inputs_need_upgrade(inputs_dir): # Not every code revision requires an update, so just hard-code the last # revision that required an update. inputs_version = get_input_version(inputs_dir) - return StrictVersion(inputs_version) < StrictVersion(last_required_update) + return parse_version(inputs_version) < parse_version(last_required_update) def _backup(inputs_dir): """ Make a backup of the inputs_dir into a zip file, unless it already exists """ - inputs_backup = inputs_dir + '_v' + get_input_version(inputs_dir) + inputs_backup = inputs_dir + "_v" + get_input_version(inputs_dir) inputs_backup_path = inputs_backup + ".zip" if not os.path.isfile(inputs_backup_path): - shutil.make_archive(inputs_backup, 'zip', inputs_dir) + shutil.make_archive(inputs_backup, "zip", inputs_dir) -def print_verbose(*args): +def print_verbose(*args, indent=True): global verbose if verbose: - print(*args) + if indent: + print(" ", *args) + else: + print(*args) def upgrade_inputs(inputs_dir, backup=True, assign_current_version=False): # This logic will grow over time as complexity evolves.. Don't overengineer upgraded = False if do_inputs_need_upgrade(inputs_dir): - print_verbose('Upgrading ' + inputs_dir) + print_verbose("Upgrading " + inputs_dir, indent=False) if backup: - print_verbose('\tBacked up original inputs') + print_verbose("Backed up original inputs") _backup(inputs_dir) # Successively apply the upgrade scripts as needed. 
for (upgrader, v_from, v_to) in upgrade_plugins: - inputs_v = StrictVersion(get_input_version(inputs_dir)) + inputs_v = parse_version(get_input_version(inputs_dir)) # note: the next line catches datasets created by/for versions of Switch that # didn't require input directory upgrades - if StrictVersion(v_from) <= inputs_v < StrictVersion(v_to): - print_verbose('\tUpgrading from ' + v_from + ' to ' + v_to) + if parse_version(v_from) <= inputs_v < parse_version(v_to): + print_verbose("Upgrading from " + v_from + " to " + v_to) upgrader.upgrade_input_dir(inputs_dir) upgraded = True - if (StrictVersion(last_required_update) < StrictVersion(switch_model.__version__) - and assign_current_version): + if ( + parse_version(last_required_update) < parse_version(switch_model.__version__) + and assign_current_version + ): # user requested writing of current version number, even if no upgrade is needed # (useful for updating examples to track with new release of Switch) _write_input_version(inputs_dir, switch_model.__version__) upgraded = True if upgraded: - print_verbose('\tFinished upgrading ' + inputs_dir + '\n') + print_verbose("Finished upgrading " + inputs_dir + "\n") else: - print_verbose('Skipped ' + inputs_dir + '; it does not need upgrade.') + print_verbose(f"Skipped {inputs_dir}; it does not need upgrade.", indent=False) def main(args=None): @@ -152,33 +168,68 @@ def main(args=None): args = parser.parse_args() set_verbose(args.verbose) if args.recursive: - scan_and_upgrade('.', args.inputs_dir_name, args.backup, args.assign_current_version) + scan_and_upgrade( + ".", args.inputs_dir_name, args.backup, args.assign_current_version + ) else: if not os.path.isdir(args.inputs_dir_name): - print("Error: Input directory {} does not exist.".format(args.inputs_dir_name)) + print( + "Error: Input directory {} does not exist.".format(args.inputs_dir_name) + ) return -1 - upgrade_inputs(os.path.normpath(args.inputs_dir_name), args.backup, args.assign_current_version) + upgrade_inputs( + os.path.normpath(args.inputs_dir_name), + args.backup, + args.assign_current_version, + ) + def set_verbose(verbosity): global verbose verbose = verbosity + def add_parser_args(parser): - parser.add_argument("--inputs-dir-name", type=str, default="inputs", - help='Input directory name (default is "inputs")') - parser.add_argument("--backup", action='store_true', default=True, - help='Make backup of inputs directory before upgrading (set true by default)') - parser.add_argument("--no-backup", action='store_false', dest='backup', - help='Do not make backup of inputs directory before upgrading') - parser.add_argument("--assign-current-version", dest='assign_current_version', - action='store_true', default=False, - help=('Update version number in inputs directory to match current version' - 'of Switch, even if data does not require an upgrade.')) - parser.add_argument("--recursive", dest="recursive", - default=False, action='store_true', - help=('Recursively scan the provided path for inputs directories ' - 'named "inputs", and upgrade each directory found. Note, this ' - 'requires each inputs directory to include modules.txt. 
This ' - 'will not work if modules.txt is in the parent directory.')) - parser.add_argument("--verbose", action='store_true', default=verbose) - parser.add_argument("--quiet", dest="verbose", action='store_false') + parser.add_argument( + "--inputs-dir-name", + type=str, + default="inputs", + help='Input directory name (default is "inputs")', + ) + parser.add_argument( + "--backup", + action="store_true", + default=True, + help="Make backup of inputs directory before upgrading (set true by default)", + ) + parser.add_argument( + "--no-backup", + action="store_false", + dest="backup", + help="Do not make backup of inputs directory before upgrading", + ) + parser.add_argument( + "--assign-current-version", + dest="assign_current_version", + action="store_true", + default=False, + help=( + "Update version number in inputs directory to match current version " + "of Switch, even if data does not require an upgrade." + ), + ) + parser.add_argument( + "--recursive", + dest="recursive", + default=False, + action="store_true", + help=( + "Recursively scan from the current directory, searching for " + "directories named as shown in --inputs-dir-name, and " + "upgrading each directory found. Note, this " + "requires each inputs directory to include modules.txt. This " + "will not work if modules.txt is in the parent directory." + ), + ) + parser.add_argument("--verbose", action="store_true", default=verbose) + parser.add_argument("--quiet", dest="verbose", action="store_false") diff --git a/switch_model/upgrade/re_upgrade.py b/switch_model/upgrade/re_upgrade.py index f5cefb3fc..41b6474b4 100644 --- a/switch_model/upgrade/re_upgrade.py +++ b/switch_model/upgrade/re_upgrade.py @@ -2,15 +2,17 @@ import os from switch_model.upgrade.manager import upgrade_plugins + upgrade_module, upgrade_from, upgrade_to = upgrade_plugins[-1] -if __name__ == '__main__': +if __name__ == "__main__": print( - "Re-running upgrade from {} to {} for all subdirectories of current directory" - .format(upgrade_from, upgrade_to) + "Re-running upgrade from {} to {} for all subdirectories of current directory".format( + upgrade_from, upgrade_to + ) ) - for dirpath, dirnames, filenames in os.walk('.'): - if 'switch_inputs_version.txt' in filenames: - print('upgrading {}'.format(dirpath)) + for dirpath, dirnames, filenames in os.walk("."): + if "switch_inputs_version.txt" in filenames: + print("upgrading {}".format(dirpath)) upgrade_module.upgrade_input_dir(dirpath) diff --git a/switch_model/upgrade/upgrade_2_0_0b1.py b/switch_model/upgrade/upgrade_2_0_0b1.py index f0c0944c0..c50ba195c 100644 --- a/switch_model/upgrade/upgrade_2_0_0b1.py +++ b/switch_model/upgrade/upgrade_2_0_0b1.py @@ -1,4 +1,4 @@ -# Copyright (c) 2015-2019 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file.
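The move in manager.py from distutils' StrictVersion to pkg_resources.parse_version keeps the ordering the upgrade chain relies on, including pre-release tags such as 2.0.0b0, while getting off the deprecated distutils API. A quick sanity sketch of the comparisons manager.py performs (assuming setuptools, which provides pkg_resources, is available):

    from pkg_resources import parse_version

    # Pre-releases sort before the matching final release (PEP 440 ordering).
    assert parse_version("2.0.0b0") < parse_version("2.0.0b4") < parse_version("2.0.1")

    # The half-open interval test used in upgrade_inputs: v_from <= inputs_v < v_to
    inputs_v = parse_version("2.0.5")
    assert parse_version("2.0.5") <= inputs_v < parse_version("2.0.6")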
""" @@ -31,118 +31,115 @@ import argparse import switch_model.upgrade -upgrades_from = '2.0.0b0' -upgrades_to = '2.0.0b1' +upgrades_from = "2.0.0b0" +upgrades_to = "2.0.0b1" old_modules = { - 'switch_mod.balancing_areas', - 'switch_mod.export', - 'switch_mod.export.__init__', - 'switch_mod.export.dump', - 'switch_mod.export.example_export', - 'switch_mod.financials', - 'switch_mod.fuel_cost', - 'switch_mod.fuel_markets', - 'switch_mod.fuels', - 'switch_mod.gen_tech', - 'switch_mod.generators.hydro_simple', - 'switch_mod.generators.hydro_system', - 'switch_mod.generators.storage', - 'switch_mod.hawaii.batteries', - 'switch_mod.hawaii.batteries_fixed_calendar_life', - 'switch_mod.hawaii.constant_elasticity_demand_system', - 'switch_mod.hawaii.demand_response', - 'switch_mod.hawaii.demand_response_no_reserves', - 'switch_mod.hawaii.demand_response_simple', - 'switch_mod.hawaii.emission_rules', - 'switch_mod.hawaii.ev', - 'switch_mod.hawaii.fed_subsidies', - 'switch_mod.hawaii.fuel_markets_expansion', - 'switch_mod.hawaii.hydrogen', - 'switch_mod.hawaii.kalaeloa', - 'switch_mod.hawaii.lng_conversion', - 'switch_mod.hawaii.no_central_pv', - 'switch_mod.hawaii.no_onshore_wind', - 'switch_mod.hawaii.no_renewables', - 'switch_mod.hawaii.no_wind', - 'switch_mod.hawaii.psip', - 'switch_mod.hawaii.pumped_hydro', - 'switch_mod.hawaii.r_demand_system', - 'switch_mod.hawaii.reserves', - 'switch_mod.hawaii.rps', - 'switch_mod.hawaii.save_results', - 'switch_mod.hawaii.scenario_data', - 'switch_mod.hawaii.scenarios', - 'switch_mod.hawaii.smooth_dispatch', - 'switch_mod.hawaii.switch_patch', - 'switch_mod.hawaii.unserved_load', - 'switch_mod.hawaii.util', - 'switch_mod.load_zones', - 'switch_mod.local_td', - 'switch_mod.main', - 'switch_mod.project.build', - 'switch_mod.project.discrete_build', - 'switch_mod.project.dispatch', - 'switch_mod.project.no_commit', - 'switch_mod.project.unitcommit.commit', - 'switch_mod.project.unitcommit.discrete', - 'switch_mod.project.unitcommit.fuel_use', - 'switch_mod.solve', - 'switch_mod.solve_scenarios', - 'switch_mod.test', - 'switch_mod.timescales', - 'switch_mod.trans_build', - 'switch_mod.trans_dispatch', - 'switch_mod.utilities', - 'switch_mod.project', - 'switch_mod.project.unitcommit', + "switch_mod.balancing_areas", + "switch_mod.export", + "switch_mod.export.__init__", + "switch_mod.export.dump", + "switch_mod.export.example_export", + "switch_mod.financials", + "switch_mod.fuel_cost", + "switch_mod.fuel_markets", + "switch_mod.fuels", + "switch_mod.gen_tech", + "switch_mod.generators.hydro_simple", + "switch_mod.generators.hydro_system", + "switch_mod.generators.storage", + "switch_mod.hawaii.batteries", + "switch_mod.hawaii.batteries_fixed_calendar_life", + "switch_mod.hawaii.constant_elasticity_demand_system", + "switch_mod.hawaii.demand_response", + "switch_mod.hawaii.demand_response_no_reserves", + "switch_mod.hawaii.demand_response_simple", + "switch_mod.hawaii.emission_rules", + "switch_mod.hawaii.ev", + "switch_mod.hawaii.fed_subsidies", + "switch_mod.hawaii.fuel_markets_expansion", + "switch_mod.hawaii.hydrogen", + "switch_mod.hawaii.kalaeloa", + "switch_mod.hawaii.lng_conversion", + "switch_mod.hawaii.no_central_pv", + "switch_mod.hawaii.no_onshore_wind", + "switch_mod.hawaii.no_renewables", + "switch_mod.hawaii.no_wind", + "switch_mod.hawaii.psip", + "switch_mod.hawaii.pumped_hydro", + "switch_mod.hawaii.r_demand_system", + "switch_mod.hawaii.reserves", + "switch_mod.hawaii.rps", + "switch_mod.hawaii.save_results", + 
"switch_mod.hawaii.scenario_data", + "switch_mod.hawaii.scenarios", + "switch_mod.hawaii.smooth_dispatch", + "switch_mod.hawaii.switch_patch", + "switch_mod.hawaii.unserved_load", + "switch_mod.hawaii.util", + "switch_mod.load_zones", + "switch_mod.local_td", + "switch_mod.main", + "switch_mod.project.build", + "switch_mod.project.discrete_build", + "switch_mod.project.dispatch", + "switch_mod.project.no_commit", + "switch_mod.project.unitcommit.commit", + "switch_mod.project.unitcommit.discrete", + "switch_mod.project.unitcommit.fuel_use", + "switch_mod.solve", + "switch_mod.solve_scenarios", + "switch_mod.test", + "switch_mod.timescales", + "switch_mod.trans_build", + "switch_mod.trans_dispatch", + "switch_mod.utilities", + "switch_mod.project", + "switch_mod.project.unitcommit", } rename_modules = { - 'switch_mod.load_zones': 'switch_mod.balancing.load_zones', - 'switch_mod.fuels': 'switch_mod.energy_sources.properties', - 'switch_mod.trans_build': 'switch_mod.transmission.transport.build', - 'switch_mod.trans_dispatch': 'switch_mod.transmission.transport.dispatch', - 'switch_mod.project.build': 'switch_mod.generators.core.build', - 'switch_mod.project.discrete_build': 'switch_mod.generators.core.gen_discrete_build', - 'switch_mod.project.dispatch': 'switch_mod.generators.core.dispatch', - 'switch_mod.project.no_commit': 'switch_mod.generators.core.no_commit', - 'switch_mod.project.unitcommit.commit': 'switch_mod.generators.core.commit.operate', - 'switch_mod.project.unitcommit.fuel_use': 'switch_mod.generators.core.commit.fuel_use', - 'switch_mod.project.unitcommit.discrete': 'switch_mod.generators.core.commit.discrete', - 'switch_mod.fuel_cost': 'switch_mod.energy_sources.fuel_costs.simple', - 'switch_mod.fuel_markets': 'switch_mod.energy_sources.fuel_costs.markets', - 'switch_mod.export': 'switch_mod.reporting', - 'switch_mod.local_td': 'switch_mod.transmission.local_td', - 'switch_mod.balancing_areas': 'switch_mod.balancing.operating_reserves.areas', - 'switch_mod.export.dump': 'switch_mod.reporting.dump', - 'switch_mod.generators.hydro_simple': - 'switch_mod.generators.extensions.hydro_simple', - 'switch_mod.generators.hydro_system': - 'switch_mod.generators.extensions.hydro_system', - 'switch_mod.generators.storage': - 'switch_mod.generators.extensions.storage', + "switch_mod.load_zones": "switch_mod.balancing.load_zones", + "switch_mod.fuels": "switch_mod.energy_sources.properties", + "switch_mod.trans_build": "switch_mod.transmission.transport.build", + "switch_mod.trans_dispatch": "switch_mod.transmission.transport.dispatch", + "switch_mod.project.build": "switch_mod.generators.core.build", + "switch_mod.project.discrete_build": "switch_mod.generators.core.gen_discrete_build", + "switch_mod.project.dispatch": "switch_mod.generators.core.dispatch", + "switch_mod.project.no_commit": "switch_mod.generators.core.no_commit", + "switch_mod.project.unitcommit.commit": "switch_mod.generators.core.commit.operate", + "switch_mod.project.unitcommit.fuel_use": "switch_mod.generators.core.commit.fuel_use", + "switch_mod.project.unitcommit.discrete": "switch_mod.generators.core.commit.discrete", + "switch_mod.fuel_cost": "switch_mod.energy_sources.fuel_costs.simple", + "switch_mod.fuel_markets": "switch_mod.energy_sources.fuel_costs.markets", + "switch_mod.export": "switch_mod.reporting", + "switch_mod.local_td": "switch_mod.transmission.local_td", + "switch_mod.balancing_areas": "switch_mod.balancing.operating_reserves.areas", + "switch_mod.export.dump": "switch_mod.reporting.dump", + 
"switch_mod.generators.hydro_simple": "switch_mod.generators.extensions.hydro_simple", + "switch_mod.generators.hydro_system": "switch_mod.generators.extensions.hydro_system", + "switch_mod.generators.storage": "switch_mod.generators.extensions.storage", } -module_prefix = 'switch_mod.' -expand_modules = { # Old module name: [new module names] - 'switch_mod': [ - '### begin core modules ###', - 'switch_mod', - 'switch_mod.timescales', - 'switch_mod.financials', - 'switch_mod.balancing.load_zones', - 'switch_mod.energy_sources.properties', - 'switch_mod.generators.core.build', - 'switch_mod.generators.core.dispatch', - 'switch_mod.reporting', - '### end core modules ###' +module_prefix = "switch_mod." +expand_modules = { # Old module name: [new module names] + "switch_mod": [ + "### begin core modules ###", + "switch_mod", + "switch_mod.timescales", + "switch_mod.financials", + "switch_mod.balancing.load_zones", + "switch_mod.energy_sources.properties", + "switch_mod.generators.core.build", + "switch_mod.generators.core.dispatch", + "switch_mod.reporting", + "### end core modules ###", ], - 'switch_mod.project': [ - 'switch_mod.generators.core.build', - 'switch_mod.generators.core.dispatch' + "switch_mod.project": [ + "switch_mod.generators.core.build", + "switch_mod.generators.core.dispatch", ], - 'switch_mod.project.unitcommit': [ - 'switch_mod.generators.core.commit.operate', - 'switch_mod.generators.core.commit.fuel_use' + "switch_mod.project.unitcommit": [ + "switch_mod.generators.core.commit.operate", + "switch_mod.generators.core.commit.fuel_use", ], } @@ -163,19 +160,20 @@ def rename_column(file_name, old_col_name, new_col_name, optional_file=True): path = os.path.join(inputs_dir, file_name) if optional_file and not os.path.isfile(path): return - df = pandas.read_csv(path, na_values=['.'], sep=r"\s+", index_col=False) + df = pandas.read_csv(path, na_values=["."], sep=r"\s+", index_col=False) df.rename(columns={old_col_name: new_col_name}, inplace=True) - df.to_csv(path, sep='\t', na_rep='.', index=False) + df.to_csv(path, sep="\t", na_rep=".", index=False) - rename_file('modules', 'modules.txt') - modules_path = os.path.join(inputs_dir, 'modules.txt') + rename_file("modules", "modules.txt") + modules_path = os.path.join(inputs_dir, "modules.txt") if not os.path.isfile(modules_path): - modules_path = os.path.join(inputs_dir, '..', 'modules.txt') + modules_path = os.path.join(inputs_dir, "..", "modules.txt") if not os.path.isfile(modules_path): raise RuntimeError( "Unable to find modules or modules.txt file for input directory '{}'. " - "This file should be located in the input directory or its parent." - .format(inputs_dir) + "This file should be located in the input directory or its parent.".format( + inputs_dir + ) ) ### @@ -186,12 +184,14 @@ def rename_column(file_name, old_col_name, new_col_name, optional_file=True): # If the original file didn't specify either switch_mod or the list of # core modules, we need to insert switch_mod. 
- if not('switch_mod' in module_list or - 'timescales' in module_list or - 'switch_mod.timescales' in module_list): - module_list.insert(0, 'switch_mod') + if not ( + "switch_mod" in module_list + or "timescales" in module_list + or "switch_mod.timescales" in module_list + ): + module_list.insert(0, "switch_mod") - new_module_list=[] + new_module_list = [] for module in module_list: # add prefix if appropriate # (standardizes names for further processing) @@ -211,66 +211,73 @@ def rename_column(file_name, old_col_name, new_col_name, optional_file=True): if module not in final_module_list: final_module_list.append(module) - with open(modules_path, 'w') as f: - for module in final_module_list: + with open(modules_path, "w") as f: + for module in final_module_list: f.write(module + "\n") ### # Get load zone economic multipliers (if available), then drop that column. - load_zone_path = os.path.join(inputs_dir, 'load_zones.tab') - load_zone_df = pandas.read_csv(load_zone_path, na_values=['.'], sep=r'\s+') - if 'lz_cost_multipliers' in load_zone_df: - load_zone_df['lz_cost_multipliers'].fillna(1) + load_zone_path = os.path.join(inputs_dir, "load_zones.tab") + load_zone_df = pandas.read_csv(load_zone_path, na_values=["."], sep=r"\s+") + if "lz_cost_multipliers" in load_zone_df: + load_zone_df["lz_cost_multipliers"].fillna(1) else: - load_zone_df['lz_cost_multipliers'] = 1 - load_zone_keep_cols = [c for c in load_zone_df if c != 'lz_cost_multipliers'] - load_zone_df.to_csv(load_zone_path, sep='\t', na_rep='.', - index=False, columns=load_zone_keep_cols) + load_zone_df["lz_cost_multipliers"] = 1 + load_zone_keep_cols = [c for c in load_zone_df if c != "lz_cost_multipliers"] + load_zone_df.to_csv( + load_zone_path, sep="\t", na_rep=".", index=False, columns=load_zone_keep_cols + ) ### # Merge generator_info with project_info - gen_info_path = os.path.join(inputs_dir, 'generator_info.tab') - gen_info_df = pandas.read_csv(gen_info_path, na_values=['.'], sep=r'\s+') + gen_info_path = os.path.join(inputs_dir, "generator_info.tab") + gen_info_df = pandas.read_csv(gen_info_path, na_values=["."], sep=r"\s+") gen_info_col_renames = { - 'generation_technology': 'proj_gen_tech', - 'g_energy_source': 'proj_energy_source', - 'g_max_age': 'proj_max_age', - 'g_scheduled_outage_rate': 'proj_scheduled_outage_rate.default', - 'g_forced_outage_rate': 'proj_forced_outage_rate.default', - 'g_variable_o_m': 'proj_variable_om.default', - 'g_full_load_heat_rate': 'proj_full_load_heat_rate.default', - 'g_is_variable': 'proj_is_variable', - 'g_is_baseload': 'proj_is_baseload', - 'g_min_build_capacity': 'proj_min_build_capacity', - 'g_is_cogen': 'proj_is_cogen', - 'g_storage_efficiency': 'proj_storage_efficiency.default', - 'g_store_to_release_ratio': 'proj_store_to_release_ratio.default', - 'g_unit_size': 'proj_unit_size.default', - 'g_min_load_fraction': 'proj_min_load_fraction.default', - 'g_startup_fuel': 'proj_startup_fuel.default', - 'g_startup_om': 'proj_startup_om.default', - 'g_ccs_capture_efficiency': 'proj_ccs_capture_efficiency.default', - 'g_ccs_energy_load': 'proj_ccs_energy_load.default' + "generation_technology": "proj_gen_tech", + "g_energy_source": "proj_energy_source", + "g_max_age": "proj_max_age", + "g_scheduled_outage_rate": "proj_scheduled_outage_rate.default", + "g_forced_outage_rate": "proj_forced_outage_rate.default", + "g_variable_o_m": "proj_variable_om.default", + "g_full_load_heat_rate": "proj_full_load_heat_rate.default", + "g_is_variable": "proj_is_variable", + "g_is_baseload": 
"proj_is_baseload", + "g_min_build_capacity": "proj_min_build_capacity", + "g_is_cogen": "proj_is_cogen", + "g_storage_efficiency": "proj_storage_efficiency.default", + "g_store_to_release_ratio": "proj_store_to_release_ratio.default", + "g_unit_size": "proj_unit_size.default", + "g_min_load_fraction": "proj_min_load_fraction.default", + "g_startup_fuel": "proj_startup_fuel.default", + "g_startup_om": "proj_startup_om.default", + "g_ccs_capture_efficiency": "proj_ccs_capture_efficiency.default", + "g_ccs_energy_load": "proj_ccs_energy_load.default", } drop_cols = [c for c in gen_info_df if c not in gen_info_col_renames] for c in drop_cols: del gen_info_df[c] gen_info_df.rename(columns=gen_info_col_renames, inplace=True) - proj_info_path = os.path.join(inputs_dir, 'project_info.tab') - proj_info_df = pandas.read_csv(proj_info_path, na_values=['.'], sep=r'\s+') - proj_info_df = pandas.merge(proj_info_df, gen_info_df, on='proj_gen_tech', how='left') + proj_info_path = os.path.join(inputs_dir, "project_info.tab") + proj_info_df = pandas.read_csv(proj_info_path, na_values=["."], sep=r"\s+") + proj_info_df = pandas.merge( + proj_info_df, gen_info_df, on="proj_gen_tech", how="left" + ) # Factor in the load zone cost multipliers proj_info_df = pandas.merge( - load_zone_df[['LOAD_ZONE', 'lz_cost_multipliers']], proj_info_df, - left_on='LOAD_ZONE', right_on='proj_load_zone', how='right') - proj_info_df['proj_variable_om.default'] *= proj_info_df['lz_cost_multipliers'] - for c in ['LOAD_ZONE', 'lz_cost_multipliers']: + load_zone_df[["LOAD_ZONE", "lz_cost_multipliers"]], + proj_info_df, + left_on="LOAD_ZONE", + right_on="proj_load_zone", + how="right", + ) + proj_info_df["proj_variable_om.default"] *= proj_info_df["lz_cost_multipliers"] + for c in ["LOAD_ZONE", "lz_cost_multipliers"]: del proj_info_df[c] # An internal function to apply a column of default values to the actual column def update_cols_with_defaults(df, col_list): for col in col_list: - default_col = col + '.default' + default_col = col + ".default" if default_col not in df: continue if col not in df: @@ -279,151 +286,191 @@ def update_cols_with_defaults(df, col_list): df[col].fillna(df[default_col], inplace=True) del df[default_col] - columns_with_defaults = ['proj_scheduled_outage_rate', 'proj_forced_outage_rate', - 'proj_variable_om', 'proj_full_load_heat_rate', - 'proj_storage_efficiency', 'proj_store_to_release_ratio', - 'proj_unit_size', 'proj_min_load_fraction', - 'proj_startup_fuel', 'proj_startup_om', - 'proj_ccs_capture_efficiency', 'proj_ccs_energy_load'] + columns_with_defaults = [ + "proj_scheduled_outage_rate", + "proj_forced_outage_rate", + "proj_variable_om", + "proj_full_load_heat_rate", + "proj_storage_efficiency", + "proj_store_to_release_ratio", + "proj_unit_size", + "proj_min_load_fraction", + "proj_startup_fuel", + "proj_startup_om", + "proj_ccs_capture_efficiency", + "proj_ccs_energy_load", + ] update_cols_with_defaults(proj_info_df, columns_with_defaults) - proj_info_df.to_csv(proj_info_path, sep='\t', na_rep='.', index=False) + proj_info_df.to_csv(proj_info_path, sep="\t", na_rep=".", index=False) os.remove(gen_info_path) ### # Merge gen_new_build_costs into proj_build_costs # Translate default generator costs into costs for each project - gen_build_path = os.path.join(inputs_dir, 'gen_new_build_costs.tab') + gen_build_path = os.path.join(inputs_dir, "gen_new_build_costs.tab") if os.path.isfile(gen_build_path): - gen_build_df = pandas.read_csv(gen_build_path, na_values=['.'], sep=r'\s+') + gen_build_df = 
pandas.read_csv(gen_build_path, na_values=["."], sep=r"\s+") new_col_names = { - 'generation_technology': 'proj_gen_tech', - 'investment_period': 'build_year', - 'g_overnight_cost': 'proj_overnight_cost.default', - 'g_storage_energy_overnight_cost': 'proj_storage_energy_overnight_cost.default', - 'g_fixed_o_m': 'proj_fixed_om.default'} + "generation_technology": "proj_gen_tech", + "investment_period": "build_year", + "g_overnight_cost": "proj_overnight_cost.default", + "g_storage_energy_overnight_cost": "proj_storage_energy_overnight_cost.default", + "g_fixed_o_m": "proj_fixed_om.default", + } gen_build_df.rename(columns=new_col_names, inplace=True) new_g_builds = pandas.merge( - gen_build_df, proj_info_df[['PROJECT', 'proj_gen_tech', 'proj_load_zone']], - on='proj_gen_tech') + gen_build_df, + proj_info_df[["PROJECT", "proj_gen_tech", "proj_load_zone"]], + on="proj_gen_tech", + ) # Factor in the load zone cost multipliers new_g_builds = pandas.merge( - load_zone_df[['LOAD_ZONE', 'lz_cost_multipliers']], new_g_builds, - left_on='LOAD_ZONE', right_on='proj_load_zone', how='right') - new_g_builds['proj_overnight_cost.default'] *= new_g_builds['lz_cost_multipliers'] - new_g_builds['proj_fixed_om.default'] *= new_g_builds['lz_cost_multipliers'] + load_zone_df[["LOAD_ZONE", "lz_cost_multipliers"]], + new_g_builds, + left_on="LOAD_ZONE", + right_on="proj_load_zone", + how="right", + ) + new_g_builds["proj_overnight_cost.default"] *= new_g_builds[ + "lz_cost_multipliers" + ] + new_g_builds["proj_fixed_om.default"] *= new_g_builds["lz_cost_multipliers"] # Clean up - for drop_col in ['LOAD_ZONE', 'proj_gen_tech', 'proj_load_zone', - 'lz_cost_multipliers']: + for drop_col in [ + "LOAD_ZONE", + "proj_gen_tech", + "proj_load_zone", + "lz_cost_multipliers", + ]: del new_g_builds[drop_col] # Merge the expanded gen_new_build_costs data into proj_build_costs - project_build_path = os.path.join(inputs_dir, 'proj_build_costs.tab') + project_build_path = os.path.join(inputs_dir, "proj_build_costs.tab") if os.path.isfile(project_build_path): - project_build_df = pandas.read_csv(project_build_path, na_values=['.'], sep=r'\s+') - project_build_df = pandas.merge(project_build_df, new_g_builds, - on=['PROJECT', 'build_year'], how='outer') + project_build_df = pandas.read_csv( + project_build_path, na_values=["."], sep=r"\s+" + ) + project_build_df = pandas.merge( + project_build_df, + new_g_builds, + on=["PROJECT", "build_year"], + how="outer", + ) else: # Make sure the order of the columns is ok since merge won't ensuring that. 
- idx_cols = ['PROJECT', 'build_year'] + idx_cols = ["PROJECT", "build_year"] dat_cols = [c for c in new_g_builds if c not in idx_cols] col_order = idx_cols + dat_cols project_build_df = new_g_builds[col_order] - columns_with_defaults = ['proj_overnight_cost', 'proj_fixed_om', - 'proj_storage_energy_overnight_cost'] + columns_with_defaults = [ + "proj_overnight_cost", + "proj_fixed_om", + "proj_storage_energy_overnight_cost", + ] update_cols_with_defaults(project_build_df, columns_with_defaults) - project_build_df.to_csv(project_build_path, sep='\t', na_rep='.', index=False) + project_build_df.to_csv(project_build_path, sep="\t", na_rep=".", index=False) os.remove(gen_build_path) # Merge gen_inc_heat_rates.tab into proj_inc_heat_rates.tab - g_hr_path = os.path.join(inputs_dir, 'gen_inc_heat_rates.tab') + g_hr_path = os.path.join(inputs_dir, "gen_inc_heat_rates.tab") if os.path.isfile(g_hr_path): - g_hr_df = pandas.read_csv(g_hr_path, na_values=['.'], sep=r'\s+') - proj_hr_default = pandas.merge(g_hr_df, proj_info_df[['PROJECT', 'proj_gen_tech']], - left_on='generation_technology', - right_on='proj_gen_tech') + g_hr_df = pandas.read_csv(g_hr_path, na_values=["."], sep=r"\s+") + proj_hr_default = pandas.merge( + g_hr_df, + proj_info_df[["PROJECT", "proj_gen_tech"]], + left_on="generation_technology", + right_on="proj_gen_tech", + ) col_renames = { - 'PROJECT': 'project', - 'power_start_mw': 'power_start_mw.default', - 'power_end_mw': 'power_end_mw.default', - 'incremental_heat_rate_mbtu_per_mwhr': 'incremental_heat_rate_mbtu_per_mwhr.default', - 'fuel_use_rate_mmbtu_per_h': 'fuel_use_rate_mmbtu_per_h.default' + "PROJECT": "project", + "power_start_mw": "power_start_mw.default", + "power_end_mw": "power_end_mw.default", + "incremental_heat_rate_mbtu_per_mwhr": "incremental_heat_rate_mbtu_per_mwhr.default", + "fuel_use_rate_mmbtu_per_h": "fuel_use_rate_mmbtu_per_h.default", } proj_hr_default.rename(columns=col_renames, inplace=True) - proj_hr_path = os.path.join(inputs_dir, 'proj_inc_heat_rates.tab') + proj_hr_path = os.path.join(inputs_dir, "proj_inc_heat_rates.tab") if os.path.isfile(proj_hr_path): - proj_hr_df = pandas.read_csv(proj_hr_path, na_values=['.'], sep=r'\s+') - proj_hr_df = pandas.merge(proj_hr_df, proj_hr_default, on='proj_gen_tech', how='left') + proj_hr_df = pandas.read_csv(proj_hr_path, na_values=["."], sep=r"\s+") + proj_hr_df = pandas.merge( + proj_hr_df, proj_hr_default, on="proj_gen_tech", how="left" + ) else: proj_hr_df = proj_hr_default - columns_with_defaults = ['power_start_mw', 'power_end_mw', - 'incremental_heat_rate_mbtu_per_mwhr', - 'fuel_use_rate_mmbtu_per_h'] + columns_with_defaults = [ + "power_start_mw", + "power_end_mw", + "incremental_heat_rate_mbtu_per_mwhr", + "fuel_use_rate_mmbtu_per_h", + ] update_cols_with_defaults(proj_hr_df, columns_with_defaults) - cols = ['project', 'power_start_mw', 'power_end_mw', - 'incremental_heat_rate_mbtu_per_mwhr', 'fuel_use_rate_mmbtu_per_h'] - proj_hr_df.to_csv(proj_hr_path, sep='\t', na_rep='.', index=False, columns=cols) + cols = [ + "project", + "power_start_mw", + "power_end_mw", + "incremental_heat_rate_mbtu_per_mwhr", + "fuel_use_rate_mmbtu_per_h", + ] + proj_hr_df.to_csv(proj_hr_path, sep="\t", na_rep=".", index=False, columns=cols) os.remove(g_hr_path) # Done with restructuring. Now apply component renaming. 
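The update_cols_with_defaults helper above is the crux of this b0→b1 restructuring: each project-level column is paired with a technology-level "<column>.default" column, project-specific values win, and blanks fall back to the default before the extra column is dropped. A small pandas sketch with invented numbers:

    import pandas as pd

    df = pd.DataFrame({
        "proj_variable_om": [3.0, None],         # project-specific override, or blank
        "proj_variable_om.default": [2.5, 2.5],  # technology-wide default
    })
    df["proj_variable_om"] = df["proj_variable_om"].fillna(df["proj_variable_om.default"])
    del df["proj_variable_om.default"]
    # proj_variable_om is now [3.0, 2.5]: overrides win, defaults fill the gaps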
old_new_file_names = { - 'proj_existing_builds.tab':'gen_build_predetermined.tab', - 'project_info.tab':'generation_projects_info.tab', - 'proj_build_costs.tab':'gen_build_costs.tab', - 'proj_inc_heat_rates.tab':'gen_inc_heat_rates.tab', - 'hydro_projects.tab':'hydro_generation_projects.tab', - 'lz_peak_loads.tab':'zone_coincident_peak_demand.tab', - 'lz_to_regional_fuel_market.tab':'zone_to_regional_fuel_market.tab' + "proj_existing_builds.tab": "gen_build_predetermined.tab", + "project_info.tab": "generation_projects_info.tab", + "proj_build_costs.tab": "gen_build_costs.tab", + "proj_inc_heat_rates.tab": "gen_inc_heat_rates.tab", + "hydro_projects.tab": "hydro_generation_projects.tab", + "lz_peak_loads.tab": "zone_coincident_peak_demand.tab", + "lz_to_regional_fuel_market.tab": "zone_to_regional_fuel_market.tab", } for old, new in old_new_file_names.items(): rename_file(old, new) old_new_column_names_in_file = { - 'gen_build_predetermined.tab':[ - ('proj_existing_cap','gen_predetermined_cap') + "gen_build_predetermined.tab": [("proj_existing_cap", "gen_predetermined_cap")], + "gen_build_costs.tab": [ + ("proj_overnight_cost", "gen_overnight_cost"), + ("proj_fixed_om", "gen_fixed_om"), + ("proj_storage_energy_overnight_cost", "gen_storage_energy_overnight_cost"), ], - 'gen_build_costs.tab':[ - ('proj_overnight_cost','gen_overnight_cost'), - ('proj_fixed_om','gen_fixed_om'), - ('proj_storage_energy_overnight_cost','gen_storage_energy_overnight_cost') + "generation_projects_info.tab": [ + ("proj_dbid", "gen_dbid"), + ("proj_gen_tech", "gen_tech"), + ("proj_load_zone", "gen_load_zone"), + ("proj_connect_cost_per_mw", "gen_connect_cost_per_mw"), + ("proj_capacity_limit_mw", "gen_capacity_limit_mw"), + ("proj_variable_om", "gen_variable_om"), + ("proj_max_age", "gen_max_age"), + ("proj_min_build_capacity", "gen_min_build_capacity"), + ("proj_scheduled_outage_rate", "gen_scheduled_outage_rate"), + ("proj_forced_outage_rate", "gen_forced_outage_rate"), + ("proj_is_variable", "gen_is_variable"), + ("proj_is_baseload", "gen_is_baseload"), + ("proj_is_cogen", "gen_is_cogen"), + ("proj_energy_source", "gen_energy_source"), + ("proj_full_load_heat_rate", "gen_full_load_heat_rate"), + ("proj_storage_efficiency", "gen_storage_efficiency"), + ("proj_min_load_fraction", "gen_min_load_fraction"), + ("proj_startup_fuel", "gen_startup_fuel"), + ("proj_startup_om", "gen_startup_om"), + ("proj_min_uptime", "gen_min_uptime"), + ("proj_min_downtime", "gen_min_downtime"), + ("proj_min_commit_fraction", "gen_min_commit_fraction"), + ("proj_max_commit_fraction", "gen_max_commit_fraction"), + ("proj_min_load_fraction_TP", "gen_min_load_fraction_TP"), + ("proj_unit_size", "gen_unit_size"), ], - 'generation_projects_info.tab':[ - ('proj_dbid','gen_dbid'),('proj_gen_tech','gen_tech'), - ('proj_load_zone','gen_load_zone'), - ('proj_connect_cost_per_mw','gen_connect_cost_per_mw'), - ('proj_capacity_limit_mw','gen_capacity_limit_mw'), - ('proj_variable_om','gen_variable_om'), - ('proj_max_age','gen_max_age'), - ('proj_min_build_capacity','gen_min_build_capacity'), - ('proj_scheduled_outage_rate','gen_scheduled_outage_rate'), - ('proj_forced_outage_rate','gen_forced_outage_rate'), - ('proj_is_variable','gen_is_variable'), - ('proj_is_baseload','gen_is_baseload'), - ('proj_is_cogen','gen_is_cogen'), - ('proj_energy_source','gen_energy_source'), - ('proj_full_load_heat_rate','gen_full_load_heat_rate'), - ('proj_storage_efficiency','gen_storage_efficiency'), - ('proj_min_load_fraction','gen_min_load_fraction'), - 
('proj_startup_fuel','gen_startup_fuel'), - ('proj_startup_om','gen_startup_om'), - ('proj_min_uptime','gen_min_uptime'), - ('proj_min_downtime','gen_min_downtime'), - ('proj_min_commit_fraction','gen_min_commit_fraction'), - ('proj_max_commit_fraction','gen_max_commit_fraction'), - ('proj_min_load_fraction_TP','gen_min_load_fraction_TP'), - ('proj_unit_size','gen_unit_size') + "loads.tab": [("lz_demand_mw", "zone_demand_mw")], + "zone_coincident_peak_demand.tab": [ + ("peak_demand_mw", "zone_expected_coincident_peak_demand") ], - 'loads.tab':[ - ('lz_demand_mw','zone_demand_mw') + "variable_capacity_factors.tab": [ + ("proj_max_capacity_factor", "gen_max_capacity_factor") ], - 'zone_coincident_peak_demand.tab':[ - ('peak_demand_mw','zone_expected_coincident_peak_demand') - ], - 'variable_capacity_factors.tab':[ - ('proj_max_capacity_factor','gen_max_capacity_factor') - ] } for fname, old_new_pairs in old_new_column_names_in_file.items(): diff --git a/switch_model/upgrade/upgrade_2_0_0b2.py b/switch_model/upgrade/upgrade_2_0_0b2.py index cbcb5b5e7..04f46c154 100644 --- a/switch_model/upgrade/upgrade_2_0_0b2.py +++ b/switch_model/upgrade/upgrade_2_0_0b2.py @@ -1,4 +1,4 @@ -# Copyright (c) 2015-2019 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. """ @@ -11,8 +11,9 @@ import os import switch_model.upgrade -upgrades_from = '2.0.0b1' -upgrades_to = '2.0.0b2' +upgrades_from = "2.0.0b1" +upgrades_to = "2.0.0b2" + def upgrade_input_dir(inputs_dir): """ @@ -21,14 +22,15 @@ def upgrade_input_dir(inputs_dir): """ # Find modules.txt; it should be either in the inputs directory or in its # parent directory. - modules_path = os.path.join(inputs_dir, 'modules.txt') + modules_path = os.path.join(inputs_dir, "modules.txt") if not os.path.isfile(modules_path): - modules_path = os.path.join(inputs_dir, '..', 'modules.txt') + modules_path = os.path.join(inputs_dir, "..", "modules.txt") if not os.path.isfile(modules_path): raise RuntimeError( "Unable to find modules or modules.txt file for input directory '{}'. " - "This file should be located in the input directory or its parent." - .format(inputs_dir) + "This file should be located in the input directory or its parent.".format( + inputs_dir + ) ) # Replace switch_mod with switch_model in modules.txt @@ -41,13 +43,14 @@ def upgrade_input_dir(inputs_dir): with open(modules_path) as f: module_list = [line.strip() for line in f.read().splitlines()] final_module_list = [ - 'switch_model' + line[10:] if line.startswith('switch_mod.') or line == 'switch_mod' + "switch_model" + line[10:] + if line.startswith("switch_mod.") or line == "switch_mod" else line for line in module_list ] - with open(modules_path, 'w') as f: - for module in final_module_list: + with open(modules_path, "w") as f: + for module in final_module_list: f.write(module + "\n") # Write a new version text file. diff --git a/switch_model/upgrade/upgrade_2_0_0b4.py b/switch_model/upgrade/upgrade_2_0_0b4.py index 6cf0185e2..1dc79d202 100644 --- a/switch_model/upgrade/upgrade_2_0_0b4.py +++ b/switch_model/upgrade/upgrade_2_0_0b4.py @@ -1,4 +1,4 @@ -# Copyright (c) 2015-2019 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. 
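The b1→b2 upgrade just above is essentially one rename: the switch_mod package became switch_model. The list comprehension rewrites only lines that really are module identifiers, so comments and marker lines pass through untouched. Illustrated with a throwaway list (len("switch_mod") is 10, hence line[10:]):

    module_list = ["switch_mod", "switch_mod.timescales", "### begin core modules ###"]
    final_module_list = [
        "switch_model" + line[10:]
        if line.startswith("switch_mod.") or line == "switch_mod"
        else line
        for line in module_list
    ]
    assert final_module_list == [
        "switch_model", "switch_model.timescales", "### begin core modules ###"
    ]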
""" @@ -11,8 +11,9 @@ import pandas import switch_model.upgrade -upgrades_from = '2.0.0b2' -upgrades_to = '2.0.0b4' +upgrades_from = "2.0.0b2" +upgrades_to = "2.0.0b4" + def upgrade_input_dir(inputs_dir): """ @@ -30,12 +31,12 @@ def rename_column(file_name, old_col_name, new_col_name, optional_file=True): path = os.path.join(inputs_dir, file_name) if optional_file and not os.path.isfile(path): return - df = pandas.read_csv(path, na_values=['.'], sep=r"\s+", index_col=False) + df = pandas.read_csv(path, na_values=["."], sep=r"\s+", index_col=False) df.rename(columns={old_col_name: new_col_name}, inplace=True) - df.to_csv(path, sep='\t', na_rep='.', index=False) + df.to_csv(path, sep="\t", na_rep=".", index=False) old_new_column_names_in_file = { - 'gen_inc_heat_rates.tab': [('project', 'GENERATION_PROJECT')] + "gen_inc_heat_rates.tab": [("project", "GENERATION_PROJECT")] } for fname, old_new_pairs in old_new_column_names_in_file.items(): @@ -43,14 +44,16 @@ def rename_column(file_name, old_col_name, new_col_name, optional_file=True): rename_column(fname, old_col_name=old, new_col_name=new) # merge trans_optional_params.tab with transmission_lines.tab - trans_lines_path = os.path.join(inputs_dir, 'transmission_lines.tab') - trans_opt_path = os.path.join(inputs_dir, 'trans_optional_params.tab') + trans_lines_path = os.path.join(inputs_dir, "transmission_lines.tab") + trans_opt_path = os.path.join(inputs_dir, "trans_optional_params.tab") if os.path.isfile(trans_lines_path) and os.path.isfile(trans_lines_path): - trans_lines = pandas.read_csv(trans_lines_path, na_values=['.'], sep=r'\s+') + trans_lines = pandas.read_csv(trans_lines_path, na_values=["."], sep=r"\s+") if os.path.isfile(trans_opt_path): - trans_opt = pandas.read_csv(trans_opt_path, na_values=['.'], sep=r'\s+') - trans_lines = trans_lines.merge(trans_opt, on='TRANSMISSION_LINE', how='left') - trans_lines.to_csv(trans_lines_path, sep='\t', na_rep='.', index=False) + trans_opt = pandas.read_csv(trans_opt_path, na_values=["."], sep=r"\s+") + trans_lines = trans_lines.merge( + trans_opt, on="TRANSMISSION_LINE", how="left" + ) + trans_lines.to_csv(trans_lines_path, sep="\t", na_rep=".", index=False) if os.path.isfile(trans_opt_path): os.remove(trans_opt_path) diff --git a/switch_model/upgrade/upgrade_2_0_1.py b/switch_model/upgrade/upgrade_2_0_1.py index 1c1b33993..904405392 100644 --- a/switch_model/upgrade/upgrade_2_0_1.py +++ b/switch_model/upgrade/upgrade_2_0_1.py @@ -1,4 +1,4 @@ -# Copyright (c) 2015-2019 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. 
""" @@ -11,37 +11,37 @@ import pandas import switch_model.upgrade -upgrades_from = '2.0.0b4' -upgrades_to = '2.0.1' +upgrades_from = "2.0.0b4" +upgrades_to = "2.0.1" # note: we could keep switch_model.hawaii.reserves active, but then we would need special code to switch # the model to the main reserves module if and only if they are using the iterative demand response system # which seems unnecessarily complicated replace_modules = { - 'switch_model.hawaii.demand_response': - ['switch_model.balancing.demand_response.iterative'], - 'switch_model.hawaii.r_demand_system': - ['switch_model.balancing.demand_response.iterative.r_demand_system'], - 'switch_model.hawaii.reserves': [ - 'switch_model.balancing.operating_reserves.areas', - 'switch_model.balancing.operating_reserves.spinning_reserves', - ] + "switch_model.hawaii.demand_response": [ + "switch_model.balancing.demand_response.iterative" + ], + "switch_model.hawaii.r_demand_system": [ + "switch_model.balancing.demand_response.iterative.r_demand_system" + ], + "switch_model.hawaii.reserves": [ + "switch_model.balancing.operating_reserves.areas", + "switch_model.balancing.operating_reserves.spinning_reserves", + ], } module_messages = { # description of significant changes to particular modules (other than moving) # old_module: message - 'switch_model.hawaii.r_demand_system': - 'The switch_model.hawaii.r_demand_system module has been moved. Please update ' - 'the --dr-demand-module flag to point to the new location.', - 'switch_model.hawaii.demand_response': - 'The switch_model.hawaii.demand_response module has been moved. Please update ' - 'iterate.txt to refer to the new location.', - 'switch_model.hawaii.switch_patch': - 'The switch_model.hawaii.switch_patch module no longer patches ' - 'the cplex solver to generate dual values for mixed-integer programs. ' - 'Use the new --retrieve-cplex-mip-duals flag if you need this behavior.' + "switch_model.hawaii.r_demand_system": "The switch_model.hawaii.r_demand_system module has been moved. Please update " + "the --dr-demand-module flag to point to the new location.", + "switch_model.hawaii.demand_response": "The switch_model.hawaii.demand_response module has been moved. Please update " + "iterate.txt to refer to the new location.", + "switch_model.hawaii.switch_patch": "The switch_model.hawaii.switch_patch module no longer patches " + "the cplex solver to generate dual values for mixed-integer programs. " + "Use the new --retrieve-cplex-mip-duals flag if you need this behavior.", } + def upgrade_input_dir(inputs_dir): """ Upgrade the input directory. 
@@ -60,32 +60,36 @@ def rename_file(old_name, new_name, optional_file=True): return shutil.move(old_path, new_path) + def rename_column(file_name, old_col_name, new_col_name, optional_file=True): path = os.path.join(inputs_dir, file_name) if optional_file and not os.path.isfile(path): return - df = pandas.read_csv(path, na_values=['.'], sep=r"\s+", index_col=False) + df = pandas.read_csv(path, na_values=["."], sep=r"\s+", index_col=False) df.rename(columns={old_col_name: new_col_name}, inplace=True) - df.to_csv(path, sep='\t', na_rep='.', index=False) + df.to_csv(path, sep="\t", na_rep=".", index=False) + def item_list(items): """Generate normal-text version of list of items, with commas and "and" as needed.""" - return ' and '.join(', '.join(items).rsplit(', ', 1)) + return " and ".join(", ".join(items).rsplit(", ", 1)) + def update_modules(inputs_dir): """Rename modules in the module list if needed (list is sought in standard locations) and return list of alerts for user.""" - modules_path = os.path.join(inputs_dir, 'modules.txt') + modules_path = os.path.join(inputs_dir, "modules.txt") if not os.path.isfile(modules_path): - modules_path = os.path.join(inputs_dir, '..', 'modules.txt') + modules_path = os.path.join(inputs_dir, "..", "modules.txt") if not os.path.isfile(modules_path): raise RuntimeError( "Unable to find modules or modules.txt file for input directory '{}'. " - "This file should be located in the input directory or its parent." - .format(inputs_dir) + "This file should be located in the input directory or its parent.".format( + inputs_dir + ) ) - modules_path = os.path.normpath(modules_path) # tidy up for display later + modules_path = os.path.normpath(modules_path) # tidy up for display later # Upgrade module listings # Each line of the original file is either a module identifier or a comment @@ -93,13 +97,14 @@ def update_modules(inputs_dir): old_module_list = [line.strip() for line in f.read().splitlines()] # rename modules as needed - new_module_list=[] + new_module_list = [] for module in old_module_list: try: new_modules = replace_modules[module] - print ( - "Module {old} has been replaced by {new} in {file}." - .format(old=module, new=item_list(new_modules), file=modules_path) + print( + "Module {old} has been replaced by {new} in {file}.".format( + old=module, new=item_list(new_modules), file=modules_path + ) ) except KeyError: new_modules = [module] @@ -112,10 +117,10 @@ def update_modules(inputs_dir): # components defined by other modules, but # switch_model.balancing.operating_reserves.spinning_reserves should # load early so other modules can register reserves with it. - if 'switch_model.hawaii.reserves' in old_module_list: - new_spin = 'switch_model.balancing.operating_reserves.areas' + if "switch_model.hawaii.reserves" in old_module_list: + new_spin = "switch_model.balancing.operating_reserves.areas" try: - insert_pos = new_module_list.index('switch_model.balancing.load_zones') + 1 + insert_pos = new_module_list.index("switch_model.balancing.load_zones") + 1 if insert_pos < new_module_list.index(new_spin): new_module_list.remove(new_spin) new_module_list.insert(insert_pos, new_spin) @@ -126,17 +131,16 @@ def update_modules(inputs_dir): # ) except ValueError: # couldn't find the location to insert spinning reserves module - print ( - '{} module should be moved early in the module list, ' - 'before any modules that define reserve elements.' 
- .format(new_spin) + print( + "{} module should be moved early in the module list, " + "before any modules that define reserve elements.".format(new_spin) ) - #import pdb; pdb.set_trace() + # import pdb; pdb.set_trace() # write new modules list - with open(modules_path, 'w') as f: - for module in new_module_list: + with open(modules_path, "w") as f: + for module in new_module_list: f.write(module + "\n") # report any significant changes in the previously active modules diff --git a/switch_model/upgrade/upgrade_2_0_4.py b/switch_model/upgrade/upgrade_2_0_4.py index 72041af34..0dd8e1b9e 100644 --- a/switch_model/upgrade/upgrade_2_0_4.py +++ b/switch_model/upgrade/upgrade_2_0_4.py @@ -1,4 +1,4 @@ -# Copyright (c) 2015-2019 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. """ @@ -11,8 +11,8 @@ import pandas import switch_model.upgrade -upgrades_from = '2.0.1' -upgrades_to = '2.0.4' +upgrades_from = "2.0.1" +upgrades_to = "2.0.4" replace_modules = { # no renames in this version @@ -21,26 +21,23 @@ module_messages = { # description of significant changes to particular modules (other than moving) # old_module: message - 'switch_model.transmission.local_td': - 'Switch 2.0.4 makes two changes to the local_td module. ' - '1. The carrying cost of pre-existing local transmission and ' - 'distribution is now included in the total system costs. ' - '2. The legacy transmission is no longer reported in the ' - 'BuildLocalTD.tab output file.', - 'switch_model.reporting': - 'Output files (*.tab) now use native line endings instead of ' - 'always using Unix-style line endings. On Windows systems, these ' - 'files will now use "\\r\\n" instead of "\\n".', - 'switch_model.reporting.basic_exports': - 'Output files (*.csv) now use native line endings instead of ' - 'always using Unix-style line endings. On Windows systems, these ' - 'files will now use "\\r\\n" instead of "\\n".', - 'switch_model.hawaii.save_results': - 'Output files (*.tsv) now use native line endings instead of ' - 'always using Unix-style line endings. On Windows systems, these ' - 'files will now use "\\r\\n" instead of "\\n".', + "switch_model.transmission.local_td": "Switch 2.0.4 makes two changes to the local_td module. " + "1. The carrying cost of pre-existing local transmission and " + "distribution is now included in the total system costs. " + "2. The legacy transmission is no longer reported in the " + "BuildLocalTD.tab output file.", + "switch_model.reporting": "Output files (*.tab) now use native line endings instead of " + "always using Unix-style line endings. On Windows systems, these " + 'files will now use "\\r\\n" instead of "\\n".', + "switch_model.reporting.basic_exports": "Output files (*.csv) now use native line endings instead of " + "always using Unix-style line endings. On Windows systems, these " + 'files will now use "\\r\\n" instead of "\\n".', + "switch_model.hawaii.save_results": "Output files (*.tsv) now use native line endings instead of " + "always using Unix-style line endings. On Windows systems, these " + 'files will now use "\\r\\n" instead of "\\n".', } + def upgrade_input_dir(inputs_dir): """ Upgrade the input directory. 
@@ -59,35 +56,37 @@ def rename_file(old_name, new_name, optional_file=True): return shutil.move(old_path, new_path) + def rename_column(file_name, old_col_name, new_col_name, optional_file=True): path = os.path.join(inputs_dir, file_name) if optional_file and not os.path.isfile(path): return - df = pandas.read_csv(path, na_values=['.'], sep=r"\s+", index_col=False) + df = pandas.read_csv(path, na_values=["."], sep=r"\s+", index_col=False) df.rename(columns={old_col_name: new_col_name}, inplace=True) - df.to_csv(path, sep='\t', na_rep='.', index=False) + df.to_csv(path, sep="\t", na_rep=".", index=False) + def item_list(items): """Generate normal-text version of list of items, with commas and "and" as needed.""" - return ' and '.join(', '.join(items).rsplit(', ', 1)) + return " and ".join(", ".join(items).rsplit(", ", 1)) + def update_modules(inputs_dir): """Rename modules in the module list if needed (list is sought in standard locations) and return list of alerts for user.""" - modules_path = os.path.join(inputs_dir, 'modules.txt') + modules_path = os.path.join(inputs_dir, "modules.txt") if not os.path.isfile(modules_path): - modules_path = os.path.join(inputs_dir, '..', 'modules.txt') + modules_path = os.path.join(inputs_dir, "..", "modules.txt") if not os.path.isfile(modules_path): - modules_path = 'modules.txt' + modules_path = "modules.txt" if not os.path.isfile(modules_path): raise RuntimeError( "Unable to find modules or modules.txt file for input directory '{}'. " "This file should be located in the input directory, its parent, or " - "the current working directory." - .format(inputs_dir) + "the current working directory.".format(inputs_dir) ) - modules_path = os.path.normpath(modules_path) # tidy up for display later + modules_path = os.path.normpath(modules_path) # tidy up for display later # Upgrade module listings # Each line of the original file is either a module identifier or a comment @@ -95,13 +94,14 @@ def update_modules(inputs_dir): old_module_list = [line.strip() for line in f.read().splitlines()] # rename modules as needed - new_module_list=[] + new_module_list = [] for module in old_module_list: try: new_modules = replace_modules[module] switch_model.upgrade.print_verbose( - "Module {old} has been replaced by {new} in {file}." - .format(old=module, new=item_list(new_modules), file=modules_path) + "Module {old} has been replaced by {new} in {file}.".format( + old=module, new=item_list(new_modules), file=modules_path + ) ) except KeyError: new_modules = [module] @@ -109,8 +109,8 @@ def update_modules(inputs_dir): if new_module_list != old_module_list: # write new modules list - with open(modules_path, 'w') as f: - for module in new_module_list: + with open(modules_path, "w") as f: + for module in new_module_list: f.write(module + "\n") # report any significant changes in the previously active modules diff --git a/switch_model/upgrade/upgrade_2_0_5.py b/switch_model/upgrade/upgrade_2_0_5.py index dd48829ef..8e9003c9c 100644 --- a/switch_model/upgrade/upgrade_2_0_5.py +++ b/switch_model/upgrade/upgrade_2_0_5.py @@ -1,4 +1,4 @@ -# Copyright (c) 2015-2019 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. 
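The item_list helper that reappears in each of these upgrade scripts (2.0.1, 2.0.4, 2.0.5, and 2.0.6) just renders a Python list as English prose for the "Module X has been replaced by ..." messages. A usage sketch:

    def item_list(items):
        # join with commas, then swap the final comma for "and"
        return " and ".join(", ".join(items).rsplit(", ", 1))

    assert item_list(["areas"]) == "areas"
    assert item_list(["areas", "spinning_reserves"]) == "areas and spinning_reserves"
    assert item_list(["a", "b", "c"]) == "a, b and c"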
""" @@ -13,8 +13,8 @@ from pyomo.environ import DataPortal -upgrades_from = '2.0.4' -upgrades_to = '2.0.5' +upgrades_from = "2.0.4" +upgrades_to = "2.0.5" replace_modules = { # no renames in this version @@ -23,11 +23,11 @@ module_messages = { # description of significant changes to particular modules (other than moving) # old_module: message - 'switch_model': - 'Beginning with Switch 2.0.5, all inputs must be in .csv files and all ' - 'outputs will be written to .csv files.', + "switch_model": "Beginning with Switch 2.0.5, all inputs must be in .csv files and all " + "outputs will be written to .csv files." } + def upgrade_input_dir(inputs_dir): """ Upgrade the input directory. @@ -40,20 +40,25 @@ def upgrade_input_dir(inputs_dir): # Convert all .tab input files to .csv (maybe it should # work with a list of specific files instead?) - for old_path in glob.glob(os.path.join(inputs_dir, '*.tab')): - new_path = old_path[:-4] + '.csv' + for old_path in glob.glob(os.path.join(inputs_dir, "*.tab")): + new_path = old_path[:-4] + ".csv" convert_tab_to_csv(old_path, new_path) # Convert certain .tab input files to .csv # These are all simple ampl/pyomo files with only un-indexed parameters for f in [ - 'financials.dat', 'trans_params.dat', 'spillage_penalty.dat', - 'spinning_reserve_params.dat', 'lost_load_cost.dat', 'hydrogen.dat' - ]: + "financials.dat", + "trans_params.dat", + "spillage_penalty.dat", + "spinning_reserve_params.dat", + "lost_load_cost.dat", + "hydrogen.dat", + ]: old_path = os.path.join(inputs_dir, f) - new_path = old_path[:-4] + '.csv' + new_path = old_path[:-4] + ".csv" if os.path.exists(old_path): convert_dat_to_csv(old_path, new_path) + def convert_tab_to_csv(old_path, new_path): # Note: we assume the old file is a simple ampl/pyomo dat file, with only # non-indexed parameters (that is the case for all the ones listed above) @@ -61,23 +66,22 @@ def convert_tab_to_csv(old_path, new_path): # Allow any whitespace as a delimiter because that is how ampl/pyomo .tab # files work, and some of our older examples use spaces instead of tabs # (e.g., tests/upgrade_dat/copperplate1/inputs/variable_capacity_factors.tab). - df = pandas.read_csv(old_path, na_values=['.'], sep=r'\s+') - df.to_csv(new_path, sep=',', na_rep='.', index=False) + df = pandas.read_csv(old_path, na_values=["."], sep=r"\s+") + df.to_csv(new_path, sep=",", na_rep=".", index=False) os.remove(old_path) except Exception as e: - print( - '\nERROR converting {} to {}:\n{}' - .format(old_path, new_path, e.message) - ) + print("\nERROR converting {} to {}:\n{}".format(old_path, new_path, e.message)) raise + def convert_dat_to_csv(old_path, new_path): # define a dummy "model" where every "parameter" reports a dimension of 0. # otherwise Pyomo assumes they have dim=1 and looks for index values. 
- class DummyModel(): + class DummyModel: def __getattr__(self, pname): return DummyParam() - class DummyParam(): + + class DummyParam: def dim(self): return 0 @@ -86,15 +90,13 @@ def dim(self): data.load(filename=old_path) # this happens to be in a pandas-friendly format df = pandas.DataFrame(data.data()) - df.to_csv(new_path, sep=',', na_rep='.', index=False) + df.to_csv(new_path, sep=",", na_rep=".", index=False) os.remove(old_path) except Exception as e: - print( - '\nERROR converting {} to {}:\n{}' - .format(old_path, new_path, e.message) - ) + print("\nERROR converting {} to {}:\n{}".format(old_path, new_path, e)) raise + # These functions are not used in the 2.0.5 upgrade, but kept here for the future def rename_file(old_name, new_name, optional_file=True): old_path = os.path.join(inputs_dir, old_name) @@ -103,35 +105,37 @@ def rename_file(old_name, new_name, optional_file=True): return shutil.move(old_path, new_path) + def rename_column(file_name, old_col_name, new_col_name, optional_file=True): path = os.path.join(inputs_dir, file_name) if optional_file and not os.path.isfile(path): return - df = pandas.read_csv(path, na_values=['.'], sep=',') # for 2.0.5+ + df = pandas.read_csv(path, na_values=["."], sep=",") # for 2.0.5+ df.rename(columns={old_col_name: new_col_name}, inplace=True) - df.to_csv(path, sep=',', na_rep='.', index=False) + df.to_csv(path, sep=",", na_rep=".", index=False) + def item_list(items): """Generate normal-text version of list of items, with commas and "and" as needed.""" - return ' and '.join(', '.join(items).rsplit(', ', 1)) + return " and ".join(", ".join(items).rsplit(", ", 1)) + def update_modules(inputs_dir): """Rename modules in the module list if needed (list is sought in standard locations) and return list of alerts for user.""" - modules_path = os.path.join(inputs_dir, 'modules.txt') + modules_path = os.path.join(inputs_dir, "modules.txt") if not os.path.isfile(modules_path): - modules_path = os.path.join(inputs_dir, '..', 'modules.txt') + modules_path = os.path.join(inputs_dir, "..", "modules.txt") if not os.path.isfile(modules_path): - modules_path = 'modules.txt' + modules_path = "modules.txt" if not os.path.isfile(modules_path): raise RuntimeError( "Unable to find modules or modules.txt file for input directory '{}'. " "This file should be located in the input directory, its parent, or " - "the current working directory." - .format(inputs_dir) + "the current working directory.".format(inputs_dir) ) - modules_path = os.path.normpath(modules_path) # tidy up for display later + modules_path = os.path.normpath(modules_path) # tidy up for display later # Upgrade module listings # Each line of the original file is either a module identifier or a comment @@ -139,13 +143,14 @@ old_module_list = [line.strip() for line in f.read().splitlines()] # rename modules as needed - new_module_list=[] + new_module_list = [] for module in old_module_list: try: new_modules = replace_modules[module] switch_model.upgrade.print_verbose( - "Module {old} has been replaced by {new} in {file}."
- .format(old=module, new=item_list(new_modules), file=modules_path) + "Module {old} has been replaced by {new} in {file}.".format( + old=module, new=item_list(new_modules), file=modules_path + ) ) except KeyError: new_modules = [module] @@ -153,8 +158,8 @@ def update_modules(inputs_dir): if new_module_list != old_module_list: # write new modules list - with open(modules_path, 'w') as f: - for module in new_module_list: + with open(modules_path, "w") as f: + for module in new_module_list: f.write(module + "\n") # report any significant changes in the previously active modules diff --git a/switch_model/upgrade/upgrade_2_0_6.py b/switch_model/upgrade/upgrade_2_0_6.py new file mode 100644 index 000000000..9497d7911 --- /dev/null +++ b/switch_model/upgrade/upgrade_2_0_6.py @@ -0,0 +1,160 @@ +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. +# Licensed under the Apache License, Version 2.0, which is in the LICENSE file. +import os, shutil, argparse, glob +import pandas as pd +import switch_model.upgrade +from pyomo.environ import DataPortal + +upgrades_from = "2.0.5" +upgrades_to = "2.0.6" + +replace_modules = { + # modules to be replaced in the module list + # old_module: [new_module1, new_module2, ...], + "switch_model.hawaii.psip_2016_12": ["switch_model.hawaii.heco_outlook_2020_06"], + "switch_model.hawaii.kalaeloa": ["switch_model.hawaii.oahu_plants"], +} + +module_messages = { + # description of significant changes to particular modules other than + # moving/renaming + # old_module: message + "switch_model.generators.core.build": "Beginning with Switch 2.0.6, gen_multiple_fuels.dat should " + "be replaced with gen_multiple_fuels.csv. The .csv file should have " + "two columns: GENERATION_PROJECT and fuel. It should have one row for " + "each allowed fuel for each multi-fuel generator." +} + + +def upgrade_input_dir(inputs_dir): + """ + Upgrade the input directory. + """ + # Write a new version text file. We do this early so that if the update + # fails and then the user tries again it won't try to upgrade a second time, + # overwriting their backup. 
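The convert_tab_to_csv() hunk above is the core of the 2.0.5 file-format migration: ampl/pyomo .tab inputs are whitespace-delimited and use "." as the missing-value marker, and each one is rewritten as a comma-delimited .csv with the same marker. A minimal, self-contained sketch of that round trip, using made-up rows (the file name echoes the example cited in the comments; the column names are hypothetical):

    import pandas

    with open("variable_capacity_factors.tab", "w") as f:
        f.write("GENERATION_PROJECT timepoint gen_max_capacity_factor\n")
        f.write("PV-1 1 0.61\n")
        f.write("PV-1 2 .\n")  # "." marks a missing value

    # same calls as convert_tab_to_csv(): any run of whitespace separates columns
    df = pandas.read_csv("variable_capacity_factors.tab", na_values=["."], sep=r"\s+")
    df.to_csv("variable_capacity_factors.csv", sep=",", na_rep=".", index=False)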
+    switch_model.upgrade._write_input_version(inputs_dir, upgrades_to)
+
+    # rename modules and report changes
+    update_modules(inputs_dir)
+
+    # convert the multi-fuels file to csv format (this is the last .dat input)
+    convert_gen_multiple_fuels_to_csv(inputs_dir)
+
+    # rename_file(inputs_dir, 'fuel_cost.csv', 'fuel_cost_per_period.csv')
+    # rename_column(inputs_dir, 'fuel_cost_per_period.csv', 'fuel_cost', 'period_fuel_cost')
+
+
+def rename_file(inputs_dir, old_name, new_name, optional_file=True):
+    old_path = os.path.join(inputs_dir, old_name)
+    new_path = os.path.join(inputs_dir, new_name)
+    if optional_file and not os.path.isfile(old_path):
+        return
+    shutil.move(old_path, new_path)
+
+
+def rename_column(
+    inputs_dir, file_name, old_col_name, new_col_name, optional_file=True
+):
+    path = os.path.join(inputs_dir, file_name)
+    if optional_file and not os.path.isfile(path):
+        return
+    df = pd.read_csv(path, na_values=["."], sep=",")  # for 2.0.5+
+    df.rename(columns={old_col_name: new_col_name}, inplace=True)
+    df.to_csv(path, sep=",", na_rep=".", index=False)
+
+
+def item_list(items):
+    """Generate normal-text version of list of items, with commas and "and" as needed."""
+    return " and ".join(", ".join(items).rsplit(", ", 1))
+
+
+def update_modules(inputs_dir):
+    """Rename modules in the module list if needed (list is sought in
+    standard locations) and return list of alerts for user."""
+
+    modules_path = os.path.join(inputs_dir, "modules.txt")
+    if not os.path.isfile(modules_path):
+        modules_path = os.path.join(inputs_dir, "..", "modules.txt")
+    if not os.path.isfile(modules_path):
+        modules_path = "modules.txt"
+    if not os.path.isfile(modules_path):
+        raise RuntimeError(
+            "Unable to find modules or modules.txt file for input directory '{}'. "
+            "This file should be located in the input directory, its parent, or "
+            "the current working directory.".format(inputs_dir)
+        )
+    modules_path = os.path.normpath(modules_path)  # tidy up for display later
+
+    # Upgrade module listings
+    # Each line of the original file is either a module identifier or a comment
+    with open(modules_path) as f:
+        old_module_list = [line.strip() for line in f.read().splitlines()]
+
+    # rename modules as needed
+    new_module_list = []
+    for module in old_module_list:
+        try:
+            new_modules = replace_modules[module]
+            switch_model.upgrade.print_verbose(
+                "Module {old} has been replaced by {new} in {file}.".format(
+                    old=module, new=item_list(new_modules), file=modules_path
+                )
+            )
+        except KeyError:
+            new_modules = [module]
+        new_module_list.extend(new_modules)
+
+    if new_module_list != old_module_list:
+        # write new modules list
+        with open(modules_path, "w") as f:
+            for module in new_module_list:
+                f.write(module + "\n")
+
+    # report any significant changes in the previously active modules
+    for module in old_module_list:
+        try:
+            switch_model.upgrade.print_verbose(
+                "ATTENTION: {}".format(module_messages[module])
+            )
+        except KeyError:
+            pass
+
+
+def convert_gen_multiple_fuels_to_csv(inputs_dir):
+    old_path = os.path.join(inputs_dir, "gen_multiple_fuels.dat")
+    new_path = os.path.join(inputs_dir, "gen_multiple_fuels.csv")
+
+    if os.path.exists(old_path):
+        old_df = read_dat_file(old_path)
+        # df has one row for each gen; gen is the index and a list of fuels is the value
+        # unpack list of allowed fuels for each generator
+        gen_fuels = [
+            (gen, fuel) for gen, fuels in old_df.itertuples() for fuel in fuels
+        ]
+        new_df = pd.DataFrame.from_records(
+            gen_fuels, columns=["GENERATION_PROJECT", "fuel"]
+        )
+        new_df.to_csv(new_path, sep=",", na_rep=".", index=False)
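convert_gen_multiple_fuels_to_csv() above reshapes the one-row-per-generator layout parsed from the .dat file (a list of allowed fuels per generator) into the long one-row-per-(generator, fuel) layout described in module_messages. A sketch of that reshaping step in isolation, with a hand-built frame standing in for read_dat_file()'s output and invented generator and fuel names:

    import pandas as pd

    old_df = pd.DataFrame(
        {"fuels": [["LSFO", "Biodiesel"], ["NaturalGas"]]},
        index=["Oahu_CC", "Oahu_CT"],  # hypothetical generators
    )
    # itertuples() yields (index, value) pairs, i.e. (gen, list of fuels)
    gen_fuels = [(gen, fuel) for gen, fuels in old_df.itertuples() for fuel in fuels]
    new_df = pd.DataFrame.from_records(gen_fuels, columns=["GENERATION_PROJECT", "fuel"])
    # new_df now has one row for each allowed fuel for each generator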
+        os.remove(old_path)
+
+
+def read_dat_file(path):
+    # define a dummy "model" where every "parameter" reports a dimension of 0.
+    # otherwise Pyomo assumes they have dim=1 and looks for index values.
+    class DummyModel:
+        def __getattr__(self, pname):
+            return DummyParam()
+
+    class DummyParam:
+        def dim(self):
+            return 0
+
+    try:
+        data = DataPortal(model=DummyModel())
+        data.load(filename=path)
+        # this happens to be in a pd-friendly format
+        df = pd.DataFrame(data.data())
+    except Exception as e:
+        print("\nERROR reading {}:\n{}".format(path, e))
+        raise
+    else:
+        return df
diff --git a/switch_model/upgrade/upgrade_2_0_7.py b/switch_model/upgrade/upgrade_2_0_7.py
new file mode 100644
index 000000000..0babd9d93
--- /dev/null
+++ b/switch_model/upgrade/upgrade_2_0_7.py
@@ -0,0 +1,191 @@
+# Copyright (c) 2015-2022 The Switch Authors. All rights reserved.
+# Licensed under the Apache License, Version 2.0, which is in the LICENSE file.
+import os, shutil, argparse, glob
+import pandas as pd
+import switch_model.upgrade
+from pyomo.environ import DataPortal
+
+upgrades_from = "2.0.6"
+upgrades_to = "2.0.7"
+
+replace_modules = {
+    # modules to be replaced in the module list
+    # old_module: [new_module1, new_module2, ...],
+}
+
+module_messages = {
+    # description of significant changes to particular modules other than
+    # moving/renaming
+    # old_module: message
+}
+
+
+def upgrade_input_dir(inputs_dir):
+    """
+    Upgrade the input directory.
+    """
+    # Write a new version text file. We do this early so that if the update
+    # fails and then the user tries again it won't try to upgrade a second time,
+    # overwriting their backup.
+    switch_model.upgrade._write_input_version(inputs_dir, upgrades_to)
+
+    # rename modules and report changes
+    # update_modules(inputs_dir)
+
+    rename_file(inputs_dir, "generation_projects_info.csv", "gen_info.csv", False)
+
+    rename_column(
+        inputs_dir,
+        "gen_build_predetermined.csv",
+        "gen_predetermined_cap",
+        "build_gen_predetermined",
+    )
+
+    rename_column(
+        inputs_dir,
+        "gen_build_predetermined.csv",
+        "gen_predetermined_storage_energy_mwh",  # briefly used pre-release
+        "build_gen_energy_predetermined",
+    )
+
+    move_column(
+        inputs_dir,
+        old_file_name="trans_params.csv",
+        old_col_name="distribution_loss_rate",
+        new_file_name="load_zones.csv",
+        new_col_name="local_td_loss_rate",
+        join_cols=tuple(),
+        optional_col=True,
+    )
+
+
+def rename_file(inputs_dir, old_name, new_name, optional_file=True):
+    old_path = os.path.join(inputs_dir, old_name)
+    new_path = os.path.join(inputs_dir, new_name)
+    if optional_file and not os.path.isfile(old_path):
+        pass
+    elif os.path.isfile(new_path) and not os.path.isfile(old_path):
+        switch_model.upgrade.print_verbose(
+            f"Input file {old_name} was already renamed to {new_name}."
+        )
+    else:
+        shutil.move(old_path, new_path)
+        switch_model.upgrade.print_verbose(
+            f"Input file {old_name} has been renamed to {new_name}."
+        )
+
+
+def rename_column(
+    inputs_dir, file_name, old_col_name, new_col_name, optional_file=True
+):
+    path = os.path.join(inputs_dir, file_name)
+    if optional_file and not os.path.isfile(path):
+        return
+    df = pd.read_csv(path, na_values=["."], sep=",")  # for 2.0.5+
+    if old_col_name in df.columns:
+        df.rename(columns={old_col_name: new_col_name}, inplace=True)
+        df.to_csv(path, sep=",", na_rep=".", index=False)
+        switch_model.upgrade.print_verbose(
+            f"Column {old_col_name} has been renamed to {new_col_name} in {file_name}."
+ ) + elif new_col_name in df.columns: + switch_model.upgrade.print_verbose( + f"Column {old_col_name} was already renamed to {new_col_name} in {file_name}." + ) + + +def move_column( + inputs_dir, + old_file_name, + old_col_name, + new_file_name, + new_col_name, + join_cols, + optional_col=True, +): + old_path = os.path.join(inputs_dir, old_file_name) + new_path = os.path.join(inputs_dir, new_file_name) + if optional_col and not os.path.isfile(old_path): + return + # add dummy key to allow cross-joins + fixed_join_cols = list(join_cols) + ["dummy_join_key"] + old_df = pd.read_csv(old_path, na_values=["."], sep=",").assign(dummy_join_key=0) + # TODO: create new_path if it doesn't exist + new_df = pd.read_csv(new_path, na_values=["."], sep=",").assign(dummy_join_key=0) + if old_col_name in old_df.columns: + new_col = old_df.loc[:, fixed_join_cols + [old_col_name]].merge( + new_df.loc[:, fixed_join_cols], on=fixed_join_cols + ) + new_df[new_col_name] = new_col[old_col_name] + new_df.drop("dummy_join_key", axis=1, inplace=True) + new_df.to_csv(new_path, sep=",", na_rep=".", index=False) + old_df.drop([old_col_name, "dummy_join_key"], axis=1, inplace=True) + old_df.to_csv(old_path, sep=",", na_rep=".", index=False) + switch_model.upgrade.print_verbose( + f"Column {old_file_name} > {old_col_name} has been moved to {new_file_name} > {new_col_name}." + ) + elif new_col_name in new_df.columns: + switch_model.upgrade.print_verbose( + f"Column {old_file_name} > {old_col_name} was already moved to {new_file_name} > {new_col_name}." + ) + elif not optional_col: + # column wasn't found and isn't optional + raise ValueError(f"Mandatory column {old_col_name} not found in {old_path}.") + + +def item_list(items): + """Generate normal-text version of list of items, with commas and "and" as needed.""" + return " and ".join(", ".join(items).rsplit(", ", 1)) + + +def update_modules(inputs_dir): + """Rename modules in the module list if needed (list is sought in + standard locations) and return list of alerts for user.""" + + modules_path = os.path.join(inputs_dir, "modules.txt") + if not os.path.isfile(modules_path): + modules_path = os.path.join(inputs_dir, "..", "modules.txt") + if not os.path.isfile(modules_path): + modules_path = "modules.txt" + if not os.path.isfile(modules_path): + raise RuntimeError( + "Unable to find modules or modules.txt file for input directory '{}'. 
" + "This file should be located in the input directory, its parent, or " + "the current working directory.".format(inputs_dir) + ) + modules_path = os.path.normpath(modules_path) # tidy up for display later + + # Upgrade module listings + # Each line of the original file is either a module identifier or a comment + with open(modules_path) as f: + old_module_list = [line.strip() for line in f.read().splitlines()] + + # rename modules as needed + new_module_list = [] + for module in old_module_list: + try: + new_modules = replace_modules[module] + switch_model.upgrade.print_verbose( + "Module {old} has been replaced by {new} in {file}.".format( + old=module, new=item_list(new_modules), file=modules_path + ) + ) + except KeyError: + new_modules = [module] + new_module_list.extend(new_modules) + + if new_module_list != old_module_list: + # write new modules list + with open(modules_path, "w") as f: + for module in new_module_list: + f.write(module + "\n") + + # report any significant changes in the previously active modules + for module in old_module_list: + try: + switch_model.upgrade.print_verbose( + "ATTENTION: {}".format(module_messages[module]) + ) + except KeyError: + pass + diff --git a/switch_model/utilities.py b/switch_model/utilities.py index b6a0ea384..98df6bfd0 100644 --- a/switch_model/utilities.py +++ b/switch_model/utilities.py @@ -1,15 +1,30 @@ -# Copyright (c) 2015-2019 The Switch Authors. All rights reserved. +# Copyright (c) 2015-2022 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. """ Utility functions for Switch. """ -from __future__ import print_function +from __future__ import print_function, division + +import argparse +import datetime +import importlib +import os +import re +import sys +import logging +import time +import types +import textwrap -import os, types, importlib, re, sys, argparse, time, datetime -import __main__ as main from pyomo.environ import * -import pyomo.opt +import pyomo.opt, pyomo.version + +try: + # sentinel for sets with no dimension specified in Pyomo 5.7+ + from pyomo.core.base.set import UnknownSetDimen +except ImportError: + UnknownSetDimen = object() # shouldn't ever match # Define string_types (same as six.string_types). This is useful for # distinguishing between strings and other iterables. @@ -20,95 +35,301 @@ # Python 3 string_types = (str,) -# Check whether this is an interactive session (determined by whether -# __main__ has a __file__ attribute). Scripts can check this value to -# determine what level of output to display. -interactive_session = not hasattr(main, '__file__') def define_AbstractModel(*module_list, **kwargs): # stub to provide old functionality as we move to a simpler calling convention args = kwargs.get("args", sys.argv[1:]) return create_model(module_list, args) -def create_model(module_list=None, args=sys.argv[1:]): + +class SwitchAbstractModel(AbstractModel): + """ + Subclass of standard Pyomo ConcreteModel with methods to implement Switch- + specific behavior (initializing via modules, etc.). """ - Construct a Pyomo AbstractModel using the Switch modules or packages - in the given list and return the model. The following utility methods - are attached to the model as class methods to simplify their use: - min_data_check(), load_inputs(), pre_solve(), post_solve(). + def __init__(self, module_list=None, args=sys.argv[1:], logger=None): + """ + Construct a customized Pyomo AbstractModel using the Switch modules or + packages in the given list. 
-    This is implemented as calling the following functions for each module
-    that has them defined:
+        This is implemented as calling the following functions for each module
+        that has them defined:
 
-    define_dynamic_lists(model): Add lists to the model that other modules can
-    register with. Used for power balance equations, cost components of the
-    objective function, etc.
+        define_dynamic_lists(model): Add lists to the model that other modules
+        can register with. Used for power balance equations, cost components of
+        the objective function, etc.
 
-    define_components(model): Add components to the model object (parameters,
-    sets, decisions variables, expressions, and/or constraints). Also register
-    with relevant dynamic_lists.
+        define_components(model): Add components to the model object
+        (parameters, sets, decision variables, expressions, and/or
+        constraints). Also register with relevant dynamic_lists.
 
-    define_dynamic_components(model): Add dynamic components to the model that
-    depend on the contents of dyanmics lists. Power balance constraints and
-    the objective function are defined in this manner.
+        define_dynamic_components(model): Add dynamic components to the model
+        that depend on the contents of dynamic lists. Power balance constraints
+        and the objective function are defined in this manner.
 
-    See financials and balancing.load_zones for examples of dynamic definitions.
+        See financials and balancing.load_zones for examples of dynamic
+        definitions.
 
-    All modules can request access to command line parameters and set their
-    default values for those options. If this codebase is being used more like
-    a library than a stand-alone executable, this behavior can cause problems.
-    For example, running this model with PySP's runph tool will cause errors
-    where a runph argument such as --instance-directory is unknown to the
-    switch modules, so parse_args() generates an error. This behavior can be
-    avoided calling this function with an empty list for args:
-    create_model(module_list, args=[])
+        All modules can request access to command line parameters and set their
+        default values for those options. If this codebase is being used more
+        like a library than a stand-alone executable, this behavior can cause
+        problems. For example, running this model with PySP's runph tool will
+        cause errors where a runph argument such as --instance-directory is
+        unknown to the switch modules, so parse_args() generates an error. This
+        behavior can be avoided by calling this function with an empty list for
+        args: create_model(module_list, args=[])
 
-    """
-    model = AbstractModel()
+        """
+        # do standard Pyomo initialization
+        AbstractModel.__init__(self)
 
-    # Load modules
-    if module_list is None:
+        # late import to minimize circular dependency
         import switch_model.solve
-        module_list = switch_model.solve.get_module_list(args)
-    model.module_list = module_list
-    for m in module_list:
-        importlib.import_module(m)
-
-    # Bind utility functions to the model as class objects
-    # Should we be formally extending their class instead?
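For reference, the callback protocol described in this docstring means a module only defines the hooks it needs. A skeletal module might look like the sketch below; the component and option names (demo_cost, --demo-cost-adder) are invented for illustration, and a real module would also register its cost component in the appropriate dynamic list:

    from pyomo.environ import NonNegativeReals, Param

    def define_arguments(argparser):
        # called while the model's command-line options are being assembled
        argparser.add_argument("--demo-cost-adder", type=float, default=0.0)

    def define_components(mod):
        # called after every module's define_dynamic_lists() has run
        mod.demo_cost = Param(mod.PERIODS, within=NonNegativeReals, default=0.0)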
-    _add_min_data_check(model)
-    model.has_discrete_variables = types.MethodType(has_discrete_variables, model)
-    model.get_modules = types.MethodType(get_modules, model)
-    model.load_inputs = types.MethodType(load_inputs, model)
-    model.pre_solve = types.MethodType(pre_solve, model)
-    model.post_solve = types.MethodType(post_solve, model)
-
-    # Define and parse model configuration options
-    argparser = _ArgumentParser(allow_abbrev=False)
-    for module in model.get_modules():
-        if hasattr(module, 'define_arguments'):
-            module.define_arguments(argparser)
-    model.options = argparser.parse_args(args)
-
-    # Define model components
-    for module in model.get_modules():
-        if hasattr(module, 'define_dynamic_lists'):
-            module.define_dynamic_lists(model)
-    for module in model.get_modules():
-        if hasattr(module, 'define_components'):
-            module.define_components(model)
-    for module in model.get_modules():
-        if hasattr(module, 'define_dynamic_components'):
-            module.define_dynamic_components(model)
-
-    return model
-
-
-def get_modules(model):
-    """ Return a list of loaded module objects for this model. """
-    for m in model.module_list:
-        yield sys.modules[m]
+
+        # Load modules
+        if module_list is None:
+            module_list = switch_model.solve.get_module_list(args)
+        self.module_list = module_list
+
+        for m in self.module_list:
+            importlib.import_module(m)
+
+        # Each model usually has its own logger, passed in by
+        # switch_model.solve, because users may specify different logging
+        # settings for each model. If not provided, we attach a default logger,
+        # since all modules assume there's one in place.
+        # (This is used to maintain a consistent logging level throughout the
+        # model life cycle. In the future it could be used to log different
+        # models to different files. Currently that is handled by redirecting
+        # all output to the right file via a wrapper around any code that may
+        # produce output, to ensure we catch print() calls (deprecated) and
+        # messages from Pyomo and its solver.)
+        if logger is None:
+            logger = logging.getLogger("Switch Default Logger")
+        self.logger = logger
+
+        # Define and parse model configuration options
+        argparser = _ArgumentParser(allow_abbrev=False)
+        for module in self.get_modules():
+            if hasattr(module, "define_arguments"):
+                module.define_arguments(argparser)
+        self.options = argparser.parse_args(args)
+
+        # Apply verbose flag to support code that still uses it (newer code
+        # should use model.logger.isEnabledFor(logging.LEVEL))
+        self.options.verbose = self.logger.isEnabledFor(logging.INFO)
+
+        # get a list of modules to iterate through
+        self.iterate_modules = switch_model.solve.get_iteration_list(self)
+
+        # Describe model (if wanted) before constructing it
+        self.logger.info(
+            "======================================================================="
+        )
+        self.logger.info(
+            "Arguments:\n"
+            + ", ".join(k + "=" + repr(v) for k, v in vars(self.options).items() if v)
+        )
+        self.logger.info("\nModules:\n" + ", ".join(m for m in self.module_list))
+        if self.iterate_modules:
+            self.logger.info("\nIteration modules:" + str(self.iterate_modules))
+        self.logger.info(
+            "=======================================================================\n"
+        )
+
+        # Define model components
+        for module in self.get_modules():
+            if hasattr(module, "define_dynamic_lists"):
+                module.define_dynamic_lists(self)
+        for module in self.get_modules():
+            if hasattr(module, "define_components"):
+                module.define_components(self)
+        for module in self.get_modules():
+            if hasattr(module, "define_dynamic_components"):
+                module.define_dynamic_components(self)
+
+    def get_modules(self):
+        """Return a list of loaded module objects for this model."""
+        for m in self.module_list:
+            yield sys.modules[m]
+
+    def min_data_check(self, *mandatory_components):
+        """
+        This function checks that an instance of Pyomo abstract model has
+        mandatory components defined. If a user attempts to create an instance
+        without defining all of the necessary data, this will produce fatal
+        errors with clear messages stating specifically what components have
+        missing data.
+
+        Without this check, Switch gives fatal errors if the user forgets to
+        specify data for a component that doesn't have a default value, but the
+        error message is obscure and references the first code that tries to
+        reference the component with missing data.
+
+        BuildCheck's message lists the name of the check that failed, but
+        doesn't provide mechanisms for printing a specific error message. Just
+        printing to screen is easy to miss, so we raise a ValueError with a
+        clear and specific message.
+        """
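A stripped-down illustration of the mechanism min_data_check() relies on: a BuildCheck appended after the other components runs during create_instance(), so missing data fails fast with a readable message. The parameter name here is hypothetical, and the check mirrors the ScalarParam branch of check_mandatory_components() further below:

    from pyomo.environ import AbstractModel, BuildCheck, Param

    m = AbstractModel()
    m.fuel_cost = Param()  # no default, so a value is mandatory

    def _check(model):
        if model.fuel_cost.value is None:
            raise ValueError("Value not provided for mandatory parameter 'fuel_cost'")
        return True

    m.min_data_check_0 = BuildCheck(rule=_check)
    m.create_instance(data={None: {"fuel_cost": {None: 2.5}}})  # passes
    # omitting the data block would raise the ValueError above instead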
+ """ + try: + self.__num_min_data_checks += 1 + except AttributeError: + self.__num_min_data_checks = 0 # initialize + new_data_check_name = "min_data_check_" + str(self.__num_min_data_checks) + setattr( + self, + new_data_check_name, + BuildCheck( + rule=lambda m: check_mandatory_components(m, *mandatory_components) + ), + ) + + def _initialize_component(self, *args, **kwargs): + """ + This method is called to initialize each Pyomo component; we hook onto + it to report construction progress + """ + AbstractModel._initialize_component(self, *args, **kwargs) + + try: + self.__n_components_constructed = self.__n_components_constructed + 1 + except AttributeError: + self.__n_components_constructed = 1 + + try: + next_report = self.__next_report_components_construction + except: + next_report = 0 + + fraction_constructed = self.__n_components_constructed / len(self._decl_order) + + # TODO: add code to produce output like this, even if + # n_components_to_construct changes between calls + # Constructed 10% of components + # Constructed 20% of components + if fraction_constructed >= next_report: + self.logger.info( + f"Constructed " + f"{self.__n_components_constructed} of {len(self._decl_order)} " + f"components ({fraction_constructed:.0%})" + ) + self.__next_report_components_construction = next_report + 0.1 + + def load_inputs(self, inputs_dir=None, attach_data_portal=True): + """ + Load input data using the appropriate modules and return a model + instance. This is implemented by calling the load_inputs() function of + each module, if the module has that function. + """ + if inputs_dir is None: + inputs_dir = getattr(self.options, "inputs_dir", "inputs") + + # Load data; add a fancier load function to the data portal + timer = StepTimer() + data = DataPortal(model=self) + data.load_aug = types.MethodType(load_aug, data) + for module in self.get_modules(): + if hasattr(module, "load_inputs"): + module.load_inputs(self, data, inputs_dir) + + if self.options.verbose: + print("Data read in {:.2f} s.\n".format(timer.step_time())) + + if self.logger.isEnabledFor(logging.DEBUG): + instance = self.create_instance(data, report_timing=True) + else: + instance = self.create_instance(data, report_timing=False) + + if attach_data_portal: + instance.DataPortal = data + + if self.options.verbose: + print("Instance created from data in {:.2f} s.\n".format(timer.step_time())) + + return instance + + def create_instance(*args, **kwargs): + """ + Use standard Pyomo create_instance method, then convert to + SwitchConcreteModel + + Pyomo deepcopies the AbstractModel during create_instance and then + reassigns its __class__ as ConcreteModel before returning it (with a + note that "It is absolutely crazy that this is allowed in Python"). This + doesn't give a natural way to use our subclass of ConcreteModel (with + pre_solve and post_solve methods). So we just use the same trick again + and reassign as SwitchConcreteModel + """ + instance = AbstractModel.create_instance(*args, **kwargs) + instance.__class__ = SwitchConcreteModel + return instance + + +class SwitchConcreteModel(ConcreteModel): + """ + Subclass of standard Pyomo ConcreteModel with methods to implement Switch- + specific behavior (pre_solve, post_solve, has_discrete_variables). 
+ """ + + get_modules = SwitchAbstractModel.get_modules + + def has_discrete_variables(model): + all_elements = lambda v: v.values() if v.is_indexed() else [v] + return any( + v.is_binary() or v.is_integer() + for variable in model.component_objects(Var, active=True) + for v in all_elements(variable) + ) + + def preprocess(self, *args, **kwargs): + # continue to use in Pyomo 5 but avoid deprecation warning in Pyomo 6+ + if pyomo.version.version_info[:2] < (6, 0): + return ConcreteModel.preprocess(self, *args, **kwargs) + + def pre_solve(self, outputs_dir=None): + """ + Call pre-solve function (if present) in all modules used to compose this + model. This method can be used to adjust the instance after it is + created and before it is solved. + """ + for module in self.get_modules(): + if hasattr(module, "pre_solve"): + module.pre_solve(self) + + def post_solve(self, outputs_dir=None): + """ + Call post-solve function (if present) in all modules used to compose + this model. This method can be used to report or save results from the + solved model. + """ + if outputs_dir is None: + outputs_dir = getattr(self.options, "outputs_dir", "outputs") + if not os.path.exists(outputs_dir): + os.makedirs(outputs_dir) + + for module in self.get_modules(): + if hasattr(module, "post_solve"): + module.post_solve(self, outputs_dir) + + +def create_model(*args, **kwargs): + """Stub function to implement old functionality, now achieved via subclass.""" + return SwitchAbstractModel(*args, **kwargs) + + +def unique_list(seq): + """ + Create a list with the unique elements from seq, preserving original order. + + This is often useful instead of `set()` when creating Pyomo Sets from unique + members of a collection, since Pyomo >= 5.7 always creates ordered sets and + deprecates use of Python's unordered sets for initialization. + """ + # from https://stackoverflow.com/a/17016257/ + # Note that this solution depends on Python's order-preserving dicts after + # in version 3.7+, which is fine since Switch requires Python >= 3.7. + return list(dict.fromkeys(seq)) def make_iterable(item): @@ -123,14 +344,17 @@ def make_iterable(item): i = iter([item]) return i + class StepTimer(object): """ Keep track of elapsed time for steps of a process. Use timer = StepTimer() to create a timer, then retrieve elapsed time and/or reset the timer at each step by calling timer.step_time() """ + def __init__(self): self.start_time = time.time() + def step_time(self): """ Reset timer to current time and return time elapsed since last step. @@ -139,41 +363,14 @@ def step_time(self): self.start_time = now = time.time() return now - last_start -def load_inputs(model, inputs_dir=None, attach_data_portal=True): - """ - Load input data for an AbstractModel using the modules in the given - list and return a model instance. This is implemented as calling the - load_inputs() function of each module, if the module has that function. - """ - if inputs_dir is None: - inputs_dir = getattr(model.options, "inputs_dir", "inputs") - - # Load data; add a fancier load function to the data portal - timer = StepTimer() - data = DataPortal(model=model) - data.load_aug = types.MethodType(load_aug, data) - for module in model.get_modules(): - if hasattr(module, 'load_inputs'): - module.load_inputs(model, data, inputs_dir) - if model.options.verbose: - print("Data read in {:.2f} s.\n".format(timer.step_time())) - - # At some point, pyomo deprecated 'create' in favor of 'create_instance'. - # Determine which option is available and use that. 
- if hasattr(model, 'create_instance'): - instance = model.create_instance(data) - else: - instance = model.create(data) - if model.options.verbose: - print("Instance created from data in {:.2f} s.\n".format(timer.step_time())) - - if attach_data_portal: - instance.DataPortal = data - return instance - -def save_inputs_as_dat(model, instance, save_path="inputs/complete_inputs.dat", - exclude=[], sorted_output=False): +def save_inputs_as_dat( + model, + instance, + save_path="inputs/complete_inputs.dat", + exclude=[], + sorted_output=False, +): """ Save input data to a .dat file for use with PySP or other command line tools that have not been fully integrated with DataPortal. @@ -182,26 +379,41 @@ def save_inputs_as_dat(model, instance, save_path="inputs/complete_inputs.dat", """ # helper function to convert values to strings, # putting quotes around values that start as strings - quote_str = lambda v: '"{}"'.format(v) if isinstance(v, string_types) else '{}'.format(str(v)) + quote_str = ( + lambda v: '"{}"'.format(v) + if isinstance(v, string_types) + else "{}".format(str(v)) + ) # helper function to create delimited lists from single items or iterables of any data type from switch_model.reporting import make_iterable - join_space = lambda items: ' '.join(map(str, make_iterable(items))) # space-separated list - join_comma = lambda items: ','.join(map(str, make_iterable(items))) # comma-separated list + + join_space = lambda items: " ".join( + map(str, make_iterable(items)) + ) # space-separated list + join_comma = lambda items: ",".join( + map(str, make_iterable(items)) + ) # comma-separated list with open(save_path, "w") as f: for component_name in instance.DataPortal.data(): if component_name in exclude: - continue # don't write data for components in exclude list - # (they're in scenario-specific files) + continue # don't write data for components in exclude list + # (they're in scenario-specific files) component = getattr(model, component_name) comp_class = type(component).__name__ component_data = instance.DataPortal.data(name=component_name) - if comp_class == 'SimpleSet' or comp_class == 'OrderedSimpleSet': + if comp_class in { + "SimpleSet", # Pyomo < 6.0 + "OrderedSimpleSet", + "AbstractOrderedSimpleSet", + "ScalarSet", # Pyomo >= 6.0 + "OrderedScalarSet", + "AbstractOrderedScalarSet", + }: f.write( - "set {} := {};\n" - .format(component_name, join_space(component_data)) + "set {} := {};\n".format(component_name, join_space(component_data)) ) - elif comp_class == 'IndexedParam': + elif comp_class == "IndexedParam": if component_data: # omit components for which no data were provided f.write("param {} := \n".format(component_name)) for key, value in ( @@ -211,104 +423,28 @@ def save_inputs_as_dat(model, instance, save_path="inputs/complete_inputs.dat", ): f.write(" {} {}\n".format(join_space(key), quote_str(value))) f.write(";\n") - elif comp_class == 'SimpleParam': + elif comp_class in {"SimpleParam", "ScalarParam"}: # Pyomo < or >= 6.0 f.write("param {} := {};\n".format(component_name, component_data)) - elif comp_class == 'IndexedSet': + elif comp_class == "IndexedSet": for key, vals in iteritems(component_data): f.write( - "set {}[{}] := {};\n" - .format(component_name, join_comma(key), join_space(vals)) + "set {}[{}] := {};\n".format( + component_name, join_comma(key), join_space(vals) + ) ) else: raise ValueError( - "Error! Component type {} not recognized for model element '{}'.". 
- format(comp_class, component_name)) - -def pre_solve(instance, outputs_dir=None): - """ - Call pre-solve function (if present) in all modules used to compose this model. - This function can be used to adjust the instance after it is created and before it is solved. - """ - for module in instance.get_modules(): - if hasattr(module, 'pre_solve'): - module.pre_solve(instance) - -def post_solve(instance, outputs_dir=None): - """ - Call post-solve function (if present) in all modules used to compose this model. - This function can be used to report or save results from the solved model. - """ - if outputs_dir is None: - outputs_dir = getattr(instance.options, "outputs_dir", "outputs") - if not os.path.exists(outputs_dir): - os.makedirs(outputs_dir) - - # TODO: implement a check to call post solve functions only if - # solver termination condition is not 'infeasible' or 'unknown' - # (the latter may occur when there are problems with licenses, etc) - - for module in instance.get_modules(): - if hasattr(module, 'post_solve'): - module.post_solve(instance, outputs_dir) - - -def min_data_check(model, *mandatory_model_components): - """ + "Error! Component type {} not recognized for model element '{}'.".format( + comp_class, component_name + ) + ) - This function checks that an instance of Pyomo abstract model has - mandatory components defined. If a user attempts to create an - instance without defining all of the necessary data, this will - produce fatal errors with clear messages stating specifically what - components have missing data. This function is attached to an - abstract model by the _add_min_data_check() function. See - _add_min_data_check() documentation for usage examples. - - Without this check, I would get fatal errors if I forgot to specify data - for a component that didn't have a default value, but the error message - was obscure and gave me a line number with the first snippet of code - that tried to reference the component with missing data. It took me a - little bit of time to figure out what was causing that failure, and I'm - a skilled programmer. I would like this model to be accessible to non- - programmers as well, so I felt it was important to use the BuildCheck - Pyomo function to validate data during construction of a model instance. - - I found that BuildCheck's message listed the name of the check that - failed, but did not provide mechanisms for printing a specific error - message. I tried printing to the screen, but that output tended to be - obscured or hidden. I've settled on raising a ValueError for now with a - clear and specific message. I could also use logging.error() or related - logger methods, and rely on BuildCheck to throw an error, but I've - already implemented this, and those other methods don't offer any clear - advantages that I can see. - """ - model.__num_min_data_checks += 1 - new_data_check_name = "min_data_check_" + str(model.__num_min_data_checks) - setattr(model, new_data_check_name, BuildCheck( - rule=lambda m: check_mandatory_components( - m, *mandatory_model_components))) +def unwrap(message): + return textwrap.dedent(message).replace(" \n", " ").replace("\n", " ").strip() -def _add_min_data_check(model): - """ - Bind the min_data_check() method to an instance of a Pyomo AbstractModel - object if it has not already been added. Also add a counter to keep - track of what to name the next check that is added. 
- """ - if getattr(model, 'min_data_check', None) is None: - model.__num_min_data_checks = 0 - model.min_data_check = types.MethodType(min_data_check, model) - - -def has_discrete_variables(model): - all_elements = lambda v: v.itervalues() if v.is_indexed() else [v] - return any( - v.is_binary() or v.is_integer() - for variable in model.component_objects(Var, active=True) - for v in all_elements(variable) - ) - -def check_mandatory_components(model, *mandatory_model_components): +def check_mandatory_components(model, *mandatory_components): """ Checks whether mandatory elements of a Pyomo model are populated, and returns a clear error message if they don't exist. @@ -330,37 +466,54 @@ def check_mandatory_components(model, *mandatory_model_components): This does not work with indexed sets. """ - for component_name in mandatory_model_components: + for component_name in mandatory_components: obj = getattr(model, component_name) o_class = type(obj).__name__ - if o_class == 'SimpleSet' or o_class == 'OrderedSimpleSet': + if o_class in { + "SimpleSet", # Pyomo < 6.0 + "OrderedSimpleSet", + "ScalarSet", # Pyomo >= 6.0 + "OrderedScalarSet", + }: if len(obj) == 0: raise ValueError( - "No data is defined for the mandatory set '{}'.". - format(component_name)) - elif o_class == 'IndexedParam': - if len(obj) != len(obj._index): - missing_index_elements = [v for v in set(obj._index) - set( obj.sparse_keys())] + "No data is defined for the mandatory set '{}'.".format( + component_name + ) + ) + elif o_class == "IndexedParam": + if len(obj) != len(obj.index_set()): + missing_index_elements = [k for k in obj.index_set() if k not in obj] raise ValueError( "Values are not provided for every element of the " "mandatory parameter '{}'. " - "Missing data for {} values, including: {}" - .format(component_name, len(missing_index_elements), missing_index_elements[:10]) + "Missing data for {} values, including: {}".format( + component_name, + len(missing_index_elements), + missing_index_elements[:10], + ) ) - elif o_class == 'IndexedSet': - if len(obj) != len(obj._index): + elif o_class == "IndexedSet": + if len(obj) != len(obj.index_set()): raise ValueError( - ("Sets are not defined for every index of " + - "the mandatory indexed set '{}'").format(component_name)) - elif o_class == 'SimpleParam': + ( + "Sets are not defined for every index of " + + "the mandatory indexed set '{}'" + ).format(component_name) + ) + elif o_class in {"SimpleParam", "ScalarParam"}: # Pyomo < or >= 6.0 if obj.value is None: raise ValueError( - "Value not provided for mandatory parameter '{}'". - format(component_name)) + "Value not provided for mandatory parameter '{}'".format( + component_name + ) + ) else: raise ValueError( - "Error! Object type {} not recognized for model element '{}'.". - format(o_class, component_name)) + "Error! Object type {} not recognized for model element '{}'.".format( + o_class, component_name + ) + ) return True @@ -379,13 +532,86 @@ def __str__(self): return repr(self.value) -def load_aug(switch_data, optional=False, auto_select=False, - optional_params=[], **kwds): +def apply_input_aliases(switch_data, path): + """ + Translate filenames based on --input-alias[es] arguments. + + Filename substitutions are specified like + --input-aliases ev_share.csv=ev_share.ev_flat.csv rps.csv=rps.2030.csv + + Filename 'none' will be converted to an empty string and usually be ignored. + + This enables use of alternative files to study sensitivities without + creating complete input directories for each permutation. 
+ """ + try: + file_aliases = switch_data.file_aliases + except AttributeError: + file_aliases = switch_data.file_aliases = { + standard: alternative + for standard, alternative in ( + pair.split("=") for pair in switch_data._model.options.input_aliases + ) + } + + root, filename = os.path.split(path) + if filename in file_aliases: + old_path = path + if file_aliases[filename].lower() == "none": + path = "" + else: + # Note: We could use os.path.normpath() to clean up paths like + # 'inputs/../inputs_alt', but leaving them as-is may make it more + # clear that an alias is in use if errors crop up later. + path = os.path.join(root, file_aliases[filename]) + if not os.path.isfile(path): + # catch alias naming errors (should always point to a real file) + raise ValueError( + 'Alias "{}" specified for file "{}" does not exist. ' + "Specify {}=none if you want to supply no data.".format( + path, old_path, filename + ) + ) + switch_data._model.logger.info("Applying alias {}={}".format(old_path, path)) + + return path + + +def load_aug(switch_data, optional=False, optional_params=[], **kwargs): """ This is a wrapper for the DataPortal object that accepts additional - keywords. This currently supports a flag for the file being optional. - The name load_aug() is not great and may be changed. + keywords to allow optional files, optional columns, and auto-select + columns based on parameter names. The name is an abbreviation of + load_augmented. + + * optional: Indicates the input file is entirely optional. If absent, the + sets and/or parameters will either be blank or set to their default values + as defined in the model. + * optional_params: Indicates specific parameter columns are optional, and + will be skipped during loading if they are not present in the input file. + All params in optional files are added to this list automatically. + optional_params are ignored for `.dat` files (rarely used). + + To do: + * Come up with a better name for this function. + * Streamline the API so each param is specified exactly once, either in + param or optional_param, and use the same style for each (component + objects rather than component names). Alternatively, have an option to + auto-detect whether a param is optional based on whether it has a default + value specified. + * Replace this function with more auto-detection. Allow user to specify + filename when defining parameters and sets. Also allow user to specify the + name(s) of the column(s) in each set. Then use those automatically to pull + data from the right file (and to write correct index column names in the + generic output files). This will simplify code and ease comprehension + (user can see immediately where the data come from for each component). + This can also support auto-documenting of parameters and input files. + * Maybe each input file should have the same name as the matching index set? + gen_info -> generation_projects + gen_build_predetermined -> predetermined_gen_bld_yrs + gen_build_costs -> gen_bld_yrs """ + # TODO: # Allow user to specify filename when defining parameters and sets. # Also allow user to specify the name(s) of the column(s) in each set. @@ -395,32 +621,50 @@ def load_aug(switch_data, optional=False, auto_select=False, # immediately where the data come from for each component). This can # also support auto-documenting of parameters and input files. 
- path = kwds['filename'] - # Skip if the file is missing + # convert filename if needed + kwargs["filename"] = apply_input_aliases(switch_data, kwargs["filename"]) + # store filename in local variable for easier access + path = kwargs["filename"] + + # catch obsolete auto_select argument (not used in 2.0.6 and later) + for a in ["auto_select", "autoselect"]: + if a in kwargs: + del kwargs[a] + # TODO: receive a reference to the model and use the logger for this + print( + "WARNING: obsolete argument {} ignored while reading {}. " + "Please remove this from your code. Columns are always " + "auto-selected now unless a 'select' argument is passed.".format( + a, path + ) + ) + + # Skip if an optional file is unavailable if optional and not os.path.isfile(path): return + # If this is a .dat file, then skip the rest of this fancy business; we'll # only check if the file is missing and optional for .csv files. filename, extension = os.path.splitext(path) - if extension == '.dat': - switch_data.load(**kwds) + if extension == ".dat": + switch_data.load(**kwargs) return - # copy the optional_params to avoid side-effects when the list is altered below - optional_params=list(optional_params) + # copy optional_params to avoid side-effects when the list is altered below + optional_params = list(optional_params) # Parse header and first row with open(path) as infile: headers_line = infile.readline() second_line = infile.readline() - file_is_empty = (headers_line == '') - file_has_no_data_rows = (second_line == '') - suffix = path.split('.')[-1] - if suffix in {'tab', 'tsv'}: - separator = '\t' - elif suffix == 'csv': - separator = ',' + file_is_empty = headers_line == "" + file_has_no_data_rows = second_line == "" + suffix = path.split(".")[-1] + if suffix in {"tab", "tsv"}: + separator = "\t" + elif suffix == "csv": + separator = "," else: - raise switch_model.utilities.InputError('Unrecognized file type for input file {}'.format(path)) + raise InputError("Unrecognized file type for input file {}".format(path)) # TODO: parse this more formally, e.g. using csv module headers = headers_line.strip().split(separator) # Skip if the file is empty. @@ -429,31 +673,31 @@ def load_aug(switch_data, optional=False, auto_select=False, # Try to get a list of parameters. If param was given as a # singleton or a tuple, make it into a list that can be edited. params = [] - if 'param' in kwds: + if "param" in kwargs: # Tuple -> list - if isinstance(kwds['param'], tuple): - kwds['param'] = list(kwds['param']) + if isinstance(kwargs["param"], tuple): + kwargs["param"] = list(kwargs["param"]) # Singleton -> list - elif not isinstance(kwds['param'], list): - kwds['param'] = [kwds['param']] - params = kwds['param'] + elif not isinstance(kwargs["param"], list): + kwargs["param"] = [kwargs["param"]] + params = kwargs["param"] # optional_params may include Param objects instead of names. In # those cases, convert objects to names. for (i, p) in enumerate(optional_params): if not isinstance(p, string_types): optional_params[i] = p.name - # Expand the list optional parameters to include any parameter that - # has default() defined. I need to allow an explicit list of default - # parameters to support optional parameters like gen_unit_size which - # don't have default value because it is undefined for generators - # for which it does not apply. + # Expand the list of optional parameters to include any parameter that has + # default() defined or that comes from an optional table. 
We also allow an + # explicit list of optional parameters to support parameters like + # gen_unit_size, which doesn't have a default value because it is undefined + # for generators for which it does not apply. for p in params: - if p.default() is not None: + if (optional or p.default() is not None) and p.name not in optional_params: optional_params.append(p.name) # How many index columns do we expect? # Grab the dimensionality of the index param if it was provided. - if 'index' in kwds: - num_indexes = kwds['index'].dimen + if "index" in kwargs: + num_indexes = kwargs["index"].dimen # Next try the first parameter's index. elif len(params) > 0: try: @@ -463,6 +707,23 @@ def load_aug(switch_data, optional=False, auto_select=False, # Default to 0 if both methods failed. else: num_indexes = 0 + + if num_indexes is UnknownSetDimen: + # Pyomo < 5.7 assumes dimension is 1 if not specified. But Pyomo 5.7 and + # later use a sentinel and don't set the dimension if no dimension is + # specified. We can't assume the dimension is 1, because SetProducts + # (e.g., index_set() for a multi-indexed Param) or filtered versions of + # sets inherit UnknownSetDimen. We could potentially only raise this + # error when the user doesn't provide a select statement, but it's a + # general enough problem to just raise all the time. We could + # potentially use pyomo.dataportal.process_data._guess_set_dimen() but + # it is undocumented and not needed if all the sets have dimen + # specified, which they do now. + raise ValueError( + f"Set {params[0].index_set()} has unknown dimen; unable to infer " + f"number of index columns to read from {path}." + ) + # Make a select list if requested. Assume the left-most columns are # indexes and that other columns are named after their parameters. # Maybe this could be extended to use a standard prefix for each data file? @@ -470,35 +731,27 @@ def load_aug(switch_data, optional=False, auto_select=False, # could all get the prefix "rfm_supply_tier_". Then they could get shorter names # within the file (e.g., "cost" and "limit"). We could also require the data file # to be called "rfm_supply_tier.csv" for greater consistency/predictability. - if auto_select: - if 'select' in kwds: - raise InputError('You may not specify a select parameter if ' + - 'auto_select is set to True.') - kwds['select'] = headers[0:num_indexes] - kwds['select'].extend([p.name for p in params]) + if "select" not in kwargs: + kwargs["select"] = headers[0:num_indexes] + [p.name for p in params] # Check to see if expected column names are in the file. If a column # name is missing and its parameter is optional, then drop it from # the select & param lists. - if 'select' in kwds: - if isinstance(kwds['select'], tuple): - kwds['select'] = list(kwds['select']) - del_items = [] - for (i, col) in enumerate(kwds['select']): - p_i = i - num_indexes - if col not in headers: - if(len(params) > p_i >= 0 and - params[p_i].name in optional_params): - del_items.append((i, p_i)) - else: - raise InputError( - 'Column {} not found in file {}.' - .format(col, path)) - # When deleting entries from select & param lists, go from last - # to first so that the indexes won't get messed up as we go. 
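The UnknownSetDimen guard above is the reason Switch now declares dimen explicitly on every Set: with it, the index-column inference always has a concrete number to work with. A small sketch, with illustrative component names:

    from pyomo.environ import AbstractModel, Param, Set

    m = AbstractModel()
    # without dimen=2, Pyomo 5.7+ reports UnknownSetDimen and load_aug()
    # could not tell how many leading columns of the file are indexes
    m.GEN_BLD_YRS = Set(dimen=2)  # (generator, build year) pairs
    m.gen_overnight_cost = Param(m.GEN_BLD_YRS)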
- del_items.sort(reverse=True) - for (i, p_i) in del_items: - del kwds['select'][i] - del kwds['param'][p_i] + if isinstance(kwargs["select"], tuple): + kwargs["select"] = list(kwargs["select"]) + del_items = [] + for (i, col) in enumerate(kwargs["select"]): + p_i = i - num_indexes + if col not in headers: + if len(params) > p_i >= 0 and params[p_i].name in optional_params: + del_items.append((i, p_i)) + else: + raise InputError("Column {} not found in file {}.".format(col, path)) + # When deleting entries from select & param lists, go from last + # to first so that the indexes won't get messed up as we go. + del_items.sort(reverse=True) + for (i, p_i) in del_items: + del kwargs["select"][i] + del kwargs["param"][p_i] if optional and file_has_no_data_rows: # Skip the file. Note that we are only doing this after having @@ -506,7 +759,7 @@ def load_aug(switch_data, optional=False, auto_select=False, return # All done with cleaning optional bits. Pass the updated arguments # into the DataPortal.load() function. - switch_data.load(**kwds) + switch_data.load(**kwargs) # Define an argument parser that accepts the allow_abbrev flag to @@ -532,7 +785,10 @@ def __init__(self, *args, **kwargs): # see https://bugs.python.org/issue14910#msg204678 def new_get_option_tuples(self, option_string): return [] - self._get_option_tuples = types.MethodType(new_get_option_tuples, self) + + self._get_option_tuples = types.MethodType( + new_get_option_tuples, self + ) else: raise RuntimeError( "Incompatible argparse module detected. This software requires " @@ -543,36 +799,43 @@ def new_get_option_tuples(self, option_string): kwargs.pop("allow_abbrev", None) return argparse.ArgumentParser.__init__(self, *args, **kwargs) + class ExtendAction(argparse.Action): """Create or extend list with the provided items""" + # from https://stackoverflow.com/a/41153081/3830997 def __call__(self, parser, namespace, values, option_string=None): items = getattr(namespace, self.dest) or [] items.extend(values) setattr(namespace, self.dest, items) + class IncludeAction(argparse.Action): """Flag the specified items for inclusion in the model""" + def __call__(self, parser, namespace, values, option_string=None): items = getattr(namespace, self.dest) or [] - items.append(('include', values)) + items.append(("include", values)) setattr(namespace, self.dest, items) + + class ExcludeAction(argparse.Action): """Flag the specified items for exclusion from the model""" + def __call__(self, parser, namespace, values, option_string=None): items = getattr(namespace, self.dest) or [] - items.append(('exclude', values)) + items.append(("exclude", values)) setattr(namespace, self.dest, items) + # Test whether we need to issue warnings about the Python parsing bug. 
# (applies to at least Python 2.7.11 and 3.6.2) # This bug messes up solve-scenarios if the user specifies # --scenario x --solver-options-string="a=b c=d" test_parser = argparse.ArgumentParser() -test_parser.add_argument('--arg1', nargs='+', default=[]) +test_parser.add_argument("--arg1", nargs="+", default=[]) bad_equal_parser = ( - len(test_parser.parse_known_args(['--arg1', 'a', '--arg2=a=1 b=2'])[1]) - == 0 + len(test_parser.parse_known_args(["--arg1", "a", "--arg2=a=1 b=2"])[1]) == 0 ) # TODO: merge the _ArgumentParserAllowAbbrev code into this class @@ -583,11 +846,12 @@ class _ArgumentParser(_ArgumentParserAllowAbbrev): - allows use of 'extend', 'include' and 'exclude' actions to accumulate lists with multiple calls """ + def __init__(self, *args, **kwargs): super(_ArgumentParser, self).__init__(*args, **kwargs) - self.register('action', 'extend', ExtendAction) - self.register('action', 'include', IncludeAction) - self.register('action', 'exclude', ExcludeAction) + self.register("action", "extend", ExtendAction) + self.register("action", "include", IncludeAction) + self.register("action", "exclude", ExcludeAction) def parse_known_args(self, args=None, namespace=None): # parse_known_args parses arguments like --list-arg a b --other-arg="something with space" @@ -596,29 +860,31 @@ def parse_known_args(self, args=None, namespace=None): # We issue a warning to avoid this. if bad_equal_parser and args is not None: for a in args: - if a.startswith('--') and '=' in a: + if a.startswith("--") and "=" in a: print( "Warning: argument '{}' may be parsed incorrectly. It is " - "safer to use ' ' instead of '=' as a separator." - .format(a) + "safer to use ' ' instead of '=' as a separator.".format(a) ) time.sleep(2) # give users a chance to see it return super(_ArgumentParser, self).parse_known_args(args, namespace) def approx_equal(a, b, tolerance=0.01): - return abs(a-b) <= (abs(a) + abs(b)) / 2.0 * tolerance + return abs(a - b) <= (abs(a) + abs(b)) / 2.0 * tolerance def default_solver(): - return pyomo.opt.SolverFactory('glpk') + return pyomo.opt.SolverFactory("glpk") + def warn(message): """ Send warning message to sys.stderr. Unlike warnings.warn, this does not add the current line of code to the message. + TODO: replace all calls to this with model.logger.warn() """ - sys.stderr.write("WARNING: " + message + '\n') + sys.stderr.write("WARNING: " + message + "\n") + class TeeStream(object): """ @@ -627,9 +893,11 @@ class TeeStream(object): `sys.stdout=TeeStream(sys.stdout, log_file_handle)` will copy output destined for sys.stdout to log_file_handle as well. """ + def __init__(self, stream1, stream2): self.stream1 = stream1 self.stream2 = stream2 + def __getattr__(self, *args, **kwargs): """ Provide stream1 attributes when attributes are requested for this class. @@ -637,48 +905,113 @@ def __getattr__(self, *args, **kwargs): methods, etc. """ return getattr(self.stream1, *args, **kwargs) - def write(self, *args, **kwargs): - self.stream1.write(*args, **kwargs) - self.stream2.write(*args, **kwargs) - def flush(self, *args, **kwargs): - self.stream1.flush(*args, **kwargs) - self.stream2.flush(*args, **kwargs) + + def write(self, text): + self.stream1.write(text) + self.stream2.write(text) + return len(text) + + def flush(self): + self.stream1.flush() + self.stream2.flush() + class LogOutput(object): """ - Copy output sent to stdout or stderr to a log file in the specified directory. - Takes no action if directory is None. Log file is named based on the current - date and time. 
Directory will be created if needed, and file will be overwritten
-    if it already exists (unlikely).
+    Copy output sent to stdout or stderr to a log file in the specified
+    directory. Takes no action if directory is None. Log file is named based on
+    the current date and time. Directory will be created if needed, and file
+    will have microseconds added to the name if needed to avoid overwriting
+    any existing file.
+
+    TODO:
+    - make this thread-aware (register and lookup an output stream for this
+      particular thread),
+    - accept model as argument instead of logs_dir and get the file name from
+      model.options or else create a name as shown here
+    - allow nesting (requesting same log file that is already open), so we can
+      wrap the body of solve.main but also wrap solve.solve, solve.presolve,
+      etc.
+    - make sure it appends to existing files rather than replacing
+    - wrap all our API code (solve.main, solve.iterate, solve.solve,
+      model.pre_solve, model.post_solve, model.__init__, etc.) with
+      LogOutput(model) so that all log messages for one model will go to the
+      same file, even if multiple models are processed at the same time in
+      different threads or sequentially in the same thread.
+    - Alternatively, treat logging as app-specific rather than model-specific,
+      so switch solve or switch solve-scenarios identifies the active log file
+      and log level when it first starts, and sticks with that until it exits.
+    - Once all modules use loggers instead of print, it may be possible to have
+      this work by creating file handlers for the root logger, each of which
+      has a filter that only accepts messages if the current thread matches the
+      thread that created that logger (i.e., only accepts messages from the
+      thread that created it). Then the handler is removed when the block
+      finishes.
+
+    Note: Python/pyomo holds logging info on a singleton basis (in the logging
+    module for each pyomo module name), so we either need to
+    (1) patch the logging module to have/lookup different loggers per
+    thread, probably based on thread ID, then wrap our API with a logger
+    configuration (or stdout capture), so every operation on a particular
+    model is wrapped with logger configuration for that model; or
+    (2) treat logging as an application setting (either switch solve or
+    switch solve-scenarios), not a model setting, so we just start
+    logging/capture at startup and continue till the app exits; or
+    (3) like (1) but with locks on our API to prevent multithreading, so we
+    don't have to patch the logging module, just reconfigure the root logger
+    at the start of each function (but this precludes any multithreaded
+    loading/running of Switch models). (preferred)
(preferred)
"""
+
def __init__(self, logs_dir):
self.logs_dir = logs_dir
+
def __enter__(self):
- """ start copying output to log file """
+ """start copying output to log file"""
if self.logs_dir is not None:
- if not os.path.exists(self.logs_dir):
- os.makedirs(self.logs_dir)
- log_file_path = os.path.join(
- self.logs_dir,
- datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S') + ".log"
- )
+ log_file_path = self.make_file_path()
self.log_file = open(log_file_path, "w", buffering=1)
self.stdout = sys.stdout
self.stderr = sys.stderr
sys.stdout = TeeStream(sys.stdout, self.log_file)
- sys.stderr = TeeStream(sys.stderr, self.log_file)
+ # sys.stderr = TeeStream(sys.stderr, self.log_file)
print("logging output to " + str(log_file_path))
+
def __exit__(self, type, value, traceback):
- """ restore original output streams and close log file """
+ """restore original output streams and close log file"""
if self.logs_dir is not None:
sys.stdout = self.stdout
sys.stderr = self.stderr
self.log_file.close()
+
+ def make_file_path(self):
+ """
+ Create a log file on disk and return the file name (guaranteed unique).
+ When this function returns, the file exists but is empty and closed.
+ """
+ path = lambda format: os.path.join(
+ self.logs_dir, datetime.datetime.now().strftime(format) + ".log"
+ )
+ # make sure logs directory exists
+ if not os.path.exists(self.logs_dir):
+ os.makedirs(self.logs_dir)
+ file_path = path("%Y-%m-%d_%H-%M-%S")
+ while True:
+ try:
+ f = os.open(file_path, os.O_CREAT | os.O_EXCL)
+ # succeeded
+ os.close(f)
+ break
+ except FileExistsError:
+ # try again with microseconds in the name
+ file_path = path("%Y-%m-%d_%H-%M-%S.%f")
+ return file_path
+
+
def iteritems(obj):
- """ Iterator of key, value pairs for obj;
- equivalent to obj.items() on Python 3+ and obj.iteritems() on Python 2 """
+ """Iterator of key, value pairs for obj;
+ equivalent to obj.items() on Python 3+ and obj.iteritems() on Python 2"""
try:
return obj.iteritems()
- except AttributeError: # Python 3+
+ except AttributeError:  # Python 3+
return obj.items()
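LogOutput is meant to be used as a context manager around a block of work: while the block runs, sys.stdout is a TeeStream, so anything printed goes both to the console and to the timestamped log file (stderr is currently left untouched, since its tee is commented out above). A minimal usage sketch (the "logs" directory name is hypothetical):

    from switch_model.utilities import LogOutput

    # the directory is created on entry if needed
    with LogOutput("logs"):
        print("solving model...")  # appears on screen and in logs/<timestamp>.log
    # on exit, the original sys.stdout is restored and the log file is closed
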
""" -__version__='2.0.6' +__version__ = "2.0.7" diff --git a/tests/examples_test.py b/tests/examples_test.py index ccdbe6700..f715c630b 100644 --- a/tests/examples_test.py +++ b/tests/examples_test.py @@ -18,6 +18,7 @@ UPDATE_EXPECTATIONS = False + def _remove_temp_dir(path): for retry in range(100): try: @@ -26,6 +27,7 @@ def _remove_temp_dir(path): except: pass + def read_file(filename): with open(filename, "r") as fh: return fh.read() @@ -37,18 +39,17 @@ def write_file(filename, data): def find_example_dirs(): - examples_dir = os.path.join(TOP_DIR, 'examples') + examples_dir = os.path.join(TOP_DIR, "examples") for dirpath, dirnames, filenames in os.walk(examples_dir): for dirname in dirnames: path = os.path.join(dirpath, dirname) - if os.path.exists(os.path.join(path, 'inputs', 'modules.txt')): + if os.path.exists(os.path.join(path, "inputs", "modules.txt")): yield path def get_expectation_path(example_dir): - expectation_file = os.path.join(example_dir, 'outputs', - 'total_cost.txt') - if not os.path.isfile( expectation_file ): + expectation_file = os.path.join(example_dir, "outputs", "total_cost.txt") + if not os.path.isfile(expectation_file): return False else: return expectation_file @@ -56,16 +57,21 @@ def get_expectation_path(example_dir): def make_test(example_dir): def test_example(): - temp_dir = tempfile.mkdtemp(prefix='switch_test_') + temp_dir = tempfile.mkdtemp(prefix="switch_test_") try: # Custom python modules may be in the example's working directory sys.path.append(example_dir) - args = switch_model.solve.get_option_file_args(dir=example_dir, + args = switch_model.solve.get_option_file_args( + dir=example_dir, extra_args=[ - '--inputs-dir', os.path.join(example_dir, 'inputs'), - '--outputs-dir', temp_dir]) + "--inputs-dir", + os.path.join(example_dir, "inputs"), + "--outputs-dir", + temp_dir, + ], + ) switch_model.solve.main(args) - total_cost = read_file(os.path.join(temp_dir, 'total_cost.txt')) + total_cost = read_file(os.path.join(temp_dir, "total_cost.txt")) finally: sys.path.remove(example_dir) _remove_temp_dir(temp_dir) @@ -75,19 +81,21 @@ def test_example(): else: expected = float(read_file(expectation_file)) actual = float(total_cost) - if not switch_model.utilities.approx_equal(expected, actual, - tolerance=0.0001): + if not switch_model.utilities.approx_equal( + expected, actual, tolerance=0.0001 + ): raise AssertionError( - 'Mismatch for total_cost (the objective function value):\n' - 'Expected value: {}\n' - 'Actual value: {}\n' + "Mismatch for total_cost (the objective function value):\n" + "Expected value: {}\n" + "Actual value: {}\n" 'Run "python -m tests.examples_test --update" to ' - 'update the expectations if this change is expected.' 
- .format(expected, actual)) + "update the expectations if this change is expected.".format( + expected, actual + ) + ) name = os.path.relpath(example_dir, TOP_DIR) - return unittest.FunctionTestCase( - test_example, description='Example: %s' % name) + return unittest.FunctionTestCase(test_example, description="Example: %s" % name) def load_tests(loader, tests, pattern): @@ -98,8 +106,8 @@ def load_tests(loader, tests, pattern): return suite -if __name__ == '__main__': - if sys.argv[1:2] == ['--update']: +if __name__ == "__main__": + if sys.argv[1:2] == ["--update"]: UPDATE_EXPECTATIONS = True sys.argv.pop(1) unittest.main() diff --git a/tests/upgrade_dat/3zone_toy/inputs/fuel_supply_curves.tab b/tests/upgrade_dat/3zone_toy/inputs/fuel_supply_curves.tab index 191b2db37..8bb5eccc0 100644 --- a/tests/upgrade_dat/3zone_toy/inputs/fuel_supply_curves.tab +++ b/tests/upgrade_dat/3zone_toy/inputs/fuel_supply_curves.tab @@ -1,10 +1,10 @@ regional_fuel_market period tier unit_cost max_avail_at_cost -All_DistOil 2020 0 21.9802 inf +All_DistOil 2020 0 21.9802 . All_DistOil 2030 0 24.5216 100000 All_NG 2020 0 4.4647 1950514555 -All_NG 2020 1 5.0709 inf +All_NG 2020 1 5.0709 . All_NG 2030 0 5.925 2368354558 -All_NG 2030 1 5.925 inf +All_NG 2030 1 5.925 . North_Bio 2020 0 1.7102 6864985 North_Bio 2020 1 3.3941 6782413 North_Bio 2030 0 2.0438 6064415 diff --git a/tests/upgrade_dat/3zone_toy_2_0_6/inputs/financials.csv b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/financials.csv new file mode 100644 index 000000000..a40129d05 --- /dev/null +++ b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/financials.csv @@ -0,0 +1,2 @@ +base_financial_year,discount_rate,interest_rate +2015,0.05,0.07 diff --git a/tests/upgrade_dat/3zone_toy_2_0_6/inputs/fuel_cost.csv b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/fuel_cost.csv new file mode 100644 index 000000000..5541ac8f3 --- /dev/null +++ b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/fuel_cost.csv @@ -0,0 +1,17 @@ +load_zone,fuel,period,fuel_cost +North,Uranium,2020,2.19 +Central,Uranium,2020,2.19 +South,Uranium,2020,2.19 +North,Uranium,2030,2.2 +Central,Uranium,2030,2.2 +South,Uranium,2030,2.2 +North,Coal,2020,1.9012 +Central,Coal,2020,1.9012 +North,Coal,2030,2.0467 +Central,Coal,2030,2.0467 +North,ResidualFuelOil,2020,18.5755 +Central,ResidualFuelOil,2020,18.5755 +South,ResidualFuelOil,2020,18.5755 +North,ResidualFuelOil,2030,20.3021 +Central,ResidualFuelOil,2030,20.3021 +South,ResidualFuelOil,2030,20.3021 diff --git a/tests/upgrade_dat/3zone_toy_2_0_6/inputs/fuel_supply_curves.csv b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/fuel_supply_curves.csv new file mode 100644 index 000000000..a799d5e8f --- /dev/null +++ b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/fuel_supply_curves.csv @@ -0,0 +1,15 @@ +regional_fuel_market,period,tier,unit_cost,max_avail_at_cost +All_DistOil,2020,0,21.9802,. +All_DistOil,2030,0,24.5216,100000.0 +All_NG,2020,0,4.4647,1950514555.0 +All_NG,2020,1,5.0709,. +All_NG,2030,0,5.925,2368354558.0 +All_NG,2030,1,5.925,. 
+North_Bio,2020,0,1.7102,6864985.0 +North_Bio,2020,1,3.3941,6782413.0 +North_Bio,2030,0,2.0438,6064415.0 +North_Bio,2030,1,3.2218,7680076.0 +South_Bio,2020,0,1.7115,26427258.0 +South_Bio,2020,1,17.1714,486066.0 +South_Bio,2030,0,1.7115,26427258.0 +South_Bio,2030,1,17.1714,26427258.0 diff --git a/tests/upgrade_dat/3zone_toy_2_0_6/inputs/fuels.csv b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/fuels.csv new file mode 100644 index 000000000..b1383ef0a --- /dev/null +++ b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/fuels.csv @@ -0,0 +1,7 @@ +fuel,co2_intensity,upstream_co2_intensity +Coal,0.09552,0.0 +ResidualFuelOil,0.0788,0.0 +DistillateFuelOil,0.07315,0.0 +NaturalGas,0.05306,0.0 +Uranium,0.0,. +BioSolid,0.09435,-0.09435 diff --git a/tests/upgrade_dat/3zone_toy_2_0_6/inputs/gen_build_costs.csv b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/gen_build_costs.csv new file mode 100644 index 000000000..1f9c146dd --- /dev/null +++ b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/gen_build_costs.csv @@ -0,0 +1,77 @@ +GENERATION_PROJECT,build_year,gen_overnight_cost,gen_fixed_om +N-Coal_ST,1995,2687700.0,21390.0 +N-Geothermal,2000,5524200.0,0.0 +N-NG_CC,2008,1143900.0,5868.3 +N-NG_GT,2009,605430.0,4891.8 +C-Coal_ST,1985,2687700.0,21390.0 +C-NG_CC,2005,1143900.0,5868.3 +C-NG_GT,2005,605430.0,4891.8 +S-Geothermal,1998,5524200.0,0.0 +S-NG_CC,2000,1143900.0,5868.3 +S-NG_GT,1990,605430.0,4891.8 +S-NG_GT,2002,605430.0,4891.8 +N-Geothermal,2020,5524200.0,0.0 +N-Geothermal,2030,5524200.0,0.0 +N-Coal_IGCC,2020,3729300.0,28923.0 +N-Coal_IGCC,2030,3729300.0,28923.0 +N-Coal_IGCC_CCS,2030,6138000.0,41292.0 +N-NG_CC,2020,1143900.0,5868.3 +N-NG_CC,2030,1143900.0,5868.3 +N-NG_CC_CCS,2030,3487500.0,17112.0 +N-NG_GT,2020,605430.0,4891.8 +N-NG_GT,2030,605430.0,4891.8 +N-Nuclear,2030,5673000.0,118110.0 +N-Biomass_IGCC,2020,3561900.0,88350.0 +N-Biomass_IGCC,2030,3561900.0,88350.0 +N-Biomass_IGCC_CCS,2030,5970600.0,100719.0 +N-Residential_PV,2020,3487500.0,41850.0 +N-Residential_PV,2030,3059700.0,38130.0 +N-Commercial_PV,2020,3106200.0,41850.0 +N-Commercial_PV,2030,2752800.0,38130.0 +N-Central_PV-1,2020,2334300.0,41850.0 +N-Central_PV-2,2020,2334300.0,41850.0 +N-Central_PV-1,2030,2148300.0,38130.0 +N-Central_PV-2,2030,2148300.0,38130.0 +N-Wind-1,2020,1841400.0,55800.0 +N-Wind-2,2020,1841400.0,55800.0 +N-Wind-1,2030,1841400.0,55800.0 +N-Wind-2,2030,1841400.0,55800.0 +C-Coal_IGCC,2020,2983440.0,23138.4 +C-Coal_IGCC,2030,2983440.0,23138.4 +C-NG_CC,2020,915120.0,4694.64 +C-NG_CC,2030,915120.0,4694.64 +C-NG_GT,2020,484344.0,3913.44 +C-NG_GT,2030,484344.0,3913.44 +C-Nuclear,2030,4538400.0,94488.0 +C-Biomass_IGCC,2020,2849520.0,70680.0 +C-Biomass_IGCC,2030,2849520.0,70680.0 +C-Residential_PV,2020,2790000.0,33480.0 +C-Residential_PV,2030,2447760.0,30504.0 +C-Commercial_PV,2020,2484960.0,33480.0 +C-Commercial_PV,2030,2202240.0,30504.0 +C-Central_PV-1,2020,1867440.0,33480.0 +C-Central_PV-2,2020,1867440.0,33480.0 +C-Central_PV-1,2030,1718640.0,30504.0 +C-Central_PV-2,2030,1718640.0,30504.0 +C-Wind-1,2020,1473120.0,44640.0 +C-Wind-2,2020,1473120.0,44640.0 +C-Wind-1,2030,1473120.0,44640.0 +C-Wind-2,2030,1473120.0,44640.0 +S-Geothermal,2020,6629040.0,0.0 +S-Geothermal,2030,6629040.0,0.0 +S-NG_CC,2020,1372680.0,7041.96 +S-NG_CC,2030,1372680.0,7041.96 +S-NG_CC_CCS,2030,4185000.0,20534.4 +S-NG_GT,2020,726516.0,5870.16 +S-NG_GT,2030,726516.0,5870.16 +S-Biomass_IGCC,2020,4274280.0,106020.0 +S-Biomass_IGCC,2030,4274280.0,106020.0 +S-Biomass_IGCC_CCS,2030,7164720.0,120862.8 +S-Residential_PV,2020,4185000.0,50220.0 +S-Residential_PV,2030,3671640.0,45756.0 
+S-Commercial_PV,2020,3727440.0,50220.0 +S-Commercial_PV,2030,3303360.0,45756.0 +S-Central_PV-1,2020,2801160.0,50220.0 +S-Central_PV-2,2020,2801160.0,50220.0 +S-Central_PV-1,2030,2577960.0,45756.0 +S-Central_PV-2,2030,2577960.0,45756.0 diff --git a/tests/upgrade_dat/3zone_toy_2_0_6/inputs/gen_build_predetermined.csv b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/gen_build_predetermined.csv new file mode 100644 index 000000000..1dbc91204 --- /dev/null +++ b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/gen_build_predetermined.csv @@ -0,0 +1,12 @@ +GENERATION_PROJECT,build_year,gen_predetermined_cap +N-Coal_ST,1995,2 +N-Geothermal,2000,1 +N-NG_CC,2008,2 +N-NG_GT,2009,2 +C-Coal_ST,1985,2 +C-NG_CC,2005,2 +C-NG_GT,2005,2 +S-Geothermal,1998,3 +S-NG_CC,2000,5 +S-NG_GT,1990,3 +S-NG_GT,2002,2 diff --git a/tests/upgrade_dat/3zone_toy_2_0_6/inputs/generation_projects_info.csv b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/generation_projects_info.csv new file mode 100644 index 000000000..f7c1415cb --- /dev/null +++ b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/generation_projects_info.csv @@ -0,0 +1,39 @@ +GENERATION_PROJECT,gen_tech,gen_load_zone,gen_connect_cost_per_mw,gen_capacity_limit_mw,gen_full_load_heat_rate,gen_variable_om,gen_max_age,gen_min_build_capacity,gen_scheduled_outage_rate,gen_forced_outage_rate,gen_is_variable,gen_is_baseload,gen_is_cogen,gen_energy_source,gen_unit_size,gen_ccs_capture_efficiency,gen_ccs_energy_load,gen_storage_efficiency,gen_store_to_release_ratio +N-Geothermal,Geothermal,North,163081.1,1.5,.,28.83,30,0,0.0075,0.0241,0,1,0,Geothermal,.,.,.,.,. +N-Coal_IGCC,Coal_IGCC,North,57566.6,.,7.95,6.0822,40,0,0.08,0.12,0,1,0,Coal,10.0,.,.,.,. +N-Coal_IGCC_CCS,Coal_IGCC_CCS,North,57566.6,.,10.38,9.858,40,0,0.08,0.12,0,1,0,Coal,.,0.85,0.234104046,.,. +N-Coal_ST,Coal_ST,North,57566.6,.,9.0,3.4,40,0,0.06,0.1,0,1,0,Coal,.,.,.,.,. +N-NG_CC,NG_CC,North,57566.6,.,6.705,3.4131,20,0,0.04,0.06,0,0,0,NaturalGas,.,.,.,.,. +N-NG_CC_CCS,NG_CC_CCS,North,57566.6,.,10.08,9.3,20,0,0.04,0.06,0,0,0,NaturalGas,.,0.85,0.334821429,.,. +N-NG_GT,NG_GT,North,57566.6,.,10.39,27.807,20,0,0.04,0.06,0,0,0,NaturalGas,.,.,.,.,. +N-Nuclear,Nuclear,North,57566.6,.,9.72,0.0,40,1000,0.04,0.06,0,1,0,Uranium,.,.,.,.,. +N-Biomass_IGCC,Biomass_IGCC,North,57566.6,.,12.5,13.95,40,0,0.09,0.076,0,1,0,BioSolid,.,.,.,.,. +N-Biomass_IGCC_CCS,Biomass_IGCC_CCS,North,57566.6,.,16.3208,20.1307,40,0,0.09,0.076,0,1,0,BioSolid,.,0.85,0.234115557,.,. +N-Residential_PV,Residential_PV,North,0.0,1.5,.,0.0,20,0,0.0,0.02,1,0,0,Solar,.,.,.,.,. +N-Commercial_PV,Commercial_PV,North,0.0,2.0,.,0.0,20,0,0.0,0.02,1,0,0,Solar,.,.,.,.,. +N-Central_PV-1,Central_PV,North,51272.0,3.0,.,0.0,20,0,0.0,0.02,1,0,0,Solar,.,.,.,.,. +N-Central_PV-2,Central_PV,North,101661.0,2.0,.,0.0,20,0,0.0,0.02,1,0,0,Solar,.,.,.,.,. +N-Wind-1,Wind,North,71602.0,4.0,.,0.0,30,0,0.05,0.006,1,0,0,Wind,.,.,.,.,. +N-Wind-2,Wind,North,80259.0,1.0,.,0.0,30,0,0.05,0.006,1,0,0,Wind,.,.,.,.,. +C-Coal_IGCC,Coal_IGCC,Central,57566.6,.,7.95,4.86576,40,0,0.08,0.12,0,1,0,Coal,10.0,.,.,.,. +C-Coal_ST,Coal_ST,Central,57566.6,.,9.5,3.6,40,0,0.06,0.1,0,1,0,Coal,.,.,.,.,. +C-NG_CC,NG_CC,Central,57566.6,.,6.705,2.73048,20,0,0.04,0.06,0,0,0,NaturalGas,.,.,.,.,. +C-NG_GT,NG_GT,Central,57566.6,.,10.39,22.2456,20,0,0.04,0.06,0,0,0,NaturalGas,.,.,.,.,. +C-Nuclear,Nuclear,Central,57566.6,.,9.72,0.0,40,1000,0.04,0.06,0,1,0,Uranium,.,.,.,.,. +C-Biomass_IGCC,Biomass_IGCC,Central,57566.6,.,12.5,11.16,40,0,0.09,0.076,0,1,0,BioSolid,.,.,.,.,. 
+C-Residential_PV,Residential_PV,Central,0.0,0.5,.,0.0,20,0,0.0,0.02,1,0,0,Solar,.,.,.,.,. +C-Commercial_PV,Commercial_PV,Central,0.0,0.7,.,0.0,20,0,0.0,0.02,1,0,0,Solar,.,.,.,.,. +C-Central_PV-1,Central_PV,Central,122526.8,2.0,.,0.0,20,0,0.0,0.02,1,0,0,Solar,.,.,.,.,. +C-Central_PV-2,Central_PV,Central,45197.2,3.0,.,0.0,20,0,0.0,0.02,1,0,0,Solar,.,.,.,.,. +C-Wind-1,Wind,Central,72541.5,4.0,.,0.0,30,0,0.05,0.006,1,0,0,Wind,.,.,.,.,. +C-Wind-2,Wind,Central,77892.2,3.0,.,0.0,30,0,0.05,0.006,1,0,0,Wind,.,.,.,.,. +S-Geothermal,Geothermal,South,134222.0,3.0,.,34.596,30,0,0.0075,0.0241,0,1,0,Geothermal,.,.,.,.,. +S-NG_CC,NG_CC,South,57566.6,.,6.705,4.09572,20,0,0.04,0.06,0,0,0,NaturalGas,.,.,.,.,. +S-NG_CC_CCS,NG_CC_CCS,South,57566.6,.,10.08,11.16,20,0,0.04,0.06,0,0,0,NaturalGas,.,0.85,0.334821429,.,. +S-NG_GT,NG_GT,South,57566.6,5.0,10.39,33.3684,20,0,0.04,0.06,0,0,0,NaturalGas,.,.,.,.,. +S-Biomass_IGCC,Biomass_IGCC,South,57566.6,.,12.5,16.74,40,0,0.09,0.076,0,1,0,BioSolid,.,.,.,.,. +S-Biomass_IGCC_CCS,Biomass_IGCC_CCS,South,57566.6,.,16.3208,24.15684,40,0,0.09,0.076,0,1,0,BioSolid,.,0.85,0.234115557,.,. +S-Residential_PV,Residential_PV,South,0.0,3.0,.,0.0,20,0,0.0,0.02,1,0,0,Solar,.,.,.,.,. +S-Commercial_PV,Commercial_PV,South,0.0,3.3,.,0.0,20,0,0.0,0.02,1,0,0,Solar,.,.,.,.,. +S-Central_PV-1,Central_PV,South,74881.9,0.8,.,0.0,20,0,0.0,0.02,1,0,0,Solar,.,.,.,.,. +S-Central_PV-2,Central_PV,South,65370.3,0.4,.,0.0,20,0,0.0,0.02,1,0,0,Solar,.,.,.,.,. diff --git a/tests/upgrade_dat/3zone_toy_2_0_6/inputs/load_zones.csv b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/load_zones.csv new file mode 100644 index 000000000..af45f83ac --- /dev/null +++ b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/load_zones.csv @@ -0,0 +1,4 @@ +LOAD_ZONE,dbid,existing_local_td,local_td_annual_cost_per_mw +North,1,5.5,66406.5 +Central,2,3.5,61663.4 +South,3,9.5,128040.0 diff --git a/tests/upgrade_dat/3zone_toy_2_0_6/inputs/loads.csv b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/loads.csv new file mode 100644 index 000000000..cb47e96e6 --- /dev/null +++ b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/loads.csv @@ -0,0 +1,22 @@ +LOAD_ZONE,TIMEPOINT,zone_demand_mw +North,1,5.0 +North,2,4.0 +North,3,4.5 +North,4,4.2 +North,5,4.0 +North,6,6.0 +North,7,6.0 +Central,1,3.7 +Central,2,3.0 +Central,3,3.6 +Central,4,3.3 +Central,5,3.0 +Central,6,4.0 +Central,7,4.6 +South,1,6.0 +South,2,7.0 +South,3,6.5 +South,4,7.2 +South,5,8.0 +South,6,10.0 +South,7,10.5 diff --git a/tests/upgrade_dat/3zone_toy_2_0_6/inputs/modules.txt b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/modules.txt new file mode 100644 index 000000000..00c3ed3e0 --- /dev/null +++ b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/modules.txt @@ -0,0 +1,15 @@ +# Core Modules +switch_model +switch_model.timescales +switch_model.financials +switch_model.balancing.load_zones +switch_model.energy_sources.properties +switch_model.generators.core.build +switch_model.generators.core.dispatch +switch_model.reporting +# Custom Modules +switch_model.transmission.local_td +switch_model.generators.core.no_commit +switch_model.energy_sources.fuel_costs.markets +switch_model.transmission.transport.build +switch_model.transmission.transport.dispatch diff --git a/tests/upgrade_dat/3zone_toy_2_0_6/inputs/non_fuel_energy_sources.csv b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/non_fuel_energy_sources.csv new file mode 100644 index 000000000..1118eff52 --- /dev/null +++ b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/non_fuel_energy_sources.csv @@ -0,0 +1,6 @@ +energy_source +Wind +Solar +Geothermal +Water +Electricity diff 
--git a/tests/upgrade_dat/3zone_toy_2_0_6/inputs/periods.csv b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/periods.csv new file mode 100644 index 000000000..ce603d49e --- /dev/null +++ b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/periods.csv @@ -0,0 +1,3 @@ +INVESTMENT_PERIOD,period_start,period_end +2020,2017,2026 +2030,2027,2036 diff --git a/tests/upgrade_dat/3zone_toy_2_0_6/inputs/regional_fuel_markets.csv b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/regional_fuel_markets.csv new file mode 100644 index 000000000..d9f2713c9 --- /dev/null +++ b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/regional_fuel_markets.csv @@ -0,0 +1,5 @@ +regional_fuel_market,fuel +All_DistOil,DistillateFuelOil +All_NG,NaturalGas +North_Bio,BioSolid +South_Bio,BioSolid diff --git a/tests/upgrade_dat/3zone_toy_2_0_6/inputs/switch_inputs_version.txt b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/switch_inputs_version.txt new file mode 100644 index 000000000..157e54f3e --- /dev/null +++ b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/switch_inputs_version.txt @@ -0,0 +1 @@ +2.0.6 diff --git a/tests/upgrade_dat/3zone_toy_2_0_6/inputs/timepoints.csv b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/timepoints.csv new file mode 100644 index 000000000..fe469d3b1 --- /dev/null +++ b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/timepoints.csv @@ -0,0 +1,8 @@ +timepoint_id,timestamp,timeseries +1,2025011500,2020_01winter +2,2025011512,2020_01winter +3,2025011600,2020_01winter +4,2025011612,2020_01winter +5,2025061500,2020_06summer +6,2025061512,2020_06summer +7,2035011512,2030_all diff --git a/tests/upgrade_dat/3zone_toy_2_0_6/inputs/timeseries.csv b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/timeseries.csv new file mode 100644 index 000000000..e86db389e --- /dev/null +++ b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/timeseries.csv @@ -0,0 +1,4 @@ +TIMESERIES,ts_period,ts_duration_of_tp,ts_num_tps,ts_scale_to_period +2020_01winter,2020,12,4,913.12 +2020_06summer,2020,12,2,1826.25 +2030_all,2030,24,1,3652.5 diff --git a/tests/upgrade_dat/3zone_toy_2_0_6/inputs/trans_params.csv b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/trans_params.csv new file mode 100644 index 000000000..6d8350120 --- /dev/null +++ b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/trans_params.csv @@ -0,0 +1,2 @@ +trans_capital_cost_per_mw_km,trans_lifetime_yrs,trans_fixed_om_fraction,distribution_loss_rate +1000.0,20,0.03,0.053 diff --git a/tests/upgrade_dat/3zone_toy_2_0_6/inputs/transmission_lines.csv b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/transmission_lines.csv new file mode 100644 index 000000000..b7fb0cc60 --- /dev/null +++ b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/transmission_lines.csv @@ -0,0 +1,3 @@ +TRANSMISSION_LINE,trans_lz1,trans_lz2,trans_length_km,trans_efficiency,existing_trans_cap +N-C,North,Central,100,0.96,3 +C-S,Central,South,200,0.94,6 diff --git a/tests/upgrade_dat/3zone_toy_2_0_6/inputs/variable_capacity_factors.csv b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/variable_capacity_factors.csv new file mode 100644 index 000000000..dd56a9211 --- /dev/null +++ b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/variable_capacity_factors.csv @@ -0,0 +1,113 @@ +GENERATION_PROJECT,timepoint,gen_max_capacity_factor +N-Residential_PV,1,0.0 +N-Residential_PV,2,0.55 +N-Residential_PV,3,0.0 +N-Residential_PV,4,0.6 +N-Residential_PV,5,0.0 +N-Residential_PV,6,0.72 +N-Residential_PV,7,0.33 +N-Commercial_PV,1,0.0 +N-Commercial_PV,2,0.65 +N-Commercial_PV,3,0.0 +N-Commercial_PV,4,0.66 +N-Commercial_PV,5,0.0 +N-Commercial_PV,6,0.73 +N-Commercial_PV,7,0.38 +N-Central_PV-1,1,0.0 +N-Central_PV-1,2,0.56 
+N-Central_PV-1,3,0.0
+N-Central_PV-1,4,0.62
+N-Central_PV-1,5,0.0
+N-Central_PV-1,6,0.73
+N-Central_PV-1,7,0.41
+N-Central_PV-2,1,0.0
+N-Central_PV-2,2,0.6
+N-Central_PV-2,3,0.0
+N-Central_PV-2,4,0.61
+N-Central_PV-2,5,0.0
+N-Central_PV-2,6,0.81
+N-Central_PV-2,7,0.36
+C-Residential_PV,1,0.0
+C-Residential_PV,2,0.59
+C-Residential_PV,3,0.0
+C-Residential_PV,4,0.64
+C-Residential_PV,5,0.0
+C-Residential_PV,6,0.74
+C-Residential_PV,7,0.34
+C-Commercial_PV,1,0.0
+C-Commercial_PV,2,0.61
+C-Commercial_PV,3,0.0
+C-Commercial_PV,4,0.66
+C-Commercial_PV,5,0.0
+C-Commercial_PV,6,0.74
+C-Commercial_PV,7,0.4
+C-Central_PV-1,1,0.0
+C-Central_PV-1,2,0.61
+C-Central_PV-1,3,0.0
+C-Central_PV-1,4,0.64
+C-Central_PV-1,5,0.0
+C-Central_PV-1,6,0.79
+C-Central_PV-1,7,0.39
+C-Central_PV-2,1,0.0
+C-Central_PV-2,2,0.64
+C-Central_PV-2,3,0.0
+C-Central_PV-2,4,0.68
+C-Central_PV-2,5,0.0
+C-Central_PV-2,6,0.72
+C-Central_PV-2,7,0.41
+S-Residential_PV,1,0.0
+S-Residential_PV,2,0.57
+S-Residential_PV,3,0.0
+S-Residential_PV,4,0.66
+S-Residential_PV,5,0.0
+S-Residential_PV,6,0.75
+S-Residential_PV,7,0.34
+S-Commercial_PV,1,0.0
+S-Commercial_PV,2,0.57
+S-Commercial_PV,3,0.0
+S-Commercial_PV,4,0.63
+S-Commercial_PV,5,0.0
+S-Commercial_PV,6,0.79
+S-Commercial_PV,7,0.34
+S-Central_PV-1,1,0.0
+S-Central_PV-1,2,0.61
+S-Central_PV-1,3,0.0
+S-Central_PV-1,4,0.7
+S-Central_PV-1,5,0.0
+S-Central_PV-1,6,0.75
+S-Central_PV-1,7,0.37
+S-Central_PV-2,1,0.0
+S-Central_PV-2,2,0.64
+S-Central_PV-2,3,0.0
+S-Central_PV-2,4,0.63
+S-Central_PV-2,5,0.0
+S-Central_PV-2,6,0.74
+S-Central_PV-2,7,0.4
+N-Wind-1,1,0.6
+N-Wind-1,2,0.3
+N-Wind-1,3,0.65
+N-Wind-1,4,0.42
+N-Wind-1,5,0.12
+N-Wind-1,6,0.05
+N-Wind-1,7,0.8
+N-Wind-2,1,0.68
+N-Wind-2,2,0.33
+N-Wind-2,3,0.71
+N-Wind-2,4,0.46
+N-Wind-2,5,0.17
+N-Wind-2,6,0.13
+N-Wind-2,7,0.87
+C-Wind-1,1,0.66
+C-Wind-1,2,0.4
+C-Wind-1,3,0.73
+C-Wind-1,4,0.5
+C-Wind-1,5,0.22
+C-Wind-1,6,0.05
+C-Wind-1,7,0.8
+C-Wind-2,1,0.62
+C-Wind-2,2,0.36
+C-Wind-2,3,0.66
+C-Wind-2,4,0.48
+C-Wind-2,5,0.14
+C-Wind-2,6,0.05
+C-Wind-2,7,0.86
diff --git a/tests/upgrade_dat/3zone_toy_2_0_6/inputs/zone_balancing_areas.csv b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/zone_balancing_areas.csv
new file mode 100644
index 000000000..0ce4eaf88
--- /dev/null
+++ b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/zone_balancing_areas.csv
@@ -0,0 +1,4 @@
+LOAD_ZONE,balancing_area
+North,NorthCentral
+Central,NorthCentral
+South,South
diff --git a/tests/upgrade_dat/3zone_toy_2_0_6/inputs/zone_coincident_peak_demand.csv b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/zone_coincident_peak_demand.csv
new file mode 100644
index 000000000..2a68786e8
--- /dev/null
+++ b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/zone_coincident_peak_demand.csv
@@ -0,0 +1,7 @@
+LOAD_ZONE,PERIOD,zone_expected_coincident_peak_demand
+North,2020,6
+Central,2020,4
+South,2020,10
+North,2030,8
+Central,2030,6
+South,2030,12
diff --git a/tests/upgrade_dat/3zone_toy_2_0_6/inputs/zone_fuel_cost_diff.csv b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/zone_fuel_cost_diff.csv
new file mode 100644
index 000000000..0b506d4e6
--- /dev/null
+++ b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/zone_fuel_cost_diff.csv
@@ -0,0 +1,11 @@
+load_zone,fuel,period,fuel_cost_adder
+North,Coal,2020,0.1
+North,Coal,2030,0.1
+Central,Coal,2020,-0.2
+Central,Coal,2030,-0.2
+North,NaturalGas,2020,-0.2434
+North,NaturalGas,2030,-0.4021
+Central,NaturalGas,2020,0.1
+Central,NaturalGas,2030,0.15
+South,NaturalGas,2020,0.3497
+South,NaturalGas,2030,0.4676
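Keyed input tables such as zone_fuel_cost_diff.csv must contain exactly one row per (load_zone, fuel, period) combination; Pyomo's data loader will generally reject duplicated keys when the file is read. A quick standalone sanity check along these lines (a hypothetical helper, not part of Switch) can catch such problems before a solve:

    import pandas as pd

    def check_unique_keys(csv_path, key_cols):
        # flag any rows whose key columns repeat; keyed inputs must be unique
        df = pd.read_csv(csv_path)
        dups = df[df.duplicated(subset=key_cols, keep=False)]
        if not dups.empty:
            raise ValueError("duplicate keys in {}:\n{}".format(csv_path, dups))

    check_unique_keys("inputs/zone_fuel_cost_diff.csv", ["load_zone", "fuel", "period"])

diff --git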
a/tests/upgrade_dat/3zone_toy_2_0_6/inputs/zone_to_regional_fuel_market.csv b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/zone_to_regional_fuel_market.csv new file mode 100644 index 000000000..8c1a89f2b --- /dev/null +++ b/tests/upgrade_dat/3zone_toy_2_0_6/inputs/zone_to_regional_fuel_market.csv @@ -0,0 +1,9 @@ +load_zone,regional_fuel_market +North,All_DistOil +Central,All_DistOil +South,All_DistOil +North,All_NG +Central,All_NG +South,All_NG +North,North_Bio +South,South_Bio diff --git a/tests/upgrade_dat/3zone_toy_2_0_6/outputs/total_cost.txt b/tests/upgrade_dat/3zone_toy_2_0_6/outputs/total_cost.txt new file mode 100644 index 000000000..7ae87fd4b --- /dev/null +++ b/tests/upgrade_dat/3zone_toy_2_0_6/outputs/total_cost.txt @@ -0,0 +1 @@ +134733088.42929107 diff --git a/tests/upgrade_dat/copperplate1/inputs/fuel_supply_curves.tab b/tests/upgrade_dat/copperplate1/inputs/fuel_supply_curves.tab index 9f3f2517c..38655eca5 100644 --- a/tests/upgrade_dat/copperplate1/inputs/fuel_supply_curves.tab +++ b/tests/upgrade_dat/copperplate1/inputs/fuel_supply_curves.tab @@ -1,5 +1,5 @@ regional_fuel_market period tier unit_cost max_avail_at_cost All_NG 2020 0 4.4647 1950514555 -All_NG 2020 1 5.0709 inf +All_NG 2020 1 5.0709 . South_Bio 2020 0 1.7115 26427258 South_Bio 2020 1 17.1714 486066 diff --git a/tests/upgrade_dat/custom_extension/sunk_costs.py b/tests/upgrade_dat/custom_extension/sunk_costs.py index 32986070f..c2b9b136e 100644 --- a/tests/upgrade_dat/custom_extension/sunk_costs.py +++ b/tests/upgrade_dat/custom_extension/sunk_costs.py @@ -28,6 +28,6 @@ def define_components(mod): mod.administration_fees = Param( - mod.PERIODS, - initialize=lambda m, p: 1000000) - mod.Cost_Components_Per_Period.append('administration_fees') + mod.PERIODS, within=NonNegativeReals, initialize=lambda m, p: 1000000 + ) + mod.Cost_Components_Per_Period.append("administration_fees") diff --git a/tests/upgrade_dat/storage_206/inputs/financials.csv b/tests/upgrade_dat/storage_206/inputs/financials.csv new file mode 100644 index 000000000..a40129d05 --- /dev/null +++ b/tests/upgrade_dat/storage_206/inputs/financials.csv @@ -0,0 +1,2 @@ +base_financial_year,discount_rate,interest_rate +2015,0.05,0.07 diff --git a/tests/upgrade_dat/storage_206/inputs/fuel_cost.csv b/tests/upgrade_dat/storage_206/inputs/fuel_cost.csv new file mode 100644 index 000000000..7783a8744 --- /dev/null +++ b/tests/upgrade_dat/storage_206/inputs/fuel_cost.csv @@ -0,0 +1,2 @@ +load_zone,fuel,period,fuel_cost +South,NaturalGas,2020,4 diff --git a/tests/upgrade_dat/storage_206/inputs/fuels.csv b/tests/upgrade_dat/storage_206/inputs/fuels.csv new file mode 100644 index 000000000..54dfca062 --- /dev/null +++ b/tests/upgrade_dat/storage_206/inputs/fuels.csv @@ -0,0 +1,2 @@ +fuel,co2_intensity,upstream_co2_intensity +NaturalGas,0.05306,0 diff --git a/tests/upgrade_dat/storage_206/inputs/gen_build_costs.csv b/tests/upgrade_dat/storage_206/inputs/gen_build_costs.csv new file mode 100644 index 000000000..43608acff --- /dev/null +++ b/tests/upgrade_dat/storage_206/inputs/gen_build_costs.csv @@ -0,0 +1,7 @@ +GENERATION_PROJECT,build_year,gen_overnight_cost,gen_fixed_om,gen_storage_energy_overnight_cost +S-Central_PV-1,2000,2334300.0,41850.0,. +S-Geothermal,1998,5524200.0,0.0,. +S-Geothermal,2020,5524200.0,0.0,. +S-Central_PV-1,2020,2334300.0,41850.0,. 
+Battery_Storage,2020,10000.0,100.0,1000.0 +Battery_Storage,2010,10000.0,100.0,1000.0 diff --git a/tests/upgrade_dat/storage_206/inputs/gen_build_predetermined.csv b/tests/upgrade_dat/storage_206/inputs/gen_build_predetermined.csv new file mode 100644 index 000000000..028e7dc6e --- /dev/null +++ b/tests/upgrade_dat/storage_206/inputs/gen_build_predetermined.csv @@ -0,0 +1,4 @@ +GENERATION_PROJECT,build_year,gen_predetermined_cap,build_gen_energy_predetermined +S-Central_PV-1,2000,1,. +S-Geothermal,1998,1,. +Battery_Storage,2010,1,4 \ No newline at end of file diff --git a/tests/upgrade_dat/storage_206/inputs/generation_projects_info.csv b/tests/upgrade_dat/storage_206/inputs/generation_projects_info.csv new file mode 100644 index 000000000..57d9e8d86 --- /dev/null +++ b/tests/upgrade_dat/storage_206/inputs/generation_projects_info.csv @@ -0,0 +1,4 @@ +GENERATION_PROJECT,gen_dbid,gen_tech,gen_load_zone,gen_connect_cost_per_mw,gen_capacity_limit_mw,gen_variable_om,gen_max_age,gen_min_build_capacity,gen_scheduled_outage_rate,gen_forced_outage_rate,gen_is_variable,gen_is_baseload,gen_is_cogen,gen_energy_source,gen_full_load_heat_rate,gen_storage_efficiency +S-Geothermal,33.0,Geothermal,South,134222.0,10.0,28.83,30,0,0.0075,0.0241,0,1,0,Geothermal,.,. +S-Central_PV-1,41.0,Central_PV,South,74881.9,2.0,0.0,20,0,0.0,0.02,1,0,0,Solar,.,. +Battery_Storage,.,Battery_Storage,South,1.0,.,0.01,10,0,0.04,0.02,0,0,0,Electricity,.,0.75 diff --git a/tests/upgrade_dat/storage_206/inputs/load_zones.csv b/tests/upgrade_dat/storage_206/inputs/load_zones.csv new file mode 100644 index 000000000..9810665f2 --- /dev/null +++ b/tests/upgrade_dat/storage_206/inputs/load_zones.csv @@ -0,0 +1,2 @@ +LOAD_ZONE,cost_multipliers,ccs_distance_km,dbid +South,1,0,3 diff --git a/tests/upgrade_dat/storage_206/inputs/loads.csv b/tests/upgrade_dat/storage_206/inputs/loads.csv new file mode 100644 index 000000000..84c59e19a --- /dev/null +++ b/tests/upgrade_dat/storage_206/inputs/loads.csv @@ -0,0 +1,3 @@ +LOAD_ZONE,TIMEPOINT,zone_demand_mw +South,1,8.0 +South,2,0.5 diff --git a/tests/upgrade_dat/storage_206/inputs/modules.txt b/tests/upgrade_dat/storage_206/inputs/modules.txt new file mode 100644 index 000000000..c878b1aae --- /dev/null +++ b/tests/upgrade_dat/storage_206/inputs/modules.txt @@ -0,0 +1,13 @@ +# Core Modules +switch_model +switch_model.timescales +switch_model.financials +switch_model.balancing.load_zones +switch_model.energy_sources.properties +switch_model.generators.core.build +switch_model.generators.core.dispatch +switch_model.reporting +# Custom Modules +switch_model.generators.core.no_commit +switch_model.energy_sources.fuel_costs.simple +switch_model.generators.extensions.storage diff --git a/tests/upgrade_dat/storage_206/inputs/non_fuel_energy_sources.csv b/tests/upgrade_dat/storage_206/inputs/non_fuel_energy_sources.csv new file mode 100644 index 000000000..70aa6b0f7 --- /dev/null +++ b/tests/upgrade_dat/storage_206/inputs/non_fuel_energy_sources.csv @@ -0,0 +1,4 @@ +energy_source +Solar +Geothermal +Electricity diff --git a/tests/upgrade_dat/storage_206/inputs/periods.csv b/tests/upgrade_dat/storage_206/inputs/periods.csv new file mode 100644 index 000000000..27c58e07f --- /dev/null +++ b/tests/upgrade_dat/storage_206/inputs/periods.csv @@ -0,0 +1,2 @@ +INVESTMENT_PERIOD,period_start,period_end +2020,2017,2026 diff --git a/tests/upgrade_dat/storage_206/inputs/switch_inputs_version.txt b/tests/upgrade_dat/storage_206/inputs/switch_inputs_version.txt new file mode 100644 index 000000000..157e54f3e --- 
/dev/null +++ b/tests/upgrade_dat/storage_206/inputs/switch_inputs_version.txt @@ -0,0 +1 @@ +2.0.6 diff --git a/tests/upgrade_dat/storage_206/inputs/timepoints.csv b/tests/upgrade_dat/storage_206/inputs/timepoints.csv new file mode 100644 index 000000000..54d33b02a --- /dev/null +++ b/tests/upgrade_dat/storage_206/inputs/timepoints.csv @@ -0,0 +1,3 @@ +timepoint_id,timestamp,timeseries +1,2025011512,2020_all +2,2025011600,2020_all diff --git a/tests/upgrade_dat/storage_206/inputs/timeseries.csv b/tests/upgrade_dat/storage_206/inputs/timeseries.csv new file mode 100644 index 000000000..14e1fdeb0 --- /dev/null +++ b/tests/upgrade_dat/storage_206/inputs/timeseries.csv @@ -0,0 +1,2 @@ +TIMESERIES,ts_period,ts_duration_of_tp,ts_num_tps,ts_scale_to_period +2020_all,2020,12,2,3652.5 diff --git a/tests/upgrade_dat/storage_206/inputs/variable_capacity_factors.csv b/tests/upgrade_dat/storage_206/inputs/variable_capacity_factors.csv new file mode 100644 index 000000000..d8898d1c1 --- /dev/null +++ b/tests/upgrade_dat/storage_206/inputs/variable_capacity_factors.csv @@ -0,0 +1,3 @@ +GENERATION_PROJECT,timepoint,gen_max_capacity_factor +S-Central_PV-1,1,0.61 +S-Central_PV-1,2,0.0 diff --git a/tests/upgrade_dat/storage_206/outputs/total_cost.txt b/tests/upgrade_dat/storage_206/outputs/total_cost.txt new file mode 100644 index 000000000..5cf7013c1 --- /dev/null +++ b/tests/upgrade_dat/storage_206/outputs/total_cost.txt @@ -0,0 +1 @@ +24484908.913032416 diff --git a/tests/upgrade_test.py b/tests/upgrade_test.py index 7ca609673..867ea38f4 100644 --- a/tests/upgrade_test.py +++ b/tests/upgrade_test.py @@ -29,6 +29,7 @@ UPDATE_EXPECTATIONS = False + def _remove_temp_dir(path): for retry in range(100): try: @@ -37,32 +38,41 @@ def _remove_temp_dir(path): except: pass + def find_example_dirs(path): for dirpath, dirnames, filenames in os.walk(path): for dirname in dirnames: path = os.path.join(dirpath, dirname) - if os.path.exists(os.path.join(path, 'inputs', 'modules.txt')): + if os.path.exists(os.path.join(path, "inputs", "modules.txt")): yield path + def make_test(example_dir): def test_upgrade(): - temp_dir = tempfile.mkdtemp(prefix='switch_test_') + temp_dir = tempfile.mkdtemp(prefix="switch_test_") example_name = os.path.basename(os.path.normpath(example_dir)) upgrade_dir = os.path.join(temp_dir, example_name) - shutil.copytree(example_dir, upgrade_dir, ignore=shutil.ignore_patterns('outputs')) - upgrade_dir_inputs = os.path.join(upgrade_dir, 'inputs') - upgrade_dir_outputs = os.path.join(upgrade_dir, 'outputs') + shutil.copytree( + example_dir, upgrade_dir, ignore=shutil.ignore_patterns("outputs") + ) + upgrade_dir_inputs = os.path.join(upgrade_dir, "inputs") + upgrade_dir_outputs = os.path.join(upgrade_dir, "outputs") switch_model.upgrade.manager.set_verbose(False) try: # Custom python modules may be in the example's working directory upgrade_inputs(upgrade_dir_inputs) sys.path.append(upgrade_dir) - switch_model.solve.main([ - '--inputs-dir', upgrade_dir_inputs, - '--outputs-dir', upgrade_dir_outputs]) - total_cost = read_file(os.path.join(upgrade_dir_outputs, 'total_cost.txt')) + switch_model.solve.main( + [ + "--inputs-dir", + upgrade_dir_inputs, + "--outputs-dir", + upgrade_dir_outputs, + ] + ) + total_cost = read_file(os.path.join(upgrade_dir_outputs, "total_cost.txt")) finally: - if upgrade_dir in sys.path: # code above may have failed before appending + if upgrade_dir in sys.path: # code above may have failed before appending sys.path.remove(upgrade_dir) _remove_temp_dir(temp_dir) 
expectation_file = get_expectation_path(example_dir)
@@ -71,30 +81,35 @@ def test_upgrade():
else:
expected = float(read_file(expectation_file))
actual = float(total_cost)
- if not switch_model.utilities.approx_equal(expected, actual,
- tolerance=0.0001):
+ if not switch_model.utilities.approx_equal(
+ expected, actual, tolerance=0.0001
+ ):
raise AssertionError(
- 'Mismatch for total_cost (the objective function value):\n'
- 'Expected value: {}\n'
- 'Actual value: {}\n'
+ "Mismatch for total_cost (the objective function value):\n"
+ "Expected value: {}\n"
+ "Actual value: {}\n"
'Run "python -m tests.upgrade_test --update" to '
- 'update the expectations if this change is expected.'
- .format(expected, actual))
+ "update the expectations if this change is expected.".format(
+ expected, actual
+ )
+ )
name = os.path.basename(os.path.normpath(example_dir))
return unittest.FunctionTestCase(
- test_upgrade, description='Test Upgrade Example: %s' % name)
+ test_upgrade, description="Test Upgrade Example: %s" % name
+ )
+
def load_tests(loader, tests, pattern):
suite = unittest.TestSuite()
- for example_dir in find_example_dirs(os.path.join(TOP_DIR, 'tests', 'upgrade_dat')):
+ for example_dir in find_example_dirs(os.path.join(TOP_DIR, "tests", "upgrade_dat")):
if get_expectation_path(example_dir):
suite.addTest(make_test(example_dir))
return suite
-if __name__ == '__main__':
- if sys.argv[1:2] == ['--update']:
+if __name__ == "__main__":
+ if sys.argv[1:2] == ["--update"]:
UPDATE_EXPECTATIONS = True
sys.argv.pop(1)
unittest.main()
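test_upgrade exercises the same path a user would follow by hand: copy an old model, upgrade its inputs directory in place, then solve it with the current code. A minimal sketch of that flow, using the same calls as the test (the my_model paths are hypothetical):

    import switch_model.solve
    from switch_model.upgrade import upgrade_inputs

    # upgrade a copy of an older (e.g., 2.0.6) inputs directory in place
    upgrade_inputs("my_model/inputs")

    # then solve with the upgraded inputs, as the test does
    switch_model.solve.main(
        ["--inputs-dir", "my_model/inputs", "--outputs-dir", "my_model/outputs"]
    )
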
diff --git a/tests/utilities_test.py b/tests/utilities_test.py
index 297ee5ea5..83c884fa6 100644
--- a/tests/utilities_test.py
+++ b/tests/utilities_test.py
@@ -12,18 +12,59 @@
from pyomo.environ import DataPortal
from testfixtures import compare
-class UtilitiesTest(unittest.TestCase):
+
+class UtilitiesTest(unittest.TestCase):
def test_approx_equal(self):
assert not utilities.approx_equal(1, 2)
assert not utilities.approx_equal(1, 1.02)
assert utilities.approx_equal(1, 1.01)
assert utilities.approx_equal(1, 1)
+ def test_retrieve_cplex_mip_duals(self):
+ try:
+ m = switch_model.solve.main(
+ args=[
+ "--inputs-dir",
+ os.path.join(
+ os.path.dirname(__file__),
+ "..",
+ "examples",
+ "discrete_and_min_build",
+ "inputs",
+ ),
+ "--log-level",
+ "error",
+ "--suffix",
+ "dual",
+ "--retrieve-cplex-mip-duals",
+ "--solver",
+ "cplex",
+ ]
+ )
+ except Exception as e:  # cplex unavailable
+ if str(e) == "No executable found for solver 'cplex'":
+ pass
+ else:
+ raise
+ else:
+ # breakpoint()  # inspect model to get new values
+ model_vals = [
+ m.dual[m.Distributed_Energy_Balance["South", 1]],
+ m.dual[m.Enforce_Min_Build_Lower["S-NG_CC", 2020]],
+ ]
+ expected_vals = [980032.4664183848, -835405.9051712567]
+ compare(model_vals, expected_vals)
+
def test_save_inputs_as_dat(self):
(model, instance) = switch_model.solve.main(
- args=["--inputs-dir", os.path.join('examples', '3zone_toy', 'inputs')],
- return_model=True, return_instance=True
+ args=[
+ "--inputs-dir",
+ os.path.join(
+ os.path.dirname(__file__), "..", "examples", "3zone_toy", "inputs"
+ ),
+ ],
+ return_model=True,
+ return_instance=True,
)
temp_dir = tempfile.mkdtemp(prefix="switch_test_")
try:
@@ -36,40 +77,40 @@ def test_save_inputs_as_dat(self):
shutil.rmtree(temp_dir)
def test_check_mandatory_components(self):
- from pyomo.environ import ConcreteModel, Param, Set
+ from pyomo.environ import ConcreteModel, Param, Set, Any
from switch_model.utilities import check_mandatory_components
+
mod = ConcreteModel()
- mod.set_A = Set(initialize=[1,2])
- mod.paramA_full = Param(mod.set_A, initialize={1:'a',2:'b'})
+ mod.set_A = Set(dimen=1, initialize=[1, 2])
+ mod.paramA_full = Param(mod.set_A, initialize={1: "a", 2: "b"}, within=Any)
mod.paramA_empty = Param(mod.set_A)
- mod.set_B = Set()
+ mod.set_B = Set(dimen=1)
mod.paramB_empty = Param(mod.set_B)
mod.paramC = Param(initialize=1)
mod.paramD = Param()
- check_mandatory_components(mod, 'set_A', 'paramA_full')
- check_mandatory_components(mod, 'paramB_empty')
- check_mandatory_components(mod, 'paramC')
+ check_mandatory_components(mod, "set_A", "paramA_full")
+ check_mandatory_components(mod, "paramB_empty")
+ check_mandatory_components(mod, "paramC")
with self.assertRaises(ValueError):
- check_mandatory_components(mod, 'set_A', 'paramA_empty')
+ check_mandatory_components(mod, "set_A", "paramA_empty")
with self.assertRaises(ValueError):
- check_mandatory_components(mod, 'set_A', 'set_B')
+ check_mandatory_components(mod, "set_A", "set_B")
with self.assertRaises(ValueError):
- check_mandatory_components(mod, 'paramC', 'paramD')
-
+ check_mandatory_components(mod, "paramC", "paramD")
def test_min_data_check(self):
- from switch_model.utilities import _add_min_data_check
- from pyomo.environ import AbstractModel, Param, Set
- mod = AbstractModel()
- _add_min_data_check(mod)
- mod.set_A = Set(initialize=[1,2])
- mod.paramA_full = Param(mod.set_A, initialize={1:'a',2:'b'})
+ from switch_model.utilities import SwitchAbstractModel
+ from pyomo.environ import Param, Set, Any
+
+ mod = SwitchAbstractModel(module_list=[], args=[])
+ mod.set_A = Set(initialize=[1, 2], dimen=1)
+ mod.paramA_full = Param(mod.set_A, initialize={1: "a", 2: "b"}, within=Any)
mod.paramA_empty = Param(mod.set_A)
- mod.min_data_check('set_A', 'paramA_full')
+ mod.min_data_check("set_A", "paramA_full")
self.assertIsNotNone(mod.create_instance())
- mod.min_data_check('set_A', 'paramA_empty')
+ mod.min_data_check("set_A", "paramA_empty")
# Fiddle with the pyomo logger to suppress its error message
- logger = logging.getLogger('pyomo.core')
+ logger = logging.getLogger("pyomo.core")
orig_log_level = logger.level
logger.setLevel(logging.FATAL)
with self.assertRaises(ValueError):
@@ -77,5 +118,5 @@ def test_min_data_check(self):
logger.setLevel(orig_log_level)
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
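min_data_check is the hook Switch modules use to declare which of their inputs are mandatory: create_instance() then raises ValueError if any of the named components is missing data, which is exactly what these tests verify. A minimal sketch of the pattern in a module's define_components (component names here are illustrative):

    from pyomo.environ import Param, Set, Any

    def define_components(mod):
        mod.MY_SET = Set(dimen=1)
        mod.my_required_param = Param(mod.MY_SET, within=Any)
        # loading inputs without these now fails with a clear ValueError
        mod.min_data_check("MY_SET", "my_required_param")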