diff --git a/.gitignore b/.gitignore index 0c43fd99c..177181a78 100644 --- a/.gitignore +++ b/.gitignore @@ -5,3 +5,4 @@ gurobi.log switch_model.egg-info/ venv +build/ diff --git a/examples/3zone_toy/inputs/switch_inputs_version.txt b/examples/3zone_toy/inputs/switch_inputs_version.txt index 0f3f84ca6..94789474f 100644 --- a/examples/3zone_toy/inputs/switch_inputs_version.txt +++ b/examples/3zone_toy/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b2 +2.0.0b4 diff --git a/examples/3zone_toy_stochastic_PySP/inputs/switch_inputs_version.txt b/examples/3zone_toy_stochastic_PySP/inputs/switch_inputs_version.txt index 0f3f84ca6..94789474f 100644 --- a/examples/3zone_toy_stochastic_PySP/inputs/switch_inputs_version.txt +++ b/examples/3zone_toy_stochastic_PySP/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b2 +2.0.0b4 diff --git a/examples/carbon_cap/inputs/switch_inputs_version.txt b/examples/carbon_cap/inputs/switch_inputs_version.txt index 0f3f84ca6..94789474f 100644 --- a/examples/carbon_cap/inputs/switch_inputs_version.txt +++ b/examples/carbon_cap/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b2 +2.0.0b4 diff --git a/examples/ccs/inputs/switch_inputs_version.txt b/examples/ccs/inputs/switch_inputs_version.txt index 0f3f84ca6..94789474f 100644 --- a/examples/ccs/inputs/switch_inputs_version.txt +++ b/examples/ccs/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b2 +2.0.0b4 diff --git a/examples/copperplate0/inputs/switch_inputs_version.txt b/examples/copperplate0/inputs/switch_inputs_version.txt index 0f3f84ca6..94789474f 100644 --- a/examples/copperplate0/inputs/switch_inputs_version.txt +++ b/examples/copperplate0/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b2 +2.0.0b4 diff --git a/examples/copperplate1/inputs/switch_inputs_version.txt b/examples/copperplate1/inputs/switch_inputs_version.txt index 0f3f84ca6..94789474f 100644 --- a/examples/copperplate1/inputs/switch_inputs_version.txt +++ b/examples/copperplate1/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b2 +2.0.0b4 diff --git a/examples/custom_extension/inputs/switch_inputs_version.txt b/examples/custom_extension/inputs/switch_inputs_version.txt index 0f3f84ca6..94789474f 100644 --- a/examples/custom_extension/inputs/switch_inputs_version.txt +++ b/examples/custom_extension/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b2 +2.0.0b4 diff --git a/examples/discrete_and_min_build/inputs/switch_inputs_version.txt b/examples/discrete_and_min_build/inputs/switch_inputs_version.txt index 0f3f84ca6..94789474f 100644 --- a/examples/discrete_and_min_build/inputs/switch_inputs_version.txt +++ b/examples/discrete_and_min_build/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b2 +2.0.0b4 diff --git a/examples/discrete_build/inputs/switch_inputs_version.txt b/examples/discrete_build/inputs/switch_inputs_version.txt index 0f3f84ca6..94789474f 100644 --- a/examples/discrete_build/inputs/switch_inputs_version.txt +++ b/examples/discrete_build/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b2 +2.0.0b4 diff --git a/examples/dr_simple/inputs/switch_inputs_version.txt b/examples/dr_simple/inputs/switch_inputs_version.txt index 0f3f84ca6..94789474f 100644 --- a/examples/dr_simple/inputs/switch_inputs_version.txt +++ b/examples/dr_simple/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b2 +2.0.0b4 diff --git a/examples/hydro_simple/inputs/switch_inputs_version.txt b/examples/hydro_simple/inputs/switch_inputs_version.txt index 0f3f84ca6..94789474f 100644 --- a/examples/hydro_simple/inputs/switch_inputs_version.txt +++ 
b/examples/hydro_simple/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b2 +2.0.0b4 diff --git a/examples/hydro_system/inputs/switch_inputs_version.txt b/examples/hydro_system/inputs/switch_inputs_version.txt index 0f3f84ca6..94789474f 100644 --- a/examples/hydro_system/inputs/switch_inputs_version.txt +++ b/examples/hydro_system/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b2 +2.0.0b4 diff --git a/examples/new_builds_only/inputs/switch_inputs_version.txt b/examples/new_builds_only/inputs/switch_inputs_version.txt index 0f3f84ca6..94789474f 100644 --- a/examples/new_builds_only/inputs/switch_inputs_version.txt +++ b/examples/new_builds_only/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b2 +2.0.0b4 diff --git a/examples/planning_reserves/inputs/switch_inputs_version.txt b/examples/planning_reserves/inputs/switch_inputs_version.txt index 0f3f84ca6..94789474f 100644 --- a/examples/planning_reserves/inputs/switch_inputs_version.txt +++ b/examples/planning_reserves/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b2 +2.0.0b4 diff --git a/examples/production_cost_models/1plant/inputs/switch_inputs_version.txt b/examples/production_cost_models/1plant/inputs/switch_inputs_version.txt index 0f3f84ca6..94789474f 100644 --- a/examples/production_cost_models/1plant/inputs/switch_inputs_version.txt +++ b/examples/production_cost_models/1plant/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b2 +2.0.0b4 diff --git a/examples/production_cost_models/3plants/inputs/switch_inputs_version.txt b/examples/production_cost_models/3plants/inputs/switch_inputs_version.txt index 0f3f84ca6..94789474f 100644 --- a/examples/production_cost_models/3plants/inputs/switch_inputs_version.txt +++ b/examples/production_cost_models/3plants/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b2 +2.0.0b4 diff --git a/examples/production_cost_models/4plants/inputs/switch_inputs_version.txt b/examples/production_cost_models/4plants/inputs/switch_inputs_version.txt index 0f3f84ca6..94789474f 100644 --- a/examples/production_cost_models/4plants/inputs/switch_inputs_version.txt +++ b/examples/production_cost_models/4plants/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b2 +2.0.0b4 diff --git a/examples/production_cost_models/4plants_with_unserved_load/inputs/switch_inputs_version.txt b/examples/production_cost_models/4plants_with_unserved_load/inputs/switch_inputs_version.txt index 0f3f84ca6..94789474f 100644 --- a/examples/production_cost_models/4plants_with_unserved_load/inputs/switch_inputs_version.txt +++ b/examples/production_cost_models/4plants_with_unserved_load/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b2 +2.0.0b4 diff --git a/examples/production_cost_models/discrete_unit_commit/inputs/switch_inputs_version.txt b/examples/production_cost_models/discrete_unit_commit/inputs/switch_inputs_version.txt index 0f3f84ca6..94789474f 100644 --- a/examples/production_cost_models/discrete_unit_commit/inputs/switch_inputs_version.txt +++ b/examples/production_cost_models/discrete_unit_commit/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b2 +2.0.0b4 diff --git a/examples/production_cost_models/spinning_reserves/inputs/gen_inc_heat_rates.tab b/examples/production_cost_models/spinning_reserves/inputs/gen_inc_heat_rates.tab index c005d284c..a44733ea9 100644 --- a/examples/production_cost_models/spinning_reserves/inputs/gen_inc_heat_rates.tab +++ b/examples/production_cost_models/spinning_reserves/inputs/gen_inc_heat_rates.tab @@ -1,4 +1,4 @@ -project power_start_mw power_end_mw 
incremental_heat_rate_mbtu_per_mwhr fuel_use_rate_mmbtu_per_h +GENERATION_PROJECT power_start_mw power_end_mw incremental_heat_rate_mbtu_per_mwhr fuel_use_rate_mmbtu_per_h S-NG_CC 40 . . 269.4069 S-NG_CC 40 100.0 6.684885 . S-NG_GT 0 . . 0.1039 diff --git a/examples/production_cost_models/spinning_reserves/inputs/switch_inputs_version.txt b/examples/production_cost_models/spinning_reserves/inputs/switch_inputs_version.txt index 0f3f84ca6..94789474f 100644 --- a/examples/production_cost_models/spinning_reserves/inputs/switch_inputs_version.txt +++ b/examples/production_cost_models/spinning_reserves/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b2 +2.0.0b4 diff --git a/examples/production_cost_models/spinning_reserves_advanced/README.md b/examples/production_cost_models/spinning_reserves_advanced/README.md new file mode 100644 index 000000000..872d49934 --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/README.md @@ -0,0 +1,4 @@ +SYNOPSIS + switch solve --verbose --log-run + +This example extends unit_commit by adding spinning reserve requirements. diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/financials.dat b/examples/production_cost_models/spinning_reserves_advanced/inputs/financials.dat new file mode 100644 index 000000000..5260b0024 --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/financials.dat @@ -0,0 +1,3 @@ +param base_financial_year := 2015; +param interest_rate := .07; +param discount_rate := .05; diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/fuel_cost.tab b/examples/production_cost_models/spinning_reserves_advanced/inputs/fuel_cost.tab new file mode 100644 index 000000000..7ecb71f16 --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/fuel_cost.tab @@ -0,0 +1,2 @@ +load_zone fuel period fuel_cost +South NaturalGas 2010 4 \ No newline at end of file diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/fuels.tab b/examples/production_cost_models/spinning_reserves_advanced/inputs/fuels.tab new file mode 100644 index 000000000..efbfb672e --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/fuels.tab @@ -0,0 +1,2 @@ +fuel co2_intensity upstream_co2_intensity +NaturalGas 0.05306 0 diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/gen_build_costs.tab b/examples/production_cost_models/spinning_reserves_advanced/inputs/gen_build_costs.tab new file mode 100644 index 000000000..72b4a3f46 --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/gen_build_costs.tab @@ -0,0 +1,6 @@ +GENERATION_PROJECT build_year gen_overnight_cost gen_fixed_om +S-Geothermal 1998 5524200 0 +S-NG_CC 2000 1143900 5868.3 +S-NG_GT 1990 605430 4891.8 +S-NG_GT 2002 605430 4891.8 +S-Central_PV-1 2001 2334300 41850 \ No newline at end of file diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/gen_build_predetermined.tab b/examples/production_cost_models/spinning_reserves_advanced/inputs/gen_build_predetermined.tab new file mode 100644 index 000000000..ceac9f2fc --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/gen_build_predetermined.tab @@ -0,0 +1,6 @@ +GENERATION_PROJECT build_year gen_predetermined_cap +S-Geothermal 1998 2.0 +S-NG_CC 2000 7.0 +S-NG_GT 1990 3.0 +S-NG_GT 2002 4.0 +S-Central_PV-1 2001 3 diff --git 
a/examples/production_cost_models/spinning_reserves_advanced/inputs/gen_inc_heat_rates.tab b/examples/production_cost_models/spinning_reserves_advanced/inputs/gen_inc_heat_rates.tab new file mode 100644 index 000000000..a44733ea9 --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/gen_inc_heat_rates.tab @@ -0,0 +1,5 @@ +GENERATION_PROJECT power_start_mw power_end_mw incremental_heat_rate_mbtu_per_mwhr fuel_use_rate_mmbtu_per_h +S-NG_CC 40 . . 269.4069 +S-NG_CC 40 100.0 6.684885 . +S-NG_GT 0 . . 0.1039 +S-NG_GT 0 1.0 10.2861 . diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/generation_projects_info.tab b/examples/production_cost_models/spinning_reserves_advanced/inputs/generation_projects_info.tab new file mode 100644 index 000000000..9ec3bb4dc --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/generation_projects_info.tab @@ -0,0 +1,5 @@ +GENERATION_PROJECT gen_dbid gen_tech gen_load_zone gen_connect_cost_per_mw gen_capacity_limit_mw gen_max_age gen_min_build_capacity gen_scheduled_outage_rate gen_forced_outage_rate gen_is_variable gen_is_baseload gen_is_cogen gen_variable_om gen_energy_source gen_full_load_heat_rate gen_unit_size gen_min_load_fraction gen_startup_fuel gen_startup_om gen_min_downtime gen_can_provide_spinning_reserves +S-Geothermal 33 Geothermal South 134222 3 30 0 0.0075 0.0241 0 1 0 28.83 Geothermal . . . . . . 0 +S-NG_CC 34 NG_CC South 57566.6 . 20 0 0.04 0.06 0 0 0 3.4131 NaturalGas 6.705 1 0.4 9.16 10.3 12 1 +S-NG_GT 36 NG_GT South 57566.6 . 20 0 0.04 0.06 0 0 0 27.807 NaturalGas 10.39 . 0 0.22 0.86 . 1 +S-Central_PV-1 41 Central_PV South 74881.9 4 20 0 0 0.02 1 0 0 0 Solar . . 0 . 0 . 0 \ No newline at end of file diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/generation_projects_reserve_capability.tab b/examples/production_cost_models/spinning_reserves_advanced/inputs/generation_projects_reserve_capability.tab new file mode 100644 index 000000000..3f2ff51ab --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/generation_projects_reserve_capability.tab @@ -0,0 +1,3 @@ +GENERATION_PROJECT SPINNING_RESERVE_TYPES +S-NG_CC spinning +S-NG_GT spinning \ No newline at end of file diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/load_zones.tab b/examples/production_cost_models/spinning_reserves_advanced/inputs/load_zones.tab new file mode 100644 index 000000000..2bda9cb2b --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/load_zones.tab @@ -0,0 +1,2 @@ +LOAD_ZONE existing_local_td local_td_annual_cost_per_mw +South 10 128040 diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/loads.tab b/examples/production_cost_models/spinning_reserves_advanced/inputs/loads.tab new file mode 100644 index 000000000..6043e8557 --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/loads.tab @@ -0,0 +1,5 @@ +LOAD_ZONE TIMEPOINT zone_demand_mw +South 1 3 +South 2 8 +South 3 10 +South 4 7 diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/modules.txt b/examples/production_cost_models/spinning_reserves_advanced/inputs/modules.txt new file mode 100644 index 000000000..a27c2500e --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/modules.txt @@ -0,0 +1,17 @@ +# Core Modules +switch_model +switch_model.timescales +switch_model.financials 
+switch_model.balancing.load_zones +switch_model.energy_sources.properties +switch_model.generators.core.build +switch_model.generators.core.dispatch +switch_model.reporting +# Custom Modules +switch_model.transmission.local_td +switch_model.generators.core.commit.operate +switch_model.generators.core.commit.fuel_use +switch_model.energy_sources.fuel_costs.simple +switch_model.balancing.operating_reserves.areas +switch_model.balancing.operating_reserves.spinning_reserves_advanced +#switch_model.reporting.dump diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/non_fuel_energy_sources.tab b/examples/production_cost_models/spinning_reserves_advanced/inputs/non_fuel_energy_sources.tab new file mode 100644 index 000000000..84ffbd347 --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/non_fuel_energy_sources.tab @@ -0,0 +1,3 @@ +energy_source +Geothermal +Solar \ No newline at end of file diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/periods.tab b/examples/production_cost_models/spinning_reserves_advanced/inputs/periods.tab new file mode 100644 index 000000000..ed32ef2af --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/periods.tab @@ -0,0 +1,2 @@ +INVESTMENT_PERIOD period_start period_end +2010 2008 2012 diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/spinning_reserve_params.dat b/examples/production_cost_models/spinning_reserves_advanced/inputs/spinning_reserve_params.dat new file mode 100644 index 000000000..01558ea51 --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/spinning_reserve_params.dat @@ -0,0 +1 @@ +param contingency_safety_factor := 1; diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/switch_inputs_version.txt b/examples/production_cost_models/spinning_reserves_advanced/inputs/switch_inputs_version.txt new file mode 100644 index 000000000..94789474f --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/switch_inputs_version.txt @@ -0,0 +1 @@ +2.0.0b4 diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/timepoints.tab b/examples/production_cost_models/spinning_reserves_advanced/inputs/timepoints.tab new file mode 100644 index 000000000..9863add2e --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/timepoints.tab @@ -0,0 +1,5 @@ +timepoint_id timestamp timeseries +1 2010011500 2010_all +2 2010011506 2010_all +3 2010011512 2010_all +4 2010011518 2010_all diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/timeseries.tab b/examples/production_cost_models/spinning_reserves_advanced/inputs/timeseries.tab new file mode 100644 index 000000000..84cc623a6 --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/timeseries.tab @@ -0,0 +1,2 @@ +TIMESERIES ts_period ts_duration_of_tp ts_num_tps ts_scale_to_period +2010_all 2010 6 1 1826.25 diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/variable_capacity_factors.tab b/examples/production_cost_models/spinning_reserves_advanced/inputs/variable_capacity_factors.tab new file mode 100644 index 000000000..dd2a630c2 --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/variable_capacity_factors.tab @@ -0,0 +1,5 @@ +GENERATION_PROJECT timepoint gen_max_capacity_factor +S-Central_PV-1 1 0.0 +S-Central_PV-1 2 0.61 
+S-Central_PV-1 3 1 +S-Central_PV-1 4 0.4 diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/zone_coincident_peak_demand.tab b/examples/production_cost_models/spinning_reserves_advanced/inputs/zone_coincident_peak_demand.tab new file mode 100644 index 000000000..cf4097b3d --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/zone_coincident_peak_demand.tab @@ -0,0 +1,2 @@ +LOAD_ZONE PERIOD zone_expected_coincident_peak_demand +South 2010 10 diff --git a/examples/production_cost_models/spinning_reserves_advanced/options.txt b/examples/production_cost_models/spinning_reserves_advanced/options.txt new file mode 100644 index 000000000..202ef2d6d --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/options.txt @@ -0,0 +1,2 @@ +--spinning-requirement-rule 3+5 +--unit-contingency diff --git a/examples/production_cost_models/spinning_reserves_advanced/outputs/total_cost.txt b/examples/production_cost_models/spinning_reserves_advanced/outputs/total_cost.txt new file mode 100644 index 000000000..2a19790fa --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/outputs/total_cost.txt @@ -0,0 +1 @@ +28606194.7452 diff --git a/examples/production_cost_models/unit_commit/inputs/gen_inc_heat_rates.tab b/examples/production_cost_models/unit_commit/inputs/gen_inc_heat_rates.tab index c005d284c..a44733ea9 100644 --- a/examples/production_cost_models/unit_commit/inputs/gen_inc_heat_rates.tab +++ b/examples/production_cost_models/unit_commit/inputs/gen_inc_heat_rates.tab @@ -1,4 +1,4 @@ -project power_start_mw power_end_mw incremental_heat_rate_mbtu_per_mwhr fuel_use_rate_mmbtu_per_h +GENERATION_PROJECT power_start_mw power_end_mw incremental_heat_rate_mbtu_per_mwhr fuel_use_rate_mmbtu_per_h S-NG_CC 40 . . 269.4069 S-NG_CC 40 100.0 6.684885 . S-NG_GT 0 . . 
0.1039 diff --git a/examples/production_cost_models/unit_commit/inputs/switch_inputs_version.txt b/examples/production_cost_models/unit_commit/inputs/switch_inputs_version.txt index 0f3f84ca6..94789474f 100644 --- a/examples/production_cost_models/unit_commit/inputs/switch_inputs_version.txt +++ b/examples/production_cost_models/unit_commit/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b2 +2.0.0b4 diff --git a/examples/rps_simple/inputs/switch_inputs_version.txt b/examples/rps_simple/inputs/switch_inputs_version.txt index 0f3f84ca6..94789474f 100644 --- a/examples/rps_simple/inputs/switch_inputs_version.txt +++ b/examples/rps_simple/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b2 +2.0.0b4 diff --git a/examples/storage/inputs/switch_inputs_version.txt b/examples/storage/inputs/switch_inputs_version.txt index 0f3f84ca6..94789474f 100644 --- a/examples/storage/inputs/switch_inputs_version.txt +++ b/examples/storage/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b2 +2.0.0b4 diff --git a/switch_model/hawaii/demand_response.py b/switch_model/balancing/demand_response/iterative/__init__.py similarity index 91% rename from switch_model/hawaii/demand_response.py rename to switch_model/balancing/demand_response/iterative/__init__.py index e68772e8c..1ffcf8fea 100644 --- a/switch_model/hawaii/demand_response.py +++ b/switch_model/balancing/demand_response/iterative/__init__.py @@ -2,8 +2,8 @@ cancel out the basic system load and replace it with a convex combination of bids note: the demand_module (or some subsidiary module) may store calibration data -at the module level (not in the model), so this module should only be used with one -model at a time. An alternative approach would be to receive a calibration_data +at the module level (not in the model), so this module should only be used with one +model at a time. An alternative approach would be to receive a calibration_data object back from demand_module.calibrate(), then add that to the model and pass it back to the bid function when needed. 
@@ -16,7 +16,7 @@ # (this is a fixed adder to the cost in $/kWh, not a multiplier times the marginal cost) # that module can be used as-is to find the effect of any particular adder # or it can iterate at a level above the demand_response module -# and use something like scipy.optimize.newton() to find the right tax to come out +# and use something like scipy.optimize.newton() to find the right tax to come out # revenue-neutral (i.e., recover any stranded costs, rebate any supply-side rents) import os, sys, time @@ -25,32 +25,12 @@ import pyomo.repn.canonical_repn import switch_model.utilities as utilities -from save_results import DispatchGenByFuel +# TODO: move part of the reporting back into Hawaii module and eliminate these dependencies +from switch_model.hawaii.save_results import DispatchGenByFuel +import switch_model.hawaii.util as util demand_module = None # will be set via command-line options -import util -from util import get - -# patch Pyomo's solver to retrieve duals and reduced costs for MIPs from cplex lp solver -# (This could be made permanent in pyomo.solvers.plugins.solvers.CPLEX.create_command_line) -def new_create_command_line(*args, **kwargs): - # call original command - command = old_create_command_line(*args, **kwargs) - # alter script - if hasattr(command, 'script') and 'optimize\n' in command.script: - command.script = command.script.replace( - 'optimize\n', - 'optimize\nchange problem fix\noptimize\n' - # see http://www-01.ibm.com/support/docview.wss?uid=swg21399941 - # and http://www-01.ibm.com/support/docview.wss?uid=swg21400009 - ) - return command -from pyomo.solvers.plugins.solvers.CPLEX import CPLEXSHELL -old_create_command_line = CPLEXSHELL.create_command_line -CPLEXSHELL.create_command_line = new_create_command_line - - def define_arguments(argparser): argparser.add_argument("--dr-flat-pricing", action='store_true', default=False, help="Charge a constant (average) price for electricity, rather than varying hour by hour") @@ -86,7 +66,7 @@ def define_components(m): .format(mod=m.options.dr_demand_module) ) demand_module = sys.modules[m.options.dr_demand_module] - + # load scipy.optimize for use later try: global scipy @@ -97,13 +77,13 @@ def define_components(m): print "Please install this via 'conda install scipy' or 'pip install scipy'." print "="*80 raise - + # Make sure the model has dual and rc suffixes if not hasattr(m, "dual"): m.dual = Suffix(direction=Suffix.IMPORT) if not hasattr(m, "rc"): m.rc = Suffix(direction=Suffix.IMPORT) - + ################### # Unserved load, with a penalty. # to ensure the model is always feasible, no matter what demand bids we get @@ -128,7 +108,7 @@ def define_components(m): ################### # Price Responsive Demand bids ################## - + # list of all bids that have been received from the demand system m.DR_BID_LIST = Set(initialize = [], ordered=True) # we need an explicit indexing set for everything that depends on DR_BID_LIST @@ -136,9 +116,9 @@ def define_components(m): # (not needed, and actually doesn't work -- reconstruct() fails for sets) # m.DR_BIDS_LZ_TP = Set(initialize = lambda m: m.DR_BID_LIST * m.LOAD_ZONES * m.TIMEPOINTS) # m.DR_BIDS_LZ_TS = Set(initialize = lambda m: m.DR_BID_LIST * m.LOAD_ZONES * m.TIMESERIES) - + # data for the individual bids; each load_zone gets one bid for each timeseries, - # and each bid covers all the timepoints in that timeseries. So we just record + # and each bid covers all the timepoints in that timeseries. 
So we just record # the bid for each timepoint for each load_zone. m.dr_bid = Param(m.DR_BID_LIST, m.LOAD_ZONES, m.TIMEPOINTS, m.DR_PRODUCTS, mutable=True) @@ -150,7 +130,7 @@ def define_components(m): # weights to assign to the bids for each timeseries when constructing an optimal demand profile m.DRBidWeight = Var(m.DR_BID_LIST, m.LOAD_ZONES, m.TIMESERIES, within=NonNegativeReals) - + # def DR_Convex_Bid_Weight_rule(m, z, ts): # if len(m.DR_BID_LIST) == 0: # print "no items in m.DR_BID_LIST, skipping DR_Convex_Bid_Weight constraint" @@ -158,13 +138,13 @@ def define_components(m): # else: # print "constructing DR_Convex_Bid_Weight constraint" # return (sum(m.DRBidWeight[b, z, ts] for b in m.DR_BID_LIST) == 1) - # + # # choose a convex combination of bids for each zone and timeseries m.DR_Convex_Bid_Weight = Constraint(m.LOAD_ZONES, m.TIMESERIES, rule=lambda m, z, ts: - Constraint.Skip if len(m.DR_BID_LIST) == 0 + Constraint.Skip if len(m.DR_BID_LIST) == 0 else (sum(m.DRBidWeight[b, z, ts] for b in m.DR_BID_LIST) == 1) ) - + # Since we don't have differentiated prices for each zone, we have to use the same # weights for all zones. (Otherwise the model will try to micromanage load in each # zone, but that won't be reflected in the prices we report.) @@ -185,10 +165,10 @@ def define_components(m): m.DRBidWeight[b, z, ts] == m.DRBidWeight[b, z, m.tp_ts[m.TPS_IN_PERIOD[m.ts_period[ts]].first()]] ) - - + + # Optimal level of demand, calculated from available bids (negative, indicating consumption) - m.FlexibleDemand = Expression(m.LOAD_ZONES, m.TIMEPOINTS, + m.FlexibleDemand = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, tp: sum( m.DRBidWeight[b, z, m.tp_ts[tp]] * m.dr_bid[b, z, tp, 'energy'] for b in m.DR_BID_LIST @@ -197,13 +177,13 @@ def define_components(m): # provide up and down reserves (from supply perspective, so "up" means less load) # note: the bids are negative quantities, indicating _production_ of reserves; # they contribute to the reserve requirement with opposite sign - m.DemandUpReserves = Expression(m.LOAD_ZONES, m.TIMEPOINTS, + m.DemandUpReserves = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, tp: -sum( m.DRBidWeight[b, z, m.tp_ts[tp]] * m.dr_bid[b, z, tp, 'energy up'] for b in m.DR_BID_LIST ) ) - m.DemandDownReserves = Expression(m.LOAD_ZONES, m.TIMEPOINTS, + m.DemandDownReserves = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, tp: -sum( m.DRBidWeight[b, z, m.tp_ts[tp]] * m.dr_bid[b, z, tp, 'energy down'] for b in m.DR_BID_LIST @@ -236,13 +216,13 @@ def define_components(m): idx = m.Zone_Power_Withdrawals.index('zone_demand_mw') m.Zone_Power_Withdrawals[idx] = 'FlexibleDemand' - # private benefit of the electricity consumption + # private benefit of the electricity consumption # (i.e., willingness to pay for the current electricity supply) # reported as negative cost, i.e., positive benefit # also divide by number of timepoints in the timeseries # to convert from a cost per timeseries to a cost per timepoint. m.DR_Welfare_Cost = Expression(m.TIMEPOINTS, rule=lambda m, tp: - (-1.0) + (-1.0) * sum(m.DRBidWeight[b, z, m.tp_ts[tp]] * m.dr_bid_benefit[b, z, m.tp_ts[tp]] for b in m.DR_BID_LIST for z in m.LOAD_ZONES) * m.tp_duration_hrs[tp] / m.ts_num_tps[m.tp_ts[tp]] @@ -254,23 +234,66 @@ def define_components(m): # variable to store the baseline data m.base_data = None + # # TODO: create a data file that lists which timepoints are grouped into each flat + # # pricing block; also enforce a requirement that no block can span periods. 
+ # # Then use that to choose flat prices for each block in each period when flat pricing + # # is turned on (or maybe only when TOU block pricing is turned on). + # # Price must be flat within each block, and total revenue across all blocks in each + # # period must equal total marginal cost for those loads. + # + # # Hours during each day that fall into each flat-pricing block (zero-based). + # # Note: this does not allow for blocks shorter than one hour, and if timepoints + # # are longer than one hour, they will be placed in the first matching hour. + # m.FLAT_PRICING_BLOCKS = Set() + # raise NotImplementedError("The line above just contained `Set(` until 6/27/18; something is missing here.") + # + # # Times during each day to switch from one flat-pricing block to another; should be a float + # # between 0 (midnight) and 24 (following midnight). Timepoints will be assigned to + # # the immediately preceding block. Default is 0 (single block all day). + # # This assumes that timepoints begin at midnight each day and are sequenced + # # from there. + # m.FLAT_PRICING_BREAK_TIMES = Set(default=[0]) + # m.FLAT_PRICING_GROUPS = Set(initialize=m.PERIODS * m.FLAT_PRICING_START_TIMES) + # def rule(m, p, st): + # try: + # d = m.TPS_FOR_FLAT_PRICING_GROUP_dict + # except AttributeError: + # d = m.TPS_FOR_FLAT_PRICING_GROUP_dict = dict() + # # construct a dictionary of which timepoints fall in each block + # # tuples show starting time and + # sorted(range(len(seq)), key=seq.__getitem__) + # start_times = sorted(m.FLAT_PRICING_START_TIMES) + # cur_start = xxx + # raise NotImplementedError("The line above just contained `cur_start =` until 6/27/18; something is missing here.") + # + # start_time_tuples = [(s, 0) for s in m.FLAT_PRICING_START_TIMES] + # for ts in m.TIMESERIES: + # timepoint_tuples = [(i * m.ts_duration_of_tp[ts], tp) for i, tp in enumerate(m.TS_TPS[ts])] + # + # return d.pop(p, st) + # + # m.TPS_FOR_FLAT_PRICING_GROUP = Set(m.FLAT_PRICING_GROUPS, initialize=rule) + # + # m.tp_flat_pricing_block = Param(m.TIMEPOINTS, within=m.FLAT_PRICING_START_TIMES, initialize=rule) + + def pre_iterate(m): # could all prev values be stored in post_iterate? # then this func would just alter the model based on values calculated in post_iterate # (e.g., get a bid based on current prices, add bid to model, rebuild components) - + # NOTE: - # bids must be added to the model here, and the model must be reconstructed here, + # bids must be added to the model here, and the model must be reconstructed here, # so the model can then be solved and remain in a "solved" state through the end # of post-iterate, to avoid problems in final reporting. - + # store various properties from previous model solution for later reference if m.iteration_number == 0: # model hasn't been solved yet m.prev_marginal_cost = { (z, tp, prod): None for z in m.LOAD_ZONES for tp in m.TIMEPOINTS for prod in m.DR_PRODUCTS - } + } m.prev_demand = { (z, tp, prod): None for z in m.LOAD_ZONES for tp in m.TIMEPOINTS for prod in m.DR_PRODUCTS } @@ -294,7 +317,7 @@ def pre_iterate(m): # solution based on the prior round of bids, rather than comparing the new # bid to the prior solution to the master problem. This is probably fine. # TODO: does this correctly account for producer surplus? 
It seems like that's - # being treated as a cost (embedded in MC * demand); maybe this should use + # being treated as a cost (embedded in MC * demand); maybe this should use # total direct cost instead, # or focus specifically on consumer surplus (use prices instead of MC as the # convergence measure). But maybe this is OK, since the question is, "if we @@ -302,7 +325,7 @@ # we had then? no change for altered volume?), would everyone be much # better off than they are with the allocation we have now chosen?" # Maybe using MC lets us focus on whether there can be another incrementally - # different solution that would be much better than the one we have now. + # different solution that would be much better than the one we have now. # This ignores other solutions far away, where an integer variable is flipped, # but that's OK. (?) prev_direct_cost = value(sum( @@ -339,7 +362,7 @@ print 'previous direct cost: ${:,.0f}'.format(prev_direct_cost) print 'previous welfare cost: ${:,.0f}'.format(prev_welfare_cost) print "" - + # get the next bid and attach it to the model update_demand(m) @@ -387,7 +410,7 @@ print 'best direct cost: ${:,.0f}'.format(best_direct_cost) print 'best bid benefit: ${:,.0f}'.format(best_bid_benefit) print "" - + print "lower bound=${:,.0f}, previous cost=${:,.0f}, optimality gap (vs direct cost)={}" \ .format(best_cost, prev_cost, (prev_cost-best_cost)/abs(prev_direct_cost)) if prev_cost < best_cost: @@ -400,46 +423,46 @@ # basis for optimality test: # 1. The total cost of supply, as a function of quantity produced each hour, forms - # a surface which is convex downward, since it is linear (assuming all variables are - # continuous or all integer variables are kept at their current level, i.e., the curve - # is locally convex). (Think of the convex hull of the extreme points of the production + # a surface which is convex downward, since it is linear (assuming all variables are + # continuous or all integer variables are kept at their current level, i.e., the curve + # is locally convex). (Think of the convex hull of the extreme points of the production # cost function.) - # 2. The total benefit of consumption, as a function of quantity consumed each hour, + # 2. The total benefit of consumption, as a function of quantity consumed each hour, # forms a surface which is concave downward (by the assumption/requirement of convexity # of the demand function). # 3. marginal costs (prev_marginal_cost) and production levels (prev_demand) from the - # most recent solution to the master problem define a production cost plane which is - # tangent to the production cost function at that production level. From 1, the production + # most recent solution to the master problem define a production cost plane which is + # tangent to the production cost function at that production level. From 1, the production # cost function must lie on or above this surface everywhere. This plane is given by # (something + prev_marginal_cost * (demand - dr_bid)) - # 4. The last bid quantities (dr_bid) must be at a point where marginal benefit of consumption - # equals marginal cost of consumption (prev_marginal_cost) in all directions; otherwise - # they would not be a private optimum. - # 5. The benefit reported in the last bid (dr_bid_benefit) shows the level of the total + # 4.
The last bid quantities (dr_bid) must be at a point where marginal benefit of consumption + # equals marginal cost of consumption (prev_marginal_cost) in all directions; otherwise + # they would not be a private optimum. + # 5. The benefit reported in the last bid (dr_bid_benefit) shows the level of the total # benefit curve at that point. # 6. From 2, 4 and 5, the prev_marginal_cost and the last reported benefit must form # a plane which is at or above the total benefit curve everywhere. This plane is given by # (-DR_Welfare_Cost - (prev_marginal_cost * (demand - prev_demand) + something)) # 7. Since the total cost curve must lie above the plane defined in 3. and the total - # benefit curve must lie below the plane defined in 6., the (constant) distance between + # benefit curve must lie below the plane defined in 6., the (constant) distance between # these planes is an upper bound on the net benefit that can be obtained. This is given by # (-DR_Welfare_Cost - prev_marginal_cost * (demand - prev_demand)) # - (prev_marginal_cost * (demand - dr_bid)) # = ... - + # (prev_marginal_cost * (demand - dr_bid)) # - (prev_marginal_cost * (demand - prev_demand) ) - # - - # = prev_marginal_cost * prev_demand + DR_Welfare_Cost + # - + # = prev_marginal_cost * prev_demand + DR_Welfare_Cost # - (prev_marginal_cost * dr_bid - dr_bid_benefit) - - # Check for convergence -- optimality gap is less than 0.1% of best possible cost + + # Check for convergence -- optimality gap is less than 0.1% of best possible cost # (which may be negative) # TODO: index this to the direct costs, rather than the direct costs minus benefits - # as it stands, it converges with about $50,000,000 optimality gap, which is about + # as it stands, it converges with about $50,000,000 optimality gap, which is about # 3% of direct costs. 
converged = (m.iteration_number > 0 and (prev_cost - best_cost)/abs(prev_direct_cost) <= 0.0001) - + return converged def post_iterate(m): @@ -449,7 +472,7 @@ def post_iterate(m): print "Total cost: ${v:,.0f}".format(v=value(m.SystemCost)) - # TODO: + # TODO: # maybe calculate prices for the next round here and attach them to the # model, so they can be reported as final prices (currently we don't # report the final prices, only the prices prior to the final model run) @@ -467,10 +490,10 @@ def post_iterate(m): # report information on most recent bid if m.iteration_number == 0: util.create_table( - output_file=os.path.join(outputs_dir, "bid_{t}.tsv".format(t=tag)), - headings= + output_file=os.path.join(outputs_dir, "bid_{t}.tsv".format(t=tag)), + headings= ( - "bid_num", "load_zone", "timeseries", "timepoint", + "bid_num", "load_zone", "timeseries", "timepoint", ) + tuple("marginal_cost " + prod for prod in m.DR_PRODUCTS) + tuple("price " + prod for prod in m.DR_PRODUCTS) + tuple("bid " + prod for prod in m.DR_PRODUCTS) @@ -481,14 +504,14 @@ def post_iterate(m): b = m.DR_BID_LIST.last() # current bid util.append_table( m, m.LOAD_ZONES, m.TIMEPOINTS, - output_file=os.path.join(outputs_dir, "bid_{t}.tsv".format(t=tag)), + output_file=os.path.join(outputs_dir, "bid_{t}.tsv".format(t=tag)), values=lambda m, z, tp: ( b, z, m.tp_ts[tp], m.tp_timestamp[tp], - ) + ) + tuple(m.prev_marginal_cost[z, tp, prod] for prod in m.DR_PRODUCTS) + tuple(m.dr_price[b, z, tp, prod] for prod in m.DR_PRODUCTS) + tuple(m.dr_bid[b, z, tp, prod] for prod in m.DR_PRODUCTS) @@ -502,25 +525,25 @@ def post_iterate(m): # store the current bid weights for future reference if m.iteration_number == 0: util.create_table( - output_file=os.path.join(outputs_dir, "bid_weights_{t}.tsv".format(t=tag)), + output_file=os.path.join(outputs_dir, "bid_weights_{t}.tsv".format(t=tag)), headings=("iteration", "load_zone", "timeseries", "bid_num", "weight") ) - util.append_table(m, m.LOAD_ZONES, m.TIMESERIES, m.DR_BID_LIST, - output_file=os.path.join(outputs_dir, "bid_weights_{t}.tsv".format(t=tag)), + util.append_table(m, m.LOAD_ZONES, m.TIMESERIES, m.DR_BID_LIST, + output_file=os.path.join(outputs_dir, "bid_weights_{t}.tsv".format(t=tag)), values=lambda m, z, ts, b: (len(m.DR_BID_LIST), z, ts, b, m.DRBidWeight[b, z, ts]) ) - + # if m.iteration_number % 5 == 0: # # save time by only writing results every 5 iterations # write_results(m) - + write_dual_costs(m) write_results(m) write_batch_results(m) # if m.iteration_number >= 3: # import pdb; pdb.set_trace() - + def update_demand(m): """ @@ -543,7 +566,7 @@ def update_demand(m): # get new bids from the demand system at the current prices bids = get_bids(m) - + # add the new bids to the model if m.options.verbose: print "adding bids to model" @@ -555,13 +578,13 @@ def update_demand(m): # for b in m.DR_BID_LIST # for z in m.LOAD_ZONES # for ts in [m.TIMESERIES.first()]]) - + # print "m.dr_bid (first day):" # print [(b, z, ts, value(m.dr_bid[b, z, ts])) # for b in m.DR_BID_LIST # for z in m.LOAD_ZONES # for ts in m.TPS_IN_TS[m.TIMESERIES.first()]] - + def total_direct_costs_per_year(m, period): """Return undiscounted total cost per year, during each period, as calculated by SWITCH, @@ -569,7 +592,7 @@ def total_direct_costs_per_year(m, period): This code comes from financials.calc_sys_costs_per_period(), excluding discounting and upscaling to the period. - + NOTE: ideally this would give costs by zone and period, to allow calibration for different utilities within a large study. 
But the cost components don't distinguish that way. (They probably should, since that would allow us to discuss average electricity costs @@ -583,7 +606,7 @@ def total_direct_costs_per_year(m, period): for tp_cost in m.Cost_Components_Per_TP if tp_cost != "DR_Welfare_Cost" ) - ) + ) def electricity_marginal_cost(m, z, tp, prod): """Return marginal cost of providing product prod in load_zone z during timepoint tp.""" @@ -596,7 +619,7 @@ def electricity_marginal_cost(m, z, tp, prod): else: raise ValueError('Unrecognized electricity product: {}.'.format(prod)) return m.dual[component]/m.bring_timepoint_costs_to_base_year[tp] - + def electricity_demand(m, z, tp, prod): """Return total consumption of product prod in load_zone z during timepoint tp (negative if customers supply product).""" if prod == 'energy': @@ -616,16 +639,16 @@ def electricity_demand(m, z, tp, prod): raise ValueError('Unrecognized electricity product: {}.'.format(prod)) return demand - + def calibrate_model(m): """ - Calibrate the demand system and add it to the model. + Calibrate the demand system and add it to the model. """ - + # base_data consists of a list of tuples showing (load_zone, timeseries, base_load (list) and base_price) # note: the constructor below assumes list comprehensions will preserve the order of the underlying list # (which is guaranteed according to http://stackoverflow.com/questions/1286167/is-the-order-of-results-coming-from-a-list-comprehension-guaranteed) - + # calculate the average-cost price for the current study period # TODO: store monthly retail prices in system_load, and find annual average prices # that correspond to the load forecasts for each period, then store scale factors @@ -633,23 +656,23 @@ def calibrate_model(m): # years (same technique as rescaling the loads, but only adjusting the mean), then # report base prices for each timepoint along with the loads in loads.tab. # For now, we just assume the base price was $180/MWh, which is HECO's average price in - # 2007 according to EIA form 826. + # 2007 according to EIA form 826. 
# TODO: add in something for the fixed costs, to make marginal cost commensurate with the base_price #baseCosts = [m.dual[m.EnergyBalance[z, tp]] for z in m.LOAD_ZONES for tp in m.TIMEPOINTS] base_price = 180 # average retail price for 2007 ($/MWh) m.base_data = [( z, - ts, + ts, [m.zone_demand_mw[z, tp] for tp in m.TPS_IN_TS[ts]], [base_price] * len(m.TPS_IN_TS[ts]) ) for z in m.LOAD_ZONES for ts in m.TIMESERIES] - + # make a dict of base_data, indexed by load_zone and timepoint, for later reference m.base_data_dict = { (z, tp): (m.zone_demand_mw[z, tp], base_price) for z in m.LOAD_ZONES for tp in m.TIMEPOINTS } - + # calibrate the demand module demand_module.calibrate(m, m.base_data) @@ -665,9 +688,9 @@ def get_prices(m, flat_revenue_neutral=True): prod: ( [m.base_data_dict[z, tp][1] for tp in m.TPS_IN_TS[ts]] if prod == 'energy' else [0.0]*len(m.TPS_IN_TS[ts]) - ) - for prod in m.DR_PRODUCTS - } + ) + for prod in m.DR_PRODUCTS + } for z in m.LOAD_ZONES for ts in m.TIMESERIES } else: @@ -687,7 +710,7 @@ def get_prices(m, flat_revenue_neutral=True): prices = find_flat_prices(m, marginal_costs, flat_revenue_neutral) else: prices = marginal_costs - + return prices def get_bids(m): @@ -748,7 +771,7 @@ def find_flat_prices(m, marginal_costs, revenue_neutral): # calc revenue balance for LSE (q*price - q.MC) # if > 0: decrease price (q will go up across the board) # if < 0: increase price (q will go down across the board) but - + flat_prices = dict() for z in m.LOAD_ZONES: for p in m.PERIODS: @@ -756,10 +779,10 @@ def find_flat_prices(m, marginal_costs, revenue_neutral): sum( marginal_costs[z, ts]['energy'][i] * electricity_demand(m, z, tp, 'energy') - * m.tp_weight_in_year[tp] + * m.tp_weight_in_year[tp] for ts in m.TS_IN_PERIOD[p] for i, tp in enumerate(m.TPS_IN_TS[ts]) ) - / + / sum(electricity_demand(m, z, tp, 'energy') * m.tp_weight_in_year[tp] for tp in m.TPS_IN_PERIOD[p]) ) @@ -776,18 +799,18 @@ def find_flat_prices(m, marginal_costs, revenue_neutral): # bought the final constructed quantity at the final # marginal cost flat_prices[z, p] = price_guess - + # construct a collection of flat prices with the right structure final_prices = { (z, ts): - { + { prod: [flat_prices[z, p] if prod=='energy' else 0.0] * len(m.TPS_IN_TS[ts]) for prod in m.DR_PRODUCTS } for z in m.LOAD_ZONES for p in m.PERIODS for ts in m.TS_IN_PERIOD[p] } return final_prices - + def revenue_imbalance(flat_price, m, load_zone, period, dynamic_prices): """find demand and revenue that would occur in this load_zone and period with flat prices, and @@ -815,16 +838,16 @@ def revenue_imbalance(flat_price, m, load_zone, period, dynamic_prices): imbalance = dynamic_price_revenue - flat_price_revenue print "{}, {}: price ${} produces revenue imbalance of ${}/year".format(load_zone, period, flat_price, imbalance) - + return imbalance def add_bids(m, bids): - """ + """ accept a list of bids written as tuples like (z, ts, prod, prices, demand, wtp) where z is the load zone, ts is the timeseries, prod is the product, - demand is a list of demand levels for the timepoints during that series (possibly negative, to sell), + demand is a list of demand levels for the timepoints during that series (possibly negative, to sell), and wtp is the net private benefit from consuming/selling the amount of power in that bid. 
Then add that set of bids to the model """ @@ -833,7 +856,7 @@ b = 1 else: b = max(m.DR_BID_LIST) + 1 - + m.DR_BID_LIST.add(b) # add the bids for each load zone and timepoint to the dr_bid list @@ -859,11 +882,11 @@ m.DemandUpReserves.reconstruct() m.DemandDownReserves.reconstruct() m.DR_Welfare_Cost.reconstruct() - # it seems like we have to reconstruct the higher-level components that depend on these + # it seems like we have to reconstruct the higher-level components that depend on these # ones (even though these are Expressions), because otherwise they refer to objects that - # used to be returned by the Expression but aren't any more (e.g., versions of DRBidWeight + # used to be returned by the Expression but aren't any more (e.g., versions of DRBidWeight # that no longer exist in the model). - # (i.e., Energy_Balance refers to the items returned by FlexibleDemand instead of referring + # (i.e., Energy_Balance refers to the items returned by FlexibleDemand instead of referring # to FlexibleDemand itself) m.Energy_Balance.reconstruct() if hasattr(m, 'SpinningReservesUpAvailable'): @@ -880,13 +903,13 @@ def reconstruct_energy_balance(m): # copy the existing Energy_Balance object old_Energy_Balance = dict(m.Energy_Balance) m.Energy_Balance.reconstruct() - # TODO: now that this happens just before a solve, there may be no need to + # TODO: now that this happens just before a solve, there may be no need to # preserve duals across the reconstruct(). if m.iteration_number > 0: for k in old_Energy_Balance: # change dual entries to match new Energy_Balance objects m.dual[m.Energy_Balance[k]] = m.dual.pop(old_Energy_Balance[k]) - + def write_batch_results(m): # append results to the batch results file, creating it if needed @@ -894,12 +917,12 @@ # create a file to hold batch results if it doesn't already exist # note: we retain this file across scenarios so it can summarize all results, - # but this means it needs to be manually cleared before launching a new + # but this means it needs to be manually cleared before launching a new # batch of scenarios (e.g., when running get_scenario_data or clearing the # scenario_queue directory) if not os.path.isfile(output_file): util.create_table(output_file=output_file, headings=summary_headers(m)) - + util.append_table(m, output_file=output_file, values=lambda m: summary_values(m)) def summary_headers(m): @@ -910,34 +933,34 @@ +tuple(prod + ' payment ' + str(p) for prod in m.DR_PRODUCTS for p in m.PERIODS) +tuple(prod + ' sold ' + str(p) for prod in m.DR_PRODUCTS for p in m.PERIODS) ) - + def summary_values(m): demand_components = [ c for c in ('zone_demand_mw', 'ShiftDemand', 'ChargeEVs', 'FlexibleDemand') if hasattr(m, c) ] values = [] - + # tag (configuration) values.extend([ m.options.scenario_name, m.iteration_number, m.SystemCost # total cost (all periods) ]) - + # direct costs (including "other") values.extend([total_direct_costs_per_year(m, p) for p in m.PERIODS]) - + # DR_Welfare_Cost values.extend([ sum(m.DR_Welfare_Cost[t] * m.tp_weight_in_year[t] for t in m.TPS_IN_PERIOD[p]) for p in m.PERIODS ]) - + # payments by customers ([expected demand] * [price offered for that demand]) # note: this uses the final MC to set the final price, rather than using the # final price offered to customers. This creates consistency between the final # quantities and prices.
Otherwise, we would use prices that differ from the - # final cost by some random amount, and the split between PS and CS would + # final cost by some random amount, and the split between PS and CS would # jump around randomly. # note: if switching to using the offered prices, then you may have to use None # as the customer payment during iteration 0, since m.dr_price[last_bid, z, tp, prod] @@ -967,13 +990,19 @@ return values +def get(component, idx, default): + try: + return component[idx] + except KeyError: + return default + def write_results(m): outputs_dir = m.options.outputs_dir tag = filename_tag(m) - + avg_ts_scale = float(sum(m.ts_scale_to_year[ts] for ts in m.TIMESERIES))/len(m.TIMESERIES) last_bid = m.DR_BID_LIST.last() - + # get final prices that will be charged to customers (not necessarily # the same as the final prices they were offered, if iteration was # stopped before complete convergence) @@ -1022,10 +1051,10 @@ # for tp in m.TIMEPOINTS # for prod in m.DR_PRODUCTS # } - + util.write_table( m, m.LOAD_ZONES, m.TIMEPOINTS, - output_file=os.path.join(outputs_dir, "energy_sources{t}.tsv".format(t=tag)), + output_file=os.path.join(outputs_dir, "energy_sources{t}.tsv".format(t=tag)), headings= ("load_zone", "period", "timepoint_label") +tuple(m.FUELS) +tuple("curtail_"+f for f in m.NON_FUEL_ENERGY_SOURCES) +tuple(m.Zone_Power_Injections) +tuple(m.Zone_Power_Withdrawals) +tuple("offered price "+prod for prod in m.DR_PRODUCTS) +tuple("bid q "+prod for prod in m.DR_PRODUCTS) +tuple("final mc "+prod for prod in m.DR_PRODUCTS) +tuple("final price "+prod for prod in m.DR_PRODUCTS) +tuple("final q "+prod for prod in m.DR_PRODUCTS) +("peak_day", "base_load", "base_price"), - values=lambda m, z, t: - (z, m.tp_period[t], m.tp_timestamp[t]) + values=lambda m, z, t: + (z, m.tp_period[t], m.tp_timestamp[t]) +tuple( sum(DispatchGenByFuel(m, p, t, f) for p in m.GENERATION_PROJECTS_BY_FUEL[f]) for f in m.FUELS ) @@ -1069,7 +1098,7 @@ m.base_data_dict[z, t][1], ) ) - + # import pprint # b=[(g, pe, value(m.BuildGen[g, pe]), m.gen_tech[g], m.gen_overnight_cost[g, pe]) for (g, pe) in m.BuildGen if value(m.BuildGen[g, pe]) > 0] # bt=set(x[3] for x in b) # technologies @@ -1096,7 +1125,7 @@ def write_dual_costs(m): dual_data = [] start_time = time.time() print "Writing {} ... 
".format(outfile), + def add_dual(const, lbound, ubound, duals, prefix='', offset=0.0): if const in duals: dual = duals[const] diff --git a/switch_model/hawaii/constant_elasticity_demand_system.py b/switch_model/balancing/demand_response/iterative/constant_elasticity_demand_system.py similarity index 100% rename from switch_model/hawaii/constant_elasticity_demand_system.py rename to switch_model/balancing/demand_response/iterative/constant_elasticity_demand_system.py diff --git a/switch_model/hawaii/r_demand_system.py b/switch_model/balancing/demand_response/iterative/r_demand_system.py similarity index 100% rename from switch_model/hawaii/r_demand_system.py rename to switch_model/balancing/demand_response/iterative/r_demand_system.py diff --git a/switch_model/balancing/demand_response/simple.py b/switch_model/balancing/demand_response/simple.py index b9ae78bb7..1595df72c 100644 --- a/switch_model/balancing/demand_response/simple.py +++ b/switch_model/balancing/demand_response/simple.py @@ -71,9 +71,9 @@ def define_components(mod): rule=lambda m, z, ts: sum(m.ShiftDemand[z, t] for t in m.TPS_IN_TS[ts]) == 0.0) - if 'Distributed_Power_Withdrawals' in dir(mod): + try: mod.Distributed_Power_Withdrawals.append('ShiftDemand') - else: + except AttributeError: mod.Zone_Power_Withdrawals.append('ShiftDemand') diff --git a/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py b/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py new file mode 100644 index 000000000..c2c13c0a6 --- /dev/null +++ b/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py @@ -0,0 +1,633 @@ +# Copyright (c) 2015-2017 The Switch Authors. All rights reserved. +# Licensed under the Apache License, Version 2.0, which is in the LICENSE file. +""" +This is an advanced version of the basic spinning_reserves module, and +can be used in place of it (not in addition to). +""" +import os +from collections import defaultdict +from pyomo.environ import * +from switch_model.utilities import iteritems + + +dependencies = ( + 'switch_model.timescales', + 'switch_model.balancing.load_zones', + 'switch_model.balancing.operating_reserves.areas', + 'switch_model.financials', + 'switch_model.energy_sources.properties', + 'switch_model.generators.core.build', + 'switch_model.generators.core.dispatch', + 'switch_model.generators.core.commit.operate', +) + + +def define_arguments(argparser): + group = argparser.add_argument_group(__name__) + group.add_argument('--unit-contingency', default=False, action='store_true', + help=("This will enable an n-1 contingency based on a single unit of " + "a generation project falling offline. Note: This creates a new " + "binary variable for each timepoint for each generation project " + "that has a gen_unit_size specified.") + ) + group.add_argument('--project-contingency', default=False, action='store_true', + help=("This will enable an n-1 contingency based on the entire " + "committed capacity of a generation project falling offline. " + "Unlike unit contingencies, this is a purely linear expression.") + ) + group.add_argument('--fixed-contingency', type=float, default=0.0, + help=("Add a fixed generator contingency reserve margin, specified in MW. 
" + "This can be used alone or in combination with the other " + "contingency options.") + ) + group.add_argument('--spinning-requirement-rule', default=None, + choices = ["Hawaii", "3+5", "none"], + help=("Choose rules for spinning reserves requirements as a function " + "of variable renewable power and load. Hawaii uses rules " + "bootstrapped from the GE RPS study, and '3+5' requires 3%% of " + "load and 5%% of variable renewable output, based on the heuristic " + "described in the 2010 Western Wind and Solar Integration Study. " + "Specify 'none' if applying your own rules instead. " + ) + ) + # TODO: define these inputs in data files + group.add_argument( + '--contingency-reserve-type', dest='contingency_reserve_type', + default='spinning', + help= + "Type of reserves to use to meet the contingency reserve requirements " + "defined for generation projects and sometimes for loss-of-load events " + "(e.g., 'contingency' or 'spinning'); default is 'spinning'." + ) + group.add_argument( + '--regulating-reserve-type', dest='regulating_reserve_type', + default='spinning', + help= + "Type of reserves to use to meet the regulating reserve requirements " + "defined by the spinning requirements rule (e.g., 'spinning' or " + "'regulation'); default is 'spinning'." + ) + + + + +def define_dynamic_lists(m): + """ + Spinning_Reserve_Requirements and Spinning_Reserve_Provisions are + dicts of lists of components that contribute to the requirement or provision + of spinning reserves. Entries in each dict are indexed by reserve + product. In the simple scenario, you may only have a single product called + 'spinning'. In other scenarios where some generators are limited in what + kind of reserves they can provide, you may have "regulation" and + "contingency" reserve products. + The dicts are setup as defaultdicts, so they will automatically + return an empty list if nothing has been added for a particular + type of reserves. + + Spinning_Reserve_Up_Requirements and Spinning_Reserve_Down_Requirements + list model components that increase reserve requirements in each balancing + area and timepoint. + + Spinning_Reserve_Up_Provisions and Spinning_Reserve_Down_Provisions list + model components that help satisfy spinning reserve requirements in + each balancing area and timepoint. + + Spinning_Reserve_Up_Contingencies and Spinning_Reserve_Down_Contingencies + list model components describing maximum contingency events. Elements of + this list are summarized into a MaximumContingency variable that is added + to the Spinning_Reserve_Requirements['contingency'] list. + + Each component in the Requirements and Provisions lists needs to use units + of MW and be indexed by reserve type, balancing area and timepoint. Missing + entries will be treated as zero (no reserves required or no reserves available). + + Each component in the Contingencies list should be in MW and indexed by + (ba, tp) in BALANCING_AREA_TIMEPOINTS. + """ + m.Spinning_Reserve_Up_Requirements = [] + m.Spinning_Reserve_Down_Requirements = [] + m.Spinning_Reserve_Up_Provisions = [] + m.Spinning_Reserve_Down_Provisions = [] + + m.Spinning_Reserve_Up_Contingencies = [] + m.Spinning_Reserve_Down_Contingencies = [] + + +def gen_fixed_contingency(m): + """ + Add a fixed contingency reserve margin (much faster than unit-by-unit + reserve margins, and reasonable when there is a single largest plant + that is usually online and/or reserves are cheap). 
+ """ + m.GenFixedContingency = Param( + m.BALANCING_AREA_TIMEPOINTS, + initialize=lambda m: m.options.fixed_contingency + ) + m.Spinning_Reserve_Up_Contingencies.append('GenFixedContingency') + +def gen_unit_contingency(m): + """ + Add components for unit-level contingencies. A generation project can + include one or more discretely sized generation units. This will model + contingencies of individual generation units that have discrete sizes + specified. Caution, this adds binary variables to the model for every + GEN_TPS for DISCRETELY_SIZED_GENS. This many binary variables can impact + runtime. + + UNIT_CONTINGENCY_DISPATCH_POINTS is a subset of GEN_TPS for + DISCRETELY_SIZED_GENS + + GenIsCommitted[(g,t) in UNIT_CONTINGENCY_DISPATCH_POINTS] is a binary + variable that tracks whether generation projects at least one units + committed. + + Enforce_GenIsCommitted[(g,t) in UNIT_CONTINGENCY_DISPATCH_POINTS] is a + constraint that enforces the tracking behavior of GenIsCommitted. + + GenUnitLargestContingency[(b,t) in BALANCING_AREA_TIMEPOINTS] is a + variable that tracks the size of the largest contingency in each balancing + area, accounting for all of the discretely sized units that are currently + committed. This is added to the dynamic list Spinning_Reserve_Contingencies. + + Enforce_GenUnitLargestContingency[(g,t) in UNIT_CONTINGENCY_DISPATCH_POINTS] + is a constraint that enforces the behavior of GenUnitLargestContingency, + by making GenUnitLargestContingency >= the capacity of each of the + committed units in its balancing area. + + """ + # UNIT_CONTINGENCY_DISPATCH_POINTS duplicates + # DISCRETE_GEN_TPS from generators.core.commit.discrete. I + # justify the duplication because I don't think discrete unit commitment + # should be a prerequisite for this functionality. + m.UNIT_CONTINGENCY_DISPATCH_POINTS = Set( + dimen=2, + initialize=lambda m: + [(g, t) for g in m.DISCRETELY_SIZED_GENS for t in m.TPS_FOR_GEN[g]] + ) + m.GenIsCommitted = Var( + m.UNIT_CONTINGENCY_DISPATCH_POINTS, + within=Binary, + doc="Stores the status of unit committment as a binary variable." + ) + m.Enforce_GenIsCommitted = Constraint( + m.UNIT_CONTINGENCY_DISPATCH_POINTS, + rule=lambda m, g, tp: + m.CommitGen[g, tp] <= m.GenIsCommitted[g, tp] * ( + m._gen_max_cap_for_binary_constraints + if g not in m.CAPACITY_LIMITED_GENS + else m.gen_capacity_limit_mw[g] + ) + ) + # TODO: would it be faster to add all generator contingencies directly + # to Spinning_Reserve_Contingencies instead of introducing this intermediate + # variable and constraint? + m.GenUnitLargestContingency = Var( + m.BALANCING_AREA_TIMEPOINTS, within=NonNegativeReals, + doc="Largest generating unit that could drop offline.") + def Enforce_GenUnitLargestContingency_rule(m, g, t): + b = m.zone_balancing_area[m.gen_load_zone[g]] + return (m.GenUnitLargestContingency[b,t] >= + m.GenIsCommitted[g, t] * m.gen_unit_size[g]) + m.Enforce_GenUnitLargestContingency = Constraint( + m.UNIT_CONTINGENCY_DISPATCH_POINTS, + rule=Enforce_GenUnitLargestContingency_rule, + doc=("Force GenUnitLargestContingency to be at least as big as the " + "maximum unit contingency.") + ) + m.Spinning_Reserve_Up_Contingencies.append('GenUnitLargestContingency') + + +def gen_project_contingency(m): + """ + Add components for project-level contingencies based on committed capacity. + A generation project can include one or more discretely sized generation + units. 
This will model contingencies of entire generation projects - + basically entire plants tripping offline, rather than individual + generation units in a plant tripping offline. + + GenProjectLargestContingency[(b,t) in BALANCING_AREA_TIMEPOINTS] is a + variable that tracks the size of the largest contingency in each balancing + area, accounting for all of the capacity that is committed. This is + added to the dynamic list Spinning_Reserve_Contingencies. + + Enforce_GenProjectLargestContingency[(g,t) in GEN_TPS] is a constraint + that enforces the behavior of GenProjectLargestContingency by making + GenProjectLargestContingency >= DispatchGen + for each generation project in a balancing area. If a generation project + is capable of providing upward reserves, then CommitGenSpinningReservesUp + is added to the right hand side. + + """ + m.GenProjectLargestContingency = Var( + m.BALANCING_AREA_TIMEPOINTS, + doc="Largest generating project that could drop offline.") + def Enforce_GenProjectLargestContingency_rule(m, g, t): + b = m.zone_balancing_area[m.gen_load_zone[g]] + if g in m.SPINNING_RESERVE_CAPABLE_GENS: + total_up_reserves = sum( + m.CommitGenSpinningReservesUp[rt, g, t] + for rt in m.SPINNING_RESERVE_TYPES_FOR_GEN[g]) + return m.GenProjectLargestContingency[b, t] >= \ + m.DispatchGen[g, t] + total_up_reserves + else: + return m.GenProjectLargestContingency[b, t] >= m.DispatchGen[g, t] + m.Enforce_GenProjectLargestContingency = Constraint( + m.GEN_TPS, + rule=Enforce_GenProjectLargestContingency_rule, + doc=("Force GenProjectLargestContingency to be at least as big as the " + "maximum generation project contingency.") + ) + m.Spinning_Reserve_Up_Contingencies.append('GenProjectLargestContingency') + +def hawaii_spinning_reserve_requirements(m): + # These parameters were found by regressing the reserve requirements from + # the GE RPS Study against wind and solar conditions each hour (see + # Dropbox/Research/Shared/Switch-Hawaii/ge_validation/source_data/ + # reserve_requirements_oahu_scenarios charts.xlsx and + # Dropbox/Research/Shared/Switch-Hawaii/ge_validation/ + # fit_renewable_reserves.ipynb ) + # TODO: supply all the parameters for this function in input files.
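+ # Illustrative sketch (comments only, not additional model logic): the + # requirement registered below works out, for each balancing area b and timepoint t, to + # sum(GenCapacityInTP[g, t] * min(var_gen_power_reserve[g] * gen_max_capacity_factor[g, t], var_gen_cap_reserve_limit[g]) for each variable generator g in b), + # i.e., reserves track potential renewable output but are capped at a fixed + # fraction of installed capacity (about 21.3% for SUN and 21.6% for WND with the defaults below).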
+ + # Calculate and register regulating reserve requirements + # (currently only considers variable generation, only underforecasting) + # (could eventually use some linearized quadratic formulation based + # on load, magnitude of renewables and geographic dispersion of renewables) + m.var_gen_power_reserve = Param( + m.VARIABLE_GENS, default=1.0, + doc=("Spinning reserves required to back up variable renewable " + "generators, as fraction of potential output.") + ) + def var_gen_cap_reserve_limit_default(m, g): + if m.gen_energy_source[g] == 'SUN': + return 0.21288916 + elif m.gen_energy_source[g] == 'WND': + return 0.21624407 + else: + raise ValueError( + "Unable to calculate reserve requirement for energy source {}".format(m.gen_energy_source[g]) + ) + m.var_gen_cap_reserve_limit = Param( + m.VARIABLE_GENS, + default=var_gen_cap_reserve_limit_default, + doc="Maximum spinning reserves required, as fraction of installed capacity" + ) + m.HawaiiVarGenUpSpinningReserveRequirement = Expression( + [m.options.regulating_reserve_type], + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, rt, b, t: sum( + m.GenCapacityInTP[g, t] + * min( + m.var_gen_power_reserve[g] * m.gen_max_capacity_factor[g, t], + m.var_gen_cap_reserve_limit[g] + ) + for z in m.ZONES_IN_BALANCING_AREA[b] + for g in m.VARIABLE_GENS_IN_ZONE[z] + if (g, t) in m.VARIABLE_GEN_TPS), + doc="The spinning reserves for backing up variable generation with Hawaii rules." + ) + m.Spinning_Reserve_Up_Requirements.append('HawaiiVarGenUpSpinningReserveRequirement') + + # Calculate and register loss-of-load (down) contingencies + if hasattr(m, 'WithdrawFromCentralGrid'): + rule = lambda m, ba, tp: 0.10 * sum( + m.WithdrawFromCentralGrid[z, tp] for z in m.ZONES_IN_BALANCING_AREA[ba] + ) + else: + # TODO: include effect of demand response here + rule = lambda m, ba, tp: 0.10 * sum( + m.zone_demand_mw[z, tp] for z in m.ZONES_IN_BALANCING_AREA[ba] + ) + m.HawaiiLoadDownContingency = Expression( + m.BALANCING_AREA_TIMEPOINTS, rule=rule + ) + m.Spinning_Reserve_Down_Contingencies.append('HawaiiLoadDownContingency') + + +def nrel_3_5_spinning_reserve_requirements(m): + """ + NREL35VarGenSpinningReserveRequirement[(b,t) in BALANCING_AREA_TIMEPOINTS] + is an expression for upward and downward spinning reserve requirements of + 3% of load plus 5% of renewable output, based on a heuristic described in + NREL's 2010 Western Wind and Solar Integration study. It is added to the + Spinning_Reserve_Up_Requirements and Spinning_Reserve_Down_Requirements + lists. If the local_td module is available with DER accounting, load will + be set to WithdrawFromCentralGrid. Otherwise load will be set to + zone_demand_mw. 
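+ For example (illustrative numbers), a balancing area with 1,000 MW of + load and 400 MW of variable renewable output in a timepoint would require + 0.03 * 1000 + 0.05 * 400 = 50 MW of spinning reserves in each direction + during that timepoint.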
+ """ + def NREL35VarGenSpinningReserveRequirement_rule(m, rt, b, t): + try: + load = m.WithdrawFromCentralGrid + except AttributeError: + load = m.zone_demand_mw + return ( + 0.03 * sum(load[z, t] + for z in m.LOAD_ZONES + if b == m.zone_balancing_area[z]) + + 0.05 * sum(m.DispatchGen[g, t] + for g in m.VARIABLE_GENS + if (g, t) in m.VARIABLE_GEN_TPS and + b == m.zone_balancing_area[m.gen_load_zone[g]])) + m.NREL35VarGenSpinningReserveRequirement = Expression( + [m.options.regulating_reserve_type], + m.BALANCING_AREA_TIMEPOINTS, + rule=NREL35VarGenSpinningReserveRequirement_rule + ) + m.Spinning_Reserve_Up_Requirements.append('NREL35VarGenSpinningReserveRequirement') + m.Spinning_Reserve_Down_Requirements.append('NREL35VarGenSpinningReserveRequirement') + + +def define_components(m): + """ + contingency_safety_factor is a parameter that increases the contingency + requirements. This is defaults to 1.0. + + GEN_SPINNING_RESERVE_TYPES is a set of all allowed reserve types for each generation + project. This is read from generation_projects_reserve_availability.tab. + If that file doesn't exist, this defaults to GENERATION_PROJECTS x {"spinning"} + + gen_reserve_type_max_share specifies the maximum amount of committed + capacity that can be used to provide each type of reserves. It is indexed + by GEN_SPINNING_RESERVE_TYPES. This is read from generation_projects_reserve_availability.tab + and defaults to 1 if not specified. + + SPINNING_RESERVE_CAPABLE_GEN_TPS is a subset of GEN_TPS of generators that can + provide spinning reserves based on generation_projects_reserve_capability.tab. + + CommitGenSpinningReservesUp[(g,t) in SPINNING_SPINNING_RESERVE_CAPABLE_GEN_TPS] is a + decision variable of how much upward spinning reserve capacity to commit + (in MW). + + CommitGenSpinningReservesDown[(g,t) in SPINNING_SPINNING_RESERVE_CAPABLE_GEN_TPS] is a + corresponding variable for downward spinning reserves. + + CommitGenSpinningReservesUp_Limit[(g,t) in SPINNING_SPINNING_RESERVE_CAPABLE_GEN_TPS] and + CommitGenSpinningReservesDown_Limit constraint the CommitGenSpinningReserves + variables based on DispatchSlackUp and DispatchSlackDown. + + CommittedSpinningReserveUp[(b,t) in BALANCING_AREA_TIMEPOINTS] and + CommittedSpinningReserveDown are expressions summarizing the + CommitGenSpinningReserves variables for generators within each balancing + area. + + Depending on the configuration parameters unit_contingency, + project_contingency and spinning_requirement_rule, other components may be + added by other functions which are documented above. + """ + m.contingency_safety_factor = Param(default=1.0, + doc=("The spinning reserve requiremet will be set to this value " + "times the maximum contingency. This defaults to 1 to provide " + "n-1 security for the largest committed generator. ")) + + m.GEN_SPINNING_RESERVE_TYPES = Set(dimen=2) + m.gen_reserve_type_max_share = Param( + m.GEN_SPINNING_RESERVE_TYPES, + within=PercentFraction, + default=1.0 + ) + + # reserve types that are supplied by generation projects + # and generation projects that can provide reserves + # note: these are also the indexing sets of the above set arrays; maybe that could be used? 
+ m.SPINNING_RESERVE_TYPES_FROM_GENS = Set(initialize=lambda m: set(rt for (g, rt) in m.GEN_SPINNING_RESERVE_TYPES)) + m.SPINNING_RESERVE_CAPABLE_GENS = Set(initialize=lambda m: set(g for (g, rt) in m.GEN_SPINNING_RESERVE_TYPES)) + + # slice GEN_SPINNING_RESERVE_TYPES both ways for later use + def rule(m): + m.SPINNING_RESERVE_TYPES_FOR_GEN_dict = defaultdict(list) + m.GENS_FOR_SPINNING_RESERVE_TYPE_dict = defaultdict(list) + for g, rt in m.GEN_SPINNING_RESERVE_TYPES: + m.SPINNING_RESERVE_TYPES_FOR_GEN_dict[g].append(rt) + m.GENS_FOR_SPINNING_RESERVE_TYPE_dict[rt].append(g) + m.build_spinning_reserve_indexed_sets = BuildAction(rule=rule) + + m.SPINNING_RESERVE_TYPES_FOR_GEN = Set( + m.SPINNING_RESERVE_CAPABLE_GENS, + rule=lambda m, g: m.SPINNING_RESERVE_TYPES_FOR_GEN_dict.pop(g) + ) + m.GENS_FOR_SPINNING_RESERVE_TYPE = Set( + m.SPINNING_RESERVE_TYPES_FROM_GENS, + rule=lambda m, rt: m.GENS_FOR_SPINNING_RESERVE_TYPE_dict.pop(rt) + ) + + # types, generators and timepoints when reserves could be supplied + m.SPINNING_RESERVE_TYPE_GEN_TPS = Set(dimen=3, initialize=lambda m: ( + (rt, g, tp) + for g, rt in m.GEN_SPINNING_RESERVE_TYPES + for tp in m.TPS_FOR_GEN[g] + )) + # generators and timepoints when reserves could be supplied + m.SPINNING_RESERVE_CAPABLE_GEN_TPS = Set(dimen=2, initialize=lambda m: ( + (g, tp) + for g in m.SPINNING_RESERVE_CAPABLE_GENS + for tp in m.TPS_FOR_GEN[g] + )) + + # decide how much of each type of reserves to produce from each generator + # during each timepoint + m.CommitGenSpinningReservesUp = Var(m.SPINNING_RESERVE_TYPE_GEN_TPS, within=NonNegativeReals) + m.CommitGenSpinningReservesDown = Var(m.SPINNING_RESERVE_TYPE_GEN_TPS, within=NonNegativeReals) + + # constrain reserve provision appropriately + m.CommitGenSpinningReservesUp_Limit = Constraint( + m.SPINNING_RESERVE_CAPABLE_GEN_TPS, + rule=lambda m, g, tp: + sum(m.CommitGenSpinningReservesUp[rt, g, tp] for rt in m.SPINNING_RESERVE_TYPES_FOR_GEN[g]) + <= + m.DispatchSlackUp[g, tp] + # storage can give more up response by stopping charging + + (m.ChargeStorage[g, tp] if g in getattr(m, 'STORAGE_GENS', []) else 0.0) + ) + m.CommitGenSpinningReservesDown_Limit = Constraint( + m.SPINNING_RESERVE_CAPABLE_GEN_TPS, + rule=lambda m, g, tp: + sum(m.CommitGenSpinningReservesDown[rt, g, tp] for rt in m.SPINNING_RESERVE_TYPES_FOR_GEN[g]) + <= + m.DispatchSlackDown[g, tp] + + ( # storage could give more down response by raising ChargeStorage to the maximum rate + (m.DispatchUpperLimit[g, tp] * m.gen_store_to_release_ratio[g] - m.ChargeStorage[g, tp]) + if g in getattr(m, 'STORAGE_GENS', []) + else 0.0 + ) + ) + + # Calculate total spinning reserves from generation projects, + # and add to the list of reserve provisions + def rule(m): + d = m.TotalGenSpinningReserves_dict = defaultdict(float) + for g, rt in m.GEN_SPINNING_RESERVE_TYPES: + ba = m.zone_balancing_area[m.gen_load_zone[g]] + for tp in m.TPS_FOR_GEN[g]: + d[rt, 'up', ba, tp] += m.CommitGenSpinningReservesUp[rt, g, tp] + d[rt, 'down', ba, tp] += m.CommitGenSpinningReservesDown[rt, g, tp] + m.TotalGenSpinningReserves_aggregate = BuildAction(rule=rule) + + m.TotalGenSpinningReservesUp = Expression( + m.SPINNING_RESERVE_TYPES_FROM_GENS, + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, rt, ba, tp: + m.TotalGenSpinningReserves_dict.pop((rt, 'up', ba, tp), 0.0) + ) + m.TotalGenSpinningReservesDown = Expression( + m.SPINNING_RESERVE_TYPES_FROM_GENS, + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, rt, ba, tp: + m.TotalGenSpinningReserves_dict.pop((rt, 'down', ba, tp), 0.0) + ) 
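+ # note: the .pop(..., 0.0) calls above return 0.0 for any (type, ba, tp) + # combination that no generator can serve, and free the cached dict + # entries as they are transferred into the Expressions.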
+ + m.Spinning_Reserve_Up_Provisions.append('TotalGenSpinningReservesUp') + m.Spinning_Reserve_Down_Provisions.append('TotalGenSpinningReservesDown') + + # define reserve requirements + if m.options.fixed_contingency: + gen_fixed_contingency(m) + if m.options.unit_contingency: + gen_unit_contingency(m) + if m.options.project_contingency: + gen_project_contingency(m) + if m.options.spinning_requirement_rule == 'Hawaii': + hawaii_spinning_reserve_requirements(m) + elif m.options.spinning_requirement_rule == '3+5': + nrel_3_5_spinning_reserve_requirements(m) + elif m.options.spinning_requirement_rule == 'none': + pass # users can turn off the rules and use their own instead + else: + raise ValueError('No --spinning-requirement-rule specified on command line; unable to allocate reserves.') + + +def define_dynamic_components(m): + """ + MaximumContingency[(b,t) in BALANCING_AREA_TIMEPOINTS] is a variable that + tracks the size of the largest contingency in each balancing area, + accounting for every contingency that has been registered with + Spinning_Reserve_Contingencies. + + BALANCING_AREA_TIMEPOINT_CONTINGENCIES is a set of (b, t, contingency) formed + from the cross product of the set BALANCING_AREA_TIMEPOINTS and the dynamic + list Spinning_Reserve_Contingencies. + + Enforce_MaximumContingency[(b,t,contingency) in BALANCING_AREA_TIMEPOINT_CONTINGENCIES] + is a constraint that enforces the behavior of MaximumContingency by making + MaximumContingency >= contingency for each contingency registered in the + dynamic list Spinning_Reserve_Contingencies. + + Satisfy_Spinning_Reserve_Up_Requirement[(b,t) in BALANCING_AREA_TIMEPOINTS] + is a constraint that ensures upward spinning reserve requirements are + being satisfied based on the sums of the two dynamic lists + Spinning_Reserve_Up_Provisions and Spinning_Reserve_Up_Requirements. + + Satisfy_Spinning_Reserve_Down_Requirement[(b,t) in BALANCING_AREA_TIMEPOINTS] + is a matching constraint that uses the downward reserve lists. 
+ """ + + # TODO: add contingency down reserves (loss-of-load events) + + # define largest contingencies + m.MaximumContingencyUp = Var( + m.BALANCING_AREA_TIMEPOINTS, within=NonNegativeReals, + doc=("Maximum of the registered Spinning_Reserve_Up_Contingencies, after " + "multiplying by contingency_safety_factor.") + ) + m.MaximumContingencyDown = Var( + m.BALANCING_AREA_TIMEPOINTS, within=NonNegativeReals, + doc=("Maximum of the registered Spinning_Reserve_Down_Contingencies, after " + "multiplying by contingency_safety_factor.") + ) + m.Calculate_MaximumContingencyUp = Constraint( + m.BALANCING_AREA_TIMEPOINTS, + m.Spinning_Reserve_Up_Contingencies, # list of contingency events + rule=lambda m, b, t, contingency: + m.MaximumContingencyUp[b, t] >= m.contingency_safety_factor * getattr(m, contingency)[b, t] + ) + m.Calculate_MaximumContingencyDown = Constraint( + m.BALANCING_AREA_TIMEPOINTS, + m.Spinning_Reserve_Down_Contingencies, # list of contingency events + rule=lambda m, b, t, contingency: + m.MaximumContingencyDown[b, t] >= m.contingency_safety_factor * getattr(m, contingency)[b, t] + ) + + # create reserve requirements equal to the largest contingencies + # (these could eventually be region-specific) + m.MaximumContingencyUpRequirement = Expression( + [m.options.contingency_reserve_type], + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, rt, ba, tp: m.MaximumContingencyUp[ba, tp] + ) + m.MaximumContingencyDownRequirement = Expression( + [m.options.contingency_reserve_type], + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, rt, ba, tp: m.MaximumContingencyDown[ba, tp] + ) + + m.Spinning_Reserve_Up_Requirements.append('MaximumContingencyUpRequirement') + m.Spinning_Reserve_Down_Requirements.append('MaximumContingencyDownRequirement') + + # aggregate the requirements for each type of reserves during each timepoint + def rule(m): + def makedict(m, lst): + # lst is the name of a dynamic list from which to aggregate components + d = defaultdict(float) + for comp in getattr(m, lst): + for key, val in iteritems(getattr(m, comp)): + d[key] += val + setattr(m, lst + '_dict', d) + makedict(m, 'Spinning_Reserve_Up_Requirements') + makedict(m, 'Spinning_Reserve_Down_Requirements') + makedict(m, 'Spinning_Reserve_Up_Provisions') + makedict(m, 'Spinning_Reserve_Down_Provisions') + m.Aggregate_Spinning_Reserve_Details = BuildAction(rule=rule) + + m.SPINNING_RESERVE_REQUIREMENT_UP_BALANCING_AREA_TIMEPOINTS = Set( + dimen=3, + rule=lambda m: m.Spinning_Reserve_Up_Requirements_dict.keys() + ) + m.SPINNING_RESERVE_REQUIREMENT_DOWN_BALANCING_AREA_TIMEPOINTS = Set( + dimen=3, + rule=lambda m: m.Spinning_Reserve_Down_Requirements_dict.keys() + ) + + # satisfy all spinning reserve requirements + m.Satisfy_Spinning_Reserve_Up_Requirement = Constraint( + m.SPINNING_RESERVE_REQUIREMENT_UP_BALANCING_AREA_TIMEPOINTS, + rule=lambda m, rt, ba, tp: + m.Spinning_Reserve_Up_Provisions_dict.pop((rt, ba, tp), 0.0) + >= + m.Spinning_Reserve_Up_Requirements_dict.pop((rt, ba, tp)) + ) + m.Satisfy_Spinning_Reserve_Down_Requirement = Constraint( + m.SPINNING_RESERVE_REQUIREMENT_DOWN_BALANCING_AREA_TIMEPOINTS, + rule=lambda m, rt, ba, tp: + m.Spinning_Reserve_Down_Provisions_dict.pop((rt, ba, tp), 0.0) + >= + m.Spinning_Reserve_Down_Requirements_dict.pop((rt, ba, tp)) + ) + + +def load_inputs(m, switch_data, inputs_dir): + """ + All files & columns are optional. 
+ + generation_projects_reserve_capability.tab + GENERATION_PROJECTS, RESERVE_TYPES, [gen_reserve_type_max_share] + + spinning_reserve_params.dat may override the default value of + contingency_safety_factor. Note that this is a .dat file, not a .tab file. + """ + path = os.path.join(inputs_dir, 'generation_projects_reserve_capability.tab') + switch_data.load_aug( + filename=path, + optional=True, + auto_select=True, + optional_params=['gen_reserve_type_max_share'], + index=m.GEN_SPINNING_RESERVE_TYPES, + param=(m.gen_reserve_type_max_share) + ) + if not os.path.isfile(path): + gen_projects = switch_data.data()['GENERATION_PROJECTS'][None] + switch_data.data()['GEN_SPINNING_RESERVE_TYPES'] = {} + switch_data.data()['GEN_SPINNING_RESERVE_TYPES'][None] = \ + [(g, "spinning") for g in gen_projects] + + switch_data.load_aug( + filename=os.path.join(inputs_dir, 'spinning_reserve_params.dat'), + optional=True, + ) diff --git a/switch_model/energy_sources/fuel_costs/markets.py b/switch_model/energy_sources/fuel_costs/markets.py index e2e2e44d8..4dc92a022 100644 --- a/switch_model/energy_sources/fuel_costs/markets.py +++ b/switch_model/energy_sources/fuel_costs/markets.py @@ -75,8 +75,8 @@ def define_components(mod): ConsumeFuelTier[rfm, period, tier] is a decision variable that denotes the amount of fuel consumed in each tier of a supply curve - in a particular regional fuel market and period. It has an upper bound - of rfm_supply_tier_limit. + in a particular regional fuel market and period (MMBtu/year). It + has an upper bound of rfm_supply_tier_limit. FuelConsumptionInMarket[rfm, period] is a derived decision variable specifying the total amount of fuel consumed in a regional fuel @@ -180,9 +180,9 @@ def define_components(mod): with the fuel are a much larger driver of consumption than the fuel costs. - GEN_TPS_FOR_RFM_PERIOD[regional_fuel_market, period] is an indexed set - of GEN_TP_FUELS that contribute to a given regional - fuel market's activity in a given period. + GENS_FOR_RFM_PERIOD[regional_fuel_market, period] is an indexed set + of GENS that contribute to a given regional fuel market's activity + in a given period. 
Enforce_Fuel_Consumption is a constraint that ties the aggregate fuel consumption from dispatch into FuelConsumptionInMarket variable @@ -291,19 +291,36 @@ def rfm_annual_costs(m, rfm, p): # Components to link aggregate fuel consumption from project # dispatch into market framework - mod.GEN_TPS_FOR_RFM_PERIOD = Set( - mod.REGIONAL_FUEL_MARKETS, mod.PERIODS, - within=mod.GEN_TP_FUELS, - initialize=lambda m, rfm, p: [ - (g, t, f) for (g, t, f) in m.GEN_TP_FUELS - if f == m.rfm_fuel[rfm] and - m.gen_load_zone[g] in m.ZONES_IN_RFM[rfm] and - m.tp_period[t] == p]) + def GENS_FOR_RFM_PERIOD_rule(m, rfm, p): + # Construct and cache a set of gens for each zone/fuel/period, then + # return lists of gens for each rfm/period as needed + try: + d = m.GENS_FOR_RFM_PERIOD_dict + except AttributeError: + d = m.GENS_FOR_RFM_PERIOD_dict = dict() + # d uses (zone, fuel, period) as key; could use (rfm, period) as key + # if m.zone_fuel_rfm (back-lookup) existed + for g in m.FUEL_BASED_GENS: + for f in m.FUELS_FOR_GEN[g]: + for p_ in m.PERIODS_FOR_GEN[g]: + d.setdefault((m.gen_load_zone[g], f, p_), []).append(g) + relevant_gens = [ + g + for z in m.ZONES_IN_RFM[rfm] + for g in d.pop((z, m.rfm_fuel[rfm], p), []) # pop releases memory + ] + return relevant_gens + mod.GENS_FOR_RFM_PERIOD = Set( + mod.REGIONAL_FUEL_MARKETS, mod.PERIODS, + initialize=GENS_FOR_RFM_PERIOD_rule + ) def Enforce_Fuel_Consumption_rule(m, rfm, p): return m.FuelConsumptionInMarket[rfm, p] == sum( - m.GenFuelUseRate[g, t, f] * m.tp_weight_in_year[t] - for (g, t, f) in m.GEN_TPS_FOR_RFM_PERIOD[rfm, p]) + m.GenFuelUseRate[g, t, m.rfm_fuel[rfm]] * m.tp_weight_in_year[t] + for g in m.GENS_FOR_RFM_PERIOD[rfm, p] + for t in m.TPS_IN_PERIOD[p] + ) mod.Enforce_Fuel_Consumption = Constraint( mod.REGIONAL_FUEL_MARKETS, mod.PERIODS, rule=Enforce_Fuel_Consumption_rule) diff --git a/switch_model/generators/core/build.py b/switch_model/generators/core/build.py index e94dae5fa..11cf0e093 100644 --- a/switch_model/generators/core/build.py +++ b/switch_model/generators/core/build.py @@ -182,7 +182,7 @@ def define_components(mod): TODO: - Allow early capacity retirements with savings on fixed O&M - + """ mod.GENERATION_PROJECTS = Set() mod.gen_dbid = Param(mod.GENERATION_PROJECTS, default=lambda m, g: g) @@ -190,7 +190,7 @@ def define_components(mod): mod.GENERATION_TECHNOLOGIES = Set(initialize=lambda m: {m.gen_tech[g] for g in m.GENERATION_PROJECTS} ) - mod.gen_energy_source = Param(mod.GENERATION_PROJECTS, + mod.gen_energy_source = Param(mod.GENERATION_PROJECTS, validate=lambda m,val,g: val in m.ENERGY_SOURCES or val == "multiple") mod.gen_load_zone = Param(mod.GENERATION_PROJECTS, within=mod.LOAD_ZONES) mod.gen_max_age = Param(mod.GENERATION_PROJECTS, within=PositiveIntegers) @@ -203,9 +203,9 @@ def define_components(mod): mod.gen_forced_outage_rate = Param(mod.GENERATION_PROJECTS, within=PercentFraction, default=0) mod.min_data_check('GENERATION_PROJECTS', 'gen_tech', 'gen_energy_source', - 'gen_load_zone', 'gen_max_age', 'gen_is_variable', + 'gen_load_zone', 'gen_max_age', 'gen_is_variable', 'gen_is_baseload') - + mod.GENS_IN_ZONE = Set( mod.LOAD_ZONES, initialize=lambda m, z: set( @@ -213,10 +213,13 @@ def define_components(mod): mod.VARIABLE_GENS = Set( initialize=mod.GENERATION_PROJECTS, filter=lambda m, g: m.gen_is_variable[g]) + mod.VARIABLE_GENS_IN_ZONE = Set( + mod.LOAD_ZONES, + initialize=lambda m, z: [g for g in m.GENS_IN_ZONE[z] if m.gen_is_variable[g]]) mod.BASELOAD_GENS = Set( initialize=mod.GENERATION_PROJECTS, filter=lambda m, g: 
m.gen_is_baseload[g]) - + mod.CAPACITY_LIMITED_GENS = Set(within=mod.GENERATION_PROJECTS) mod.gen_capacity_limit_mw = Param( mod.CAPACITY_LIMITED_GENS, within=PositiveReals) @@ -228,11 +231,11 @@ def define_components(mod): mod.CCS_EQUIPPED_GENS, within=PercentFraction) mod.gen_ccs_energy_load = Param( mod.CCS_EQUIPPED_GENS, within=PercentFraction) - + mod.gen_uses_fuel = Param( mod.GENERATION_PROJECTS, initialize=lambda m, g: ( - m.gen_energy_source[g] in m.FUELS + m.gen_energy_source[g] in m.FUELS or m.gen_energy_source[g] == "multiple")) mod.NON_FUEL_BASED_GENS = Set( initialize=mod.GENERATION_PROJECTS, @@ -242,15 +245,15 @@ def define_components(mod): filter=lambda m, g: m.gen_uses_fuel[g]) mod.gen_full_load_heat_rate = Param( mod.FUEL_BASED_GENS, - within=PositiveReals) + within=NonNegativeReals) mod.MULTIFUEL_GENS = Set( initialize=mod.GENERATION_PROJECTS, filter=lambda m, g: m.gen_energy_source[g] == "multiple") mod.FUELS_FOR_MULTIFUEL_GEN = Set(mod.MULTIFUEL_GENS, within=mod.FUELS) - mod.FUELS_FOR_GEN = Set(mod.FUEL_BASED_GENS, + mod.FUELS_FOR_GEN = Set(mod.FUEL_BASED_GENS, initialize=lambda m, g: ( - m.FUELS_FOR_MULTIFUEL_GEN[g] - if g in m.MULTIFUEL_GENS + m.FUELS_FOR_MULTIFUEL_GEN[g] + if g in m.MULTIFUEL_GENS else [m.gen_energy_source[g]])) mod.PREDETERMINED_GEN_BLD_YRS = Set( @@ -267,7 +270,7 @@ def define_components(mod): mod.PREDETERMINED_GEN_BLD_YRS, within=NonNegativeReals) mod.min_data_check('gen_predetermined_cap') - + def _gen_build_can_operate_in_period(m, g, build_year, period): if build_year in m.PERIODS: @@ -281,7 +284,7 @@ def _gen_build_can_operate_in_period(m, g, build_year, period): # This is probably more correct, but is a different behavior # mid_period = m.period_start[period] + 0.5 * m.period_length_years[period] # return online <= m.period_start[period] and mid_period <= retirement - + # The set of periods when a project built in a certain year will be online mod.PERIODS_FOR_GEN_BLD_YR = Set( mod.GEN_BLD_YRS, @@ -296,8 +299,13 @@ def _gen_build_can_operate_in_period(m, g, build_year, period): mod.GENERATION_PROJECTS, mod.PERIODS, initialize=lambda m, g, period: set( bld_yr for (gen, bld_yr) in m.GEN_BLD_YRS - if gen == g and + if gen == g and _gen_build_can_operate_in_period(m, g, bld_yr, period))) + # The set of periods when a generator is available to run + mod.PERIODS_FOR_GEN = Set( + mod.GENERATION_PROJECTS, + initialize=lambda m, g: [p for p in m.PERIODS if len(m.BLD_YRS_FOR_GEN_PERIOD[g, p]) > 0] + ) def bounds_BuildGen(model, g, bld_yr): if((g, bld_yr) in model.PREDETERMINED_GEN_BLD_YRS): @@ -314,12 +322,12 @@ def bounds_BuildGen(model, g, bld_yr): within=NonNegativeReals, bounds=bounds_BuildGen) # Some projects are retired before the first study period, so they - # don't appear in the objective function or any constraints. - # In this case, pyomo may leave the variable value undefined even + # don't appear in the objective function or any constraints. + # In this case, pyomo may leave the variable value undefined even # after a solve, instead of assigning a value within the allowed # range. This causes errors in the Progressive Hedging code, which - # expects every variable to have a value after the solve. So as a - # starting point we assign an appropriate value to all the existing + # expects every variable to have a value after the solve. So as a + # starting point we assign an appropriate value to all the existing # projects here. 
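# For example (hypothetical vintage), a project with gen_predetermined_cap # of 50 MW for build year 1998 has BuildGen[g, 1998] initialized to 50 MW # below, even if that vintage never enters the objective function.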
def BuildGen_assign_default_value(m, g, bld_yr): m.BuildGen[g, bld_yr] = m.gen_predetermined_cap[g, bld_yr] @@ -327,11 +335,22 @@ def BuildGen_assign_default_value(m, g, bld_yr): mod.PREDETERMINED_GEN_BLD_YRS, rule=BuildGen_assign_default_value) + # note: in pull request 78, commit e7f870d..., GEN_PERIODS + # was mistakenly redefined as GENERATION_PROJECTS * PERIODS. + # That didn't directly affect the objective function in the tests + # because most code uses GEN_TPS, which was defined correctly. + # But it did have some subtle effects on the main Hawaii model. + # It would be good to have a test that this set is correct, + # e.g., assertions that in the 3zone_toy model, + # ('C-Coal_ST', 2020) in m.GEN_PERIODS and ('C-Coal_ST', 2030) not in m.GEN_PERIODS + # and 'C-Coal_ST' in m.GENS_IN_PERIOD[2020] and 'C-Coal_ST' not in m.GENS_IN_PERIOD[2030] mod.GEN_PERIODS = Set( dimen=2, - initialize=mod.GENERATION_PROJECTS * mod.PERIODS) + initialize=lambda m: + [(g, p) for g in m.GENERATION_PROJECTS for p in m.PERIODS_FOR_GEN[g]]) + mod.GenCapacity = Expression( - mod.GEN_PERIODS, + mod.GENERATION_PROJECTS, mod.PERIODS, rule=lambda m, g, period: sum( m.BuildGen[g, bld_yr] for bld_yr in m.BLD_YRS_FOR_GEN_PERIOD[g, period])) @@ -355,9 +374,9 @@ def BuildGen_assign_default_value(m, g, bld_yr): mod.Enforce_Min_Build_Lower = Constraint( mod.NEW_GEN_WITH_MIN_BUILD_YEARS, rule=lambda m, g, p: ( - m.BuildMinGenCap[g, p] * m.gen_min_build_capacity[g] + m.BuildMinGenCap[g, p] * m.gen_min_build_capacity[g] <= m.BuildGen[g, p])) - + # Define a constant for enforcing binary constraints on project capacity # The value of 100 GW should be larger than any expected build size. For # perspective, the world's largest electric power plant (Three Gorges Dam) @@ -383,7 +402,7 @@ def BuildGen_assign_default_value(m, g, bld_yr): mod.GEN_BLD_YRS, within=NonNegativeReals) mod.min_data_check('gen_overnight_cost', 'gen_fixed_om') - + # Derived annual costs mod.gen_capital_cost_annual = Param( mod.GEN_BLD_YRS, @@ -393,12 +412,12 @@ def BuildGen_assign_default_value(m, g, bld_yr): crf(m.interest_rate, m.gen_max_age[g]))) mod.GenCapitalCosts = Expression( - mod.GEN_PERIODS, + mod.GENERATION_PROJECTS, mod.PERIODS, rule=lambda m, g, p: sum( m.BuildGen[g, bld_yr] * m.gen_capital_cost_annual[g, bld_yr] for bld_yr in m.BLD_YRS_FOR_GEN_PERIOD[g, p])) mod.GenFixedOMCosts = Expression( - mod.GEN_PERIODS, + mod.GENERATION_PROJECTS, mod.PERIODS, rule=lambda m, g, p: sum( m.BuildGen[g, bld_yr] * m.gen_fixed_om[g, bld_yr] for bld_yr in m.BLD_YRS_FOR_GEN_PERIOD[g, p])) @@ -452,7 +471,7 @@ def load_inputs(mod, switch_data, inputs_dir): auto_select=True, optional_params=['gen_dbid', 'gen_scheduled_outage_rate', 'gen_forced_outage_rate', 'gen_capacity_limit_mw', 'gen_unit_size', - 'gen_ccs_energy_load', 'gen_ccs_capture_efficiency', + 'gen_ccs_energy_load', 'gen_ccs_capture_efficiency', 'gen_min_build_capacity', 'gen_is_cogen', 'gen_is_distributed'], index=mod.GENERATION_PROJECTS, param=(mod.gen_dbid, mod.gen_tech, mod.gen_energy_source, @@ -460,7 +479,7 @@ def load_inputs(mod, switch_data, inputs_dir): mod.gen_is_baseload, mod.gen_scheduled_outage_rate, mod.gen_forced_outage_rate, mod.gen_capacity_limit_mw, mod.gen_unit_size, mod.gen_ccs_energy_load, - mod.gen_ccs_capture_efficiency, mod.gen_full_load_heat_rate, + mod.gen_ccs_capture_efficiency, mod.gen_full_load_heat_rate, mod.gen_variable_om, mod.gen_min_build_capacity, mod.gen_connect_cost_per_mw, mod.gen_is_cogen, mod.gen_is_distributed)) @@ -497,12 +516,13 @@ def post_solve(instance, 
outdir): write_table( instance, instance.GEN_PERIODS, output_file=os.path.join(outdir, "gen_cap.txt"), - headings=("GENERATION_PROJECT", "PERIOD", - "gen_tech", "gen_load_zone", "gen_energy_source", - "GenCapacity", "GenCapitalCosts", "GenFixedOMCosts"), + headings=( + "GENERATION_PROJECT", "PERIOD", + "gen_tech", "gen_load_zone", "gen_energy_source", + "GenCapacity", "GenCapitalCosts", "GenFixedOMCosts"), # Indexes are provided as a tuple, so put (g,p) in parentheses to # access the two components of the index individually. - values=lambda m, (g, p): ( - g, p, + values=lambda m, g, p: ( + g, p, m.gen_tech[g], m.gen_load_zone[g], m.gen_energy_source[g], m.GenCapacity[g, p], m.GenCapitalCosts[g, p], m.GenFixedOMCosts[g, p])) diff --git a/switch_model/generators/core/commit/discrete.py b/switch_model/generators/core/commit/discrete.py index 65ecdafbc..8a2a3120e 100644 --- a/switch_model/generators/core/commit/discrete.py +++ b/switch_model/generators/core/commit/discrete.py @@ -10,7 +10,7 @@ dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones',\ 'switch_model.financials', 'switch_model.energy_sources.properties',\ - 'switch_model.generators.core.build', 'switch_model.investment.gen_discrete_build',\ + 'switch_model.generators.core.build',\ 'switch_model.generators.core.dispatch', 'switch_model.operations.unitcommit' def define_components(mod): @@ -21,7 +21,7 @@ def define_components(mod): Unless otherwise stated, all power capacity is specified in units of MW and all sets and parameters are mandatory. - GEN_TPS_DISCRETE is a subset of GEN_TPS + DISCRETE_GEN_TPS is a subset of GEN_TPS that only includes projects that have gen_unit_size defined. CommitGenUnits[(g, bld_yr) in GEN_BLD_YRS_DISCRETE] is an @@ -48,15 +48,16 @@ def define_components(mod): """ - mod.GEN_TPS_DISCRETE = Set( - initialize=mod.GEN_TPS, - filter=lambda m, g, t: ( - g in m.DISCRETELY_SIZED_GENS)) + mod.DISCRETE_GEN_TPS = Set( + dimen=2, + initialize=lambda m: + [(g, t) for g in m.DISCRETELY_SIZED_GENS for t in m.TPS_FOR_GEN[g]] + ) mod.CommitGenUnits = Var( - mod.GEN_TPS_DISCRETE, + mod.DISCRETE_GEN_TPS, within=NonNegativeIntegers) mod.Commit_Units_Consistency = Constraint( - mod.GEN_TPS_DISCRETE, + mod.DISCRETE_GEN_TPS, rule=lambda m, g, t: ( m.CommitGen[g, t] == m.CommitGenUnits[g, t] * m.gen_unit_size[g] * m.gen_availability[g])) diff --git a/switch_model/generators/core/commit/fuel_use.py b/switch_model/generators/core/commit/fuel_use.py index 8c71f18d3..a72b5ff60 100644 --- a/switch_model/generators/core/commit/fuel_use.py +++ b/switch_model/generators/core/commit/fuel_use.py @@ -117,7 +117,7 @@ def FUEL_USE_SEGMENTS_FOR_GEN_default_rule(m, g): dimen=4, initialize=lambda m: [ (g, t, intercept, slope) - for (g, t) in m._FUEL_BASED_GEN_TPS + for (g, t) in m.FUEL_BASED_GEN_TPS for (intercept, slope) in m.FUEL_USE_SEGMENTS_FOR_GEN[g] ] ) @@ -183,7 +183,7 @@ def load_inputs(mod, switch_data, inputs_dir): path = os.path.join(inputs_dir, 'gen_inc_heat_rates.tab') if os.path.isfile(path): (fuel_rate_segments, min_load, full_hr) = _parse_inc_heat_rate_file( - path, id_column="project") + path, id_column="GENERATION_PROJECT") # Check implied minimum loading level for consistency with # gen_min_load_fraction if gen_min_load_fraction was provided. 
If # gen_min_load_fraction wasn't provided, set it to implied minimum diff --git a/switch_model/generators/core/commit/operate.py b/switch_model/generators/core/commit/operate.py index fe0323022..1638b2995 100644 --- a/switch_model/generators/core/commit/operate.py +++ b/switch_model/generators/core/commit/operate.py @@ -83,9 +83,9 @@ def define_components(mod): The capacity started up or shut down is completely determined by the change in CommitGen from one hour to the next, but we can't calculate these directly within the linear program because linear - programs don't have if statements. Instead, we'll define extra decision - variables that are tightly constrained. Since startup incurs costs and - shutdown does not, the linear program will not simultaneously set both + programs don't have if statements. Instead, we'll define extra decision + variables that are tightly constrained. Since startup incurs costs and + shutdown does not, the linear program will not simultaneously set both of these to non-zero values. StartupGenCapacity[(g, t) in GEN_TPS] is a decision variable @@ -136,7 +136,7 @@ def define_components(mod): downtime constraints are active. These are the indexing sets for the Enforce_Min_Uptime and Enforce_Min_Downtime constraints, and are probably not useful elsewhere. - + Enforce_Min_Uptime[(g, tp) in UPTIME_CONSTRAINED_GEN_TPS] and Enforce_Min_Downtime[(g, tp) in DOWNTIME_CONSTRAINED_GEN_TPS] are constraints that ensure that unit commitment respects the minimum @@ -158,7 +158,7 @@ def define_components(mod): rules. On the other hand any capacity that could have been committed at some point in the lookback window can be started up now, possibly replacing other units that were shut down recently. - + -- Dispatch limits based on committed capacity -- gen_min_load_fraction[g] describes the minimum loading level of a @@ -174,7 +174,7 @@ def define_components(mod): gen_min_load_fraction, but has separate entries for each timepoint. This could be used, for example, for non-curtailable renewable energy projects. This defaults to the value of gen_min_load_fraction[g]. - + gen_min_cap_factor[(g, t) in GEN_TPS] describes the minimum loading level for each project and timepoint as a fraction of committed capacity. This is an optional parameter that defaults @@ -262,23 +262,24 @@ def define_components(mod): within=NonNegativeReals) mod.Commit_StartupGenCapacity_ShutdownGenCapacity_Consistency = Constraint( mod.GEN_TPS, - rule=lambda m, g, t: - m.CommitGen[g, m.tp_previous[t]] - + m.StartupGenCapacity[g, t] - m.ShutdownGenCapacity[g, t] + rule=lambda m, g, t: + m.CommitGen[g, m.tp_previous[t]] + + m.StartupGenCapacity[g, t] - m.ShutdownGenCapacity[g, t] == m.CommitGen[g, t]) - + # StartupGenCapacity costs mod.gen_startup_fuel = Param(mod.FUEL_BASED_GENS, default=0.0) mod.gen_startup_om = Param(mod.GENERATION_PROJECTS, default=0.0) - # StartupGenCapacity costs need to be divided over the duration of the - # timepoint because it is a one-time expenditure in units of $ - # but Cost_Components_Per_TP requires an hourly cost rate in $ / hr. + # Note: lump-sum startup O&M cost is divided by the duration of the + # timepoint to give a cost-per-hour during this timepoint, as needed by + # Cost_Components_Per_TP. 
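+ # For example, $1,000 of startup O&M incurred during a 2-hour timepoint + # enters the expression below as $500/hr for that timepoint.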
mod.Total_StartupGenCapacity_OM_Costs = Expression( mod.TIMEPOINTS, rule=lambda m, t: sum( m.gen_startup_om[g] * m.StartupGenCapacity[g, t] / m.tp_duration_hrs[t] - for (g, t2) in m.GEN_TPS - if t == t2)) + for g in m.GENS_IN_PERIOD[m.tp_period[t]] + ) + ) mod.Cost_Components_Per_TP.append('Total_StartupGenCapacity_OM_Costs') mod.gen_min_uptime = Param( @@ -290,16 +291,16 @@ def define_components(mod): within=NonNegativeReals, default=0.0) mod.UPTIME_CONSTRAINED_GEN_TPS = Set(dimen=2, initialize=lambda m: [ - (g, tp) + (g, tp) for g in m.GENERATION_PROJECTS if m.gen_min_uptime[g] > 0.0 - for tp in m.TPS_FOR_GEN[g] + for tp in m.TPS_FOR_GEN[g] ]) mod.DOWNTIME_CONSTRAINED_GEN_TPS = Set(dimen=2, initialize=lambda m: [ - (g, tp) + (g, tp) for g in m.GENERATION_PROJECTS if m.gen_min_downtime[g] > 0.0 - for tp in m.TPS_FOR_GEN[g] + for tp in m.TPS_FOR_GEN[g] ]) - + def tp_prev(m, tp, n=1): # find nth previous timepoint, wrapping from start to end of day return m.TPS_IN_TS[m.tp_ts[tp]].prevw(tp, n) @@ -308,10 +309,10 @@ def min_time_rule(m, g, tp, up): """ This uses a simple rule: all capacity turned on in the last x hours must still be on now (or all capacity recently turned off must still be off).""" - + # how many timepoints must the project stay on/off once it's # started/shutdown? - # note: StartupGenCapacity and ShutdownGenCapacity are assumed to occur at the start of + # note: StartupGenCapacity and ShutdownGenCapacity are assumed to occur at the start of # the timepoint n_tp = int(round( (m.gen_min_uptime[g] if up else m.gen_min_downtime[g]) @@ -325,31 +326,31 @@ def min_time_rule(m, g, tp, up): # behavior of range()), because the current timepoint is # included in the duration when the capacity will be on/off. if up: - rule = ( - # online capacity >= recent startups + rule = ( + # online capacity >= recent startups # (all recent startups are still online) - m.CommitGen[g, tp] - >= + m.CommitGen[g, tp] + >= sum(m.StartupGenCapacity[g, tp_prev(m, tp, i)] for i in range(n_tp)) ) else: # Find the largest fraction of capacity that could have # been committed in the last x hours, including the # current hour. We assume that everything above this band - # must remain turned off (e.g., on maintenance outage). + # must remain turned off (e.g., on maintenance outage). # Note: this band extends one step prior to the first # relevant shutdown, since that capacity could have been # online in the prior step. committable_fraction = m.gen_availability[g] * max( - m.gen_max_commit_fraction[g, tp_prev(m, tp, i)] + m.gen_max_commit_fraction[g, tp_prev(m, tp, i)] for i in range(n_tp+1) ) - rule = ( + rule = ( # committable capacity - committed >= recent shutdowns # (all recent shutdowns are still offline) m.GenCapacityInTP[g, tp] * committable_fraction - - m.CommitGen[g, tp] - >= + - m.CommitGen[g, tp] + >= sum(m.ShutdownGenCapacity[g, tp_prev(m, tp, i)] for i in range(n_tp)) ) return rule @@ -359,7 +360,7 @@ def min_time_rule(m, g, tp, up): mod.Enforce_Min_Downtime = Constraint( mod.DOWNTIME_CONSTRAINED_GEN_TPS, rule=lambda *a: min_time_rule(*a, up=False) ) - + # Dispatch limits relative to committed capacity. 
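# e.g., (illustrative) a project with 50 MW committed and a # gen_min_load_fraction of 0.4 must dispatch at least 20 MW whenever it is # committed, per the limits defined below.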
mod.gen_min_load_fraction = Param( mod.GENERATION_PROJECTS, @@ -430,5 +431,5 @@ def load_inputs(mod, switch_data, inputs_dir): optional=True, filename=os.path.join(inputs_dir, 'gen_timepoint_commit_bounds.tab'), auto_select=True, - param=(mod.gen_min_commit_fraction, + param=(mod.gen_min_commit_fraction, mod.gen_max_commit_fraction, mod.gen_min_load_fraction_TP)) diff --git a/switch_model/generators/core/dispatch.py b/switch_model/generators/core/dispatch.py index d87fd4eff..d4b75fc80 100644 --- a/switch_model/generators/core/dispatch.py +++ b/switch_model/generators/core/dispatch.py @@ -36,13 +36,13 @@ def define_components(mod): they can be dispatched. A dispatch decision is made for each member of this set. Members of this set can be abbreviated as (g, t). - - TPS_FOR_GEN[g] is a set array showing all timepoints when a - project is active. These are the timepoints corresponding to - PERIODS_FOR_GEN. This is the same data as GEN_TPS, + + TPS_FOR_GEN[g] is a set array showing all timepoints when a + project is active. These are the timepoints corresponding to + PERIODS_FOR_GEN. This is the same data as GEN_TPS, but split into separate sets for each project. - TPS_FOR_GEN_IN_PERIOD[g, period] is the same as + TPS_FOR_GEN_IN_PERIOD[g, period] is the same as TPS_FOR_GEN, but broken down by period. Periods when the project is inactive will yield an empty set. @@ -102,13 +102,13 @@ def define_components(mod): in $base_year/hour in the future period (rather than Net Present Value). - _FUEL_BASED_GEN_TPS is a subset of GEN_TPS - showing all times when fuel-consuming projects could be dispatched + FUEL_BASED_GEN_TPS is a subset of GEN_TPS + showing all times when fuel-consuming projects could be dispatched (used to identify timepoints when fuel use must match power production). GEN_TP_FUELS is a subset of GEN_TPS * FUELS, showing all the valid combinations of project, timepoint and fuel, - i.e., all the times when each project could consume a fuel that is + i.e., all the times when each project could consume a fuel that is limited, costly or produces emissions. GenFuelUseRate[(g, t, f) in GEN_TP_FUELS] is a @@ -129,7 +129,7 @@ def define_components(mod): fuel's upstream emissions, as well as Carbon Capture efficiency for generators that implement Carbon Capture and Sequestration. This does not yet support multi-fuel generators. - + AnnualEmissions[p in PERIODS]: The system's annual emissions, in metric tonnes of CO2 per year. 
@@ -161,19 +161,13 @@ def period_active_gen_rule(m, period): mod.GENS_IN_PERIOD = Set(mod.PERIODS, initialize=period_active_gen_rule, doc="The set of projects active in a given period.") - def TPS_FOR_GEN_rule(m, gen): - if not hasattr(m, '_TPS_FOR_GEN_dict'): - m._TPS_FOR_GEN_dict = collections.defaultdict(set) - for (_gen, period) in m.GEN_PERIODS: - for t in m.TPS_IN_PERIOD[period]: - m._TPS_FOR_GEN_dict[_gen].add(t) - result = m._TPS_FOR_GEN_dict.pop(gen) - if len(m._TPS_FOR_GEN_dict) == 0: - delattr(m, '_TPS_FOR_GEN_dict') - return result mod.TPS_FOR_GEN = Set( - mod.GENERATION_PROJECTS, within=mod.TIMEPOINTS, - rule=TPS_FOR_GEN_rule) + mod.GENERATION_PROJECTS, + within=mod.TIMEPOINTS, + rule=lambda m, g: ( + tp for p in m.PERIODS_FOR_GEN[g] for tp in m.TPS_IN_PERIOD[p] + ) + ) def TPS_FOR_GEN_IN_PERIOD_rule(m, gen, period): if not hasattr(m, '_TPS_FOR_GEN_IN_PERIOD_dict'): @@ -187,33 +181,33 @@ def TPS_FOR_GEN_IN_PERIOD_rule(m, gen, period): if len(m._TPS_FOR_GEN_IN_PERIOD_dict) == 0: delattr(m, '_TPS_FOR_GEN_IN_PERIOD_dict') return result - mod.TPS_FOR_GEN_IN_PERIOD = Set(mod.GENERATION_PROJECTS, mod.PERIODS, + mod.TPS_FOR_GEN_IN_PERIOD = Set(mod.GENERATION_PROJECTS, mod.PERIODS, within=mod.TIMEPOINTS, rule=TPS_FOR_GEN_IN_PERIOD_rule) mod.GEN_TPS = Set( dimen=2, initialize=lambda m: ( - (g, tp) - for g in m.GENERATION_PROJECTS + (g, tp) + for g in m.GENERATION_PROJECTS for tp in m.TPS_FOR_GEN[g])) mod.VARIABLE_GEN_TPS = Set( dimen=2, initialize=lambda m: ( - (g, tp) + (g, tp) for g in m.VARIABLE_GENS for tp in m.TPS_FOR_GEN[g])) - mod._FUEL_BASED_GEN_TPS = Set( + mod.FUEL_BASED_GEN_TPS = Set( dimen=2, initialize=lambda m: ( - (g, tp) + (g, tp) for g in m.FUEL_BASED_GENS for tp in m.TPS_FOR_GEN[g])) mod.GEN_TP_FUELS = Set( dimen=3, initialize=lambda m: ( - (g, t, f) - for (g, t) in m._FUEL_BASED_GEN_TPS + (g, t, f) + for (g, t) in m.FUEL_BASED_GEN_TPS for f in m.FUELS_FOR_GEN[g])) mod.GenCapacityInTP = Expression( diff --git a/switch_model/generators/core/no_commit.py b/switch_model/generators/core/no_commit.py index 701ad48eb..18a83fbb5 100644 --- a/switch_model/generators/core/no_commit.py +++ b/switch_model/generators/core/no_commit.py @@ -56,21 +56,21 @@ def define_components(mod): """ - # NOTE: DispatchBaseloadByPeriod should eventually be replaced by + # NOTE: DispatchBaseloadByPeriod should eventually be replaced by # an "ActiveCapacityDuringPeriod" decision variable that applies to all # projects. This should be constrained - # based on the amount of installed capacity each period, and then + # based on the amount of installed capacity each period, and then # DispatchUpperLimit and DispatchLowerLimit should be calculated - # relative to ActiveCapacityDuringPeriod. Fixed O&M (but not capital + # relative to ActiveCapacityDuringPeriod. Fixed O&M (but not capital # costs) should be calculated based on ActiveCapacityDuringPeriod. # This would allow mothballing (and possibly restarting) projects. # Choose flat operating level for baseload plants during each period # (not necessarily running all available capacity) - # Note: this is unconstrained, because other constraints limit project + # Note: this is unconstrained, because other constraints limit project # dispatch during each timepoint and therefore the level of this variable. 
mod.DispatchBaseloadByPeriod = Var(mod.BASELOAD_GENS, mod.PERIODS) - + def DispatchUpperLimit_expr(m, g, t): if g in m.VARIABLE_GENS: return (m.GenCapacityInTP[g, t] * m.gen_availability[g] * @@ -83,7 +83,7 @@ def DispatchUpperLimit_expr(m, g, t): mod.Enforce_Dispatch_Baseload_Flat = Constraint( mod.GEN_TPS, - rule=lambda m, g, t: + rule=lambda m, g, t: (m.DispatchGen[g, t] == m.DispatchBaseloadByPeriod[g, m.tp_period[t]]) if g in m.BASELOAD_GENS else Constraint.Skip) @@ -94,8 +94,7 @@ def DispatchUpperLimit_expr(m, g, t): m.DispatchGen[g, t] <= m.DispatchUpperLimit[g, t])) mod.GenFuelUseRate_Calculate = Constraint( - mod._FUEL_BASED_GEN_TPS, + mod.FUEL_BASED_GEN_TPS, rule=lambda m, g, t: ( - sum(m.GenFuelUseRate[g, t, f] - for f in m.FUELS_FOR_GEN[g]) + sum(m.GenFuelUseRate[g, t, f] for f in m.FUELS_FOR_GEN[g]) == m.DispatchGen[g, t] * m.gen_full_load_heat_rate[g])) diff --git a/switch_model/generators/extensions/storage.py b/switch_model/generators/extensions/storage.py index af67230fc..62ab451a9 100644 --- a/switch_model/generators/extensions/storage.py +++ b/switch_model/generators/extensions/storage.py @@ -38,9 +38,21 @@ def define_components(mod): gen_store_to_release_ratio[STORAGE_GENS] describes the maximum rate that energy can be stored, expressed as a ratio of discharge power capacity. This is an optional parameter and will default to 1. If a - storage project has 1 MW of dischage capacity and a max_store_rate + storage project has 1 MW of discharge capacity and a gen_store_to_release_ratio of 1.2, then it can consume up to 1.2 MW of power while charging. + gen_storage_energy_to_power_ratio[STORAGE_GENS], if specified, restricts + the storage capacity (in MWh) to be a fixed multiple of the output + power (in MW), i.e., specifies a particular number of hours of + storage capacity. Omit this column or specify "." to allow Switch + to choose the energy/power ratio. (Note: gen_storage_energy_overnight_cost + or gen_overnight_cost should often be set to 0 when using this.) + + gen_storage_max_cycles_per_year[STORAGE_GENS], if specified, restricts + the number of charge/discharge cycles each storage project can perform + per year; one cycle is defined as discharging an amount of energy + equal to the storage capacity of the project. + gen_storage_energy_overnight_cost[(g, bld_yr) in STORAGE_GEN_BLD_YRS] is the overnight capital cost per MWh of energy capacity for building the given storage technology installed in the @@ -86,14 +98,28 @@ def define_components(mod): """ mod.STORAGE_GENS = Set(within=mod.GENERATION_PROJECTS) + mod.STORAGE_GEN_PERIODS = Set( + within=mod.GEN_PERIODS, + initialize=lambda m: [(g, p) for g in m.STORAGE_GENS for p in m.PERIODS_FOR_GEN[g]] + ) mod.gen_storage_efficiency = Param( mod.STORAGE_GENS, within=PercentFraction) + # TODO: rename to gen_charge_to_discharge_ratio? 
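+ # e.g., with gen_store_to_release_ratio = 1.2, a project with 1 MW of + # discharge capacity may draw up to 1.2 MW while charging + # (see Charge_Storage_Upper_Limit_rule below).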
mod.gen_store_to_release_ratio = Param( mod.STORAGE_GENS, within=PositiveReals, default=1.0) + mod.gen_storage_energy_to_power_ratio = Param( + mod.STORAGE_GENS, + within=NonNegativeReals, + default=float("inf")) # inf is a flag that no value is specified (nan and None don't work) + mod.gen_storage_max_cycles_per_year = Param( + mod.STORAGE_GENS, + within=NonNegativeReals, + default=float('inf')) + # TODO: build this set up instead of filtering down, to improve performance mod.STORAGE_GEN_BLD_YRS = Set( dimen=2, initialize=mod.GEN_BLD_YRS, @@ -134,7 +160,9 @@ def define_components(mod): within=NonNegativeReals) # Summarize storage charging for the energy balance equations - def StorageNetCharge_rule(m, z, t): + # TODO: rename this StorageTotalCharging or similar (to indicate it's a + # sum for a zone, not a net quantity for a project) + def rule(m, z, t): # Construct and cache a set for summation as needed if not hasattr(m, 'Storage_Charge_Summation_dict'): m.Storage_Charge_Summation_dict = collections.defaultdict(set) @@ -142,15 +170,22 @@ def StorageNetCharge_rule(m, z, t): z2 = m.gen_load_zone[g] m.Storage_Charge_Summation_dict[z2, t2].add(g) # Use pop to free memory - relevant_projects = m.Storage_Charge_Summation_dict.pop((z, t)) + relevant_projects = m.Storage_Charge_Summation_dict.pop((z, t), {}) return sum(m.ChargeStorage[g, t] for g in relevant_projects) - mod.StorageNetCharge = Expression( - mod.LOAD_ZONES, mod.TIMEPOINTS, - rule=StorageNetCharge_rule) + mod.StorageNetCharge = Expression(mod.LOAD_ZONES, mod.TIMEPOINTS, rule=rule) # Register net charging with zonal energy balance. Discharging is already # covered by DispatchGen. mod.Zone_Power_Withdrawals.append('StorageNetCharge') + # use fixed energy/power ratio (# hours of capacity) when specified + mod.Enforce_Fixed_Energy_Storage_Ratio = Constraint( + mod.STORAGE_GEN_BLD_YRS, + rule=lambda m, g, y: + Constraint.Skip if m.gen_storage_energy_to_power_ratio[g] == float("inf") # no value specified + else + (m.BuildStorageEnergy[g, y] == m.gen_storage_energy_to_power_ratio[g] * m.BuildGen[g, y]) + ) + def Charge_Storage_Upper_Limit_rule(m, g, t): return m.ChargeStorage[g,t] <= \ m.DispatchUpperLimit[g, t] * m.gen_store_to_release_ratio[g] @@ -178,6 +213,19 @@ def State_Of_Charge_Upper_Limit_rule(m, g, t): mod.STORAGE_GEN_TPS, rule=State_Of_Charge_Upper_Limit_rule) + # batteries can only complete the specified number of cycles per year, averaged over each period + mod.Battery_Cycle_Limit = Constraint( + mod.STORAGE_GEN_PERIODS, + rule=lambda m, g, p: + # solvers sometimes perform badly with infinite constraint + Constraint.Skip if m.gen_storage_max_cycles_per_year[g] == float('inf') + else ( + sum(m.DispatchGen[g, tp] * m.tp_duration_hrs[tp] for tp in m.TPS_IN_PERIOD[p]) + <= + m.gen_storage_max_cycles_per_year[g] * m.StorageEnergyCapacity[g, p] * m.period_length_years[p] + ) + ) + def load_inputs(mod, switch_data, inputs_dir): """ @@ -186,7 +234,8 @@ def load_inputs(mod, switch_data, inputs_dir): generation_projects_info.tab GENERATION_PROJECT, ... - gen_storage_efficiency, gen_store_to_release_ratio* + gen_storage_efficiency, gen_store_to_release_ratio*, + gen_storage_energy_to_power_ratio*, gen_storage_max_cycles_per_year* gen_build_costs.tab GENERATION_PROJECT, build_year, ... 
@@ -194,12 +243,19 @@ def load_inputs(mod, switch_data, inputs_dir): """ + # TODO: maybe move these columns to a storage_gen_info file to avoid the weird index + # reading and avoid having to create these extra columns for all projects; + # Alternatively, say that these values are specified for _all_ projects (maybe with None + # as default) and then define STORAGE_GENS as the subset of projects for which + # gen_storage_efficiency has been specified, then require valid settings for all + # STORAGE_GENS. switch_data.load_aug( filename=os.path.join(inputs_dir, 'generation_projects_info.tab'), auto_select=True, - optional_params=['gen_store_to_release_ratio'], - param=(mod.gen_storage_efficiency, mod.gen_store_to_release_ratio)) + optional_params=['gen_store_to_release_ratio', 'gen_storage_energy_to_power_ratio', 'gen_storage_max_cycles_per_year'], + param=(mod.gen_storage_efficiency, mod.gen_store_to_release_ratio, mod.gen_storage_energy_to_power_ratio, mod.gen_storage_max_cycles_per_year)) # Base the set of storage projects on storage efficiency being specified. + # TODO: define this in a more normal way switch_data.data()['STORAGE_GENS'] = { None: switch_data.data(name='gen_storage_efficiency').keys()} switch_data.load_aug( @@ -217,10 +273,10 @@ def post_solve(instance, outdir): reporting.write_table( instance, instance.STORAGE_GEN_BLD_YRS, output_file=os.path.join(outdir, "storage_builds.txt"), - headings=("project", "period", "load_zone", + headings=("generation_project", "period", "load_zone", "IncrementalPowerCapacityMW", "IncrementalEnergyCapacityMWh", "OnlinePowerCapacityMW", "OnlineEnergyCapacityMWh" ), - values=lambda m, (g, bld_yr): ( + values=lambda m, g, bld_yr: ( g, bld_yr, m.gen_load_zone[g], m.BuildGen[g, bld_yr], m.BuildStorageEnergy[g, bld_yr], m.GenCapacity[g, bld_yr], m.StorageEnergyCapacity[g, bld_yr] @@ -228,9 +284,9 @@ def post_solve(instance, outdir): reporting.write_table( instance, instance.STORAGE_GEN_TPS, output_file=os.path.join(outdir, "storage_dispatch.txt"), - headings=("project", "timepoint", "load_zone", + headings=("generation_project", "timepoint", "load_zone", "ChargeMW", "DischargeMW", "StateOfCharge"), - values=lambda m, (g, t): ( + values=lambda m, g, t: ( g, m.tp_timestamp[t], m.gen_load_zone[g], m.ChargeStorage[g, t], m.DispatchGen[g, t], m.StateOfCharge[g, t] diff --git a/switch_model/hawaii/batteries_fixed_calendar_life.py b/switch_model/hawaii/batteries_fixed_calendar_life.py index 228e34ec4..e5ddc50a8 100644 --- a/switch_model/hawaii/batteries_fixed_calendar_life.py +++ b/switch_model/hawaii/batteries_fixed_calendar_life.py @@ -86,32 +86,34 @@ def define_components(m): m.Battery_Max_Charge_Rate = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: m.ChargeBattery[z, t] <= - m.Battery_Capacity[z, m.tp_period[t]] * m.battery_max_discharge / m.battery_min_discharge_time + # changed 2018-02-20 to allow full discharge in min_discharge_time, + # (previously pegged to battery_max_discharge) + m.Battery_Capacity[z, m.tp_period[t]] / m.battery_min_discharge_time ) m.Battery_Max_Discharge_Rate = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: m.DischargeBattery[z, t] <= - m.Battery_Capacity[z, m.tp_period[t]] * m.battery_max_discharge / m.battery_min_discharge_time + m.Battery_Capacity[z, m.tp_period[t]] / m.battery_min_discharge_time ) # how much could output/input be increased on short notice (to provide reserves) m.BatterySlackUp = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.Battery_Capacity[z, m.tp_period[t]] * 
m.battery_max_discharge / m.battery_min_discharge_time + m.Battery_Capacity[z, m.tp_period[t]] / m.battery_min_discharge_time - m.DischargeBattery[z, t] + m.ChargeBattery[z, t] ) m.BatterySlackDown = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.Battery_Capacity[z, m.tp_period[t]] * m.battery_max_discharge / m.battery_min_discharge_time + m.Battery_Capacity[z, m.tp_period[t]] / m.battery_min_discharge_time - m.ChargeBattery[z, t] + m.DischargeBattery[z, t] ) - # assume batteries can only complete one full cycle (charged to max discharge) - # per day, averaged over each period + # assume batteries can only complete one full cycle per day, averaged over each period + # (this was pegged to battery_max_discharge before 2018-02-20) m.Battery_Cycle_Limit = Constraint(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: sum(m.DischargeBattery[z, tp] * m.tp_duration_hrs[tp] for tp in m.TPS_IN_PERIOD[p]) <= - m.Battery_Capacity[z, p] * m.battery_max_discharge * m.period_length_hours[p] + m.Battery_Capacity[z, p] * m.period_length_hours[p] ) # Register with spinning reserves if it is available diff --git a/switch_model/hawaii/demand_response_simple.py b/switch_model/hawaii/demand_response_simple.py index 063182e86..a4bd8aa94 100644 --- a/switch_model/hawaii/demand_response_simple.py +++ b/switch_model/hawaii/demand_response_simple.py @@ -5,6 +5,11 @@ def define_arguments(argparser): argparser.add_argument('--demand-response-share', type=float, default=0.30, help="Fraction of hourly load that can be shifted to other times of day (default=0.30)") + argparser.add_argument('--demand-response-reserve-types', nargs='+', default=['spinning'], + help= + "Type(s) of reserves to provide from demand response (e.g., 'contingency' or 'regulation'). " + "Specify 'none' to disable." 
+ ) def define_components(m): @@ -16,18 +21,10 @@ def define_components(m): m.ShiftDemand = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=Reals, bounds=lambda m, z, t: ( (-1.0) * m.demand_response_max_share * m.zone_demand_mw[z, t], - None + # assume all shiftable load can be concentrated into 3 hours (no less) + None # 24/3 * m.demand_response_max_share * m.zone_demand_mw[z, t] ) ) - # Register with spinning reserves if it is available - if 'Spinning_Reserve_Up_Provisions' in dir(m): - m.HIDemandResponseSimpleSpinningReserveUp = Expression( - m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, b, t: - sum(m.DemandResponse[z, t] - m.DemandResponse[z, t].lb - for z in m.ZONES_IN_BALANCING_AREA[b]) - ) - m.Spinning_Reserve_Up_Provisions.append('HIDemandResponseSimpleSpinningReserveUp') # all changes to demand must balance out over the course of the day m.Demand_Response_Net_Zero = Constraint(m.LOAD_ZONES, m.TIMESERIES, rule=lambda m, z, ts: @@ -36,3 +33,64 @@ def define_components(m): # add demand response to the zonal energy balance m.Zone_Power_Withdrawals.append('ShiftDemand') + + if [rt.lower() for rt in m.options.demand_response_reserve_types] != ['none']: + # Register with spinning reserves + if hasattr(m, 'Spinning_Reserve_Up_Provisions'): + # calculate available slack from demand response + # (from supply perspective, so "up" means less load) + m.DemandResponseSlackUp = Expression(m.BALANCING_AREA_TIMEPOINTS, rule=lambda m, b, t: + sum( + m.ShiftDemand[z, t] - m.ShiftDemand[z, t].lb + for z in m.ZONES_IN_BALANCING_AREA[b] + ) + ) + m.DemandResponseSlackDown = Expression(m.BALANCING_AREA_TIMEPOINTS, rule=lambda m, b, tp: + sum( + # Assume shiftable load can only be raised by factor of 8 (i.e., concentrate in 3 hours) + 24/3 * m.demand_response_max_share * m.zone_demand_mw[z, tp] - m.ShiftDemand[z, tp] + for z in m.ZONES_IN_BALANCING_AREA[b] + ) + ) + if hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'): + # using advanced formulation, index by reserve type, balancing area, timepoint + # define variables for each type of reserves to be provided + # choose how to allocate the slack between the different reserve products + m.DR_SPINNING_RESERVE_TYPES = Set( + initialize=m.options.demand_response_reserve_types + ) + m.DemandResponseSpinningReserveUp = Var( + m.DR_SPINNING_RESERVE_TYPES, m.BALANCING_AREA_TIMEPOINTS, + within=NonNegativeReals + ) + m.DemandResponseSpinningReserveDown = Var( + m.DR_SPINNING_RESERVE_TYPES, m.BALANCING_AREA_TIMEPOINTS, + within=NonNegativeReals + ) + # constrain reserve provision within available slack + m.Limit_DemandResponseSpinningReserveUp = Constraint( + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, ba, tp: + sum( + m.DemandResponseSpinningReserveUp[rt, ba, tp] + for rt in m.DR_SPINNING_RESERVE_TYPES + ) <= m.DemandResponseSlackUp[ba, tp] + ) + m.Limit_DemandResponseSpinningReserveDown = Constraint( + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, ba, tp: + sum( + m.DemandResponseSpinningReserveDown[rt, ba, tp] + for rt in m.DR_SPINNING_RESERVE_TYPES + ) <= m.DemandResponseSlackDown[ba, tp] + ) + m.Spinning_Reserve_Up_Provisions.append('DemandResponseSpinningReserveUp') + m.Spinning_Reserve_Down_Provisions.append('DemandResponseSpinningReserveDown') + else: + # using older formulation, only one type of spinning reserves, indexed by balancing area, timepoint + if m.options.demand_response_reserve_types != ['spinning']: + raise ValueError( + 'Unable to use reserve types other than "spinning" with simple spinning reserves module.' 
+            )
+            m.Spinning_Reserve_Up_Provisions.append('DemandResponseSlackUp')
+            m.Spinning_Reserve_Down_Provisions.append('DemandResponseSlackDown')
diff --git a/switch_model/hawaii/ev.py b/switch_model/hawaii/ev.py
index 0f3c53c2e..554bbc5c3 100644
--- a/switch_model/hawaii/ev.py
+++ b/switch_model/hawaii/ev.py
@@ -5,6 +5,11 @@ def define_arguments(argparser):
     argparser.add_argument("--ev-timing", choices=['bau', 'flat', 'optimal'], default='optimal',
         help="Rule for when to charge EVs -- business-as-usual (upon arrival), flat around the clock, or optimal (default).")
+    argparser.add_argument('--ev-reserve-types', nargs='+', default=['spinning'],
+        help=
+            "Type(s) of reserves to provide from electric-vehicle charging (e.g., 'contingency' or 'regulation'). "
+            "Default is generic 'spinning'. Specify 'none' to disable. Only takes effect with '--ev-timing optimal'."
+    )
 
 def define_components(m):
     # setup various parameters describing the EV and ICE fleet each year
@@ -85,17 +90,48 @@ def define_components(m):
     # add the EV load to the model's energy balance
     m.Zone_Power_Withdrawals.append('ChargeEVs')
 
-    # Register with spinning reserves if it is available and optimal EV
-    # charging is enabled.
-    if('Spinning_Reserve_Up_Provisions' in dir(m) and
-       m.options.ev_timing == "optimal"):
-        m.EVSpinningReserveUp = Expression(
-            m.BALANCING_AREA_TIMEPOINTS,
-            rule=lambda m, b, t:
-                sum(m.ChargeEVs[z, t]
-                    for z in m.ZONES_IN_BALANCING_AREA[b])
-        )
-        m.Spinning_Reserve_Up_Provisions.append('EVSpinningReserveUp')
+    # Register with spinning reserves if it is available and optimal EV charging is enabled.
+    if [rt.lower() for rt in m.options.ev_reserve_types] != ['none'] and m.options.ev_timing == "optimal":
+        if hasattr(m, 'Spinning_Reserve_Up_Provisions'):
+            # calculate available slack from EV charging
+            # (from supply perspective, so "up" means less load)
+            m.EVSlackUp = Expression(
+                m.BALANCING_AREA_TIMEPOINTS,
+                rule=lambda m, b, t:
+                    sum(m.ChargeEVs[z, t] for z in m.ZONES_IN_BALANCING_AREA[b])
+            )
+            # note: we currently ignore down-reserves (option of increasing consumption)
+            # from EVs since it's not clear how high they could go; we could revisit this if
+            # down-reserves have a positive price at equilibrium (probably won't)
+            if hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'):
+                # using advanced formulation, index by reserve type, balancing area, timepoint
+                # define variables for each type of reserves to be provided
+                # choose how to allocate the slack between the different reserve products
+                m.EV_SPINNING_RESERVE_TYPES = Set(
+                    initialize=m.options.ev_reserve_types
+                )
+                m.EVSpinningReserveUp = Var(
+                    m.EV_SPINNING_RESERVE_TYPES, m.BALANCING_AREA_TIMEPOINTS,
+                    within=NonNegativeReals
+                )
+                # constrain reserve provision within available slack
+                m.Limit_EVSpinningReserveUp = Constraint(
+                    m.BALANCING_AREA_TIMEPOINTS,
+                    rule=lambda m, ba, tp:
+                        sum(
+                            m.EVSpinningReserveUp[rt, ba, tp]
+                            for rt in m.EV_SPINNING_RESERVE_TYPES
+                        ) <= m.EVSlackUp[ba, tp]
+                )
+                m.Spinning_Reserve_Up_Provisions.append('EVSpinningReserveUp')
+            else:
+                # using older formulation, only one type of spinning reserves, indexed by balancing area, timepoint
+                if m.options.ev_reserve_types != ['spinning']:
+                    raise ValueError(
+                        'Unable to use reserve types other than "spinning" with simple spinning reserves module.'
+                    )
+                m.Spinning_Reserve_Up_Provisions.append('EVSlackUp')
+
 def load_inputs(m, switch_data, inputs_dir):
diff --git a/switch_model/hawaii/ev_advanced.py b/switch_model/hawaii/ev_advanced.py
new file mode 100644
index 000000000..ffc921902
--- /dev/null
+++ b/switch_model/hawaii/ev_advanced.py
@@ -0,0 +1,226 @@
+import os
+from pyomo.environ import *
+
+def define_arguments(argparser):
+    argparser.add_argument("--ev-timing", choices=['bau', 'optimal'], default='optimal',
+        help="Rule for when to charge EVs -- business-as-usual (upon arrival) or optimal (default).")
+    argparser.add_argument('--ev-reserve-types', nargs='+', default=['spinning'],
+        help=
+            "Type(s) of reserves to provide from electric-vehicle charging (e.g., 'contingency' or 'regulation'). "
+            "Default is generic 'spinning'. Specify 'none' to disable. Only takes effect with '--ev-timing optimal'."
+    )
+
+# parameters describing the EV and ICE fleet each year, all indexed by zone,
+# vehicle type and period
+ev_zone_type_period_params = [
+    "n_vehicles",
+    "ice_gals_per_year", "ice_fuel", "ev_kwh_per_year",
+    "ev_extra_cost_per_vehicle_year"
+]
+
+def define_components(m):
+
+    # indexing set for EV bids, decomposed to get sets of EV bid numbers and EV types
+    m.EV_ZONE_TYPE_BID_TP = Set(dimen=4)  # load zone, vehicle type, bid number, timepoint
+    def rule(m):
+        bids = m.EV_BID_NUMS_set = set()
+        types = m.EV_TYPES_set = set()
+        for z, t, n, tp in m.EV_ZONE_TYPE_BID_TP:
+            bids.add(n)
+            types.add(t)
+    m.Split_EV_Sets = BuildAction(rule=rule)
+    m.EV_BID_NUMS = Set(initialize=lambda m: m.EV_BID_NUMS_set)
+    m.EV_TYPES = Set(initialize=lambda m: m.EV_TYPES_set)
+
+    # parameters describing the EV and ICE fleet each year
+
+    # fraction of vehicle fleet that will be electrified in each period (0-1)
+    # (could eventually be a decision variable)
+    m.ev_share = Param(m.LOAD_ZONES, m.PERIODS, within=PercentFraction)
+    for p in ev_zone_type_period_params:
+        setattr(m, p, Param(m.LOAD_ZONES, m.EV_TYPES, m.PERIODS))
+
+    # calculate the extra annual cost (non-fuel) of having EVs, relative to ICEs,
+    # for batteries and chargers
+    m.ev_extra_annual_cost = Param(
+        m.PERIODS, initialize=lambda m, p:
+        sum(
+            m.ev_share[z, p]
+            * m.n_vehicles[z, t, p]
+            * m.ev_extra_cost_per_vehicle_year[z, t, p]
+            for z in m.LOAD_ZONES
+            for t in m.EV_TYPES
+        )
+    )
+    # calculate total fuel cost for ICE (non-EV) VMTs
+    motor_fuel_mmbtu_per_gallon = {
+        # from https://www.eia.gov/Energyexplained/?page=about_energy_units
+        "Motor_Gasoline": 0.120476,
+        "Motor_Diesel": 0.137452
+    }
+    if hasattr(m, "rfm_supply_tier_cost"):
+        ice_fuel_cost_func = lambda m, z, p, f: m.rfm_supply_tier_cost[m.zone_rfm[z, f], p, 'base']
+    else:
+        ice_fuel_cost_func = lambda m, z, p, f: m.fuel_cost[z, f, p]
+    m.ice_annual_fuel_cost = Param(m.PERIODS, initialize=lambda m, p:
+        sum(
+            (1.0 - m.ev_share[z, p])
+            * m.n_vehicles[z, t, p]
+            * m.ice_gals_per_year[z, t, p]
+            * motor_fuel_mmbtu_per_gallon[m.ice_fuel[z, t, p]]
+            * ice_fuel_cost_func(m, z, p, m.ice_fuel[z, t, p])
+            for z in m.LOAD_ZONES
+            for t in m.EV_TYPES
+        )
+    )
+
+    # add cost components to account for the vehicle miles traveled via EV or ICE
+    # (note: these are included in total system cost, which complicates calculation
+    # of the cost per kWh for electricity alone)
+    m.Cost_Components_Per_Period.append('ev_extra_annual_cost')
+    m.Cost_Components_Per_Period.append('ice_annual_fuel_cost')
+
+    # EV bid data -- total MW used by 100% EV fleet, for each zone, veh type,
+    # bid number, timepoint
+    m.ev_bid_by_type = Param(m.EV_ZONE_TYPE_BID_TP)
+
+    # 
aggregate across vehicle types (types are only needed for reporting) + m.ev_bid_mw = Param( + m.LOAD_ZONES, m.EV_BID_NUMS, m.TIMEPOINTS, + initialize=lambda m, z, n, tp: + sum(m.ev_bid_by_type[z, t, n, tp] for t in m.EV_TYPES) + ) + + # find lowest and highest possible charging in each timepoint, used for reserve calcs + m.ev_charge_min = Param( + m.LOAD_ZONES, m.TIMEPOINTS, + initialize=lambda m, z, tp: min(m.ev_bid_mw[z, n, tp] for n in m.EV_BID_NUMS) + ) + m.ev_charge_max = Param( + m.LOAD_ZONES, m.TIMEPOINTS, + initialize=lambda m, z, tp: max(m.ev_bid_mw[z, n, tp] for n in m.EV_BID_NUMS) + ) + + # decide which share of the fleet to allocate to each charging bid + m.EVBidWeight = Var(m.LOAD_ZONES, m.TIMESERIES, m.EV_BID_NUMS, within=PercentFraction) + m.Charge_Enough_EVs = Constraint( + m.LOAD_ZONES, m.TIMESERIES, + rule=lambda m, z, ts: + sum(m.EVBidWeight[z, ts, n] for n in m.EV_BID_NUMS) == m.ev_share[z, m.ts_period[ts]] + ) + + # calculate total EV charging + m.ChargeEVs = Expression( + m.LOAD_ZONES, m.TIMEPOINTS, + rule=lambda m, z, tp: sum( + m.EVBidWeight[z, m.tp_ts[tp], n] * m.ev_bid_mw[z, n, tp] + for n in m.EV_BID_NUMS + ) + ) + + # set rules for when to charge EVs + # note: this could be generalized to fractions between 0% and 100% BAU + if m.options.ev_timing == "optimal": + if m.options.verbose: + print "Charging EVs at best time each day." + # no extra code needed + elif m.options.ev_timing == "bau": + if m.options.verbose: + print "Charging EVs at business-as-usual times of day." + # give full weight to BAU bid (number 0) + m.ChargeEVs_bau = Constraint( + m.LOAD_ZONES, m.EV_BID_NUMS, m.TIMESERIES, + rule=lambda m, z, n, ts: ( + m.EVBidWeight[z, ts, n] + == (m.ev_share[z, m.ts_period[ts]] if n == 0 else 0) + ) + ) + else: + # should never happen + raise ValueError("Invalid value specified for --ev-timing: {}".format(str(m.options.ev_timing))) + + # add the EV load to the model's energy balance + m.Zone_Power_Withdrawals.append('ChargeEVs') + + # Register with spinning reserves if it is available and optimal EV charging is enabled. + if [rt.lower() for rt in m.options.ev_reserve_types] != ['none'] and m.options.ev_timing == "optimal": + if hasattr(m, 'Spinning_Reserve_Up_Provisions'): + # calculate available slack from EV charging + # (from supply perspective, so "up" means less load) + m.EVSlackUp = Expression( + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, b, t: sum( + m.ChargeEVs[z, t] - m.ev_charge_min[z, t] + for z in m.ZONES_IN_BALANCING_AREA[b] + ) + ) + m.EVSlackDown = Expression( + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, b, t: sum( + m.ev_charge_max[z, t] - m.ChargeEVs[z, t] + for z in m.ZONES_IN_BALANCING_AREA[b] + ) + ) + if hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'): + # using advanced formulation, index by reserve type, balancing area, timepoint. 
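[Editor's note: the reserve-type allocation used in this hunk, and in the similar demand_response_simple and hydrogen hunks, boils down to one nonnegative variable per reserve product whose sum is capped by the available slack. A minimal standalone sketch, using an invented two-type set and a fixed slack value rather than anything from this patch:]

    from pyomo.environ import ConcreteModel, Set, Var, Constraint, NonNegativeReals

    m = ConcreteModel()
    m.RESERVE_TYPES = Set(initialize=['contingency', 'regulation'])  # assumed types
    slack_up = 42.0  # assumed MW of headroom in one balancing area and timepoint

    # one offer variable per reserve product
    m.ReserveUp = Var(m.RESERVE_TYPES, within=NonNegativeReals)
    # all products draw on the same physical headroom, so their sum is capped
    m.Limit_ReserveUp = Constraint(
        expr=sum(m.ReserveUp[rt] for rt in m.RESERVE_TYPES) <= slack_up
    )

[The solver is then free to split the slack among products as prices dictate, which is the design choice these modules repeat.]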
+                # define variables for each type of reserves to be provided
+                # choose how to allocate the slack between the different reserve products
+                m.EV_SPINNING_RESERVE_TYPES = Set(
+                    initialize=m.options.ev_reserve_types
+                )
+                m.EVSpinningReserveUp = Var(
+                    m.EV_SPINNING_RESERVE_TYPES, m.BALANCING_AREA_TIMEPOINTS,
+                    within=NonNegativeReals
+                )
+                m.EVSpinningReserveDown = Var(
+                    m.EV_SPINNING_RESERVE_TYPES, m.BALANCING_AREA_TIMEPOINTS,
+                    within=NonNegativeReals
+                )
+                # constrain reserve provision within available slack
+                m.Limit_EVSpinningReserveUp = Constraint(
+                    m.BALANCING_AREA_TIMEPOINTS,
+                    rule=lambda m, ba, tp:
+                        sum(
+                            m.EVSpinningReserveUp[rt, ba, tp]
+                            for rt in m.EV_SPINNING_RESERVE_TYPES
+                        ) <= m.EVSlackUp[ba, tp]
+                )
+                m.Limit_EVSpinningReserveDown = Constraint(
+                    m.BALANCING_AREA_TIMEPOINTS,
+                    rule=lambda m, ba, tp:
+                        sum(
+                            m.EVSpinningReserveDown[rt, ba, tp]
+                            for rt in m.EV_SPINNING_RESERVE_TYPES
+                        ) <= m.EVSlackDown[ba, tp]
+                )
+                m.Spinning_Reserve_Up_Provisions.append('EVSpinningReserveUp')
+                m.Spinning_Reserve_Down_Provisions.append('EVSpinningReserveDown')
+            else:
+                # using older formulation, only one type of spinning reserves, indexed by balancing area, timepoint
+                if m.options.ev_reserve_types != ['spinning']:
+                    raise ValueError(
+                        'Unable to use reserve types other than "spinning" with simple spinning reserves module.'
+                    )
+                m.Spinning_Reserve_Up_Provisions.append('EVSlackUp')
+                m.Spinning_Reserve_Down_Provisions.append('EVSlackDown')
+
+
+def load_inputs(m, switch_data, inputs_dir):
+    """
+    Import ev data from .tab files.
+    """
+    switch_data.load_aug(
+        filename=os.path.join(inputs_dir, 'ev_share.tab'),
+        auto_select=True,
+        param=m.ev_share
+    )
+    switch_data.load_aug(
+        filename=os.path.join(inputs_dir, 'ev_fleet_info_advanced.tab'),
+        auto_select=True,
+        param=[getattr(m, p) for p in ev_zone_type_period_params]
+    )
+    switch_data.load_aug(
+        filename=os.path.join(inputs_dir, 'ev_charging_bids.tab'),
+        auto_select=True,
+        param=m.ev_bid_by_type,
+        index=m.EV_ZONE_TYPE_BID_TP
+    )
diff --git a/switch_model/hawaii/fed_subsidies.py b/switch_model/hawaii/fed_subsidies.py
index 42f0a3df8..fa323bd39 100644
--- a/switch_model/hawaii/fed_subsidies.py
+++ b/switch_model/hawaii/fed_subsidies.py
@@ -5,44 +5,52 @@ def define_components(m):
     """
     incorporate the effect of federal subsidies
     """
-
+
+    # note: wind/solar/geothermal production tax credit expires in 2017-2019,
+    # so we ignore that (http://programs.dsireusa.org/system/program/detail/734)
+
+    # TODO: move these values into data files
-    wind_energy_source = 'WND'
-    # approx lifetime average credit, based on 2014$ 0.023/kWh for first 10 years of project
-    wind_prod_tax_credit = 0.015 * 1000  # $/MWh
-    solar_energy_source = 'SUN'
-    solar_invest_tax_credit = 0.3  # fraction of capital cost
+    itc_rates = {
+        # DistPV from http://programs.dsireusa.org/system/program/detail/1235
+        (2018, 'DistPV'): 0.3,
+        (2019, 'DistPV'): 0.3,
+        (2020, 'DistPV'): 0.3,
+        (2021, 'DistPV'): 0.3,
+        # Wind, Solar and Geothermal ITC from
+        # http://programs.dsireusa.org/system/program/detail/658
+        (2018, 'CentralTrackingPV'): 0.3,
+        (2019, 'CentralTrackingPV'): 0.3,
+        (2020, 'CentralTrackingPV'): 0.26,
+        (2021, 'CentralTrackingPV'): 0.22,
+        (2022, 'CentralTrackingPV'): 0.10,
+        (2018, 'OnshoreWind'): 0.22,
+        (2019, 'OnshoreWind'): 0.12,
+        (2018, 'OffshoreWind'): 0.22,
+        (2019, 'OffshoreWind'): 0.12,
+    }
+    itc_rates.update({
+        (y, 'CentralTrackingPV'): 0.1
+        for y in range(2023, 2051)
+    })
+    itc_rates.update({  # clone the CentralTrackingPV entries
+        (y, 
'CentralFixedPV'): itc_rates[y, 'CentralTrackingPV'] + for y in range(2018, 2051) + }) + itc_rates.update({ + (y, 'Geothermal'): 0.1 + for y in range(2018, 2051) + }) - # note: wind PTC expired at end of 2014; solar expires at end of 2016, - # except for continuing 10% business investment tax credit. - - # note: here we assume that existing projects and new (unbuilt) projects - # are defined separately - m.NEW_GENECTS = Set(initialize=lambda m: set(p for (p, y) in m.NEW_GEN_BLD_YRS)) - - # model the wind production tax credit - m.Wind_Subsidy_Hourly = Expression( - m.TIMEPOINTS, - rule=lambda m, t: -wind_prod_tax_credit * sum( - m.DispatchGen[p, t] - for p in m.GENERATION_PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[wind_energy_source] - if p in m.NEW_GENECTS and (p, t) in m.GEN_TPS + # model the renewable investment tax credit as simply prorating the annual capital cost + m.Federal_Investment_Tax_Credit_Annual = Expression( + m.PERIODS, + rule=lambda m, pe: sum( + -itc_rates[bld_yr, m.gen_tech[g]] + * m.BuildGen[g, bld_yr] + * m.gen_capital_cost_annual[g, bld_yr] + for g in m.NON_FUEL_BASED_GENS + for bld_yr in m.BLD_YRS_FOR_GEN_PERIOD[g, pe] + if (bld_yr, m.gen_tech[g]) in itc_rates ) ) - m.Cost_Components_Per_TP.append('Wind_Subsidy_Hourly') - - # model the solar tax credit as simply prorating the annual capital cost - m.Solar_Credit_Annual = Expression(m.PERIODS, rule=lambda m, pe: - -solar_invest_tax_credit * sum( - m.BuildGen[g, bld_yr] * m.gen_capital_cost_annual[g, bld_yr] - for g in m.GENERATION_PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[solar_energy_source] - if g in m.NEW_GENECTS - for bld_yr in m.BLD_YRS_FOR_GEN_PERIOD[g, pe])) - # # another version: - # m.Solar_Credit_Annual = Expression(m.PERIODS, rule=lambda m, pe: - # -solar_invest_tax_credit * sum( - # m.BuildGen[g, bld_yr] * m.gen_capital_cost_annual[g, bld_yr] - # for (g, bld_yr) in m.NEW_GEN_BLD_YRS - # if (pe in m.PERIODS_FOR_GEN_BLD_YR[g, bld_yr] - # and m.gen_energy_source[g] == solar_energy_source))) - m.Cost_Components_Per_Period.append('Solar_Credit_Annual') + m.Cost_Components_Per_Period.append('Federal_Investment_Tax_Credit_Annual') diff --git a/switch_model/hawaii/hydrogen.py b/switch_model/hawaii/hydrogen.py index 612c795cd..e23e18c14 100644 --- a/switch_model/hawaii/hydrogen.py +++ b/switch_model/hawaii/hydrogen.py @@ -2,6 +2,13 @@ from pyomo.environ import * from switch_model.financials import capital_recovery_factor as crf +def define_arguments(argparser): + argparser.add_argument('--hydrogen-reserve-types', nargs='+', default=['spinning'], + help= + "Type(s) of reserves to provide from hydrogen infrastructure (e.g., 'contingency regulation'). " + "Specify 'none' to disable." 
+ ) + def define_components(m): # electrolyzer details @@ -185,22 +192,62 @@ def define_components(m): m.Cost_Components_Per_Period.append('HydrogenFixedCostAnnual') # Register with spinning reserves if it is available - if 'Spinning_Reserve_Up_Provisions' in dir(m): - m.HydrogenSpinningReserveUp = Expression( - m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, b, t: - sum(m.HydrogenSlackUp[z, t] - for z in m.ZONES_IN_BALANCING_AREA[b]) - ) - m.Spinning_Reserve_Up_Provisions.append('HydrogenSpinningReserveUp') - - m.HydrogenSpinningReserveDown = Expression( - m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, b, t: \ - sum(m.HydrogenSlackDown[g, t] - for z in m.ZONES_IN_BALANCING_AREA[b]) - ) - m.Spinning_Reserve_Down_Provisions.append('HydrogenSpinningReserveDown') + if [rt.lower() for rt in m.options.hydrogen_reserve_types] != ['none']: + # Register with spinning reserves + if hasattr(m, 'Spinning_Reserve_Up_Provisions'): + # calculate available slack from hydrogen equipment + m.HydrogenSlackUpForArea = Expression( + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, b, t: + sum(m.HydrogenSlackUp[z, t] for z in m.ZONES_IN_BALANCING_AREA[b]) + ) + m.HydrogenSlackDownForArea = Expression( + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, b, t: + sum(m.HydrogenSlackDown[z, t] for z in m.ZONES_IN_BALANCING_AREA[b]) + ) + if hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'): + # using advanced formulation, index by reserve type, balancing area, timepoint + # define variables for each type of reserves to be provided + # choose how to allocate the slack between the different reserve products + m.HYDROGEN_SPINNING_RESERVE_TYPES = Set( + initialize=m.options.hydrogen_reserve_types + ) + m.HydrogenSpinningReserveUp = Var( + m.HYDROGEN_SPINNING_RESERVE_TYPES, m.BALANCING_AREA_TIMEPOINTS, + within=NonNegativeReals + ) + m.HydrogenSpinningReserveDown = Var( + m.HYDROGEN_SPINNING_RESERVE_TYPES, m.BALANCING_AREA_TIMEPOINTS, + within=NonNegativeReals + ) + # constrain reserve provision within available slack + m.Limit_HydrogenSpinningReserveUp = Constraint( + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, ba, tp: + sum( + m.HydrogenSpinningReserveUp[rt, ba, tp] + for rt in m.HYDROGEN_SPINNING_RESERVE_TYPES + ) <= m.HydrogenSlackUpForArea[ba, tp] + ) + m.Limit_HydrogenSpinningReserveDown = Constraint( + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, ba, tp: + sum( + m.HydrogenSpinningReserveDown[rt, ba, tp] + for rt in m.HYDROGEN_SPINNING_RESERVE_TYPES + ) <= m.HydrogenSlackDownForArea[ba, tp] + ) + m.Spinning_Reserve_Up_Provisions.append('HydrogenSpinningReserveUp') + m.Spinning_Reserve_Down_Provisions.append('HydrogenSpinningReserveDown') + else: + # using older formulation, only one type of spinning reserves, indexed by balancing area, timepoint + if m.options.hydrogen_reserve_types != ['spinning']: + raise ValueError( + 'Unable to use reserve types other than "spinning" with simple spinning reserves module.' 
+                    )
+            m.Spinning_Reserve_Up_Provisions.append('HydrogenSlackUpForArea')
+            m.Spinning_Reserve_Down_Provisions.append('HydrogenSlackDownForArea')
 
 def load_inputs(mod, switch_data, inputs_dir):
diff --git a/switch_model/hawaii/kalaeloa.py b/switch_model/hawaii/kalaeloa.py
index 7ea611de4..70f765fdb 100644
--- a/switch_model/hawaii/kalaeloa.py
+++ b/switch_model/hawaii/kalaeloa.py
@@ -11,8 +11,6 @@ def define_components(m):
     # commit units 1 & 2, run each between 65 and 90 MW
     # run both 1 & 2 at 90 MW, and run 3 at 28 MW
 
-    more_than_kalaeloa_capacity = 220  # used for big-m constraints on individual units
-
     m.KALAELOA_MAIN_UNITS = Set(
         initialize=["Oahu_Kalaeloa_CC1", "Oahu_Kalaeloa_CC2", "Kalaeloa_CC1", "Kalaeloa_CC2"],
         filter=lambda m, g: g in m.GENERATION_PROJECTS
@@ -39,13 +37,14 @@ def define_components(m):
     )
 
     # run kalaeloa at full power or not
+    # (if linearized, this is the fraction of capacity that is dispatched)
     m.RunKalaeloaUnitFull = Var(m.KALAELOA_MAIN_UNIT_DISPATCH_POINTS, within=Binary)
 
-    m.Run_Kalaeloa_Unit_Full_Enforce = Constraint(
+    m.Run_Kalaeloa_Unit_Full_Enforce = Constraint(  # big-m constraint
         m.KALAELOA_MAIN_UNIT_DISPATCH_POINTS,
         rule=lambda m, g, tp:
             m.DispatchGen[g, tp]
-            + (1 - m.RunKalaeloaUnitFull[g, tp]) * more_than_kalaeloa_capacity
+            + (1 - m.RunKalaeloaUnitFull[g, tp]) * m.gen_capacity_limit_mw[g]
             >= m.GenCapacityInTP[g, tp] * m.gen_availability[g]
     )
@@ -56,7 +55,7 @@ def define_components(m):
         rule=lambda m, g_duct, tp, g_main:
             m.DispatchGen[g_duct, tp]
             <=
-            m.RunKalaeloaUnitFull[g_main, tp] * more_than_kalaeloa_capacity
+            m.RunKalaeloaUnitFull[g_main, tp] * m.gen_capacity_limit_mw[g_duct]
     )
 
     # force at least one Kalaeloa unit to run at full power at all times
diff --git a/switch_model/hawaii/lake_wilson.py b/switch_model/hawaii/lake_wilson.py
new file mode 100644
index 000000000..9f5aab3fc
--- /dev/null
+++ b/switch_model/hawaii/lake_wilson.py
@@ -0,0 +1,26 @@
+"""
+Special modeling for Lake Wilson - relax daily energy balance by 10 MW to account
+for net inflow.
+"""
+from pyomo.environ import *
+
+def define_components(m):
+    def rule(m):
+        g = 'Oahu_Lake_Wilson'
+        inflow = 10.0
+        if g in m.GENERATION_PROJECTS:
+            for t in m.TPS_FOR_GEN[g]:
+                # assign new energy balance with extra inflow, and allow spilling
+                m.Track_State_Of_Charge[g, t] = (
+                    m.StateOfCharge[g, t]
+                    <=
+                    m.StateOfCharge[g, m.tp_previous[t]]
+                    + (m.ChargeStorage[g, t] * m.gen_storage_efficiency[g]
+                       - m.DispatchGen[g, t]) * m.tp_duration_hrs[t]
+                    # allow inflow only if capacity is built
+                    + inflow * m.tp_duration_hrs[t] * m.GenCapacityInTP[g, t] / m.gen_unit_size[g]
+                )
+    m.Add_Lake_Wilson_Inflow = BuildAction(rule=rule)
+
+# TODO: don't allow zero crossing when calculating reserves available
+# see http://www.ucdenver.edu/faculty-staff/dmays/3414/Documents/Antal-MS-2014.pdf
diff --git a/switch_model/hawaii/lng_conversion.py b/switch_model/hawaii/lng_conversion.py
index 1146b11f3..c5db0c581 100644
--- a/switch_model/hawaii/lng_conversion.py
+++ b/switch_model/hawaii/lng_conversion.py
@@ -5,14 +5,14 @@
 # TODO: change fuel_markets_expansion to support more complex supply chains,
 # e.g., a regional facility (LNG switch) in line with a market upgrade (bulk LNG),
 # and possibly additional upgrades beyond (e.g., adding a second FSRU).
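[Editor's note: the Kalaeloa change above swaps a hard-coded 220 MW big-M constant for each unit's own gen_capacity_limit_mw, which tightens the LP relaxation. A minimal sketch of the big-M pattern itself, with made-up capacity and availability numbers that only illustrate the idea:]

    from pyomo.environ import ConcreteModel, Var, Constraint, Binary, NonNegativeReals

    m = ConcreteModel()
    cap = 90.0    # assumed unit capacity (MW), standing in for gen_capacity_limit_mw
    avail = 0.95  # assumed availability factor, standing in for gen_availability

    m.DispatchMW = Var(within=NonNegativeReals, bounds=(0, cap))
    m.RunFull = Var(within=Binary)
    # if RunFull == 1 the unit must reach full available output;
    # if RunFull == 0 the cap term slackens the bound (the big-M role)
    m.Enforce_Full = Constraint(
        expr=m.DispatchMW + (1 - m.RunFull) * cap >= cap * avail
    )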
-# For now, we include the cost of the LNG switch via ad-hoc constraints +# For now, we include the cost of the LNG switch via ad-hoc constraints from pyomo.environ import * from switch_model.financials import capital_recovery_factor def define_arguments(argparser): - argparser.add_argument('--force-lng-tier', nargs='*', default=None, - help="LNG tier to use or 'none' to use no LNG; can also specify start and end date to use this tier; optimal choices will be made if nothing specified.") + argparser.add_argument('--force-lng-tier', nargs='*', default=None, + help="LNG tier to use: tier [start [stop]] or 'none' to use no LNG. Optimal choices will be made if nothing specified.") def define_components(m): @@ -22,9 +22,9 @@ def define_components(m): # on which path to follow with LNG, if any) # Note: if we activate a tier in any market, we activate it in all markets # (e.g., bringing in containerized LNG for all islands) - + m.LNG_RFM_SUPPLY_TIERS = Set( - initialize=m.RFM_SUPPLY_TIERS, + initialize=m.RFM_SUPPLY_TIERS, filter=lambda m, rfm, per, tier: m.rfm_fuel[rfm].upper() == 'LNG' ) m.LNG_REGIONAL_FUEL_MARKETS = Set( @@ -33,41 +33,41 @@ def define_components(m): m.LNG_TIERS = Set( initialize=lambda m: {tier for rfm, per, tier in m.LNG_RFM_SUPPLY_TIERS} ) - - # force LNG to be deactivated when RPS is 100%; + + # force LNG to be deactivated when RPS is 100%; # this forces recovery of all costs before the 100% RPS takes effect # (otherwise the model sometimes tries to postpone recovery beyond the end of the study) if hasattr(m, 'RPS_Enforce'): - m.No_LNG_In_100_RPS = Constraint(m.LNG_RFM_SUPPLY_TIERS, + m.No_LNG_In_100_RPS = Constraint(m.LNG_RFM_SUPPLY_TIERS, rule=lambda m, rfm, per, tier: - (m.RFMSupplyTierActivate[rfm, per, tier] == 0) - if m.rps_target_for_period[per] >= 1.0 + (m.RFMSupplyTierActivate[rfm, per, tier] == 0) + if m.rps_target_for_period[per] >= 1.0 else Constraint.Skip ) - - # user can study different LNG durations by specifying a tier to activate and + + # user can study different LNG durations by specifying a tier to activate and # a start and end date. Both the capital recovery and fixed costs for this tier are # bundled into the market's fixed cost, which means a different fuel_supply_curves.tab # file is needed for each LNG duration (i.e., the tiers must be forced on or off # for a particular duration which matches the fuel_supply_curves.tab). This is # brittle and requires trying all permutations to find the optimum, which is not - # good. A better way would be to specify capital costs separately from fixed costs, - # and add a flag to force the model to recover capital costs completely within the + # good. A better way would be to specify capital costs separately from fixed costs, + # and add a flag to force the model to recover capital costs completely within the # study period if desired. (Another flag could set a minimum duration for LNG # infrastructure to be activated.) - + # This may mean defining a tier upgrade as a project with a certain capital cost # and fixed O&M. Or maybe for LNG upgrades, we require full recovery during the # online period? i.e., lump the cost on the first day of use? or amortize it over - # all fuel that passes through the project? maybe just allow specification of + # all fuel that passes through the project? maybe just allow specification of # capital cost and project life for LNG upgrades, and allow deactivation (avoiding # fixed O&M) after a certain period of time. Then PSIP module could force longer # activation if needed. 
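[Editor's note: the bundling discussed in this comment block amounts to annualizing a lump-sum capital cost and folding it into the market's fixed cost. A rough illustration with a locally defined capital recovery factor (this module imports an equivalent helper from switch_model.financials); all numbers are invented:]

    def capital_recovery_factor(ir, t):
        # annual payment per $1 of capital at interest rate ir over t years
        return ir / (1.0 - (1.0 + ir) ** -t)

    capital_cost = 340e6  # assumed lump-sum cost of an LNG upgrade ($)
    annual_fixed_cost = capital_cost * capital_recovery_factor(0.08, 20)
    print(annual_fixed_cost)  # ~34.6M $/year, the kind of value bundled into fuel_supply_curves.tab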
- + # In the end, this was resolved by having the user specify multiple tiers with # different lifetimes and corresponding fixed costs per year; then the model # (or user) can choose a tier with a particular lifetime. - + # force use of a particular LNG tier in particular periods def Force_LNG_Tier_rule(m, rfm, per, tier): if m.options.force_lng_tier is None: @@ -104,8 +104,8 @@ def Force_LNG_Tier_rule(m, rfm, per, tier): # list of all projects and timepoints when LNG could potentially be used - m.LNG_GENECT_TIMEPOINTS = Set(dimen=2, initialize = lambda m: - ((p, t) for p in m.GENERATION_PROJECTS_BY_FUEL['LNG'] for t in m.TIMEPOINTS + m.LNG_GEN_TIMEPOINTS = Set(dimen=2, initialize = lambda m: + ((p, t) for p in m.GENERATION_PROJECTS_BY_FUEL['LNG'] for t in m.TIMEPOINTS if (p, t) in m.GEN_TPS) ) @@ -119,33 +119,33 @@ def Force_LNG_Tier_rule(m, rfm, per, tier): # are included in the LNG supply tiers, so we don't need to worry about that. m.LNG_CONVERTED_PLANTS = Set( initialize=[ - 'Oahu_Kahe_K5', 'Oahu_Kahe_K6', + 'Oahu_Kahe_K5', 'Oahu_Kahe_K6', 'Oahu_Kalaeloa_CC1_CC2', # used in some older models 'Oahu_Kalaeloa_CC1', 'Oahu_Kalaeloa_CC2', 'Oahu_Kalaeloa_CC3', 'Oahu_CC_383', 'Oahu_CC_152', 'Oahu_CT_100' ] ) - m.LNG_In_Converted_Plants_Only = Constraint(m.LNG_GENECT_TIMEPOINTS, + m.LNG_In_Converted_Plants_Only = Constraint(m.LNG_GEN_TIMEPOINTS, rule=lambda m, g, tp: Constraint.Skip if g in m.LNG_CONVERTED_PLANTS else (m.GenFuelUseRate[g, tp, 'LNG'] == 0) ) - + # CODE BELOW IS DISABLED because we have abandoned the 'container' tier which cost - # more than LSFO, and because we would rather show the choice that is made if LNG + # more than LSFO, and because we would rather show the choice that is made if LNG # is more expensive (i.e., stick with LSFO) # NOTE: all the code below works together to force the model to meet an LNG quota - or try as - # hard as possible - if LNG has been activated and the variable cost is higher than LSFO. - # These constraints could potentially be replaced with simpler code that forces the power - # system to meet the LNG quota, but then that could be infeasible if there is not enough + # hard as possible - if LNG has been activated and the variable cost is higher than LSFO. + # These constraints could potentially be replaced with simpler code that forces the power + # system to meet the LNG quota, but then that could be infeasible if there is not enough # LNG-capable generation capacity to meet that quota. - + # # largest amount of LNG that might be consumed per year (should be at least # # equal to the amount that might be activated and left unused, but # # not too much bigger); this is 2 million tons per year * 52 MMBtu/ton # big_market_lng = 2e6 * 52 # MMbtu/year - + # # LNG converted plants must use LNG unless the supply is exhausted # # note: in this formulation, FuelConsumptionInMarket can be low, # # unless LNG_Has_Slack is zero, in which case all available fuel @@ -195,10 +195,10 @@ def Force_LNG_Tier_rule(m, rfm, per, tier): # lng_market_exhausted = 1 - m.LNG_Has_Slack[rfm, m.tp_period[tp]] # return (non_lng_fuel <= big_gect_lng * lng_market_exhausted) # m.Only_LNG_In_Converted_Plants = Constraint( - # m.LNG_GENECT_TIMEPOINTS, + # m.LNG_GEN_TIMEPOINTS, # rule=Only_LNG_In_Converted_Plants_rule # ) - + # # If the 'container' tier is forced on, then # # force LNG-capable plants to run at max power, or up to the # # point where they exhaust all active LNG tiers. 
Combined with the @@ -226,10 +226,10 @@ def Force_LNG_Tier_rule(m, rfm, per, tier): # ) # return rule # m.Force_Converted_Plants_On = Constraint( - # m.LNG_GENECT_TIMEPOINTS, + # m.LNG_GEN_TIMEPOINTS, # rule=Force_Converted_Plants_On_rule # ) - + # # force consumption up to the limit if the 'container' tier is activated, # # because this tier sometimes costs more than oil, in which case it will # # be avoided without this rule. (this also models HECO's commitment to LNG in PSIP) diff --git a/switch_model/hawaii/psip.py b/switch_model/hawaii/psip_2016_04.py similarity index 100% rename from switch_model/hawaii/psip.py rename to switch_model/hawaii/psip_2016_04.py diff --git a/switch_model/hawaii/psip_2016_12.py b/switch_model/hawaii/psip_2016_12.py new file mode 100644 index 000000000..26d7354ed --- /dev/null +++ b/switch_model/hawaii/psip_2016_12.py @@ -0,0 +1,448 @@ +from __future__ import division +from collections import defaultdict +from textwrap import dedent +import os +from pyomo.environ import * + +def TODO(note): + raise NotImplementedError(dedent(note)) + +def define_arguments(argparser): + argparser.add_argument('--psip-force', action='store_true', default=True, + help="Force following of PSIP plans (retiring AES and building certain technologies).") + argparser.add_argument('--psip-relax', dest='psip_force', action='store_false', + help="Relax PSIP plans, to find a more optimal strategy.") + argparser.add_argument('--psip-minimal-renewables', action='store_true', default=False, + help="Use only the amount of renewables shown in PSIP plans, and no more (should be combined with --psip-relax).") + argparser.add_argument('--force-build', nargs=3, default=None, + help="Force construction of at least a certain quantity of a particular technology during certain years. Space-separated list of year, technology and quantity.") + argparser.add_argument('--psip-relax-after', type=float, default=None, + help="Follow the PSIP plan up to and including the specified year, then optimize construction in later years. Should be combined with --psip-force.") + +def is_renewable(tech): + return any(txt in tech for txt in ("PV", "Wind", "Solar")) +def is_battery(tech): + return 'battery' in tech.lower() + +def define_components(m): + ################### + # resource rules to match HECO's 2016-04-01 PSIP + ################## + + # decide whether to enforce the PSIP preferred plan + # if an environment variable is set, that takes precedence + # (e.g., on a cluster to override options.txt) + psip_env_var = os.environ.get('USE_PSIP_PLAN') + if psip_env_var is None: + # no environment variable; use the --psip-relax flag + psip = m.options.psip_force + elif psip_env_var.lower() in ["1", "true", "y", "yes", "on"]: + psip = True + elif psip_env_var.lower() in ["0", "false", "n", "no", "off"]: + psip = False + else: + raise ValueError('Unrecognized value for environment variable USE_PSIP_PLAN={} (should be 0 or 1)'.format(psip_env_var)) + + if m.options.verbose: + if psip: + print "Using PSIP construction plan." + else: + print "Relaxing PSIP construction plan." 
+ + # don't allow addition of anything other than those specified here + # force retirement of AES at end of 2022 + + # these plants are all multi-fuel; will automatically convert to biodiesel in 2045: + # CIP CT-1, W9, W10, Airport DSG, Schofield, IC_Barge, IC_MCBH, Kalaeloa + + # no use of LNG + + # force battery installations directly (since they're not currently a standard tech) + + # NOTE: RESOLVE used different wind and solar profiles from SWITCH. + # SWITCH profiles seem to be more accurate, so we optimize against them + # and show that this may give (small) savings vs. the RESOLVE plan. + + # TODO: Should I use Switch to investigate how much of HECO's poor performance is due + # to using bad resource profiles (small onshore wind that doesn't rise in the rankings), + # how much is due to capping PV at 300 MW in 2020, + # how much is due to non-integrality in RESOLVE (fixed by later jimmying by HECO), and + # how much is due to forcing in elements before and after the optimization? + + # NOTE: I briefly moved future DistPV to the existing plants workbook, with the idea that + # we assume the same forecasted adoption occurs with or without the PSIP. That approach + # also spread the DistPV adoption among the top half of tranches, rather than allowing + # Switch to cherry-pick the best tranches. However, that approach was ineffective because + # Switch was still able to add (and did add) DistPV from the lower tranches. That could + # have been fixed up in import_data.py, or the DistPV could have been moved here, into + # technology_targets_definite. However, on further reflection, forcing DistPV installations + # to always match the PSIP forecast seems artificial -- it might be better to do DistPV + # than utility-scale PV, and there's no reason to preclude that in the non-PSIP plans. + # (Although it's probably not worth dwelling long on differences if they arise, since they + # won't make a huge difference in cost.) So now the DistPV is treated as just another optional + # part of the PSIP plan. Note that this allows Switch to cherry-pick among the best DistPV + # tranches to meet the PSIP, but that is a little conservative (favorable to HECO), because + # Switch can also do that for the non-PSIP scenarios. Also, these targets are roughly equal + # to the top half of the DistPV tranches, so there's not much cherry-picking going on anyway. + # This could be resolved by setting (optional) project-specific targets in this module, + # or by making the DistPV tranches coarser (e.g., upper half, third quartile, fourth quartile), + # which seems like a good idea for representing the general precision of DistPV policies + # anyway. + + # TODO (maybe): set project-specific targets, so that DistPV targets can be spread among tranches + # and specific projects in the PSIP can be represented accurately (really just NPM wind). This + # might also allow reconstruction of exactly the same existing or PSIP project when retired + # (as specified in the PSIP). Currently the code below lets Switch choose the best project with the + # same technology when it replaces retired renewable projects. + + # targets for individual generation technologies + # (year, technology, MW added) + # TODO: allow either CentralFixedPV or CentralTrackingPV for utility-scale solar + # (not urgent now, since CentralFixedPV is not currently in the model) + + # Technologies that are definitely being built (at least have permits already.) + # (Note: these have all been moved into the existing plants workbook.) 
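[Editor's note: the targets defined below are (year, technology, MW) tuples, later rolled up per period by technology_target_init. A toy version of that rollup, with invented data, shows the windowing convention (start-exclusive, end-inclusive):]

    sample_targets = [(2018, 'OnshoreWind', 24), (2020, 'CentralTrackingPV', 180)]

    def target_for_window(targets, start, end, tech):
        # MW targeted for tech in years with start < year <= end
        return sum(mw for (yr, t, mw) in targets if t == tech and start < yr <= end)

    assert target_for_window(sample_targets, 2016, 2021, 'OnshoreWind') == 24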
+ technology_targets_definite = [] + + # add targets specified on the command line + if m.options.force_build is not None: + b = list(m.options.force_build) + b[0] = int(b[0]) # year + b[2] = float(b[2]) # quantity + b = tuple(b) + print "Forcing build: {}".format(b) + technology_targets_definite.append(b) + + # technologies proposed in PSIP but which may not be built if a better plan is found. + # All from final plan in Table 4-1 of PSIP 2016-12-23 sometimes cross-referenced with PLEXOS inputs. + # These differ somewhat from inputs to RESOLVE or the RESOLVE plans in Table 3-1 and 3-4, but + # they represent HECO's final plan as reported in the PSIP. + technology_targets_psip = [ + # Na Pua Makani (NPM) wind (still awaiting approval as of Feb. 2018) note: this is at a + # specific location (21.668 -157.956), but since it isn't in the existing plants + # workbook, we represent it as a generic technology target. + # note: Resolve modeled 134 MW of planned onshore wind, 30 MW of optional onshore + # and 800 MW of optional offshore; See "data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/CA-IR-1/Input + # and Output Files by Case/E3 and Company Defined Cases/Market DGPV (Reference)/OA_NOLNG/capacity_limits.tab". + # 'planned' seems to correspond to Na Pua Makani (24), CBRE (10), Kahuku (30), Kawailoka (69); + # Resolve built 273 MW offshore in 2025-45 (including 143 MW rebuilt in 2045), + # and 30 MW onshore in 2045 (tables 3-1 and 3-4). + # Not clear why it picked offshore before onshore (maybe bad resource profiles?). But + # in their final plan (table 4-1), HECO changed it to 200 MW offshore in 2025 + # (presumably rebuilt in 2045) and 30 MW onshore in 2045. + (2018, 'OnshoreWind', 24), # Na Pua Makani (NPM) wind + (2018, 'OnshoreWind', 10), # CBRE wind + # note: 109.6 MW SunEdison replacements are in Existing Plants workbook. + + # note: RESOLVE had 53.6 MW of planned PV, which is probably Waianae (27.6), Kalaeloa (5) + # and West Loch (20). Then it added these (table 3-1): 2020: 300 MW (capped, see "renewable_limits.tab"), + # 2022: 48 MW, 2025: 193 MW, 2040: 541 (incl. 300 MW rebuild), 2045: 1400 MW (incl. 241 MW rebuild). + # HECO converted this to 109.6 MW of replacement SunEdison waiver projects in 2018 + # (we list those as "existing") and other additions shown below. 
+ (2018, 'CentralTrackingPV', 15), # CBRE PV + (2020, 'CentralTrackingPV', 180), + (2022, 'CentralTrackingPV', 40), + (2022, 'IC_Barge', 100.0), # JBPHH plant + # note: we moved IC_MCBH one year earlier than PSIP to reduce infeasibility in 2022 + (2022, 'IC_MCBH', 54.0), + (2025, 'CentralTrackingPV', 200), + (2025, 'OffshoreWind', 200), + (2040, 'CentralTrackingPV', 280), + (2045, 'CentralTrackingPV', 1180), + (2045, 'IC_MCBH', 68.0), # proxy for 68 MW of generic ICE capacity + + # batteries (MW) + # from PSIP 2016-12-23 Table 4-1; also see energy ("capacity") and power files in + # "data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/DBEDT-IR-12/Input/Oahu/Oahu E3 Plan Input/CSV files/Battery" + # (note: we mistakenly treated these as MWh quantities instead of MW before 2018-02-20) + (2019, 'Battery_Conting', 90), + (2022, 'Battery_4', 426), + (2025, 'Battery_4', 29), + (2030, 'Battery_4', 165), + (2035, 'Battery_4', 168), + (2040, 'Battery_4', 420), + (2045, 'Battery_4', 1525), + # RESOLVE modeled 4-hour batteries as being capable of providing reserves, + # and didn't model contingency batteries (see data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/CA-IR-1/Input + # and Output Files by Case/E3 and Company Defined Cases/Market DGPV (Reference)/OA_NOLNG/technologies.tab). + # Then HECO added a 90 MW contingency battery (table 4-1 of PSIP 2016-12-23). + # Note: RESOLVE can get reserves from batteries (they only considered 4-hour batteries), but not + # from EVs or flexible demand. + # DR: Looking at RESOLVE inputs, it seems like they take roughly 4% of load, and allow it to be doubled + # or cut to zero each hour (need to double-check this beyond first day). Maybe this includes EVs? + # (no separate sign of EVs). + # TODO: check Resolve load levels against Switch. + # TODO: maybe I should switch over to using the ABC curves and load profiles that HECO used with PLEXOS + # (for all islands). + # TODO: Did HECO assume 4-hour batteries, demand response or EVs could provide reserves when running PLEXOS? + # - all of these seem unlikely, but we have to ask HECO to find out; PLEXOS files are unclear. + + # installations based on changes in installed capacity shown in + # data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/CA-IR-1/Input and Output Files by Case/E3 Company Defined Cases/Market DGPV (Reference)/OA_NOLNG/planned_installed_capacities.tab + # Also see Figure J-10 of 2016-12-23 PSIP (Vol. 3), which matches these levels (excluding FIT(?)). + # Note: code further below adds in reconstruction of early installations + (2020, "DistPV", 606.3-444), # net of 444 installed as of 2016 (in existing generators workbook) + (2022, "DistPV", 680.3-606.3), + (2025, "DistPV", 744.9-680.3), + (2030, "DistPV", 868.7-744.9), + (2035, "DistPV", 1015.4-868.7), + (2040, "DistPV", 1163.4-1015.4), + (2045, "DistPV", 1307.9-1163.4), + ] + + # Rebuild renewable projects at retirement (20 years), as specified in the PSIP + # note: this doesn't include DistPV, because those are part of a forecast, not a plan, so they already + # get reconstructed in the existing generators workbook, whether or not the PSIP plan is used. + + # note: this behavior is consistent with the following: + # discussion on p. 3-8 of PSIP 2016-12-23 vol. 1. + # Resolve applied planned wind and solar as set levels through 2045, not set additions in each year. 
+    # Table 4-1 shows final plans that were sent to Plexos; Plexos input files in
+    # data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/DBEDT-IR-12/Input/Oahu/Oahu E3 Plan Input/CSV files/Theme 5
+    # show optional capacity built in 2020 or 2025 (in list below) continuing in service in 2045.
+    # and Plexos input files in data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/DBEDT-IR-12/Input/Oahu/Oahu E3 Plan Input/CSV files/PSIP Max Capacity.csv
+    # don't show any retirements of wind and solar included as "planned" in RESOLVE and "existing" in Switch
+    # (Waivers PV1, West Loch; Kawailoa may be omitted?)
+    # also note: Plexos input files in XX
+    # show max battery capacity equal to sum of all prior additions
+
+    # projects from existing plants workbook (pasted in)
+    existing_techs = [
+        (2011, "OnshoreWind", 30),
+        (2012, "OnshoreWind", 69),
+        (2012, "CentralTrackingPV", 5),
+        (2016, "CentralTrackingPV", 27.6),
+        (2016, "DistPV", 444),
+        (2018, "IC_Schofield", 54.98316),
+        (2018, "CentralTrackingPV", 49),
+        (2018, "CentralTrackingPV", 14.7),
+        (2018, "CentralTrackingPV", 46),
+        (2018, "CentralTrackingPV", 20),
+    ]
+    existing_techs += technology_targets_definite
+    existing_techs += technology_targets_psip
+    # rebuild all renewables at retirement (20 years for RE, 15 years for batteries)
+    rebuild_targets = [
+        (y+20, tech, cap) for y, tech, cap in existing_techs if is_renewable(tech)
+    ] + [
+        (y+15, tech, cap) for y, tech, cap in existing_techs if is_battery(tech)
+    ]  # note: early batteries won't quite need 2 replacements
+    # don't schedule rebuilding past end of study
+    rebuild_targets = [t for t in rebuild_targets if t[0] <= 2045]
+    technology_targets_psip += rebuild_targets
+
+    # make sure LNG is turned off
+    if psip and getattr(m.options, "force_lng_tier", []) != ["none"]:
+        raise RuntimeError('You must use the lng_conversion module and set "--force-lng-tier none" to match the PSIP.')
+
+    if psip:
+        if m.options.psip_relax_after is not None:
+            psip_targets = [t for t in technology_targets_psip if t[0] <= m.options.psip_relax_after]
+        else:
+            psip_targets = technology_targets_psip
+        technology_targets = technology_targets_definite + psip_targets
+    else:
+        technology_targets = technology_targets_definite
+
+    # make a special list including all standard generation technologies plus "LoadShiftBattery"
+    m.GEN_TECHS_AND_BATTERIES = Set(initialize=lambda m: [g for g in m.GENERATION_TECHNOLOGIES] + ["LoadShiftBattery"])
+
+    # make a list of renewable technologies
+    m.RENEWABLE_TECHNOLOGIES = Set(
+        initialize=m.GENERATION_TECHNOLOGIES,
+        filter=lambda m, tech: is_renewable(tech)
+    )
+
+    def technology_target_init(m, per, tech):
+        """Find the amount of each technology that is targeted to be built between the start of the
+        previous period and the start of the current period."""
+        start = 2000 if per == m.PERIODS.first() else m.PERIODS.prev(per)
+        end = per
+        target = sum(
+            mw for (tyear, ttech, mw) in technology_targets
+            if ttech == tech and start < tyear and tyear <= end
+        )
+        return target
+    m.technology_target = Param(m.PERIODS, m.GEN_TECHS_AND_BATTERIES, initialize=technology_target_init)
+
+    def MakeGenTechDicts_rule(m):
+        # get unit sizes of all technologies
+        unit_sizes = m.gen_tech_unit_size_dict = defaultdict(float)
+        for g, unit_size in m.gen_unit_size.iteritems():
+            tech = m.gen_tech[g]
+            if tech in unit_sizes:
+                if unit_sizes[tech] != unit_size:
+                    raise ValueError("Generation technology {} uses different unit sizes for different projects.".format(tech))
+            else:
+                
unit_sizes[tech] = unit_size + # get predetermined capacity for all technologies + predet_cap = m.gen_tech_predetermined_cap_dict = defaultdict(float) + for (g, per), cap in m.gen_predetermined_cap.iteritems(): + tech = m.gen_tech[g] + predet_cap[tech, per] += cap + m.MakeGenTechDicts = BuildAction(rule=MakeGenTechDicts_rule) + + # with PSIP: BuildGen is zero except for technology_targets + # (sum during each period or before first period) + # without PSIP: BuildGen is >= definite targets + def Enforce_Technology_Target_rule(m, per, tech): + """Enforce targets for each technology; exact target for PSIP cases, minimum target for non-PSIP.""" + + # get target, including any capacity specified in the predetermined builds, + # so the target will be additional to those + target = m.technology_target[per, tech] + m.gen_tech_predetermined_cap_dict[tech, per] + + # convert target to closest integral number of units + # (some of the targets are based on nominal unit sizes rather than actual max output) + if m.gen_tech_unit_size_dict[tech] > 0.0: + target = round(target / m.gen_tech_unit_size_dict[tech]) * m.gen_tech_unit_size_dict[tech] + + if tech == "LoadShiftBattery": + # special treatment for batteries, which are not a standard technology + if hasattr(m, 'BuildBattery'): + # note: BuildBattery is in MWh, so we convert to MW + build = sum(m.BuildBattery[z, per] for z in m.LOAD_ZONES) / m.battery_min_discharge_time + else: + build = 0 + else: + build = sum( + m.BuildGen[g, per] + for g in m.GENERATION_PROJECTS + if m.gen_tech[g] == tech and (g, per) in m.GEN_BLD_YRS + ) + + if type(build) is int and build == 0: + # no matching projects found + if target == 0: + return Constraint.Skip + else: + print( + "WARNING: target was set for {} in {}, but no matching projects are available. 
" + "Model will be infeasible.".format(tech, per) + ) + return Constraint.Infeasible + elif psip and per <= m.options.psip_relax_after: + return (build == target) + elif m.options.psip_minimal_renewables and tech in m.RENEWABLE_TECHNOLOGIES: + # only build the specified amount of renewables, no more + return (build == target) + else: + # treat the target as a lower bound + return (build >= target) + m.Enforce_Technology_Target = Constraint( + m.PERIODS, m.GEN_TECHS_AND_BATTERIES, rule=Enforce_Technology_Target_rule + ) + + aes_g = 'Oahu_AES' + aes_size = 180 + aes_bld_year = 1992 + m.AES_OPERABLE_PERIODS = Set(initialize = lambda m: + m.PERIODS_FOR_GEN_BLD_YR[aes_g, aes_bld_year] + ) + m.OperateAES = Var(m.AES_OPERABLE_PERIODS, within=Binary) + m.Enforce_AES_Deactivate = Constraint(m.TIMEPOINTS, rule=lambda m, tp: + Constraint.Skip if (aes_g, tp) not in m.GEN_TPS + else (m.DispatchGen[aes_g, tp] <= m.OperateAES[m.tp_period[tp]] * aes_size) + ) + m.AESDeactivateFixedCost = Expression(m.PERIODS, rule=lambda m, per: + 0.0 if per not in m.AES_OPERABLE_PERIODS + else - m.OperateAES[per] * aes_size * m.gen_fixed_om[aes_g, aes_bld_year] + ) + m.Cost_Components_Per_Period.append('AESDeactivateFixedCost') + + if psip: + # keep AES active until 9/2022; deactivate after that + # note: since a period starts in 2022, we retire before that + m.PSIP_Retire_AES = Constraint(m.AES_OPERABLE_PERIODS, rule=lambda m, per: + (m.OperateAES[per] == 1) if per + m.period_length_years[per] <= 2022 + else (m.OperateAES[per] == 0) + ) + + # before 2040: no biodiesel, and only 100-300 GWh of non-LNG fossil fuels + # period including 2040-2045: <= 300 GWh of oil; unlimited biodiesel or LNG + + # no biodiesel before 2040 (then phased in fast enough to meet the RPS) + m.EARLY_BIODIESEL_MARKETS = Set(dimen=2, initialize=lambda m: [ + (rfm, per) + for per in m.PERIODS if per + m.period_length_years[per] <= 2040 + for rfm in m.REGIONAL_FUEL_MARKETS if m.rfm_fuel == 'Biodiesel' + ]) + m.NoEarlyBiodiesel = Constraint(m.EARLY_BIODIESEL_MARKETS, rule=lambda m, rfm, per: + m.FuelConsumptionInMarket[rfm, per] == 0 + ) + + # # 100-300 GWh of non-LNG fuels in 2021-2040 (based on 2016-04 PSIP fig. 5-5) + # # Note: this is needed because we assume HECO plans to burn LNG in the future + # # even in scenarios where it costs more than oil. 
+ # m.PSIP_HIGH_LNG_PERIODS = Set(initialize=lambda m: + # [per for per in m.PERIODS if per + m.period_length_years[per] > 2021 and per < 2045] + # ) + # m.OilProductionGWhPerYear = Expression(m.PERIODS, rule=lambda m, per: + # sum( + # m.DispatchGenByFuel[g, tp, f] * m.tp_weight_in_year[tp] * 0.001 # convert from MWh to GWh + # for f in ['Diesel', 'LSFO', 'LSFO-Diesel-Blend'] + # for g in m.GENERATION_PROJECTS_BY_FUEL[f] + # for tp in m.TPS_IN_PERIOD[per] if (g, tp) in m.GEN_TPS + # ) + # ) + # m.Upper_Limit_Oil_Power = Constraint(m.PERIODS, rule=lambda m, per: + # (m.OilProductionGWhPerYear[per] <= 300) + # if per + 0.5 * m.period_length_years[per] >= 2021 + # else + # Constraint.Skip + # ) + # # lower limit is in place to roughly reflect HECO's plan + # m.Lower_Limit_Oil_Power = Constraint(m.PERIODS, rule=lambda m, per: + # (m.OilProductionGWhPerYear[per] >= 100) + # if per + m.period_length_years[per] < 2040 # relax constraint if period ends after 2040 + # else + # Constraint.Skip + # ) + + # force LNG conversion in 2021 (modeled on similar constraint in lng_conversion.py) + # This could have extra code to skip the constraint if there are no periods after 2021, + # but it is unlikely ever to be run that way. + # Note: this is not needed if some plants are forced to run on LNG + # NOTE: this is no longer used; use '--force-lng-tier container' instead + # m.PSIP_Force_LNG_Conversion = Constraint(m.LOAD_ZONES, rule=lambda m, z: + # m.ConvertToLNG[ + # z, + # min(per for per in m.PERIODS if per + m.period_length_years[per] > 2021) + # ] == 1 + # ) + + # # Kahe 5, Kahe 6, Kalaeloa and CC_383 only burn LNG after 2021 + # # This is not used because it creates a weird situation where HECO runs less-efficient non-LNG + # # plants instead of more efficient LNG-capable plants on oil. + # # there may be a faster way to build this, but it's not clear what + # m.PSIP_Force_LNG_Use = Constraint(m.GEN_TP_FUELS, rule=lambda m, g, tp, fuel: + # (m.GenFuelUseRate[g, tp, fuel] == 0) + # if g in m.LNG_CONVERTED_PLANTS + # and fuel != 'LNG' + # and m.tp_period[tp] + m.period_length_years[m.tp_period[tp]] > 2021 + # else + # Constraint.Skip + # ) + + # don't allow construction of other technologies (e.g., pumped hydro, fuel cells) + advanced_tech_vars = [ + "BuildPumpedHydroMW", "BuildAnyPumpedHydro", + "BuildElectrolyzerMW", "BuildLiquifierKgPerHour", "BuildLiquidHydrogenTankKg", + "BuildFuelCellMW", + ] + def no_advanced_tech_rule_factory(v): + return lambda m, *k: (getattr(m, v)[k] == 0) + for v in advanced_tech_vars: + try: + var = getattr(m, v) + setattr(m, "PSIP_No_"+v, Constraint(var._index, rule=no_advanced_tech_rule_factory(v))) + except AttributeError: + pass # model doesn't have this var + + # # don't allow any changes to the fuel market, including bulk LNG + # # not used now; use "--force-lng-tier container" instead + # m.PSIP_Deactivate_Limited_RFM_Supply_Tiers = Constraint(m.RFM_SUPPLY_TIERS, + # rule=lambda m, r, p, st: + # Constraint.Skip if (m.rfm_supply_tier_limit[r, p, st] == float('inf')) + # else (m.RFMSupplyTierActivate[r, p, st] == 0) + # ) diff --git a/switch_model/hawaii/register_hi_storage_reserves.py b/switch_model/hawaii/register_hi_storage_reserves.py new file mode 100644 index 000000000..611d20c3a --- /dev/null +++ b/switch_model/hawaii/register_hi_storage_reserves.py @@ -0,0 +1,125 @@ +""" +Defines types of reserve target and components that contribute to reserves, +and enforces the reserve targets. 
+""" +import os +from pyomo.environ import * + +# TODO: use standard reserves module for this +# note: this is modeled off of hawaii.reserves, to avoid adding lots of +# reserve-related code to the pumped storage and (formerly) hydrogen modules. +# But eventually those modules should use the standard storage module and +# extend that as needed. + +def define_arguments(argparser): + argparser.add_argument('--hawaii-storage-reserve-types', nargs='+', default=['spinning'], + help= + "Type(s) of reserves to provide from " # hydrogen and/or + "pumped-hydro storage " + "(e.g., 'contingency regulation'). " + "Default is generic 'spinning'. Specify 'none' to disable." + ) + +def define_components(m): + + if [rt.lower() for rt in m.options.hawaii_storage_reserve_types] != ['none']: + if hasattr(m, 'PumpedHydroProjGenerateMW'): + m.PumpedStorageCharging = Var(m.PH_GENS, m.TIMEPOINTS, within=Binary) + m.Set_PumpedStorageCharging_Flag = Constraint(m.PH_GENS, m.TIMEPOINTS, rule=lambda m, phg, tp: + m.PumpedHydroProjGenerateMW[phg, tp] + <= + m.ph_max_capacity_mw[phg] * (1 - m.PumpedStorageCharging[phg, tp]) + ) + # choose how much pumped storage reserves to provide each hour, without reversing direction + m.PumpedStorageSpinningUpReserves = Var(m.PH_GENS, m.TIMEPOINTS, within=NonNegativeReals) + m.Limit_PumpedStorageSpinningUpReserves_When_Charging = Constraint( + m.PH_GENS, m.TIMEPOINTS, + rule=lambda m, phg, tp: + m.PumpedStorageSpinningUpReserves[phg, tp] + <= + m.PumpedHydroProjStoreMW[phg, tp] + + m.ph_max_capacity_mw[phg] * (1 - m.PumpedStorageCharging[phg, tp]) # relax when discharging + ) + m.Limit_PumpedStorageSpinningUpReserves_When_Discharging = Constraint( + m.PH_GENS, m.TIMEPOINTS, + rule=lambda m, phg, tp: + m.PumpedStorageSpinningUpReserves[phg, tp] + <= + m.Pumped_Hydro_Proj_Capacity_MW[phg, m.tp_period[tp]] - m.PumpedHydroProjGenerateMW[phg, tp] + + m.ph_max_capacity_mw[phg] * m.PumpedStorageCharging[phg, tp] # relax when charging + ) + # TODO: implement down reserves + m.PumpedStorageSpinningDownReserves = Var(m.PH_GENS, m.TIMEPOINTS, within=NonNegativeReals, bounds=(0,0)) + + # Register with spinning reserves + if hasattr(m, 'Spinning_Reserve_Up_Provisions'): # using spinning_reserves_advanced + # calculate available slack from hawaii storage + def up_expr(m, a, tp): + avail = 0.0 + # now handled in hydrogen module: + # if hasattr(m, 'HydrogenSlackUp'): + # avail += sum(m.HydrogenSlackUp[z, tp] for z in m.ZONES_IN_BALANCING_AREA[a]) + if hasattr(m, 'PumpedStorageSpinningUpReserves'): + avail += sum( + m.PumpedStorageSpinningUpReserves[phg, tp] + for phg in m.PH_GENS + if m.ph_load_zone[phg] in m.ZONES_IN_BALANCING_AREA[a] + ) + return avail + m.HawaiiStorageSlackUp = Expression(m.BALANCING_AREA_TIMEPOINTS, rule=up_expr) + def down_expr(m, a, tp): + avail = 0.0 + # if hasattr(m, 'HydrogenSlackDown'): + # avail += sum(m.HydrogenSlackDown[z, tp] for z in m.ZONES_IN_BALANCING_AREA[a]) + if hasattr(m, 'PumpedStorageSpinningDownReserves'): + avail += sum( + m.PumpedStorageSpinningDownReserves[phg, tp] + for phg in m.PH_GENS + if m.ph_load_zone[phg] in m.ZONES_IN_BALANCING_AREA[a] + ) + return avail + m.HawaiiStorageSlackDown = Expression(m.BALANCING_AREA_TIMEPOINTS, rule=down_expr) + + if hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'): + # using advanced formulation, index by reserve type, balancing area, timepoint + # define variables for each type of reserves to be provided + # choose how to allocate the slack between the different reserve products + m.HI_STORAGE_SPINNING_RESERVE_TYPES = Set( 
+ initialize=m.options.hawaii_storage_reserve_types + ) + m.HawaiiStorageSpinningReserveUp = Var( + m.HI_STORAGE_SPINNING_RESERVE_TYPES, m.BALANCING_AREA_TIMEPOINTS, + within=NonNegativeReals + ) + m.HawaiiStorageSpinningReserveDown = Var( + m.HI_STORAGE_SPINNING_RESERVE_TYPES, m.BALANCING_AREA_TIMEPOINTS, + within=NonNegativeReals + ) + # constrain reserve provision within available slack + m.Limit_HawaiiStorageSpinningReserveUp = Constraint( + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, ba, tp: + sum( + m.HawaiiStorageSpinningReserveUp[rt, ba, tp] + for rt in m.HI_STORAGE_SPINNING_RESERVE_TYPES + ) <= m.HawaiiStorageSlackUp[ba, tp] + ) + m.Limit_HawaiiStorageSpinningReserveDown = Constraint( + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, ba, tp: + sum( + m.HawaiiStorageSpinningReserveDown[rt, ba, tp] + for rt in m.HI_STORAGE_SPINNING_RESERVE_TYPES + ) <= m.HawaiiStorageSlackDown[ba, tp] + ) + m.Spinning_Reserve_Up_Provisions.append('HawaiiStorageSpinningReserveUp') + m.Spinning_Reserve_Down_Provisions.append('HawaiiStorageSpinningReserveDown') + else: + # using older formulation, only one type of spinning reserves, indexed by balancing area, timepoint + if m.options.hawaii_storage_reserve_types != ['spinning']: + raise ValueError( + 'Unable to use reserve types other than "spinning" with simple spinning reserves module.' + ) + m.Spinning_Reserve_Up_Provisions.append('HawaiiStorageSlackUp') + m.Spinning_Reserve_Down_Provisions.append('HawaiiStorageSlackDown') + diff --git a/switch_model/hawaii/reserves.py b/switch_model/hawaii/reserves.py index c9b5946d8..d69ecf439 100644 --- a/switch_model/hawaii/reserves.py +++ b/switch_model/hawaii/reserves.py @@ -5,6 +5,19 @@ import os from pyomo.environ import * +# TODO: use standard reserves module for this + +def define_arguments(argparser): + argparser.add_argument('--reserves-from-storage', action='store_true', default=True, + help="Allow storage (batteries and hydrogen) to provide up- and down-reserves.") + argparser.add_argument('--no-reserves-from-storage', dest='reserves_from_storage', + action='store_false', + help="Don't allow storage (batteries and hydrogen) to provide up- and down-reserves.") + argparser.add_argument('--reserves-from-demand-response', action='store_true', default=True, + help="Allow demand response to provide up- and down-reserves.") + argparser.add_argument('--no-reserves-from-demand-response', dest='reserves_from_demand_response', + action='store_false', + help="Don't allow demand response to provide up- and down-reserves.") def define_components(m): """ @@ -17,21 +30,21 @@ def define_components(m): # projects that can provide reserves # TODO: add batteries, hydrogen and pumped storage to this - m.FIRM_GENECTS = Set( + m.FIRM_GENS = Set( initialize=m.GENERATION_PROJECTS, #filter=lambda m, p: m.gen_energy_source[p] not in ['Wind', 'Solar'] ) m.FIRM_GEN_TPS = Set( initialize=m.GEN_TPS, - filter=lambda m, p, tp: p in m.FIRM_GENECTS + filter=lambda m, p, tp: p in m.FIRM_GENS ) - m.CONTINGENCY_GENECTS = Set( + m.CONTINGENCY_GENS = Set( initialize=m.GENERATION_PROJECTS, filter=lambda m, p: p in m.DISCRETELY_SIZED_GENS ) m.CONTINGENCY_GEN_TPS = Set( initialize=m.GEN_TPS, - filter=lambda m, p, tp: p in m.CONTINGENCY_GENECTS + filter=lambda m, p, tp: p in m.CONTINGENCY_GENS ) # Calculate spinning reserve requirements. 
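The paired `--reserves-from-storage` / `--no-reserves-from-storage` options added to reserves.py above use a standard argparse idiom: two flags share one `dest`, the positive flag carries the default, and the negative flag stores False. A minimal, self-contained sketch of the pattern (the parser and flag names here are illustrative, not part of the patch):

    import argparse

    parser = argparse.ArgumentParser()
    # the positive flag sets the default; both flags write to the same dest
    parser.add_argument('--reserves-from-storage', dest='reserves_from_storage',
                        action='store_true', default=True)
    parser.add_argument('--no-reserves-from-storage', dest='reserves_from_storage',
                        action='store_false')

    assert parser.parse_args([]).reserves_from_storage is True
    assert parser.parse_args(['--no-reserves-from-storage']).reserves_from_storage is False

This keeps a single boolean option readable from `m.options` while letting users override the default from the command line in either direction.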
@@ -59,7 +72,7 @@ def define_components(m): # more conservative values (found by giving 10x weight to times when we provide less reserves than GE): # [1., 1., 1., 0.25760558, 0.18027923, 0.49123101] - m.regulating_reserve_requirement_mw = Expression(m.TIMEPOINTS, rule=lambda m, tp: sum( + m.RegulatingReserveRequirementMW = Expression(m.TIMEPOINTS, rule=lambda m, tp: sum( m.GenCapacity[g, m.tp_period[tp]] * min( m.regulating_reserve_fraction[m.gen_tech[g]] * m.gen_max_capacity_factor[g, tp], @@ -69,18 +82,18 @@ def define_components(m): if m.gen_tech[g] in m.regulating_reserve_fraction and (g, tp) in m.GEN_TPS )) +def define_dynamic_components(m): + # these are defined late, so they can check whether various components have been defined by other modules + # TODO: create a central registry for components that contribute to reserves + # Calculate contingency reserve requirements m.ContingencyReserveUpRequirement = Var(m.TIMEPOINTS, within=NonNegativeReals) # Apply a simple n-1 contingency reserve requirement; # we treat each project as a separate contingency - # Note: we provide reserves for the full committed amount of the project so that + # Note: we provide reserves for the full committed amount of each unit so that # if any of the capacity is being used for regulating reserves, that will be backed # up by contingency reserves. - # TODO: convert this to a big-m constraint with the following elements: - # binary on/off flag for each g, tp in CONTINGENCY_GEN_TPS - # constraint that ProjDispatch[g, tp] <= binary * gen_max_capacity[g] - # constraint that m.ContingencyReserveUpRequirement[tp] >= binary * m.gen_unit_size[g] - # (but this may make the model too slow to solve!) + # note: this uses a binary run/no-run flag, so it only requires one unit's worth of reserves per project m.CommitGenFlag = Var(m.CONTINGENCY_GEN_TPS, within=Binary) m.Set_CommitGenFlag = Constraint( m.CONTINGENCY_GEN_TPS, @@ -93,71 +106,108 @@ def define_components(m): # m.ContingencyReserveUpRequirement[tp] >= m.CommitGen[g, tp] m.ContingencyReserveUpRequirement[tp] >= m.CommitGenFlag[g, tp] * m.gen_unit_size[g] ) + + m.ContingencyReserveDownRequirement = Var(m.TIMEPOINTS, within=NonNegativeReals) + # For now, we require down reserves equal to 10% of all loads, including + # baseline load, demand response adjustment, electric vehicles, battery charging + # and hydrogen. It would be possible to split these into centralized and distributed + # loads and set separate requirements for each (e.g., require down reserves to exceed + # 10% of total decentralized load plus the size of the contingency for each + # centralized load). However, it's not obvious how to set the contingency for + # centralized loads, which are modular and may be divided among several locations, + # so we just assume we could lose 10% of all loads of any type, at any time.
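As a back-of-envelope reading of the 10% rule described above (all numbers hypothetical, chosen only to illustrate the floor that the constraint just below enforces):

    # one timepoint's zone power withdrawals, in MW (hypothetical values)
    withdrawals = {'zone_demand_mw': 1000.0, 'ChargeEVs': 120.0, 'ChargeBattery': 80.0}
    # the constraint requires ContingencyReserveDownRequirement[tp] to be at
    # least 10% of the total withdrawals in that timepoint
    down_reserve_floor = 0.1 * sum(withdrawals.values())  # = 120.0 MW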
+ m.ContingencyReserveDownRequirement_Calculate = Constraint( + m.TIMEPOINTS, + rule=lambda m, tp: + m.ContingencyReserveDownRequirement[tp] >= + 0.1 * sum(getattr(m, x)[z, tp] for x in m.Zone_Power_Withdrawals for z in m.LOAD_ZONES) + ) - # Calculate total spinning reserve requirement + # Calculate total spinning reserve requirements m.SpinningReserveUpRequirement = Expression(m.TIMEPOINTS, rule=lambda m, tp: - m.regulating_reserve_requirement_mw[tp] + m.ContingencyReserveUpRequirement[tp] + m.RegulatingReserveRequirementMW[tp] + m.ContingencyReserveUpRequirement[tp] ) - # require 10% down reserves at all times m.SpinningReserveDownRequirement = Expression(m.TIMEPOINTS, rule=lambda m, tp: - 0.10 * sum(m.zone_demand_mw[z, tp] for z in m.LOAD_ZONES) + m.ContingencyReserveDownRequirement[tp] ) -def define_dynamic_components(m): - # these are defined late, so they can check whether various components have been defined by other modules - # TODO: create a central registry for components that contribute to reserves # Available reserves - m.SpinningReservesUpAvailable = Expression(m.TIMEPOINTS, rule=lambda m, tp: - sum(m.DispatchSlackUp[p, tp] for p in m.FIRM_GENECTS if (p, tp) in m.GEN_TPS) - + ( - sum(m.BatterySlackUp[z, tp] for z in m.LOAD_ZONES) - if hasattr(m, 'BatterySlackDown') - else 0.0 - ) - + ( - sum(m.HydrogenSlackUp[z, tp] for z in m.LOAD_ZONES) - if hasattr(m, 'HydrogenSlackUp') - else 0.0 - ) - + ( - sum(m.DemandUpReserves[z, tp] for z in m.LOAD_ZONES) - if hasattr(m, 'DemandUpReserves') - else 0.0 + def expr(m, tp): + STORAGE_GENS = getattr(m, 'STORAGE_GENS', []) + # all regular generators; omit storage because they'll be added separately if needed + avail = sum( + m.DispatchSlackUp[g, tp] + for g in m.FIRM_GENS + if (g, tp) in m.GEN_TPS and g not in STORAGE_GENS ) - + ( - sum(m.ShiftDemand[z, tp] - m.ShiftDemand[z, tp].lb for z in m.LOAD_ZONES) - if hasattr(m, 'ShiftDemand') - else 0.0 + if m.options.reserves_from_storage: + # hawaii battery and hydrogen modules + if hasattr(m, 'BatterySlackUp'): + avail += sum(m.BatterySlackUp[z, tp] for z in m.LOAD_ZONES) + if hasattr(m, 'HydrogenSlackUp'): + avail += sum(m.HydrogenSlackUp[z, tp] for z in m.LOAD_ZONES) + # standard storage module (can stop charging and raise output to max) + avail += sum( + m.DispatchSlackUp[g, tp] + m.ChargeStorage[g, tp] + for g in STORAGE_GENS + if (g, tp) in m.GEN_TPS + ) + if m.options.reserves_from_demand_response: + if hasattr(m, 'DemandUpReserves'): + avail += sum(m.DemandUpReserves[z, tp] for z in m.LOAD_ZONES) + if hasattr(m, 'ShiftDemand'): + avail += sum(m.ShiftDemand[z, tp] - m.ShiftDemand[z, tp].lb for z in m.LOAD_ZONES) + if hasattr(m, 'ChargeEVs') and hasattr(m.options, 'ev_timing') and m.options.ev_timing=='optimal': + avail += sum(m.ChargeEVs[z, tp] for z in m.LOAD_ZONES) + if hasattr(m, 'UnservedUpReserves'): + avail += m.UnservedUpReserves[tp] + # if tp == 2045012604: + # print "inspect avail to see up reserve calculation" + # import pdb; pdb.set_trace() + return avail + m.SpinningReservesUpAvailable = Expression(m.TIMEPOINTS, rule=expr) + def expr(m, tp): + STORAGE_GENS = getattr(m, 'STORAGE_GENS', []) + # all regular generators; omit storage because they'll be added separately if needed + avail = sum( + m.DispatchSlackDown[g, tp] + for g in m.FIRM_GENS + if (g, tp) in m.GEN_TPS and g not in STORAGE_GENS ) - + ( - sum(m.ChargeEVs[z, tp] for z in m.LOAD_ZONES) - if hasattr(m, 'ChargeEVs') and hasattr(m.options, 'ev_timing') and m.options.ev_timing=='optimal' - else 0.0 - ) - ) - 
m.SpinningReservesDownAvailable = Expression(m.TIMEPOINTS, rule=lambda m, tp: - sum(m.DispatchSlackDown[p, tp] for p in m.FIRM_GENECTS if (p, tp) in m.GEN_TPS) - + ( - sum(m.BatterySlackDown[z, tp] for z in m.LOAD_ZONES) - if hasattr(m, 'BatterySlackDown') - else 0.0 - ) - + ( - sum(m.HydrogenSlackDown[z, tp] for z in m.LOAD_ZONES) - if hasattr(m, 'HydrogenSlackDown') - else 0.0 - ) - + ( - sum(m.DemandDownReserves[z, tp] for z in m.LOAD_ZONES) - if hasattr(m, 'DemandDownReserves') - else 0.0 - ) - # note: we currently ignore down-reserves (option of increasing consumption) - # from EVs and simple demand response, since it's not clear how high they could go - ) + if m.options.reserves_from_storage: + if hasattr(m, 'BatterySlackDown'): + avail += sum(m.BatterySlackDown[z, tp] for z in m.LOAD_ZONES) + if hasattr(m, 'HydrogenSlackDown'): + avail += sum(m.HydrogenSlackDown[z, tp] for z in m.LOAD_ZONES) + # standard storage module (can stop producing power and raise charging to max) + avail += sum( + m.DispatchSlackDown[g, tp] + + m.DispatchUpperLimit[g, tp] * m.gen_store_to_release_ratio[g] + - m.ChargeStorage[g, tp] + for g in STORAGE_GENS + if (g, tp) in m.GEN_TPS + ) + + if m.options.reserves_from_demand_response: + if hasattr(m, 'DemandDownReserves'): + avail += sum(m.DemandDownReserves[z, tp] for z in m.LOAD_ZONES) + if hasattr(m, 'ShiftDemand'): + # avail += sum(m.ShiftDemand[z, tp].ub - m.ShiftDemand[z, tp] for z in m.LOAD_ZONES) + avail += sum( + 24/3 * m.demand_response_max_share * m.zone_demand_mw[z, tp] + - m.ShiftDemand[z, tp] + for z in m.LOAD_ZONES + ) + # note: we currently ignore down-reserves (option of increasing consumption) + # from EVs since it's not clear how high they could go; we could revisit this if + # down-reserves have a positive price at equilibrium (probably won't) + if hasattr(m, 'UnservedDownReserves'): + avail += m.UnservedDownReserves[tp] + return avail + m.SpinningReservesDownAvailable = Expression(m.TIMEPOINTS, rule=expr) - # Meet the reserve requirements + # Meet the reserve requirements (we use zero on RHS to enforce the right sign for the duals) m.Satisfy_Spinning_Reserve_Up_Requirement = Constraint(m.TIMEPOINTS, rule=lambda m, tp: m.SpinningReservesUpAvailable[tp] - m.SpinningReserveUpRequirement[tp] >= 0 ) @@ -189,7 +239,7 @@ def define_dynamic_components(m): # project reporting types are defined in save_custom_results.py # Note: this assumes timepoints are evenly spaced, and timeseries begin at midnight # m.CYCLING_PLANTS_TIMEPOINTS = Set(dimen=2, initialize=lambda m: [ - # (g, tp) for g in m.REPORTING_TYPE_GENECTS['Cycling'] + # (g, tp) for g in m.REPORTING_TYPE_GENS['Cycling'] # for tp in m.TPS_FOR_GEN[g] # ]) # m.Cycle_Plants = Constraint(m.CYCLING_PLANTS_TIMEPOINTS, rule=lambda m, g, tp: @@ -206,6 +256,6 @@ def define_dynamic_components(m): # switch_data.load_aug( # filename=os.path.join(inputs_dir, 'reserve_requirements.tab'), # auto_select=True, -# param=(m.regulating_reserve_requirement_mw)) +# param=(m.RegulatingReserveRequirementMW)) diff --git a/switch_model/hawaii/rps.py b/switch_model/hawaii/rps.py index e06562e6d..7aa54861e 100644 --- a/switch_model/hawaii/rps.py +++ b/switch_model/hawaii/rps.py @@ -5,30 +5,55 @@ from util import get def define_arguments(argparser): - argparser.add_argument('--biofuel-limit', type=float, default=1.0, + argparser.add_argument('--biofuel-limit', type=float, default=1.0, help="Maximum fraction of power that can be obtained from biofuel in any period (default=1.0)") +
argparser.add_argument('--biofuel-switch-threshold', type=float, default=1.0, + help="RPS level at which all thermal plants switch to biofuels (0.0-1.0, default=1.0); use with --rps-allocation fuel_switch_at_high_rps") argparser.add_argument('--rps-activate', default='activate', - dest='rps_level', action='store_const', const='activate', + dest='rps_level', action='store_const', const='activate', help="Activate RPS (on by default).") - argparser.add_argument('--rps-deactivate', - dest='rps_level', action='store_const', const='deactivate', - help="Dectivate RPS.") - argparser.add_argument('--rps-no-renewables', - dest='rps_level', action='store_const', const='no_renewables', - help="Deactivate RPS and don't allow any new renewables.") + argparser.add_argument('--rps-deactivate', + dest='rps_level', action='store_const', const='deactivate', + help="Deactivate RPS.") + argparser.add_argument('--rps-no-new-renewables', + dest='rps_level', action='store_const', const='no_new_renewables', + help="Deactivate RPS and don't allow any new renewables except to replace existing capacity.") + argparser.add_argument('--rps-no-new-wind', action='store_true', default=False, + help="Don't allow any new wind capacity except to replace existing capacity.") + argparser.add_argument('--rps-no-wind', action='store_true', default=False, + help="Don't allow any new wind capacity or replacement of existing capacity.") argparser.add_argument( - '--rps-allocation', default=None, + '--rps-allocation', default=None, choices=[ - 'quadratic', - 'fuel_switch_by_period', 'fuel_switch_by_timeseries', - 'full_load_heat_rate', + 'quadratic', + 'fuel_switch_by_period', 'fuel_switch_by_timeseries', + 'full_load_heat_rate', 'split_commit', 'relaxed_split_commit', + 'fuel_switch_at_high_rps', ], help="Method to use to allocate power output among fuels. Default is fuel_switch_by_period for models " + "with unit commitment, full_load_heat_rate for models without." ) - + argparser.add_argument('--rps-targets', nargs='*', default=None, + help="Targets to use for RPS, specified as --rps-targets year1 level1 year2 level2 ..., " + "where years are transition years and levels are fractions between 0 and 1. " + "If not specified, values from rps_targets.tab will be used." + ) + +# TODO: make this work with progressive hedging as follows: +# add a variable indexed over all weather scenarios and all cost scenarios, +# which shows how much of the RPS will be allocated to each scenario. +# Problem: we multiply the RPS target by total generation, so this will become quadratic? +# May instead need to treat the RPS more like a limit on non-renewable production (as a fraction of loads)? +# Designate the allocations as a first-stage variable. +# Require each subproblem to work within its part of the allocation. Also require in each subproblem +# that the allocations across all weather scenarios (within each cost scenario) average out to match the +# actual target (when applying the scenario weights). +# Then PHA will force all the scenarios to agree on how the target is allocated among them. +# Could do the same with hydrogen storage: require average hydrogen stored across all scenarios +# to be less than the size of the storage built. 
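The `--rps-targets year1 level1 year2 level2 ...` option added above is parsed later in this patch by zipping an iterator against itself, which consumes the flat argument list in pairs. A standalone sketch of that idiom (sample values invented):

    args = ['2020', '0.3', '2030', '0.5', '2045', '1.0']  # what argparse delivers for nargs='*'
    it = iter(args)
    # zip(it, it) pulls two items per step from the same iterator, yielding (year, target) pairs
    rps_targets = {int(year): float(target) for year, target in zip(it, it)}
    assert rps_targets == {2020: 0.3, 2030: 0.5, 2045: 1.0}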
+ def define_components(m): """ @@ -36,11 +61,12 @@ def define_components(m): ################### # RPS calculation ################## - + m.f_rps_eligible = Param(m.FUELS, within=Binary) - m.RPS_ENERGY_SOURCES = Set(initialize=lambda m: - list(m.NON_FUEL_ENERGY_SOURCES) + [f for f in m.FUELS if m.f_rps_eligible[f]]) + m.RPS_ENERGY_SOURCES = Set(initialize=lambda m: + [s for s in m.NON_FUEL_ENERGY_SOURCES if s != 'Battery'] + [f for f in m.FUELS if m.f_rps_eligible[f]] + ) m.RPS_YEARS = Set(ordered=True) m.rps_target = Param(m.RPS_YEARS) @@ -56,24 +82,23 @@ def rps_target_for_period_rule(m, p): # m.rps_fuel_limit = Param(default=float("inf"), mutable=True) m.rps_fuel_limit = Param(initialize=m.options.biofuel_limit, mutable=True) - # Define DispatchGenRenewableMW, which shows the amount of power produced + # Define DispatchGenRenewableMW, which shows the amount of power produced # by each project from each fuel during each time step. define_DispatchGenRenewableMW(m) # calculate amount of power produced from renewable fuels during each period m.RPSFuelPower = Expression(m.PERIODS, rule=lambda m, per: sum( - m.DispatchGenRenewableMW[p, tp] * m.tp_weight[tp] - for p in m.FUEL_BASED_GENS - if (p, m.TPS_IN_PERIOD[per].first()) in m.GEN_TPS - for tp in m.TPS_IN_PERIOD[per] + m.DispatchGenRenewableMW[g, tp] * m.tp_weight[tp] + for g in m.FUEL_BASED_GENS + for tp in m.TPS_FOR_GEN_IN_PERIOD[g, per] ) ) - # Note: this rule ignores pumped hydro and batteries, so it could be gamed by producing extra - # RPS-eligible power and burning it off in storage losses; on the other hand, + # Note: this rule ignores pumped hydro and batteries, so it could be gamed by producing extra + # RPS-eligible power and burning it off in storage losses; on the other hand, # it also neglects the (small) contribution from net flow of pumped hydro projects. - # TODO: incorporate pumped hydro into this rule, maybe change the target to refer to + # TODO: incorporate pumped hydro into this rule, maybe change the target to refer to # sum(getattr(m, component)[z, t] for z in m.LOAD_ZONES) for component in m.Zone_Power_Injections) # power production that can be counted toward the RPS each period @@ -81,23 +106,23 @@ def rps_target_for_period_rule(m, p): m.RPSFuelPower[per] + sum( - m.DispatchGen[p, tp] * m.tp_weight[tp] - for f in m.NON_FUEL_ENERGY_SOURCES if f in m.RPS_ENERGY_SOURCES - for p in m.GENERATION_PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[f] - if (p, m.TPS_IN_PERIOD[per].first()) in m.GEN_TPS - for tp in m.TPS_IN_PERIOD[per] + m.DispatchGen[g, tp] * m.tp_weight[tp] + for f in m.NON_FUEL_ENERGY_SOURCES if f in m.RPS_ENERGY_SOURCES + for g in m.GENERATION_PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[f] + for tp in m.TPS_FOR_GEN_IN_PERIOD[g, per] ) ) # total power production each period (against which RPS is measured) + # note: we exclude production from storage m.RPSTotalPower = Expression(m.PERIODS, rule=lambda m, per: sum( - m.DispatchGen[p, tp] * m.tp_weight[tp] - for p in m.GENERATION_PROJECTS if (p, m.TPS_IN_PERIOD[per].first()) in m.GEN_TPS - for tp in m.TPS_IN_PERIOD[per] + m.DispatchGen[g, tp] * m.tp_weight[tp] + for g in m.GENERATION_PROJECTS if g not in getattr(m, 'STORAGE_GENS', []) + for tp in m.TPS_FOR_GEN_IN_PERIOD[g, per] ) ) - + if m.options.rps_level == 'activate': # we completely skip creating the constraint if the RPS is not activated. 
# this makes it easy for other modules to check whether there's an RPS in effect @@ -106,49 +131,66 @@ def rps_target_for_period_rule(m, p): m.RPS_Enforce = Constraint(m.PERIODS, rule=lambda m, per: m.RPSEligiblePower[per] >= m.rps_target_for_period[per] * m.RPSTotalPower[per] ) - elif m.options.rps_level == 'no_renewables': - # prevent construction of any new exclusively-renewable projects - # (doesn't actually ban use of biofuels in existing or multi-fuel projects, - # but that could be done with --biofuel-limit 0) - m.No_Renewables = Constraint(m.NEW_GEN_BLD_YRS, rule=lambda m, g, bld_yr: - (m.BuildGen[g, bld_yr] == 0) - if m.gen_energy_source[g] in m.RPS_ENERGY_SOURCES else - Constraint.Skip + elif m.options.rps_level == 'no_new_renewables': + # prevent construction of any new exclusively-renewable projects, but allow + # replacement of existing ones + # (doesn't ban use of biofuels in existing or multi-fuel projects, but that could + # be done with --biofuel-limit 0) + m.No_New_Renewables = Constraint(m.NEW_GEN_BLD_YRS, rule=lambda m, g, bld_yr: + (m.GenCapacity[g, bld_yr] <= m.GenCapacity[g, m.PERIODS.first()] - m.BuildGen[g, m.PERIODS.first()]) + if m.gen_energy_source[g] in m.RPS_ENERGY_SOURCES + else Constraint.Skip + ) + + wind_energy_sources = {'WND'} + if m.options.rps_no_new_wind: + # limit wind to existing capacity + m.No_New_Wind = Constraint(m.NEW_GEN_BLD_YRS, rule=lambda m, g, bld_yr: + (m.GenCapacity[g, bld_yr] <= m.GenCapacity[g, m.PERIODS.first()] - m.BuildGen[g, m.PERIODS.first()]) + if m.gen_energy_source[g] in wind_energy_sources + else Constraint.Skip + ) + if m.options.rps_no_wind: + # don't build any new capacity or replace existing + m.No_Wind = Constraint(m.NEW_GEN_BLD_YRS, rule=lambda m, g, bld_yr: + (m.BuildGen[g, bld_yr] == 0.0) + if m.gen_energy_source[g] in wind_energy_sources + else Constraint.Skip ) # Don't allow (bio)fuels to provide more than a certain percentage of the system's energy # Note: when the system really wants to use more biofuel, it is possible to "game" this limit by # cycling power through batteries, pumped storage, transmission lines or the hydrogen system to - # burn off some - # extra non-fuel energy, allowing more biofuel into the system. (This doesn't typically happen + # burn off some + # extra non-fuel energy, allowing more biofuel into the system. (This doesn't typically happen # with batteries due to high variable costs -- e.g., it has to cycle 4 kWh through a battery to - # consume 1 kWh of non-biofuel power, to allow 0.05 kWh of additional biofuel into the system. + # consume 1 kWh of non-biofuel power, to allow 0.05 kWh of additional biofuel into the system. # Even if this can save $0.5/kWh, if battery cycling costs $0.15/kWh, that means $0.60 extra to # save $0.025. It also doesn't happen in the hydrogen scenario, since storing intermittent power # directly as hydrogen can directly displace biofuel consumption. But it could happen if batteries # have low efficiency or low cycling cost, or if transmission losses are significant.) # One solution would be to only apply the RPS to the predefined load (not generation), but then # transmission and battery losses could be served by fossil fuels. - # Alternatively: limit fossil fuels to (1-rps) * standard loads + # Alternatively: limit fossil fuels to (1-rps) * standard loads # and limit biofuels to (1-bio)*standard loads. This would force renewables to be used for # all losses, which is slightly inaccurate. 
# TODO: fix the problem noted above; for now we don't worry too much because there are no # transmission losses, the cycling costs for batteries are too high and pumped storage is only # adopted on a small scale. - + m.RPS_Fuel_Cap = Constraint(m.PERIODS, rule = lambda m, per: m.RPSFuelPower[per] <= m.rps_fuel_limit * m.RPSTotalPower[per] ) def define_DispatchGenRenewableMW(m): - # Define DispatchGenRenewableMW, which shows the amount of power produced + # Define DispatchGenRenewableMW, which shows the amount of power produced # by each project from each fuel during each time step. # This must be linear, because it may be used in RPS calculations. # This can get complex when a project uses multiple fuels and incremental - # heat rate curves. + # heat rate curves. if m.options.rps_allocation is None: if hasattr(m, 'FUEL_USE_SEGMENTS_FOR_GEN'): - # using heat rate curves and possibly startup fuel; + # using heat rate curves and possibly startup fuel; # have to do more advanced allocation of power to fuels m.options.rps_allocation = 'fuel_switch_by_period' else: @@ -156,7 +198,7 @@ def define_DispatchGenRenewableMW(m): m.options.rps_allocation = 'full_load_heat_rate' if m.options.verbose: print "Using {} method to allocate DispatchGenRenewableMW".format(m.options.rps_allocation) - + if m.options.rps_allocation == 'full_load_heat_rate': simple_DispatchGenRenewableMW(m) elif m.options.rps_allocation == 'quadratic': @@ -169,18 +211,19 @@ def define_DispatchGenRenewableMW(m): split_commit_DispatchGenRenewableMW(m) elif m.options.rps_allocation == 'relaxed_split_commit': relaxed_split_commit_DispatchGenRenewableMW(m) - + elif m.options.rps_allocation == 'fuel_switch_at_high_rps': + fuel_switch_at_high_rps_DispatchGenRenewableMW(m) def simple_DispatchGenRenewableMW(m): # Allocate the power produced during each timepoint among the fuels. # When not using heat rate curves, this can be calculated directly from - # fuel usage and the full load heat rate. This also allows use of + # fuel usage and the full load heat rate. This also allows use of # multiple fuels in the same project at the same time. m.DispatchGenRenewableMW = Expression( - m._FUEL_BASED_GEN_TPS, + m.FUEL_BASED_GEN_TPS, rule=lambda m, g, t: sum( - m.GenFuelUseRate[g, t, f] + m.GenFuelUseRate[g, t, f] for f in m.FUELS_FOR_GEN[g] if m.f_rps_eligible[f] ) @@ -192,74 +235,74 @@ def split_commit_DispatchGenRenewableMW(m): # This approach requires the utility to designate part of their capacity for # renewable production and part for non-renewable, and show how they commit # and dispatch each part. The current version allows fractional commitment to - # each mode, but we could use integer commitment variables to force full units + # each mode, but we could use integer commitment variables to force full units # into each mode (more physically meaningful, but unnecessarily restrictive and # harder to calculate; the current version may serve as a reasonable accounting # method for multi-fuel projects in a partial-RPS environment). 
- + # TODO: limit this to projects that can use both renewable and non-renewable fuel - # TODO: force CommitGenRenewable == CommitGen when there's 100% RPS - # TODO: force DispatchGenRenewableMW == DispatchGen when there's 100% RPS + # TODO: force CommitGenRenewable == CommitGen when there's 100% RPS + # TODO: force DispatchGenRenewableMW == DispatchGen when there's 100% RPS # TODO: force CommitGenRenewable == 0 when there's 0% RPS # (these may not be needed: single-category projects will get dispatch forced to zero # in one category and forced up to total dispatch in another; non-renewable capacity # can't get committed in the 100% RPS due to non-zero min loads) - + # count amount of renewable power produced from project - m.DispatchGenRenewableMW = Var(m._FUEL_BASED_GEN_TPS, within=NonNegativeReals) - m.DispatchGenRenewableMW_Cap = Constraint(m._FUEL_BASED_GEN_TPS, + m.DispatchGenRenewableMW = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) + m.DispatchGenRenewableMW_Cap = Constraint(m.FUEL_BASED_GEN_TPS, rule = lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] <= m.DispatchGen[g, tp] ) # a portion of every startup and shutdown must be designated as renewable - m.CommitGenRenewable = Var(m._FUEL_BASED_GEN_TPS, within=NonNegativeReals) - m.CommitGenRenewable_Cap = Constraint(m._FUEL_BASED_GEN_TPS, + m.CommitGenRenewable = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) + m.CommitGenRenewable_Cap = Constraint(m.FUEL_BASED_GEN_TPS, rule = lambda m, g, tp: m.CommitGenRenewable[g, tp] <= m.CommitGen[g, tp] ) - m.StartupGenCapacityRenewable = Var(m._FUEL_BASED_GEN_TPS, within=NonNegativeReals) - m.StartupGenCapacityRenewable_Cap = Constraint(m._FUEL_BASED_GEN_TPS, + m.StartupGenCapacityRenewable = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) + m.StartupGenCapacityRenewable_Cap = Constraint(m.FUEL_BASED_GEN_TPS, rule = lambda m, g, tp: m.StartupGenCapacityRenewable[g, tp] <= m.StartupGenCapacity[g, tp] ) - m.ShutdownGenCapacityRenewable = Var(m._FUEL_BASED_GEN_TPS, within=NonNegativeReals) - m.ShutdownGenCapacityRenewable_Cap = Constraint(m._FUEL_BASED_GEN_TPS, + m.ShutdownGenCapacityRenewable = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) + m.ShutdownGenCapacityRenewable_Cap = Constraint(m.FUEL_BASED_GEN_TPS, rule = lambda m, g, tp: m.ShutdownGenCapacityRenewable[g, tp] <= m.ShutdownGenCapacity[g, tp] ) # chain commitments, startup and shutdown for renewables m.Commit_StartupGenCapacity_ShutdownGenCapacity_Consistency_Renewable = Constraint( - m._FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: + m.FUEL_BASED_GEN_TPS, + rule=lambda m, g, tp: m.CommitGenRenewable[g, m.tp_previous[tp]] - + m.StartupGenCapacityRenewable[g, tp] - - m.ShutdownGenCapacityRenewable[g, tp] + + m.StartupGenCapacityRenewable[g, tp] + - m.ShutdownGenCapacityRenewable[g, tp] == m.CommitGenRenewable[g, tp] ) # must use committed capacity for renewable production m.Enforce_Dispatch_Upper_Limit_Renewable = Constraint( - m._FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: + m.FUEL_BASED_GEN_TPS, + rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] <= m.CommitGenRenewable[g, tp] ) # can't dispatch non-renewable capacity below its lower limit m.Enforce_Dispatch_Lower_Limit_Non_Renewable = Constraint( - m._FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: + m.FUEL_BASED_GEN_TPS, + rule=lambda m, g, tp: (m.DispatchGen[g, tp] - m.DispatchGenRenewableMW[g, tp]) - >= - (m.CommitGen[g, tp] - m.CommitGenRenewable[g, tp]) + >= + (m.CommitGen[g, tp] - m.CommitGenRenewable[g, tp]) * m.gen_min_load_fraction_TP[g, tp] ) # use standard 
heat rate calculations for renewable and non-renewable parts m.ProjRenewableFuelUseRate_Calculate = Constraint( m.GEN_TPS_FUEL_PIECEWISE_CONS_SET, - rule=lambda m, g, tp, intercept, incremental_heat_rate: + rule=lambda m, g, tp, intercept, incremental_heat_rate: sum( - m.GenFuelUseRate[g, tp, f] + m.GenFuelUseRate[g, tp, f] for f in m.FUELS_FOR_GEN[g] if f in m.RPS_ENERGY_SOURCES - ) + ) >= m.StartupGenCapacityRenewable[g, tp] * m.gen_startup_fuel[g] / m.tp_duration_hrs[tp] + intercept * m.CommitGenRenewable[g, tp] @@ -267,12 +310,12 @@ def split_commit_DispatchGenRenewableMW(m): ) m.ProjNonRenewableFuelUseRate_Calculate = Constraint( m.GEN_TPS_FUEL_PIECEWISE_CONS_SET, - rule=lambda m, g, tp, intercept, incremental_heat_rate: + rule=lambda m, g, tp, intercept, incremental_heat_rate: sum( - m.GenFuelUseRate[g, tp, f] + m.GenFuelUseRate[g, tp, f] for f in m.FUELS_FOR_GEN[g] if f not in m.RPS_ENERGY_SOURCES - ) + ) >= (m.StartupGenCapacity[g, tp] - m.StartupGenCapacityRenewable[g, tp]) * m.gen_startup_fuel[g] / m.tp_duration_hrs[tp] + intercept * (m.CommitGen[g, tp] - m.CommitGenRenewable[g, tp]) @@ -286,38 +329,52 @@ def relaxed_split_commit_DispatchGenRenewableMW(m): # then replaced by m.DispatchGenRenewableMW. # This means all startup fuel can be non-renewable, except when the RPS # is 100%. - + # count amount of renewable power produced from project - m.DispatchGenRenewableMW = Var(m._FUEL_BASED_GEN_TPS, within=NonNegativeReals) - m.DispatchGenRenewableMW_Cap = Constraint(m._FUEL_BASED_GEN_TPS, + m.DispatchGenRenewableMW = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) + m.DispatchGenRenewableMW_Cap = Constraint(m.FUEL_BASED_GEN_TPS, rule = lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] <= m.DispatchGen[g, tp] ) - m.StartupGenCapacityRenewable = Var(m._FUEL_BASED_GEN_TPS, within=NonNegativeReals) - m.StartupGenCapacityRenewable_Cap = Constraint(m._FUEL_BASED_GEN_TPS, + m.StartupGenCapacityRenewable = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) + m.StartupGenCapacityRenewable_Cap = Constraint(m.FUEL_BASED_GEN_TPS, rule = lambda m, g, tp: m.StartupGenCapacityRenewable[g, tp] <= m.StartupGenCapacity[g, tp] ) # can't dispatch non-renewable capacity below its lower limit m.Enforce_Dispatch_Lower_Limit_Non_Renewable = Constraint( - m._FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: + m.FUEL_BASED_GEN_TPS, + rule=lambda m, g, tp: (m.DispatchGen[g, tp] - m.DispatchGenRenewableMW[g, tp]) - >= - (m.CommitGen[g, tp] - m.DispatchGenRenewableMW[g, tp]) + >= + (m.CommitGen[g, tp] - m.DispatchGenRenewableMW[g, tp]) * m.gen_min_load_fraction_TP[g, tp] ) - + + # rule=lambda m, g, t, intercept, incremental_heat_rate: ( + # sum(m.GenFuelUseRate[g, t, f] for f in m.FUELS_FOR_GEN[g]) >= + # # Do the startup + # m.StartupGenCapacity[g, t] * m.gen_startup_fuel[g] / m.tp_duration_hrs[t] + + # intercept * m.CommitGen[g, t] + + # incremental_heat_rate * m.DispatchGen[g, t])) + + # TODO: fix bug in this code that forces renewable dispatch=total committed when + # using 100% RPS (this makes it hard to get reserves and makes it impossible to + # use the AES plant when using discrete commitment, because the PSIP module limits + # output to 180 MW but the plant is rated 185 MW.) + # use standard heat rate calculations for renewable and non-renewable parts + # These set a lower bound for each type of fuel, as if we committed one slice of capacity + # for renewables and one slice for non-renewable, equal to the amount of power from each. 
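To make the two lower bounds just described concrete, here is a worked example with invented segment parameters (not values from the model): the renewable slice is treated as if it were committed capacity equal to the renewable dispatch, and the non-renewable slice gets the remainder.

    # hypothetical heat-rate segment: intercept (MMBtu/h per MW of slice) and
    # incremental heat rate (MMBtu/MWh); startup fuel ignored for simplicity
    intercept, inc_heat_rate = 0.5, 8.0
    dispatch_mw, renewable_mw = 100.0, 60.0   # total output and the renewable share

    # lower bound on RPS-eligible fuel use (MMBtu/h) for the renewable slice
    renewable_floor = (intercept + inc_heat_rate) * renewable_mw               # = 510.0
    # lower bound on non-eligible fuel use for the remaining slice
    fossil_floor = (intercept + inc_heat_rate) * (dispatch_mw - renewable_mw)  # = 340.0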
m.ProjRenewableFuelUseRate_Calculate = Constraint( m.GEN_TPS_FUEL_PIECEWISE_CONS_SET, - rule=lambda m, g, tp, intercept, incremental_heat_rate: + rule=lambda m, g, tp, intercept, incremental_heat_rate: sum( - m.GenFuelUseRate[g, tp, f] + m.GenFuelUseRate[g, tp, f] for f in m.FUELS_FOR_GEN[g] if f in m.RPS_ENERGY_SOURCES - ) + ) >= m.StartupGenCapacityRenewable[g, tp] * m.gen_startup_fuel[g] / m.tp_duration_hrs[tp] + intercept * m.DispatchGenRenewableMW[g, tp] @@ -325,15 +382,15 @@ def relaxed_split_commit_DispatchGenRenewableMW(m): ) m.ProjNonRenewableFuelUseRate_Calculate = Constraint( m.GEN_TPS_FUEL_PIECEWISE_CONS_SET, - rule=lambda m, g, tp, intercept, incremental_heat_rate: + rule=lambda m, g, tp, intercept, incremental_heat_rate: sum( - m.GenFuelUseRate[g, tp, f] + m.GenFuelUseRate[g, tp, f] for f in m.FUELS_FOR_GEN[g] if f not in m.RPS_ENERGY_SOURCES - ) + ) >= (m.StartupGenCapacity[g, tp] - m.StartupGenCapacityRenewable[g, tp]) * m.gen_startup_fuel[g] / m.tp_duration_hrs[tp] - + intercept * (m.CommitGen[g, tp] - m.DispatchGenRenewableMW[g, tp]) + + intercept * (m.DispatchGen[g, tp] - m.DispatchGenRenewableMW[g, tp]) + incremental_heat_rate * (m.DispatchGen[g, tp] - m.DispatchGenRenewableMW[g, tp]) ) @@ -343,12 +400,11 @@ def relaxed_split_commit_DispatchGenRenewableMW(m): m.FULL_RPS_GEN_FOSSIL_FUEL_DISPATCH_POINTS = Set( dimen=3, initialize=lambda m: [ - (g, tp, f) - for per in m.PERIODS if m.rps_target_for_period[per] == 1.0 - for g in m.FUEL_BASED_GENS - if (g, m.TPS_IN_PERIOD[per].first()) in m.GEN_TPS - for f in m.FUELS_FOR_GEN[g] if not m.f_rps_eligible[f] - for tp in m.TPS_IN_PERIOD[per] + (g, tp, f) + for per in m.PERIODS if m.rps_target_for_period[per] == 1.0 + for g in m.FUEL_BASED_GENS if (g, per) in m.GEN_PERIODS + for f in m.FUELS_FOR_GEN[g] if not m.f_rps_eligible[f] + for tp in m.TPS_IN_PERIOD[per] ] ) m.No_Fossil_Fuel_With_Full_RPS = Constraint( @@ -357,6 +413,38 @@ def relaxed_split_commit_DispatchGenRenewableMW(m): ) +def fuel_switch_at_high_rps_DispatchGenRenewableMW(m): + """ Switch all plants to biofuel (and count them toward the RPS) if and only if the RPS target is at or above the biofuel switch threshold. """ + + if m.options.rps_level == 'activate': + # find all dispatch points for non-renewable fuels during periods at or above the biofuel switch threshold + m.HIGH_RPS_GEN_FOSSIL_FUEL_DISPATCH_POINTS = Set( + dimen=3, + initialize=lambda m: [ + (g, tp, f) + for p in m.PERIODS if m.rps_target_for_period[p] >= m.options.biofuel_switch_threshold + for g in m.FUEL_BASED_GENS if (g, p) in m.GEN_PERIODS + for f in m.FUELS_FOR_GEN[g] if not m.f_rps_eligible[f] + for tp in m.TPS_IN_PERIOD[p] + ] + ) + m.No_Fossil_Fuel_With_High_RPS = Constraint( + m.HIGH_RPS_GEN_FOSSIL_FUEL_DISPATCH_POINTS, + rule=lambda m, g, tp, f: m.GenFuelUseRate[g, tp, f] == 0.0 + ) + # count full dispatch toward RPS during non-fossil periods, otherwise give no credit + def rule(m, g, tp): + if m.rps_target_for_period[m.tp_period[tp]] >= m.options.biofuel_switch_threshold: + return m.DispatchGen[g, tp] + else: + return 0.0 + m.DispatchGenRenewableMW = Expression(m.FUEL_BASED_GEN_TPS, rule=rule) + else: + m.DispatchGenRenewableMW = Expression( + m.FUEL_BASED_GEN_TPS, + rule=lambda m, g, tp: 0.0 + ) + def binary_by_period_DispatchGenRenewableMW(m): # NOTE: this could be extended to handle fuel blends (e.g., 50% biomass/50% coal) # by assigning an RPS eligibility level to each fuel (e.g., 50%), then @@ -367,47 +455,56 @@ def binary_by_period_DispatchGenRenewableMW(m): # This could be further simplified by creating a set of eligibility levels, #
and choosing the amount to produce from each eligibility level (similar to the # renewable/non-renewable distinction here, but with a 50% renewable category) - + m.GEN_WITH_FUEL_ACTIVE_PERIODS = Set(dimen=2, initialize=lambda m: { - (g, pe) + (g, pe) for g in m.FUEL_BASED_GENS for pe in m.PERIODS - if (g, m.TPS_IN_PERIOD[pe].first()) in m._FUEL_BASED_GEN_TPS + if (g, m.TPS_IN_PERIOD[pe].first()) in m.FUEL_BASED_GEN_TPS }) - + # choose whether to run (only) on renewable fuels during each period m.DispatchRenewableFlag = Var(m.GEN_WITH_FUEL_ACTIVE_PERIODS, within=Binary) - + # force flag on or off when the RPS is simple (to speed computation) + def rule(m, g, pe): + if m.rps_target_for_period[pe]==1.0: + # 100% RPS; use only renewable fuels + return (m.DispatchRenewableFlag[g, pe] == 1) + elif m.rps_target_for_period[pe]==0.0 or m.options.rps_level != 'activate': + # no RPS, don't bother counting renewable fuels + return (m.DispatchRenewableFlag[g, pe] == 0) + else: + return Constraint.Skip m.Force_DispatchRenewableFlag = Constraint( - m.GEN_WITH_FUEL_ACTIVE_PERIODS, + m.GEN_WITH_FUEL_ACTIVE_PERIODS, rule=lambda m, g, pe: - (m.DispatchRenewableFlag[g, pe] == 0) + (m.DispatchRenewableFlag[g, pe] == 0) if (m.rps_target_for_period[pe]==0.0 or m.options.rps_level != 'activate') else ( - (m.DispatchRenewableFlag[g, pe] == 1) + (m.DispatchRenewableFlag[g, pe] == 1) if m.rps_target_for_period[pe]==1.0 else Constraint.Skip ) ) - + # count amount of renewable power produced from project - m.DispatchGenRenewableMW = Var(m._FUEL_BASED_GEN_TPS, within=NonNegativeReals) - + m.DispatchGenRenewableMW = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) + # don't overcount renewable power production m.Limit_DispatchGenRenewableMW = Constraint( - m._FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: + m.FUEL_BASED_GEN_TPS, + rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] <= m.DispatchGen[g, tp] ) # force the flag to be set during renewable timepoints m.Set_DispatchRenewableFlag = Constraint( - m._FUEL_BASED_GEN_TPS, + m.FUEL_BASED_GEN_TPS, rule=lambda m, g, tp: - m.DispatchGenRenewableMW[g, tp] - <= + m.DispatchGenRenewableMW[g, tp] + <= m.DispatchRenewableFlag[g, m.tp_period[tp]] * m.gen_capacity_limit_mw[g] ) - + # prevent use of non-renewable fuels during renewable timepoints def Enforce_DispatchRenewableFlag_rule(m, g, tp, f): if m.f_rps_eligible[f]: @@ -419,9 +516,9 @@ def Enforce_DispatchRenewableFlag_rule(m, g, tp, f): # on total output from any power plant (that also clarifies dual analysis) big_fuel = 1.01 * m.gen_capacity_limit_mw[g] * m.gen_full_load_heat_rate[g] return ( - m.GenFuelUseRate[g, tp, f] + m.GenFuelUseRate[g, tp, f] + m.DispatchRenewableFlag[g, m.tp_period[tp]] * big_fuel - <= + <= big_fuel ) m.Enforce_DispatchRenewableFlag = Constraint( @@ -430,17 +527,17 @@ def Enforce_DispatchRenewableFlag_rule(m, g, tp, f): def binary_by_timeseries_DispatchGenRenewableMW(m): m.GEN_WITH_FUEL_ACTIVE_TIMESERIES = Set(dimen=2, initialize=lambda m: { - (g, ts) + (g, ts) for g in m.FUEL_BASED_GENS for ts in m.TIMESERIES - if (g, m.TPS_IN_TS[ts].first()) in m._FUEL_BASED_GEN_TPS + if (g, m.TPS_IN_TS[ts].first()) in m.FUEL_BASED_GEN_TPS }) - + # choose whether to run (only) on renewable fuels during each timeseries m.DispatchRenewableFlag = Var(m.GEN_WITH_FUEL_ACTIVE_TIMESERIES, within=Binary) - + # force flag on or off depending on RPS status (to speed computation) m.Force_DispatchRenewableFlag = Constraint( - m.GEN_WITH_FUEL_ACTIVE_TIMESERIES, + m.GEN_WITH_FUEL_ACTIVE_TIMESERIES, rule=lambda m, g, ts: 
(m.DispatchRenewableFlag[g, ts] == 0) if m.rps_target_for_period[m.ts_period[ts]]==0.0 else ( @@ -448,38 +545,38 @@ def binary_by_timeseries_DispatchGenRenewableMW(m): else Constraint.Skip ) ) - + # count amount of renewable power produced from project - m.DispatchGenRenewableMW = Var(m._FUEL_BASED_GEN_TPS, within=NonNegativeReals) - + m.DispatchGenRenewableMW = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) + # don't overcount renewable power production m.Limit_DispatchGenRenewableMW = Constraint( - m._FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: + m.FUEL_BASED_GEN_TPS, + rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] <= m.DispatchGen[g, tp] ) # force the flag to be set during renewable timepoints m.Set_DispatchRenewableFlag = Constraint( - m._FUEL_BASED_GEN_TPS, + m.FUEL_BASED_GEN_TPS, rule=lambda m, g, tp: - m.DispatchGenRenewableMW[g, tp] - <= + m.DispatchGenRenewableMW[g, tp] + <= m.DispatchRenewableFlag[g, m.tp_ts[tp]] * m.gen_capacity_limit_mw[g] ) - + # prevent use of non-renewable fuels during renewable timepoints m.Enforce_DispatchRenewableFlag = Constraint( - m.GEN_TP_FUELS, - rule=lambda m, g, tp, f: + m.GEN_TP_FUELS, + rule=lambda m, g, tp, f: Constraint.Skip if m.f_rps_eligible[f] else ( # original code, rewritten to get numerical parts on rhs # m.GenFuelUseRate[g, tp, f] # <= # (1-m.DispatchRenewableFlag[g, m.tp_ts[tp]]) * m.gen_capacity_limit_mw[g] * m.gen_full_load_heat_rate[g] - m.GenFuelUseRate[g, tp, f] + m.GenFuelUseRate[g, tp, f] + m.DispatchRenewableFlag[g, m.tp_ts[tp]] * m.gen_capacity_limit_mw[g] * m.gen_full_load_heat_rate[g] - <= + <= m.gen_capacity_limit_mw[g] * m.gen_full_load_heat_rate[g] ) ) @@ -488,33 +585,33 @@ def binary_by_timeseries_DispatchGenRenewableMW(m): def advanced2_DispatchGenRenewableMW(m): # choose whether to run (only) on renewable fuels during each timepoint - m.DispatchRenewableFlag = Var(m._FUEL_BASED_GEN_TPS, within=Binary) - + m.DispatchRenewableFlag = Var(m.FUEL_BASED_GEN_TPS, within=Binary) + # count amount of renewable power produced from project - m.DispatchGenRenewableMW = Var(m._FUEL_BASED_GEN_TPS, within=NonNegativeReals) - + m.DispatchGenRenewableMW = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) + # don't overcount renewable power production m.Limit_DispatchGenRenewableMW = Constraint( - m._FUEL_BASED_GEN_TPS, + m.FUEL_BASED_GEN_TPS, rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] <= m.DispatchGen[g, tp] ) # force the flag to be set during renewable timepoints m.Set_DispatchRenewableFlag = Constraint( - m._FUEL_BASED_GEN_TPS, + m.FUEL_BASED_GEN_TPS, rule=lambda m, g, tp: - m.DispatchGenRenewableMW[g, tp] - <= + m.DispatchGenRenewableMW[g, tp] + <= m.DispatchRenewableFlag[g, tp] * m.gen_capacity_limit_mw[g] ) - + # prevent use of non-renewable fuels during renewable timepoints m.Enforce_DispatchRenewableFlag = Constraint( - m.GEN_TP_FUELS, - rule=lambda m, g, tp, f: + m.GEN_TP_FUELS, + rule=lambda m, g, tp, f: Constraint.Skip if m.f_rps_eligible[f] else ( - m.GenFuelUseRate[g, tp, f] - <= + m.GenFuelUseRate[g, tp, f] + <= (1-m.DispatchRenewableFlag[g, tp]) * m.gen_capacity_limit_mw[g] * m.gen_full_load_heat_rate[g] ) ) @@ -526,18 +623,18 @@ def advanced1_DispatchGenRenewableMW(m): m.DispatchGenRenewableMW = Var(m.GEN_TP_FUELS, within=NonNegativeReals) # make sure this matches total production m.DispatchGenRenewableMW_Total = Constraint( - m._FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: + m.FUEL_BASED_GEN_TPS, + rule=lambda m, g, tp: sum(m.DispatchGenRenewableMW[g, tp, f] for f in m.FUELS_FOR_GEN[g]) == 
m.DispatchGen[g, tp] ) - + # choose a single fuel to use during each timestep m.DispatchFuelFlag = Var(m.GEN_TP_FUELS, within=Binary) m.DispatchFuelFlag_Total = Constraint( - m._FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: + m.FUEL_BASED_GEN_TPS, + rule=lambda m, g, tp: sum(m.DispatchFuelFlag[g, tp, f] for f in m.FUELS_FOR_GEN[g]) == 1 @@ -545,26 +642,26 @@ def advanced1_DispatchGenRenewableMW(m): # consume only the selected fuel and allocate all production to that fuel (big-M constraints) m.Allocate_Dispatch_Output = Constraint( - m.GEN_TP_FUELS, - rule=lambda m, g, tp, f: - m.DispatchGenRenewableMW[g, tp, f] - <= + m.GEN_TP_FUELS, + rule=lambda m, g, tp, f: + m.DispatchGenRenewableMW[g, tp, f] + <= m.DispatchFuelFlag[g, tp, f] * m.gen_capacity_limit_mw[g] ) m.Allocate_Dispatch_Fuel = Constraint( - m.GEN_TP_FUELS, - rule=lambda m, g, tp, f: - m.GenFuelUseRate[g, tp, f] - <= + m.GEN_TP_FUELS, + rule=lambda m, g, tp, f: + m.GenFuelUseRate[g, tp, f] + <= m.DispatchFuelFlag[g, tp, f] * m.gen_capacity_limit_mw[g] * m.gen_full_load_heat_rate[g] ) - - # note: in cases where a project has a single fuel, the presolver should force + + # note: in cases where a project has a single fuel, the presolver should force # DispatchGenRenewableMW for that fuel to match DispatchGen, and possibly # eliminate the allocation constraints - + # possible simplifications: - # omit binary variables and big-m constraints if len(m.FUELS_FOR_GEN[p]) == 1 + # omit binary variables and big-m constraints if len(m.FUELS_FOR_GEN[p]) == 1 # (assign all production to the single fuel) # use m.GenFuelUseRate[g, t, f] / m.gen_full_load_heat_rate[g] # for projects with no heat rate curve and no startup fuel @@ -572,12 +669,12 @@ def advanced1_DispatchGenRenewableMW(m): # note: a continuous, quadratic version of this function can be created as follows: # - make DispatchFuelFlag a PercentFraction instead of Binary # - replace gen_capacity_limit_mw with GenCapacity in Allocate_Dispatch_Output - # - replace m.gen_capacity_limit_mw * m.gen_full_load_heat_rate with + # - replace m.gen_capacity_limit_mw * m.gen_full_load_heat_rate with # sum(m.GenFuelUseRate[g, t, f] for f in m.FUELS_FOR_GEN[g]) # in Allocate_Dispatch_Fuel (define this as an Expression in dispatch.py) # - replace <= with == in the allocation constraints # - drop the DispatchGenRenewableMW_Total constraint - + # or this would also work: # m.DispatchGenRenewableMW = Var(m.GEN_TP_FUELS) # m.DispatchGenRenewableMW_Allocate = Constraint( @@ -592,31 +689,31 @@ def advanced1_DispatchGenRenewableMW(m): def quadratic_DispatchGenRenewableMW(m): # choose how much power to obtain from renewables during each timepoint - m.DispatchRenewableFraction = Var(m._FUEL_BASED_GEN_TPS, within=PercentFraction) - + m.DispatchRenewableFraction = Var(m.FUEL_BASED_GEN_TPS, within=PercentFraction) + # count amount of renewable power produced from project - m.DispatchGenRenewableMW = Var(m._FUEL_BASED_GEN_TPS, within=NonNegativeReals) - + m.DispatchGenRenewableMW = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) + # don't overcount renewable power production m.Set_DispatchRenewableFraction = Constraint( - m._FUEL_BASED_GEN_TPS, + m.FUEL_BASED_GEN_TPS, rule=lambda m, g, tp: - m.DispatchGenRenewableMW[g, tp] - <= + m.DispatchGenRenewableMW[g, tp] + <= m.DispatchRenewableFraction[g, tp] * m.DispatchGen[g, tp] ) m.Enforce_DispatchRenewableFraction = Constraint( - m._FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: + m.FUEL_BASED_GEN_TPS, + rule=lambda m, g, tp: sum( - m.GenFuelUseRate[g, tp, f] - for f in 
m.FUELS_FOR_GEN[g] + m.GenFuelUseRate[g, tp, f] + for f in m.FUELS_FOR_GEN[g] if m.f_rps_eligible[f] ) >= m.DispatchRenewableFraction[g, tp] * sum( - m.GenFuelUseRate[g, tp, f] + m.GenFuelUseRate[g, tp, f] for f in m.FUELS_FOR_GEN[g] ) ) @@ -627,13 +724,13 @@ def quadratic1_DispatchGenRenewableMW(m): # make sure this matches total production m.DispatchGenRenewableMW_Total = Constraint( - m._FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: + m.FUEL_BASED_GEN_TPS, + rule=lambda m, g, tp: sum(m.DispatchGenRenewableMW[g, tp, f] for f in m.FUELS_FOR_GEN[g]) == m.DispatchGen[g, tp] ) - + m.DispatchGenRenewableMW_Allocate = Constraint( m.GEN_TP_FUELS, rule = lambda m, g, t, f: @@ -650,10 +747,16 @@ def load_inputs(m, switch_data, inputs_dir): filename=os.path.join(inputs_dir, 'fuels.tab'), select=('fuel', 'rps_eligible'), param=(m.f_rps_eligible,)) - switch_data.load_aug( - optional=True, - filename=os.path.join(inputs_dir, 'rps_targets.tab'), - autoselect=True, - index=m.RPS_YEARS, - param=(m.rps_target,)) - + if m.options.rps_targets is None: + switch_data.load_aug( + optional=True, + filename=os.path.join(inputs_dir, 'rps_targets.tab'), + autoselect=True, + index=m.RPS_YEARS, + param=(m.rps_target,)) + else: + # construct data from a target specified as 'year1 level1 year2 level2 ...' + iterator = iter(m.options.rps_targets) + rps_targets = {int(year): float(target) for year, target in zip(iterator, iterator)} + switch_data.data()['RPS_YEARS'] = {None: sorted(rps_targets.keys())} + switch_data.data()['rps_target'] = rps_targets diff --git a/switch_model/hawaii/save_results.py b/switch_model/hawaii/save_results.py index e3d7a5be5..60645be82 100644 --- a/switch_model/hawaii/save_results.py +++ b/switch_model/hawaii/save_results.py @@ -22,6 +22,7 @@ import os import switch_model.hawaii.util as util import switch_model.financials as financials +from collections import defaultdict from pyomo.environ import * def define_components(m): @@ -46,6 +47,16 @@ def summary_values(m): demand_components = [c for c in ('zone_demand_mw', 'ShiftDemand', 'ChargeEVs') if hasattr(m, c)] values = [] + # Cache SystemCostPerPeriod and SystemCost to speed up saving large models + # The time needed to directly access the expressions seems to rise quadratically + # with the number of timepoints, so it gets very slow for big models and we don't + # want to repeat it if possible (e.g., without caching, this function takes up + # to an hour for an 8760 Oahu model) + SystemCostPerPeriod = dict() + for p in m.PERIODS: + SystemCostPerPeriod[p] = value(m.SystemCostPerPeriod[p]) + SystemCost = sum(SystemCostPerPeriod[p] for p in m.PERIODS) + # scenario name and looping variables values.extend([ str(m.options.scenario_name), @@ -53,23 +64,23 @@ def summary_values(m): ]) # total cost (all periods) - values.append(m.SystemCost) + values.append(SystemCost) # m.SystemCost) # NPV of total cost / NPV of kWh generated (equivalent to spreading # all costs uniformly over all generation) values.append( - m.SystemCost + SystemCost # m.SystemCost / sum( m.bring_timepoint_costs_to_base_year[t] * 1000.0 * sum(getattr(m, c)[z, t] for c in demand_components for z in m.LOAD_ZONES) - for t in m.TIMEPOINTS + for t in m.TIMEPOINTS ) ) # total cost / kWh generated in each period # (both discounted to today, so the discounting cancels out) values.extend([ - m.SystemCostPerPeriod[p] + SystemCostPerPeriod[p] # m.SystemCostPerPeriod[p] / sum( m.bring_timepoint_costs_to_base_year[t] * 1000.0 * sum(getattr(m, c)[z, t] for c in demand_components for z in 
m.LOAD_ZONES) @@ -114,8 +125,13 @@ def DispatchGenByFuel(m, g, tp, fuel): project.no_commit, not project.unitcommit.fuel_use. In the unit commitment version it can only be defined as a quadratically constrained variable, which we don't want to force on all users.""" - dispatch = value(m.DispatchGen[g, tp]) if (g, tp) in m.DispatchGen else 0.0 - total_fuel = value(sum(m.GenFuelUseRate[g, tp, f] for f in m.FUELS_FOR_GEN[g])) + if (g, tp) in m.DispatchGen: + dispatch = value(m.DispatchGen[g, tp]) + total_fuel = value(sum(m.GenFuelUseRate[g, tp, f] for f in m.FUELS_FOR_GEN[g])) + else: + dispatch = 0.0 + total_fuel = 0.0 + if dispatch == 0.0: result = 0.0 elif total_fuel == 0.0: @@ -138,6 +154,25 @@ def write_results(m, outputs_dir): values=lambda m: summary_values(m) ) + if hasattr(m, 'Spinning_Reserve_Up_Requirements'): + # pre-calculate amount of reserves provided and needed for each balancing area and timepoint + spinning_reserve_provisions = defaultdict(float) + spinning_reserve_requirements = defaultdict(float) + if hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'): # advanced module + for component in m.Spinning_Reserve_Up_Provisions: + for (rt, ba, tp), val in getattr(m, component).items(): + spinning_reserve_provisions[ba, tp] += val + for component in m.Spinning_Reserve_Up_Requirements: + for (rt, ba, tp), val in getattr(m, component).items(): + spinning_reserve_requirements[ba, tp] += val + else: # basic module + for component in m.Spinning_Reserve_Up_Provisions: + for (ba, tp), val in getattr(m, component).items(): + spinning_reserve_provisions[ba, tp] += val + for component in m.Spinning_Reserve_Up_Requirements: + for (ba, tp), val in getattr(m, component).items(): + spinning_reserve_requirements[ba, tp] += val + # # write out results # util.write_table(m, m.TIMEPOINTS, # output_file=os.path.join(outputs_dir, "dispatch{t}.tsv".format(t=tag)), @@ -158,34 +193,115 @@ def write_results(m, outputs_dir): +tuple("curtail_"+s for s in m.NON_FUEL_ENERGY_SOURCES) +tuple(m.Zone_Power_Injections) +tuple(m.Zone_Power_Withdrawals) - +("marginal_cost","peak_day"), + +("spinning_reserve_provision", "spinning_reserve_requirement") + +("marginal_cost", "peak_day"), values=lambda m, z, t: (z, m.tp_period[t], m.tp_timestamp[t]) +tuple( sum( DispatchGenByFuel(m, p, t, f) - for p in m.GENERATION_PROJECTS_BY_FUEL[f] if (p, t) in m.GEN_TPS + for p in m.GENERATION_PROJECTS_BY_FUEL[f] + if (p, t) in m.GEN_TPS and m.gen_load_zone[p] == z ) for f in m.FUELS ) +tuple( - sum(util.get(m.DispatchGen, (p, t), 0.0) for p in m.GENERATION_PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[s]) + sum( + util.get(m.DispatchGen, (p, t), 0.0) + for p in m.GENERATION_PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[s] + if m.gen_load_zone[p] == z + ) for s in m.NON_FUEL_ENERGY_SOURCES ) +tuple( sum( util.get(m.DispatchUpperLimit, (p, t), 0.0) - util.get(m.DispatchGen, (p, t), 0.0) for p in m.GENERATION_PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[s] + if m.gen_load_zone[p] == z ) for s in m.NON_FUEL_ENERGY_SOURCES ) +tuple(getattr(m, component)[z, t] for component in m.Zone_Power_Injections) +tuple(getattr(m, component)[z, t] for component in m.Zone_Power_Withdrawals) + +( # save spinning reserve requirements and provisions; note: this assumes one zone per balancing area + (spinning_reserve_provisions[m.zone_balancing_area[z], t], spinning_reserve_requirements[m.zone_balancing_area[z], t]) + if hasattr(m, 'Spinning_Reserve_Up_Requirements') + else (0.0, 0.0) + ) +(util.get(m.dual, m.Zone_Energy_Balance[z, t], 0.0)/m.bring_timepoint_costs_to_base_year[t], # note: this 
uses 0.0 if no dual available, i.e., with glpk solver 'peak' if m.ts_scale_to_year[m.tp_ts[t]] < avg_ts_scale else 'typical') ) - + + if hasattr(m, 'Spinning_Reserve_Up_Requirements') and hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'): # advanced module + # write the reserve values + util.write_table( + m, m.BALANCING_AREAS, m.TIMEPOINTS, + output_file=os.path.join(outputs_dir, "up_reserve_sources{t}.tsv".format(t=tag)), + headings= + ("balancing_area", "period", "timepoint_label") + +tuple(m.FUELS) + +tuple(m.NON_FUEL_ENERGY_SOURCES) + +tuple(m.Spinning_Reserve_Up_Provisions) + +tuple(m.Spinning_Reserve_Up_Requirements) + +tuple("marginal_cost_"+rt for rt in m.SPINNING_RESERVE_TYPES_FROM_GENS) + +("peak_day",), + values=lambda m, ba, t: + (ba, m.tp_period[t], m.tp_timestamp[t]) + +tuple( + ( + sum( + # total reserve production + sum( + m.CommitGenSpinningReservesUp[rt, p, t] + for rt in m.SPINNING_RESERVE_TYPES_FOR_GEN[p] + ) + # prorated by energy source used + * DispatchGenByFuel(m, p, t, f) / m.DispatchGen[p, t] + for p in m.GENERATION_PROJECTS_BY_FUEL[f] + if (p, t) in m.GEN_TPS and m.zone_balancing_area[m.gen_load_zone[p]] == ba + ) + ) + for f in m.FUELS + ) + +tuple( + sum( + m.CommitGenSpinningReservesUp[rt, p, t] + for p in m.GENERATION_PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[s] + if (p, t) in m.SPINNING_RESERVE_CAPABLE_GEN_TPS and m.zone_balancing_area[m.gen_load_zone[p]] == ba + for rt in m.SPINNING_RESERVE_TYPES_FOR_GEN[p] + ) + for s in m.NON_FUEL_ENERGY_SOURCES + ) + +tuple( + sum(util.get(getattr(m, component), (rt, ba, t), 0.0) for rt in m.SPINNING_RESERVE_TYPES_FROM_GENS) + for component in m.Spinning_Reserve_Up_Provisions + ) + +tuple( + sum(util.get(getattr(m, component), (rt, ba, t), 0.0) for rt in m.SPINNING_RESERVE_TYPES_FROM_GENS) + for component in m.Spinning_Reserve_Up_Requirements + ) + +tuple( + util.get( + m.dual, + util.get(m.Satisfy_Spinning_Reserve_Up_Requirement, (rt, ba, t), None), + 0.0 # note: this uses 0.0 if no dual available, i.e., with glpk solver + ) / m.bring_timepoint_costs_to_base_year[t] + for rt in m.SPINNING_RESERVE_TYPES_FROM_GENS + ) + +(('peak' if m.ts_scale_to_year[m.tp_ts[t]] < avg_ts_scale else 'typical'),) + ) + + sorted_projects = tuple(sorted(g for g in m.GENERATION_PROJECTS)) + util.write_table( + m, m.TIMEPOINTS, + output_file=os.path.join(outputs_dir, "gen_dispatch{t}.tsv".format(t=tag)), + headings=("period", "timepoint_label")+sorted_projects, + values=lambda m, t: + (m.tp_period[t], m.tp_timestamp[t]) + + tuple(util.get(m.DispatchGen, (p, t), 0.0) for p in sorted_projects) + ) + # installed capacity information def gen_energy_source(g): return ( @@ -196,15 +312,25 @@ def gen_energy_source(g): built_gens = tuple(sorted(set( g for pe in m.PERIODS for g in m.GENERATION_PROJECTS if value(m.GenCapacity[g, pe]) > 0.001 ))) - operate_gen_in_period = tuple(set( - (g, m.tp_period[tp]) - for g, tp in m.GEN_TPS if value(m.DispatchGen[g, tp]) > 0.001 - )) - built_tech = tuple(set(m.gen_tech[g] for g in built_gens)) - built_energy_source = tuple(set(gen_energy_source(g) for g in built_gens)) + active_periods_for_gen = defaultdict(set) + used_cap = getattr(m, 'CommitGen', m.DispatchGen) # use CommitGen if available, otherwise DispatchGen + for (g, tp) in m.GEN_TPS: + if value(used_cap[g, tp]) > 0.001: + active_periods_for_gen[g].add(m.tp_period[tp]) + # add the periods between the first and last active period if capacity was available then + operate_gen_in_period = set() + for g, active_periods in active_periods_for_gen.items(): + start = 
min(active_periods) + end = max(active_periods) + for p in m.PERIODS: + if start <= p <= end and value(m.GenCapacity[g, p]) > 0: + operate_gen_in_period.add((g, p)) + + built_tech = tuple(sorted(set(m.gen_tech[g] for g in built_gens))) + built_energy_source = tuple(sorted(set(gen_energy_source(g) for g in built_gens))) battery_capacity_mw = lambda m, z, pe: ( - (m.Battery_Capacity[z, pe] * m.battery_max_discharge / m.battery_min_discharge_time) + (m.Battery_Capacity[z, pe] / m.battery_min_discharge_time) if hasattr(m, "Battery_Capacity") else 0.0 ) @@ -241,129 +367,142 @@ def gen_energy_source(g): ) ) - def cost_breakdown_details(m, z, pe): - values = [z, pe] - # capacity built, conventional plants - - values += [ - sum( - m.BuildGen[g, pe] - for g in built_gens - if m.gen_tech[g] == t and m.gen_load_zone[g] == z and (g, pe) in m.BuildGen - ) - for t in built_tech - ] - # capacity built, batteries, MW and MWh - if hasattr(m, "BuildBattery"): - values.extend([ - m.BuildBattery[z, pe]/m.battery_min_discharge_time, - m.BuildBattery[z, pe] - ]) - else: - values.extend([0.0, 0.0]) - # capacity built, hydro - values.append( - sum( - m.BuildPumpedHydroMW[g, pe] - for g in m.PH_GENS if m.ph_load_zone[g]==z - ) if hasattr(m, "BuildPumpedHydroMW") else 0.0, - ) - # capacity built, hydrogen - if hasattr(m, "BuildElectrolyzerMW"): - values.extend([ - m.BuildElectrolyzerMW[z, pe], - m.BuildLiquifierKgPerHour[z, pe], - m.BuildLiquidHydrogenTankKg[z, pe], - m.BuildFuelCellMW[z, pe] - ]) - else: - values.extend([0.0, 0.0, 0.0, 0.0]) - - # number of EVs and conventional vehicles - if hasattr(m, 'ev_share'): - values.append(m.n_all_vehicles[z, pe] * m.ev_share[z, pe]) - values.append(m.n_all_vehicles[z, pe] * (1.0 - m.ev_share[z, pe])) - # import pdb; pdb.set_trace() - - # capital investments - # regular projects - values += [ + util.write_table(m, m.LOAD_ZONES, m.PERIODS, + output_file=os.path.join(outputs_dir, "production_by_technology{t}.tsv".format(t=tag)), + headings=("load_zone", "period") + built_tech, + values=lambda m, z, pe: (z, pe,) + tuple( sum( - m.BuildGen[g, pe] * (m.gen_overnight_cost[g, pe] + m.gen_connect_cost_per_mw[g]) - for g in built_gens - if m.gen_tech[g] == t and m.gen_load_zone[g] == z \ - and (g, pe) in m.GEN_BLD_YRS + m.DispatchGen[g, tp] * m.tp_weight_in_year[tp] * 0.001 # MWh -> GWh + for g in built_gens if m.gen_tech[g] == t and m.gen_load_zone[g] == z + for tp in m.TPS_FOR_GEN_IN_PERIOD[g, pe] ) for t in built_tech - ] - # batteries - if hasattr(m, 'battery_capital_cost_per_mwh_capacity'): - # models with single capital cost (defunct) - values.append(m.BuildBattery[z, pe] * m.battery_capital_cost_per_mwh_capacity) - elif hasattr(m, 'battery_capital_cost_per_mwh_capacity_by_year'): - values.append(m.BuildBattery[z, pe] * m.battery_capital_cost_per_mwh_capacity_by_year[pe]) - else: - values.append(0.0) - # hydro - values.append( - sum( - m.BuildPumpedHydroMW[g, pe] * m.ph_capital_cost_per_mw[g] - for g in m.PH_GENS if m.ph_load_zone[g]==z - ) if hasattr(m, "BuildPumpedHydroMW") else 0.0, - ) - # hydrogen - if hasattr(m, "BuildElectrolyzerMW"): - values.extend([ - m.BuildElectrolyzerMW[z, pe] * m.hydrogen_electrolyzer_capital_cost_per_mw, - m.BuildLiquifierKgPerHour[z, pe] * m.hydrogen_liquifier_capital_cost_per_kg_per_hour, - m.BuildLiquidHydrogenTankKg[z, pe] * m.liquid_hydrogen_tank_capital_cost_per_kg, - m.BuildFuelCellMW[z, pe] * m.hydrogen_fuel_cell_capital_cost_per_mw - ]) - else: - values.extend([0.0, 0.0, 0.0, 0.0]) - - # _annual_ fuel expenditures - if hasattr(m, 
"REGIONAL_FUEL_MARKETS"): - values.extend([ - sum(m.ConsumeFuelTier[rfm_st] * m.rfm_supply_tier_cost[rfm_st] for rfm_st in m.SUPPLY_TIERS_FOR_RFM_PERIOD[rfm, pe]) - for rfm in m.REGIONAL_FUEL_MARKETS - ]) - # costs to expand fuel markets (this could later be disaggregated by market and tier) - if hasattr(m, "RFM_Fixed_Costs_Annual"): - values.append(m.RFM_Fixed_Costs_Annual[pe]) - # TODO: add similar code for fuel_costs module instead of fuel_markets module - - # total cost per period - values.append(annualize_present_value_period_cost(m, pe, m.SystemCostPerPeriod[pe])) - - # total cost per year for transport - if hasattr(m, "ev_extra_annual_cost"): - values.append(m.ev_extra_annual_cost[pe]) - values.append(m.ice_annual_fuel_cost[pe]) - - return values - - util.write_table(m, m.LOAD_ZONES, m.PERIODS, - output_file=os.path.join(outputs_dir, "cost_breakdown{t}.tsv".format(t=tag)), - headings=("load_zone", "period") + tuple(t+"_mw_added" for t in built_tech) - + ("batteries_mw_added", "batteries_mwh_added", "hydro_mw_added") - + ( "h2_electrolyzer_mw_added", "h2_liquifier_kg_per_hour_added", - "liquid_h2_tank_kg_added", "fuel_cell_mw_added") - + (('ev_count', 'ice_count') if hasattr(m, 'ev_share') else ()) - + tuple(t+"_overnight_cost" for t in built_tech) - + ("batteries_overnight_cost", "hydro_overnight_cost") - + ( "h2_electrolyzer_overnight_cost", "h2_liquifier_overnight_cost", - "liquid_h2_tank_overnight_cost", "fuel_cell_overnight_cost") - + (tuple(rfm+"_annual_cost" for rfm in m.REGIONAL_FUEL_MARKETS) - if hasattr(m, "REGIONAL_FUEL_MARKETS") else ()) - + (("fuel_market_expansion_annual_cost",) - if hasattr(m, "RFM_Fixed_Costs_Annual") else ()) - + ('total_electricity_cost',) - + (('ev_extra_capital_recovery',) - if hasattr(m, 'ev_extra_annual_cost') else ()) - + (('ice_annual_fuel_cost',) if hasattr(m, 'ice_annual_fuel_cost') else ()), - values=cost_breakdown_details + ) # TODO: add hydro and hydrogen ) + + # def cost_breakdown_details(m, z, pe): + # values = [z, pe] + # # capacity built, conventional plants + # + # values += [ + # sum( + # m.BuildGen[g, pe] + # for g in built_gens + # if m.gen_tech[g] == t and m.gen_load_zone[g] == z and (g, pe) in m.BuildGen + # ) + # for t in built_tech + # ] + # # capacity built, batteries, MW and MWh + # if hasattr(m, "BuildBattery"): + # values.extend([ + # m.BuildBattery[z, pe]/m.battery_min_discharge_time, + # m.BuildBattery[z, pe] + # ]) + # else: + # values.extend([0.0, 0.0]) + # # capacity built, hydro + # values.append( + # sum( + # m.BuildPumpedHydroMW[g, pe] + # for g in m.PH_GENS if m.ph_load_zone[g]==z + # ) if hasattr(m, "BuildPumpedHydroMW") else 0.0, + # ) + # # capacity built, hydrogen + # if hasattr(m, "BuildElectrolyzerMW"): + # values.extend([ + # m.BuildElectrolyzerMW[z, pe], + # m.BuildLiquifierKgPerHour[z, pe], + # m.BuildLiquidHydrogenTankKg[z, pe], + # m.BuildFuelCellMW[z, pe] + # ]) + # else: + # values.extend([0.0, 0.0, 0.0, 0.0]) + # + # # number of EVs and conventional vehicles + # if hasattr(m, 'ev_share'): + # values.append(m.n_all_vehicles[z, pe] * m.ev_share[z, pe]) + # values.append(m.n_all_vehicles[z, pe] * (1.0 - m.ev_share[z, pe])) + # # import pdb; pdb.set_trace() + # + # # capital investments + # # regular projects + # values += [ + # sum( + # m.BuildGen[g, pe] * (m.gen_overnight_cost[g, pe] + m.gen_connect_cost_per_mw[g]) + # for g in built_gens + # if m.gen_tech[g] == t and m.gen_load_zone[g] == z \ + # and (g, pe) in m.GEN_BLD_YRS + # ) + # for t in built_tech + # ] + # # batteries + # if hasattr(m, 
'battery_capital_cost_per_mwh_capacity'): + # # models with single capital cost (defunct) + # values.append(m.BuildBattery[z, pe] * m.battery_capital_cost_per_mwh_capacity) + # elif hasattr(m, 'battery_capital_cost_per_mwh_capacity_by_year'): + # values.append(m.BuildBattery[z, pe] * m.battery_capital_cost_per_mwh_capacity_by_year[pe]) + # else: + # values.append(0.0) + # # hydro + # values.append( + # sum( + # m.BuildPumpedHydroMW[g, pe] * m.ph_capital_cost_per_mw[g] + # for g in m.PH_GENS if m.ph_load_zone[g]==z + # ) if hasattr(m, "BuildPumpedHydroMW") else 0.0, + # ) + # # hydrogen + # if hasattr(m, "BuildElectrolyzerMW"): + # values.extend([ + # m.BuildElectrolyzerMW[z, pe] * m.hydrogen_electrolyzer_capital_cost_per_mw, + # m.BuildLiquifierKgPerHour[z, pe] * m.hydrogen_liquifier_capital_cost_per_kg_per_hour, + # m.BuildLiquidHydrogenTankKg[z, pe] * m.liquid_hydrogen_tank_capital_cost_per_kg, + # m.BuildFuelCellMW[z, pe] * m.hydrogen_fuel_cell_capital_cost_per_mw + # ]) + # else: + # values.extend([0.0, 0.0, 0.0, 0.0]) + # + # # _annual_ fuel expenditures + # if hasattr(m, "REGIONAL_FUEL_MARKETS"): + # values.extend([ + # sum(m.ConsumeFuelTier[rfm_st] * m.rfm_supply_tier_cost[rfm_st] for rfm_st in m.SUPPLY_TIERS_FOR_RFM_PERIOD[rfm, pe]) + # for rfm in m.REGIONAL_FUEL_MARKETS + # ]) + # # costs to expand fuel markets (this could later be disaggregated by market and tier) + # if hasattr(m, "RFM_Fixed_Costs_Annual"): + # values.append(m.RFM_Fixed_Costs_Annual[pe]) + # # TODO: add similar code for fuel_costs module instead of fuel_markets module + # + # # total cost per period + # values.append(annualize_present_value_period_cost(m, pe, m.SystemCostPerPeriod[pe])) + # + # # total cost per year for transport + # if hasattr(m, "ev_extra_annual_cost"): + # values.append(m.ev_extra_annual_cost[pe]) + # values.append(m.ice_annual_fuel_cost[pe]) + # + # return values + # + # util.write_table(m, m.LOAD_ZONES, m.PERIODS, + # output_file=os.path.join(outputs_dir, "cost_breakdown{t}.tsv".format(t=tag)), + # headings=("load_zone", "period") + tuple(t+"_mw_added" for t in built_tech) + # + ("batteries_mw_added", "batteries_mwh_added", "hydro_mw_added") + # + ( "h2_electrolyzer_mw_added", "h2_liquifier_kg_per_hour_added", + # "liquid_h2_tank_kg_added", "fuel_cell_mw_added") + # + (('ev_count', 'ice_count') if hasattr(m, 'ev_share') else ()) + # + tuple(t+"_overnight_cost" for t in built_tech) + # + ("batteries_overnight_cost", "hydro_overnight_cost") + # + ( "h2_electrolyzer_overnight_cost", "h2_liquifier_overnight_cost", + # "liquid_h2_tank_overnight_cost", "fuel_cell_overnight_cost") + # + (tuple(rfm+"_annual_cost" for rfm in m.REGIONAL_FUEL_MARKETS) + # if hasattr(m, "REGIONAL_FUEL_MARKETS") else ()) + # + (("fuel_market_expansion_annual_cost",) + # if hasattr(m, "RFM_Fixed_Costs_Annual") else ()) + # + ('total_electricity_cost',) + # + (('ev_extra_capital_recovery',) + # if hasattr(m, 'ev_extra_annual_cost') else ()) + # + (('ice_annual_fuel_cost',) if hasattr(m, 'ice_annual_fuel_cost') else ()), + # values=cost_breakdown_details + # ) # util.write_table(m, m.PERIODS, # output_file=os.path.join(outputs_dir, "capacity{t}.tsv".format(t=t)), diff --git a/switch_model/hawaii/scenario_data.py b/switch_model/hawaii/scenario_data.py index cd9362aa0..e2b80fbc2 100644 --- a/switch_model/hawaii/scenario_data.py +++ b/switch_model/hawaii/scenario_data.py @@ -1,12 +1,38 @@ +# TODO: make this get data from the redr server via an HTTP api instead of psycopg2, as follows: + +# create a .rpy script on the redr 
server that can accept form data (the args dict) via POST +# and then return a .zip file containing all the files created by write_tables (most of the +# code in this module would go into that script). This can create the files as text blobs and +# then collect them into a single .zip file using the zip module +# Info on zipping multiple files together in memory: https://stackoverflow.com/a/25194850/3830997 +# See here for info on .rpy files: +# https://twistedmatrix.com/documents/15.0.0/web/howto/using-twistedweb.html#web-howto-using-twistedweb-rpys +# See here for info on receiving POST requests: +# https://www.saltycrane.com/blog/2010/08/twisted-web-post-example-json/ + +# client side will then just send a POST request with the args dictionary (probably using the +# requests module), receive back a zip file with all the relevant CSVs (probably with a whole +# relative directory structure). Client may also need to convert line endings (or unzip may do +# it automatically). +# See here for info on sending a Python dict as the body in a +# POST request: https://stackoverflow.com/a/14804320/3830997 +# https://stackoverflow.com/questions/15694120/why-does-http-post-request-body-need-to-be-json-enconded-in-python +# https://stackoverflow.com/questions/35212279/python-request-post-not-accepting-dictionary +# (the latter two are interesting edge cases but may be doing it wrong) +# Unzipping files in Python: https://stackoverflow.com/questions/3451111/unzipping-files-in-python +# some random info on converting line endings with Python zip/unzip: +# https://bytes.com/topic/python/answers/692751-module-zipfile-writestr-line-endings-issue +# https://stackoverflow.com/questions/2613800/how-to-convert-dos-windows-newline-crlf-to-unix-newline-n-in-a-bash-script + import time, sys, collections, os from textwrap import dedent from switch_model import __version__ as switch_version -# NOTE: instead of using the python csv writer, this directly writes tables to +# NOTE: instead of using the python csv writer, this directly writes tables to # file in the pyomo .tab format. This uses tabs between columns and the standard # line break for the system it is run on. This does the following translations (only): # - If a value contains double quotes, they get doubled. -# - If a value contains a single quote, tab or space character, the value gets enclosed in double quotes. +# - If a value contains a single quote, tab or space character, the value gets enclosed in double quotes. # (Note that pyomo doesn't allow quoting (and therefore spaces) in column headers.) # - null values are converted to . (the pyomo/ampl standard for missing data) # - any other values are simply passed to str(). @@ -18,38 +44,38 @@ # NOTE: ANSI SQL specifies single quotes for literal strings, and postgres conforms # to this, so all the queries below should use single quotes around strings. -# NOTE: write_table() will automatically convert null values to '.', +# NOTE: write_table() will automatically convert null values to '.', # so pyomo will recognize them as missing data # NOTE: the code below could be made more generic, e.g., a list of # table names and queries, which are then processed at the end. 
-# But that would be harder to debug, and wouldn't allow for ad hoc +# But that would be harder to debug, and wouldn't allow for ad hoc # calculations or writing .dat files (which are used for a few parameters) def write_tables(**args): - # TODO: any arguments that are defined with default values below (args.get()) could + # TODO: any arguments that are defined with default values below (args.get()) could # have those default values assigned here. Then they can be used directly in queries # instead of using them to create snippets that are used in the queries. This would # also document the available arguments a little better. - + # catch obsolete arguments (otherwise they would be silently ignored) if 'ev_scen_id' in args: raise ValueError("ev_scen_id argument is no longer supported; use ev_scenario instead.") - + # write version marker file with open(make_file_path('switch_inputs_version.txt', args), 'w') as f: f.write(switch_version) - + ######################### # timescales - + # reusable clause to calculate the length of each period # If this is within 1% of an integer number of years, it rounds to the integer, # to allow for weights that add up to 365 or 365.25 days per year with_period_length = """ WITH period_length as ( - SELECT + SELECT period, -- note: for some reason modulo doesn't work on real values in postgresql CASE WHEN mod((sum(ts_scale_to_period)/365.25)::numeric, 1) BETWEEN -0.01 and 0.01 @@ -64,24 +90,27 @@ def write_tables(**args): GROUP BY 1 ) """ - + # note: in contrast to earlier versions, this makes period_end - # point to the exact moment when the period finishes + # point to the exact moment when the period finishes # (switch_model.timescales can handle that now), - # and it lets period_end be a floating point number + # and it lets period_end be a floating point number # (postgresql will export it with a .0 in this case) - write_table('periods.tab', + # note: despite the comments above, this rounded period_end to + # the nearest whole number until 2018-02-17. This was removed to + # support fractional years for monthly batches in production-cost models. 
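As a plain-Python sketch of the rounding rule in the with_period_length clause above (the function name here is invented for illustration, not part of the patch):

    def period_length_years(ts_scale_to_period):
        # ts_scale_to_period: weights, in days, of all timeseries in one period
        years = sum(ts_scale_to_period) / 365.25
        # snap to a whole number of years when within 1%, so weights that add
        # up to 365 or 365.25 days per year yield integer-length periods
        return round(years) if abs(years - round(years)) <= 0.01 else years

For example, period_length_years([365.25 * 5]) gives 5.0, while a one-month batch of about 30.4 days keeps its fractional length of roughly 1/12 year, matching the fractional period_end now written below.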
+ write_table('periods.tab', with_period_length + """ SELECT p.period AS "INVESTMENT_PERIOD", p.period as period_start, - round(p.period + period_length) as period_end + p.period + period_length as period_end FROM study_periods p JOIN period_length l USING (period) WHERE time_sample = %(time_sample)s ORDER by 1; """, args) write_table('timeseries.tab', """ - SELECT study_date as "TIMESERIES", period as ts_period, + SELECT study_date as "TIMESERIES", period as ts_period, ts_duration_of_tp, ts_num_tps, ts_scale_to_period FROM study_date WHERE time_sample = %(time_sample)s @@ -89,58 +118,74 @@ def write_tables(**args): """, args) write_table('timepoints.tab', """ - SELECT h.study_hour as timepoint_id, + SELECT h.study_hour as timepoint_id, to_char(date_time + (period - extract(year from date_time)) * interval '1 year', 'YYYY-MM-DD-HH24:MI') as timestamp, - h.study_date as timeseries + h.study_date as timeseries FROM study_hour h JOIN study_date d USING (study_date, time_sample) WHERE h.time_sample = %(time_sample)s ORDER BY period, extract(doy from date), study_hour; """, args) + # double-check that arguments are valid + cur = db_cursor() + cur.execute( + 'select * from generator_costs_by_year where tech_scen_id = %(tech_scen_id)s', + args + ) + if len([r for r in cur]) == 0: + print "================================================================" + print "WARNING: no records found in generator_costs_by_year for tech_scen_id='{}'".format(args['tech_scen_id']) + print "================================================================" + time.sleep(2) + del cur + ######################### - # create temporary tables that can be referenced by other queries + # create temporary tables that can be referenced by other queries # to identify available projects and technologies db_cursor().execute(""" DROP TABLE IF EXISTS study_length; CREATE TEMPORARY TABLE study_length AS {} - SELECT min(period) as study_start, max(period+period_length) AS study_end + SELECT min(period)::real as study_start, max(period+period_length)::real AS study_end FROM period_length; DROP TABLE IF EXISTS study_projects; CREATE TEMPORARY TABLE study_projects AS - SELECT DISTINCT + SELECT DISTINCT CONCAT_WS('_', load_zone, p.technology, nullif(site, 'na'), nullif(orientation, 'na')) AS "GENERATION_PROJECT", - p.* + p.*, + g.tech_scen_id FROM project p JOIN generator_info g USING (technology) CROSS JOIN study_length -- existing projects still in use during the study LEFT JOIN proj_existing_builds e ON ( e.project_id = p.project_id - AND e.build_year + g.max_age_years > study_start + AND e.build_year + g.max_age_years > study_start AND e.build_year < study_end ) -- projects that could be built during the study LEFT JOIN generator_costs_by_year c ON ( - c.cap_cost_scen_id = %(cap_cost_scen_id)s - AND c.technology = g.technology - AND (g.min_vintage_year IS NULL OR c.year >= g.min_vintage_year) - AND c.year >= study_start + c.tech_scen_id = g.tech_scen_id + AND c.technology = g.technology + AND (g.min_vintage_year IS NULL OR c.year >= g.min_vintage_year) + AND c.year >= study_start AND c.year < study_end ) WHERE (e.project_id IS NOT NULL OR c.technology IS NOT NULL) AND p.load_zone in %(load_zones)s + AND g.tech_scen_id IN ('all', %(tech_scen_id)s) AND g.technology NOT IN %(exclude_technologies)s; DROP TABLE IF EXISTS study_generator_info; CREATE TEMPORARY TABLE study_generator_info AS - SELECT DISTINCT g.* - FROM generator_info g JOIN study_projects p USING (technology); - """.format(with_period_length), args) + SELECT DISTINCT g.* 
+ FROM generator_info g JOIN study_projects p USING (tech_scen_id, technology); + """.format(with_period_length), args) + # import pdb; pdb.set_trace() ######################### # financials @@ -160,7 +205,7 @@ def write_tables(**args): # existing_local_td, local_td_annual_cost_per_mw write_table('load_zones.tab', """ SELECT load_zone as "LOAD_ZONE" - FROM load_zone + FROM load_zone WHERE load_zone in %(load_zones)s """, args) @@ -169,15 +214,15 @@ def write_tables(**args): # get system loads, scaled from the historical years to the model years # note: 'offset' is a keyword in postgresql, so we use double-quotes to specify the column name write_table('loads.tab', """ - SELECT - l.load_zone AS "LOAD_ZONE", + SELECT + l.load_zone AS "LOAD_ZONE", study_hour AS "TIMEPOINT", - system_load * scale + "offset" AS zone_demand_mw - FROM study_date d + GREATEST(0, system_load * scale + "offset") AS zone_demand_mw + FROM study_date d JOIN study_hour h USING (time_sample, study_date) JOIN system_load l USING (date_time) JOIN system_load_scale s ON ( - s.load_zone = l.load_zone + s.load_zone = l.load_zone AND s.year_hist = extract(year from l.date_time) AND s.year_fore = d.period) WHERE l.load_zone in %(load_zones)s @@ -188,7 +233,7 @@ def write_tables(**args): ######################### # fuels - + write_table('non_fuel_energy_sources.tab', """ SELECT DISTINCT fuel AS "NON_FUEL_ENERGY_SOURCES" FROM study_generator_info @@ -197,17 +242,17 @@ def write_tables(**args): # gather info on fuels write_table('fuels.tab', """ - SELECT DISTINCT c.fuel_type AS fuel, co2_intensity, 0.0 AS upstream_co2_intensity, rps_eligible + SELECT DISTINCT replace(c.fuel_type, ' ', '_') AS fuel, co2_intensity, 0.0 AS upstream_co2_intensity, rps_eligible FROM fuel_costs c JOIN energy_source_properties p on (p.energy_source = c.fuel_type) WHERE load_zone in %(load_zones)s AND fuel_scen_id=%(fuel_scen_id)s; """, args) ######################### # rps targets - + write_tab_file( - 'rps_targets.tab', - headers=('year', 'rps_target'), + 'rps_targets.tab', + headers=('year', 'rps_target'), data=[(y, args['rps_targets'][y]) for y in sorted(args['rps_targets'].keys())], arguments=args ) @@ -215,9 +260,9 @@ def write_tables(**args): ######################### # fuel_markets - # deflate HECO fuel scenarios to base year, and inflate EIA-based scenarios + # deflate HECO fuel scenarios to base year, and inflate EIA-based scenarios # from 2013 (forecast base year) to model base year. (ugh) - # TODO: add a flag to fuel_costs indicating whether forecasts are real or nominal, + # TODO: add a flag to fuel_costs indicating whether forecasts are real or nominal, # and base year, and possibly inflation rate. 
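For reference, the {inflator} factor assembled below is just compound growth between the forecast's dollar year and the model's base financial year; a hypothetical stand-alone version (names invented for illustration):

    def to_base_year_dollars(price_mmbtu, inflation_rate, base_financial_year, price_base_year):
        # e.g., $10/MMBtu quoted in 2013 dollars, carried to a 2016 base year
        # at 2.5% inflation: 10 * 1.025**3 is about $10.77/MMBtu
        return price_mmbtu * (1.0 + inflation_rate) ** (base_financial_year - price_base_year)

Deflating to an earlier base year is the same formula with a negative exponent.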
if args['fuel_scen_id'] in ('1', '2', '3'): # no base_year specified; these are in nominal dollars @@ -241,9 +286,9 @@ def write_tables(**args): else: lng_selector = "false" - write_table('fuel_cost.tab', + write_table('fuel_cost.tab', with_period_length + """ - SELECT load_zone, fuel_type as fuel, p.period, + SELECT load_zone, replace(fuel_type, ' ', '_') as fuel, p.period, avg(price_mmbtu * {inflator} + COALESCE(fixed_cost, 0.00)) as fuel_cost FROM fuel_costs c, study_periods p JOIN period_length l USING (period) WHERE load_zone in %(load_zones)s @@ -258,19 +303,21 @@ def write_tables(**args): # advanced fuel markets with LNG expansion options (used by forward-looking models) # (use fuel_markets module) write_table('regional_fuel_markets.tab', """ - SELECT DISTINCT concat('Hawaii_', fuel_type) AS regional_fuel_market, fuel_type AS fuel + SELECT DISTINCT + concat('Hawaii_', replace(fuel_type, ' ', '_')) AS regional_fuel_market, + replace(fuel_type, ' ', '_') AS fuel FROM fuel_costs WHERE load_zone in %(load_zones)s AND fuel_scen_id = %(fuel_scen_id)s; """, args) - write_table('fuel_supply_curves.tab', + write_table('fuel_supply_curves.tab', with_period_length + """ - SELECT concat('Hawaii_', fuel_type) as regional_fuel_market, - fuel_type as fuel, - tier, - p.period, - avg(price_mmbtu * {inflator}) as unit_cost, - avg(max_avail_at_cost) as max_avail_at_cost, + SELECT concat('Hawaii_', replace(fuel_type, ' ', '_')) as regional_fuel_market, + replace(fuel_type, ' ', '_') as fuel, + tier, + p.period, + avg(price_mmbtu * {inflator}) as unit_cost, + avg(max_avail_at_cost) as max_avail_at_cost, avg(fixed_cost) as fixed_cost, avg(max_age) as max_age FROM fuel_costs c, study_periods p JOIN period_length l USING (period) @@ -283,8 +330,8 @@ def write_tables(**args): """.format(inflator=inflator), args) write_table('zone_to_regional_fuel_market.tab', """ - SELECT DISTINCT load_zone, concat('Hawaii_', fuel_type) AS regional_fuel_market - FROM fuel_costs + SELECT DISTINCT load_zone, concat('Hawaii_', replace(fuel_type, ' ', '_')) AS regional_fuel_market + FROM fuel_costs WHERE load_zone in %(load_zones)s AND fuel_scen_id = %(fuel_scen_id)s; """, args) @@ -301,12 +348,7 @@ def write_tables(**args): # we report the fuel as 'multiple' and then provide data in a multi-fuel table. # Some of these are actually single-fuel, but this approach is simpler than sorting # them out within each query, and it doesn't add any complexity to the model. - - if args.get('connect_cost_per_mw_km', 0): - print( - "WARNING: ignoring connect_cost_per_mw_km specified in arguments; using" - "project.connect_cost_per_mw and generator_info.connect_cost_per_kw_generic instead." 
- ) + if args.get('wind_capital_cost_escalator', 0.0) or args.get('pv_capital_cost_escalator', 0.0): # user supplied a non-zero escalator raise ValueError( @@ -346,7 +388,7 @@ def write_tables(**args): "GENERATION_PROJECT", load_zone AS gen_load_zone, technology AS gen_tech, - connect_cost_per_mw AS gen_connect_cost_per_mw, + spur_line_cost_per_mw + 1000 * substation_cost_per_kw AS gen_connect_cost_per_mw, max_capacity AS gen_capacity_limit_mw, unit_size as gen_unit_size, max_age_years as gen_max_age, @@ -356,10 +398,16 @@ def write_tables(**args): baseload as gen_is_baseload, -- 0 as gen_is_flexible_baseload, cogen as gen_is_cogen, - non_cycling as gen_non_cycling, + -- non_cycling as gen_non_cycling, variable_o_m * 1000.0 AS gen_variable_om, - CASE WHEN fuel IN ('SUN', 'WND', 'MSW') THEN fuel ELSE 'multiple' END AS gen_energy_source, - CASE WHEN fuel IN ('SUN', 'WND', 'MSW') THEN null ELSE {flhr} END AS gen_full_load_heat_rate + CASE WHEN fuel IN ('SUN', 'WND', 'MSW', 'Battery', 'Hydro') THEN fuel ELSE 'multiple' END AS gen_energy_source, + CASE WHEN fuel IN ('SUN', 'WND', 'MSW', 'Battery', 'Hydro') THEN null ELSE {flhr} END AS gen_full_load_heat_rate, + min_uptime as gen_min_uptime, + min_downtime as gen_min_downtime, + startup_energy / unit_size as gen_startup_fuel, + gen_storage_efficiency, + gen_storage_energy_to_power_ratio, + gen_storage_max_cycles_per_year FROM study_projects JOIN study_generator_info USING (technology) ORDER BY 2, 3, 1; """.format(fo=forced_outage_rate, flhr=full_load_heat_rate), args) @@ -376,6 +424,13 @@ def write_tables(**args): # NOTE: these costs must be expressed in $/MW, $/MWh or $/MW-year, # not $/kW, $/kWh or $/kW-year. + # NOTE: for now, we only specify storage costs per unit of power, not + # on per unit of energy, so we insert $0 as the energy cost here. + # NOTE: projects should have NULL for overnight cost and fixed O&M in + # proj_existing_builds if they have an entry for the same year in + # generator_costs_by_year. If they have costs in both, they will both + # get passed through to the data table, and Switch will raise an error + # (as it should, because costs are ambiguous in this case). 
write_table('gen_build_costs.tab', """ WITH gen_build_costs AS ( SELECT c.technology, c.year AS build_year, c.capital_cost_per_kw * 1000.0 * power(1.0+%(inflation_rate)s, %(base_financial_year)s-c.base_year) AS gen_overnight_cost, - i.fixed_o_m * 1000.0 * power(1.0+%(inflation_rate)s, %(base_financial_year)s-i.base_year) - AS gen_fixed_o_m + c.capital_cost_per_kwh AS gen_storage_energy_overnight_cost, + c.fixed_o_m * 1000.0 * power(1.0+%(inflation_rate)s, %(base_financial_year)s-i.base_year) + AS gen_fixed_o_m, + i.min_vintage_year -- used for build_year filter below FROM study_generator_info i - JOIN generator_costs_by_year c USING (technology) - JOIN study_periods p ON p.period = c.year - WHERE time_sample = %(time_sample)s - AND (i.min_vintage_year IS NULL OR c.year >= i.min_vintage_year) - AND c.cap_cost_scen_id = %(cap_cost_scen_id)s + JOIN generator_costs_by_year c USING (technology, tech_scen_id) ORDER BY 1, 2 ) - SELECT + SELECT -- costs specified in proj_existing_builds "GENERATION_PROJECT", - build_year, - sum(proj_overnight_cost * 1000.0 * proj_existing_cap) / sum(proj_existing_cap) + b.build_year, + SUM( + power(1.0+%(inflation_rate)s, %(base_financial_year)s-b.base_year) + * b.proj_overnight_cost * 1000.0 * proj_existing_cap + ) / SUM(proj_existing_cap) AS gen_overnight_cost, - sum(proj_fixed_om * 1000.0 * proj_existing_cap) / sum(proj_existing_cap) + null AS gen_storage_energy_overnight_cost, + SUM( + power(1.0+%(inflation_rate)s, %(base_financial_year)s-b.base_year) + * b.proj_fixed_om * 1000.0 * proj_existing_cap + ) / SUM(proj_existing_cap) AS gen_fixed_om - FROM study_projects JOIN proj_existing_builds USING (project_id) + FROM study_projects p + JOIN proj_existing_builds b USING (project_id) + WHERE (b.proj_overnight_cost IS NOT NULL OR b.proj_fixed_om IS NOT NULL) GROUP BY 1, 2 UNION - SELECT "GENERATION_PROJECT", build_year, gen_overnight_cost, gen_fixed_o_m - FROM gen_build_costs JOIN study_projects USING (technology) + SELECT -- costs specified in generator_costs_by_year + "GENERATION_PROJECT", c.build_year, gen_overnight_cost, + gen_storage_energy_overnight_cost, gen_fixed_o_m + FROM study_projects proj + JOIN gen_build_costs c USING (technology) + LEFT JOIN study_periods per ON (per.time_sample = %(time_sample)s AND c.build_year = per.period) + LEFT JOIN proj_existing_builds e ON (e.project_id = proj.project_id AND e.build_year = c.build_year) + WHERE + -- note: this allows users to have build_year < min_vintage_year for predetermined projects + -- that have entries in the cost table, e.g., if they want to prespecify some, but postpone + -- additional construction until some later year (unlikely) + (per.period IS NOT NULL AND (c.min_vintage_year IS NULL OR c.build_year >= c.min_vintage_year)) + OR e.project_id IS NOT NULL ORDER BY 1, 2; """, args) + ######################### + # spinning_reserves_advanced (if wanted; otherwise defaults to just "spinning") + if 'max_reserve_capability' in args or args.get('write_generation_projects_reserve_capability', False): + + # args['max_reserve_capability'] is a list of tuples of (technology, reserve_type) + # (assumed equivalent to 'regulation' if not specified) + # We unzip it to use with the unnest function (psycopg2 passes lists of tuples + # as arrays of tuples, and unnest would keep those as tuples) + try: + reserve_technologies, reserve_types = map(list, zip(*args['max_reserve_capability'])) + except KeyError: + reserve_technologies, reserve_types = [], [] + res_args = args.copy() +
res_args['reserve_technologies']=reserve_technologies + res_args['reserve_types']=reserve_types + + # note: casting is needed if the lists are empty; see https://stackoverflow.com/a/41893576/3830997 + write_table('generation_projects_reserve_capability.tab', """ + WITH reserve_capability (technology, reserve_type) as ( + SELECT + UNNEST(%(reserve_technologies)s::varchar(40)[]) AS technology, + UNNEST(%(reserve_types)s::varchar(20)[]) AS reserve_type + ), + reserve_types (rank, reserve_type) as ( + VALUES + (0, 'none'), + (1, 'contingency'), + (2, 'regulation') + ) + SELECT + p."GENERATION_PROJECT", + t2.reserve_type AS "SPINNING_RESERVE_TYPE" + FROM + study_projects p + LEFT JOIN reserve_capability c USING (technology) + LEFT JOIN reserve_types t1 USING (reserve_type) + JOIN reserve_types t2 on t2.rank <= COALESCE(t1.rank, 100) + WHERE t2.rank > 0 + ORDER BY 1, t2.rank; + """, res_args) + + ######################### # operation.unitcommit.fuel_use # get part load heat rate curves if requested # note: we sort lexicographically by power output and fuel consumption, in case # there are segments where power or fuel consumption steps up while the other stays constant - # That is nonconvex and not currently supported by SWITCH, but could potentially be used + # That is nonconvex and not currently supported by SWITCH, but could potentially be used # in the future by assigning binary variables for activating each segment. # note: for sqlite, you could use "CONCAT(technology, ' ', output_mw, ' ', fuel_consumption_mmbtu_per_h) AS key" # TODO: rename fuel_consumption_mmbtu_per_h to fuel_use_mmbtu_per_h here and in import_data.py @@ -423,10 +538,10 @@ def write_tables(**args): if args.get('use_incremental_heat_rates', False): write_table('gen_inc_heat_rates.tab', """ WITH part_load AS ( - SELECT + SELECT row_number() OVER (ORDER BY technology, output_mw, fuel_consumption_mmbtu_per_h) AS key, technology, - output_mw, + output_mw, fuel_consumption_mmbtu_per_h FROM part_load_fuel_consumption JOIN study_generator_info USING (technology) ), prior AS ( @@ -436,32 +551,32 @@ def write_tables(**args): ), curves AS ( SELECT -- first step in each curve key, technology, - output_mw AS power_start_mw, - NULL::real AS power_end_mw, + output_mw AS power_start_mw, + NULL::real AS power_end_mw, NULL::real AS incremental_heat_rate_mbtu_per_mwhr, fuel_consumption_mmbtu_per_h AS fuel_use_rate_mmbtu_per_h FROM part_load LEFT JOIN prior USING (key) WHERE prior_key IS NULL UNION SELECT -- additional steps high.key AS key, high.technology, - low.output_mw AS power_start_mw, + low.output_mw AS power_start_mw, high.output_mw AS power_end_mw, - (high.fuel_consumption_mmbtu_per_h - low.fuel_consumption_mmbtu_per_h) + (high.fuel_consumption_mmbtu_per_h - low.fuel_consumption_mmbtu_per_h) / (high.output_mw - low.output_mw) AS incremental_heat_rate_mbtu_per_mwhr, NULL::real AS fuel_use_rate_mmbtu_per_h FROM part_load high JOIN prior USING (key) JOIN part_load low ON (low.key = prior.prior_key) ORDER BY 1 ) SELECT - "GENERATION_PROJECT" as project, + "GENERATION_PROJECT", power_start_mw, power_end_mw, incremental_heat_rate_mbtu_per_mwhr, fuel_use_rate_mmbtu_per_h FROM curves c JOIN study_projects p using (technology) ORDER BY c.technology, c.key, p."GENERATION_PROJECT"; """, args) - + # This gets a list of all the fueled projects (listed as "multiple" energy sources above), - # and lists them as accepting any equivalent or lighter fuel. 
(However, cogen plants and plants + # and lists them as accepting any equivalent or lighter fuel. (However, cogen plants and plants # using fuels with rank 0 are not changed.) Fuels are also filtered against the list of fuels with # costs reported for the current scenario, so this can end up re-mapping one fuel in the database # (e.g., LSFO) to a similar fuel in the scenario (e.g., LSFO-Diesel-Blend), even if the original fuel @@ -476,7 +591,7 @@ def write_tables(**args): cogen FROM study_generator_info ), all_fueled_techs AS ( - SELECT * from all_techs WHERE orig_fuel NOT IN ('SUN', 'WND', 'MSW') + SELECT * from all_techs WHERE orig_fuel NOT IN ('SUN', 'WND', 'MSW', 'Battery', 'Hydro') ), gen_multiple_fuels AS ( SELECT DISTINCT technology, b.energy_source as fuel FROM all_fueled_techs t @@ -499,11 +614,11 @@ def write_tables(**args): print "SKIPPING variable_capacity_factors.tab" else: write_table('variable_capacity_factors.tab', """ - SELECT + SELECT "GENERATION_PROJECT", study_hour as timepoint, cap_factor as gen_max_capacity_factor - FROM study_generator_info g + FROM study_generator_info g JOIN study_projects p USING (technology) JOIN cap_factor c USING (project_id) JOIN study_hour h using (date_time) @@ -532,7 +647,7 @@ def write_tables(**args): SELECT * FROM ( SELECT "GENERATION_PROJECT", study_hour AS "TIMEPOINT", - CASE WHEN %(enable_must_run)s = 1 AND must_run = 1 THEN 1.0 ELSE null END + CASE WHEN %(enable_must_run)s = 1 AND must_run = 1 THEN 1.0 ELSE null END AS gen_min_commit_fraction, null AS gen_max_commit_fraction, null AS gen_min_load_fraction_TP @@ -556,34 +671,36 @@ def write_tables(**args): # trans_build # --- Not used --- - # + # # write_table('trans_lines.tab', """ - # SELECT load_area_start AS load_zone_start, load_area_end AS load_zone_end, + # SELECT load_area_start AS load_zone_start, load_area_end AS load_zone_end, # tid, length_km AS transmission_length_km, efficiency AS transmission_efficiency, - # existing_mw_from AS existing_transmission_from, - # existing_mw_to AS existing_transmission_to - # FROM trans_line + # existing_mw_from AS existing_transmission_from, + # existing_mw_to AS existing_transmission_to + # FROM trans_line # WHERE load_area_start IN %(load_zones)s OR load_area_end IN %(load_zones)s # """, args) - # - # - # + # + # + # ######################### # trans_dispatch # --- Not used --- - ######################### # batteries - # TODO: put these data in a database and write a .tab file instead + # (now included as standard storage projects, but kept here + # to support older projects that haven't upgraded yet) bat_years = 'BATTERY_CAPITAL_COST_YEARS' bat_cost = 'battery_capital_cost_per_mwh_capacity_by_year' - write_dat_file( - 'batteries.dat', - sorted([k for k in args if k.startswith('battery_') and k not in [bat_years, bat_cost]]), - args - ) + non_cost_bat_vars = sorted([k for k in args if k.startswith('battery_') and k not in [bat_years, bat_cost]]) + if non_cost_bat_vars: + write_dat_file( + 'batteries.dat', + non_cost_bat_vars, + args + ) if bat_years in args and bat_cost in args: # annual costs were provided -- write those to a tab file write_tab_file( @@ -594,12 +711,12 @@ def write_tables(**args): ) ######################### - # EV annual energy consumption + # EV annual energy consumption (original, basic version) # print "ev_scenario:", args.get('ev_scenario', None) if args.get('ev_scenario', None) is not None: write_table('ev_fleet_info.tab', """ - SELECT load_zone as "LOAD_ZONE", period as "PERIOD", - ev_share, ice_miles_per_gallon, 
ev_miles_per_kwh, ev_extra_cost_per_vehicle_year, + SELECT load_zone as "LOAD_ZONE", period as "PERIOD", + ev_share, ice_miles_per_gallon, ev_miles_per_kwh, ev_extra_cost_per_vehicle_year, n_all_vehicles, vmt_per_vehicle FROM ev_adoption a JOIN study_periods p on a.year = p.period WHERE load_zone in %(load_zones)s @@ -610,26 +727,108 @@ # power consumption for each hour of the day under business-as-usual charging # note: the charge weights have a mean value of 1.0, but go up and down in different hours write_table('ev_bau_load.tab', """ - SELECT - load_zone AS "LOAD_ZONE", + SELECT + load_zone AS "LOAD_ZONE", study_hour AS "TIMEPOINT", charge_weight * ev_share * n_all_vehicles * vmt_per_vehicle / (1000.0 * ev_miles_per_kwh) / 8760 as ev_bau_mw FROM ev_adoption e JOIN study_date d ON d.period = e.year - JOIN study_hour h USING (study_date, time_sample) - JOIN ev_hourly_charge_profile p + JOIN study_hour h USING (study_date, time_sample) + JOIN ev_hourly_charge_profile p ON p.hour_of_day = h.hour_of_day WHERE load_zone in %(load_zones)s AND time_sample = %(time_sample)s AND ev_scenario = %(ev_scenario)s ORDER BY 1, 2; """, args) - + + ######################### + # EV annual energy consumption (advanced, frozen Dantzig-Wolfe version) + if args.get('ev_scenario', None) is not None: + write_table('ev_share.tab', """ + SELECT + load_zone as "LOAD_ZONE", period as "PERIOD", + ev_share + FROM ev_adoption a JOIN study_periods p on a.year = p.period + WHERE load_zone in %(load_zones)s + AND time_sample = %(time_sample)s + AND ev_scenario = %(ev_scenario)s + ORDER BY 1, 2; + """, args) + write_table('ev_fleet_info_advanced.tab', """ + WITH detailed_fleet AS ( + SELECT + a.load_zone AS "LOAD_ZONE", + replace(f."vehicle type", ' ', '_') AS "VEHICLE_TYPE", + p.period AS "PERIOD", + f."number of vehicles" AS "n_vehicles", -- for whole fleet, not current adoption level + CASE + WHEN period <= 2020 THEN "gals fuel per year 2020" + WHEN period >= 2045 THEN "gals fuel per year 2045" + ELSE + (period-2020)/25.0 * "gals fuel per year 2045" + + (2045-period)/25.0 * "gals fuel per year 2020" + END AS "ice_gals_per_year", + CONCAT_WS('_', 'Motor', "ICE fuel") AS "ice_fuel", + "kWh per year" AS "ev_kwh_per_year", + CASE + WHEN period <= 2020 THEN "EV extra capital cost per year 2020" + WHEN period >= 2045 THEN "EV extra capital cost per year 2045" + ELSE + (period-2020)/25.0 * "EV extra capital cost per year 2045" + + (2045-period)/25.0 * "EV extra capital cost per year 2020" + END AS "ev_extra_cost_per_vehicle_year" + FROM ev_adoption a + JOIN study_periods p ON a.year = p.period + JOIN ev_fleet f ON f.load_zone = a.load_zone + WHERE a.load_zone in %(load_zones)s + AND time_sample = %(time_sample)s + AND ev_scenario = %(ev_scenario)s + ) + SELECT "LOAD_ZONE", + CONCAT_WS('_', 'All', replace(ice_fuel, 'Motor_', ''), 'Vehicles') AS "VEHICLE_TYPE", + "PERIOD", + SUM(n_vehicles) AS n_vehicles, + SUM(ice_gals_per_year*n_vehicles)/SUM(n_vehicles) AS ice_gals_per_year, + ice_fuel, + SUM(ev_kwh_per_year*n_vehicles)/SUM(n_vehicles) AS ev_kwh_per_year, + SUM(ev_extra_cost_per_vehicle_year*n_vehicles)/SUM(n_vehicles) + AS ev_extra_cost_per_vehicle_year + FROM detailed_fleet + GROUP BY 1, 2, 3, 6 + ORDER BY 1, 2, 3; + """, args) + # power consumption bids for each hour of the day + # (consolidate to one vehicle class to accelerate data retrieval and + # reduce model memory requirements) (note that there are 6 classes of + # vehicle and 25 bids for 24-hour models, which makes 150 entries + # per
load zone and timestep, which is larger than the renewable + # capacity factor data) + if args.get("skip_ev_bids", False): + print "SKIPPING ev_charging_bids.tab" + else: + write_table('ev_charging_bids.tab', """ + SELECT + b.load_zone AS "LOAD_ZONE", + CONCAT_WS('_', 'All', "ICE fuel", 'Vehicles') AS "VEHICLE_TYPE", + bid_number AS "BID_NUM", + study_hour AS "TIMEPOINT", + sum(charge_mw) AS ev_bid_by_type + FROM study_date d + JOIN study_hour h USING (study_date, time_sample) + JOIN ev_charging_bids b + ON b.hour = h.hour_of_day AND b.hours_per_step = d.ts_duration_of_tp + JOIN ev_fleet f ON b.vehicle_type=f."vehicle type" AND b.load_zone=f.load_zone + WHERE b.load_zone in %(load_zones)s + AND d.time_sample = %(time_sample)s + GROUP BY 1, 2, 3, 4 + ORDER BY 1, 2, 3, 4; + """, args) ######################### # pumped hydro # TODO: put these data in a database with hydro_scen_id's and pull them from there - + if "pumped_hydro_headers" in args: write_tab_file( 'pumped_hydro.tab', @@ -653,12 +852,23 @@ def write_tables(**args): args ) + + ######################### + # PHA data + pha_params = sorted([k for k in args if k.startswith('pha_')]) + if pha_params: + write_dat_file( + 'pha.dat', + pha_params, + args + ) + # the two functions below could be used as the start of a system -# to write placeholder files for any files in the current scenario +# to write placeholder files for any files in the current scenario # that match the base files. This could be used to avoid creating large -# files (variable_cap_factor.tab) for alternative scenarios that are -# otherwise very similar. i.e., placeholder .tab or .dat files could -# be written with just the line 'include ../variable_cap_factor.tab' or +# files (variable_cap_factor.tab) for alternative scenarios that are +# otherwise very similar. i.e., placeholder .tab or .dat files could +# be written with just the line 'include ../variable_cap_factor.tab' or # 'include ../financial.dat'. def any_alt_args_in_list(args, l): @@ -667,13 +877,13 @@ def any_alt_args_in_list(args, l): if a in l: return True return False - + def any_alt_args_in_query(args, query): """Report whether any arguments in the args list appear in the list l.""" for a in args.get('alt_args', {}): if '%(' + a + ')s' in query: return True - return False + return False def make_file_path(file, args): """Create any directories and subdirectories needed to store data in the specified file, @@ -707,14 +917,14 @@ def db_cursor(): pghost='redr.eng.hawaii.edu' # note: the connection gets created when the module loads and never gets closed (until presumably python exits) con = psycopg2.connect(database='switch', host=pghost) #, user='switch_user') - + except psycopg2.OperationalError: print dedent(""" ############################################################################################ Error while connecting to switch database on postgres server {server}. Please ensure that the PGUSER environment variable is set with your postgres username - and there is a line like "*:*:*::" in ~/.pgpass (which should be chmod 0600) - or in %APPDATA%\postgresql\pgpass.conf (Windows). + and there is a line like "*:*:*::" in ~/.pgpass (which should be chmod 0600) + or in %APPDATA%\postgresql\pgpass.conf (Windows). See http://www.postgresql.org/docs/9.1/static/libpq-pgpass.html for more details. 
############################################################################################ """.format(server=pghost)) @@ -722,9 +932,9 @@ def db_cursor(): return con.cursor() def write_dat_file(output_file, args_to_write, arguments): - """ write a simple .dat file with the arguments specified in args_to_write, + """ write a simple .dat file with the arguments specified in args_to_write, drawn from the arguments dictionary""" - + if any(arg in arguments for arg in args_to_write): output_file = make_file_path(output_file, arguments) print "Writing {file} ...".format(file=output_file), @@ -733,10 +943,10 @@ def write_dat_file(output_file, args_to_write, arguments): with open(output_file, 'w') as f: f.writelines([ - 'param ' + name + ' := ' + str(arguments[name]) + ';\n' + 'param ' + name + ' := ' + str(arguments[name]) + ';\n' for name in args_to_write if name in arguments ]) - + print "time taken: {dur:.2f}s".format(dur=time.time()-start) def write_table(output_file, query, arguments): @@ -775,7 +985,7 @@ def write_tab_file(output_file, headers, data, arguments={}): def write_indexed_set_dat_file(output_file, set_name, query, arguments): """Write a .dat file defining an indexed set, based on the query provided. - + Note: the query should produce a table with index values in all columns except the last, and then set members for each index in the last column. (There should be multiple rows with the same values in the index columns.)""" @@ -788,7 +998,7 @@ def write_indexed_set_dat_file(output_file, set_name, query, arguments): cur = db_cursor() cur.execute(dedent(query), arguments) - + # build a dictionary grouping all values (last column) according to their index keys (earlier columns) data_dict = collections.defaultdict(list) for r in cur: @@ -800,7 +1010,7 @@ def write_indexed_set_dat_file(output_file, set_name, query, arguments): with open(output_file, 'w') as f: f.writelines([ 'set {sn}[{idx}] := {items} ;\n'.format( - sn=set_name, + sn=set_name, idx=', '.join(k), items=' '.join(v)) for k, v in data_dict.iteritems() diff --git a/switch_model/hawaii/smooth_dispatch.py b/switch_model/hawaii/smooth_dispatch.py index b45438d44..224ecb294 100644 --- a/switch_model/hawaii/smooth_dispatch.py +++ b/switch_model/hawaii/smooth_dispatch.py @@ -5,7 +5,7 @@ import switch_model.solve def define_components(m): - if m.options.solver in ('cplex', 'cplexamp', 'gurobi'): + if m.options.solver in ('cplex', 'cplexamp', 'gurobi', 'gurobi_ampl'): m.options.smooth_dispatch = True else: # glpk and cbc can't handle quadratic problem used for smoothing @@ -13,8 +13,8 @@ def define_components(m): if m.options.verbose: print "Not smoothing dispatch because {} cannot solve a quadratic model.".format(m.options.solver) print "Remove hawaii.smooth_dispatch from modules.txt and iterate.txt to avoid this message." 
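The smoothing below relies on a standard quadratic trick: for a fixed total, a sum of squares is smallest when the values are all equal, so penalizing comp[z, t]**2 steers the solver toward flat schedules among otherwise-equal-cost solutions. In plain Python (illustrative values only):

    >>> sum(x * x for x in [4, 0, 0, 0])  # peaky schedule, total of 4
    16
    >>> sum(x * x for x in [1, 1, 1, 1])  # flat schedule, same total
    4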
- - # add an alternative objective function that smoothes out various non-cost variables + + # add an alternative objective function that smoothes out time-shiftable energy sources and sinks if m.options.smooth_dispatch: def Smooth_Free_Variables_obj_rule(m): # minimize production (i.e., maximize curtailment / minimize losses) @@ -25,7 +25,7 @@ def Smooth_Free_Variables_obj_rule(m): for component in m.Zone_Power_Injections) # minimize the variability of various slack responses adjustable_components = [ - 'ShiftDemand', 'ChargeBattery', 'DischargeBattery', 'ChargeEVs', + 'ShiftDemand', 'ChargeBattery', 'DischargeBattery', 'ChargeEVs', 'RunElectrolyzerMW', 'LiquifyHydrogenMW', 'DispatchFuelCellMW' ] for var in adjustable_components: @@ -34,6 +34,22 @@ def Smooth_Free_Variables_obj_rule(m): print "Will smooth {}.".format(var) comp = getattr(m, var) obj += sum(comp[z, t]*comp[z, t] for z in m.LOAD_ZONES for t in m.TIMEPOINTS) + # include standard storage generators too + if hasattr(m, 'STORAGE_GEN_TPS'): + print "Will smooth charging and discharging of standard storage." + obj += sum(m.ChargeStorage[g, tp]*m.ChargeStorage[g, tp] for g, tp in m.STORAGE_GEN_TPS) + obj += sum(m.DispatchGen[g, tp]*m.DispatchGen[g, tp] for g, tp in m.STORAGE_GEN_TPS) + # also maximize up reserves, which will (a) minimize arbitrary burning off of renewables + # (e.g., via storage) and (b) give better representation of the amount of reserves actually available + if hasattr(m, 'Spinning_Reserve_Up_Provisions') and hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'): # advanced module + print "Will maximize provision of up reserves." + reserve_weight = {'contingency': 0.9, 'regulation': 1.1} + for comp_name in m.Spinning_Reserve_Up_Provisions: + component = getattr(m, comp_name) + obj += -0.1 * sum( + reserve_weight.get(rt, 1.0) * component[rt, ba, tp] + for rt, ba, tp in component + ) return obj m.Smooth_Free_Variables = Objective(rule=Smooth_Free_Variables_obj_rule, sense=minimize) # leave standard objective in effect for now diff --git a/switch_model/hawaii/switch_patch.py b/switch_model/hawaii/switch_patch.py index 87a084590..783e2bb94 100644 --- a/switch_model/hawaii/switch_patch.py +++ b/switch_model/hawaii/switch_patch.py @@ -1,6 +1,78 @@ from pyomo.environ import * import switch_model.utilities as utilities -from util import get + +# patch Pyomo's solver to retrieve duals and reduced costs for MIPs from cplex lp solver +# (This could be made permanent in pyomo.solvers.plugins.solvers.CPLEX.create_command_line) +def new_create_command_line(*args, **kwargs): + # call original command + command = old_create_command_line(*args, **kwargs) + # alter script + if hasattr(command, 'script') and 'optimize\n' in command.script: + command.script = command.script.replace( + 'optimize\n', + 'optimize\nchange problem fix\noptimize\n' + # see http://www-01.ibm.com/support/docview.wss?uid=swg21399941 + # and http://www-01.ibm.com/support/docview.wss?uid=swg21400009 + ) + print "changed CPLEX solve script to the following:" + print command.script + return command +from pyomo.solvers.plugins.solvers.CPLEX import CPLEXSHELL +old_create_command_line = CPLEXSHELL.create_command_line +CPLEXSHELL.create_command_line = new_create_command_line + +# # TODO: combine the following changes into a pull request for Pyomo +# # patch Pyomo's table-reading function to allow .tab files with headers but no data +# import os, re +# def new_tab_read(self): +# if not os.path.exists(self.filename): +# raise IOError("Cannot find file '%s'" % self.filename) +# 
self.FILE = open(self.filename, 'r') +# try: +# tmp=[] +# for line in self.FILE: +# line=line.strip() +# tokens = re.split("[\t ]+",line) +# if tokens != ['']: +# tmp.append(tokens) +# if len(tmp) == 0: +# raise IOError("Empty *.tab file") +# else: # removed strange special handling for one-row files +# self._set_data(tmp[0], tmp[1:]) +# except: +# raise +# finally: +# self.FILE.close() +# self.FILE = None +# from pyomo.core.plugins.data.text import TextTable +# TextTable.read = new_tab_read +# +# try: +# import inspect +# import pyomo.core.data.process_data +# pp_code = inspect.getsource(pyomo.core.data.process_data._process_param) +# start = pp_code.find('if singledef:', 0, 2000) +# if start < 0: +# raise RuntimeError('unable to find singledef statement') +# # patch to allow command to have no more arguments at this point (i.e., no data) +# srch, repl = 'if cmd[0] == "(tr)":', 'if cmd and cmd[0] == "(tr)":' +# start = pp_code.find(srch, start, start + 500) +# if start < 0: +# raise RuntimeError('unable to find (tr) statement') +# pp_code = pp_code[:start] + repl + pp_code[start+len(srch):] +# # patch next line for the same reason +# srch, repl = 'if cmd[0] != ":":', 'if not cmd or cmd[0] != ":":' +# start = pp_code.find(srch, start, start + 500) +# if start < 0: +# raise RuntimeError('unable to find ":" statement') +# pp_code = pp_code[:start] + repl + pp_code[start+len(srch):] +# # compile code to a function in the process_data module +# exec(pp_code, vars(pyomo.core.data.process_data)) +# except Exception as e: +# print "Unable to patch current version of pyomo.core.data.process_data:" +# print '{}({})'.format(type(e).__name__, ','.join(repr(a) for a in e.args)) +# print "Switch will not be able to read empty data files." + def define_components(m): """Make various changes to the model to facilitate reporting and avoid unwanted behavior""" diff --git a/switch_model/hawaii/unserved_load.py b/switch_model/hawaii/unserved_load.py index 40e55119e..1a6737d12 100644 --- a/switch_model/hawaii/unserved_load.py +++ b/switch_model/hawaii/unserved_load.py @@ -24,9 +24,24 @@ def define_components(m): m.UnservedLoad = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=NonNegativeReals) # total cost for unserved load m.UnservedLoadPenalty = Expression(m.TIMEPOINTS, rule=lambda m, tp: - sum(m.UnservedLoad[z, tp] * m.unserved_load_penalty_per_mwh for z in m.LOAD_ZONES) + m.tp_duration_hrs[tp] + * sum(m.UnservedLoad[z, tp] * m.unserved_load_penalty_per_mwh for z in m.LOAD_ZONES) ) # add the unserved load to the model's energy balance m.Zone_Power_Injections.append('UnservedLoad') # add the unserved load penalty to the model's objective function m.Cost_Components_Per_TP.append('UnservedLoadPenalty') + + # amount of unserved reserves during each timepoint + m.UnservedUpReserves = Var(m.TIMEPOINTS, within=NonNegativeReals) + m.UnservedDownReserves = Var(m.TIMEPOINTS, within=NonNegativeReals) + # total cost for unserved reserves (90% as high as cost of unserved load, + # to make the model prefer to serve load when possible) + m.UnservedReservePenalty = Expression(m.TIMEPOINTS, rule=lambda m, tp: + m.tp_duration_hrs[tp] + * 0.9 + * m.unserved_load_penalty_per_mwh + * (m.UnservedUpReserves[tp] + m.UnservedDownReserves[tp]) + ) + # add the unserved load penalty to the model's objective function + m.Cost_Components_Per_TP.append('UnservedReservePenalty') diff --git a/switch_model/policies/rps_simple.py b/switch_model/policies/rps_simple.py index 7f98a2aaa..cd60ce1e3 100644 --- a/switch_model/policies/rps_simple.py +++ 
b/switch_model/policies/rps_simple.py @@ -37,7 +37,7 @@ def define_components(mod): rps_target[p in RPS_PERIODS] is the fraction of total generated energy in a period that has to be provided by RPS-eligible sources. - RPSProjFuelPower[g, t in _FUEL_BASED_GEN_TPS] is an + RPSProjFuelPower[g, t in FUEL_BASED_GEN_TPS] is an expression summarizing the power generated by RPS-eligible fuels in every fuel-based project. This cannot be simply taken to be equal to the dispatch level of the project, since a mix of RPS-eligible and ineligible diff --git a/switch_model/reporting/__init__.py b/switch_model/reporting/__init__.py index 4f1cdc97f..ecb68e053 100644 --- a/switch_model/reporting/__init__.py +++ b/switch_model/reporting/__init__.py @@ -8,7 +8,7 @@ Modules within this directory may implement custom exports that depend on multiple Switch modules. Each individual Switch module that defines components should only access model components that -it defined or that were defined upstream in Switch modules that +it defined or that were defined upstream in Switch modules that it depends on. For example, the load_zone module cannot assume whether users will be including project.no_commit or project.unitcommit, so it cannot reference model components defined in either of those files. However, @@ -26,6 +26,7 @@ import itertools import cPickle as pickle from pyomo.environ import value, Var +from switch_model.utilities import make_iterable csv.register_dialect( "ampl-tab", @@ -44,15 +45,13 @@ def define_arguments(argparser): def write_table(instance, *indexes, **kwargs): # there must be a way to accept specific named keyword arguments and - # also an open-ended list of positional arguments (*indexes), but I + # also an open-ended list of positional arguments (*indexes), but I # don't know what that is. output_file = kwargs["output_file"] headings = kwargs["headings"] values = kwargs["values"] digits = kwargs.get('digits', 6) - # create a master indexing set - # this is a list of lists, even if only one list was specified - idx = itertools.product(*indexes) + with open(output_file, 'wb') as f: w = csv.writer(f, dialect="ampl-tab") # write header row @@ -68,30 +67,53 @@ def format_row(row): else: row[i] = sig_digits.format(v) return tuple(row) - w.writerows( - format_row(row=values(instance, *x)) - for x in idx - ) - -def make_iterable(item): - """Return an iterable for the one or more items passed.""" - if isinstance(item, basestring): - i = iter([item]) - else: try: - # check if it's iterable - i = iter(item) - except TypeError: - i = iter([item]) - return i + w.writerows( + format_row(row=values(instance, *unpack_elements(x))) + for x in itertools.product(*indexes) + ) + except TypeError: # lambda got wrong number of arguments + # use old code, which doesn't unpack the indices + w.writerows( + # TODO: flatten x (unpack tuples) like Pyomo before calling values() + # That may cause problems elsewhere though... + format_row(row=values(instance, *x)) + for x in itertools.product(*indexes) + ) + print "DEPRECATION WARNING: switch_model.reporting.write_table() was called with a function" + print "that expects multidimensional index values to be stored in tuples, but Switch now unpacks" + print "these tuples automatically. Please update your code to work with unpacked index values." + print "Problem occurred with {}.".format(values.func_code) + +def unpack_elements(items): + """Unpack any multi-element objects within items, to make a single flat list. + Note: this is not recursive. 
+ This is used to flatten the product of a multi-dimensional index with anything else.""" + l=[] + for x in items: + if isinstance(x, basestring): + l.append(x) + else: + try: + l.extend(x) + except TypeError: # x isn't iterable + l.append(x) + return l -def _save_generic_results(instance, outdir, sorted_output): - for var in instance.component_objects(): - if not isinstance(var, Var): - continue +def post_solve(instance, outdir): + """ + Minimum output generation for all model runs. + """ + save_generic_results(instance, outdir, instance.options.sorted_output) + save_total_cost_value(instance, outdir) + save_cost_components(instance, outdir) + save_results(instance, outdir) + +def save_generic_results(instance, outdir, sorted_output): + for var in instance.component_objects(Var): output_file = os.path.join(outdir, '%s.tab' % var.name) with open(output_file, 'wb') as fh: writer = csv.writer(fh, dialect='ampl-tab') @@ -113,24 +135,53 @@ def _save_generic_results(instance, outdir, sorted_output): writer.writerow([value(obj)]) -def _save_total_cost_value(instance, outdir): - values = instance.Minimize_System_Cost.values() - assert len(values) == 1 - total_cost = values[0].expr() +def save_total_cost_value(instance, outdir): with open(os.path.join(outdir, 'total_cost.txt'), 'w') as fh: - fh.write('%s\n' % total_cost) - - -def _save_results(instance, outdir): - with open(os.path.join(outdir, 'results.pickle'), 'wb') as fh: - pickle.dump(instance.last_results, fh, protocol=-1) + fh.write('{}\n'.format(value(instance.SystemCost))) -def post_solve(instance, outdir): +def save_cost_components(m, outdir): """ - Minimum output generation for all model runs. - + Save values for all individual components of total system cost on NPV basis. + """ + cost_dict = dict() + for annual_cost in m.Cost_Components_Per_Period: + cost = getattr(m, annual_cost) + # note: storing value() instead of the expression may save + # some memory while this function runs + cost_dict[annual_cost] = value(sum( + cost[p] * m.bring_annual_costs_to_base_year[p] + for p in m.PERIODS + )) + for tp_cost in m.Cost_Components_Per_TP: + cost = getattr(m, tp_cost) + cost_dict[tp_cost] = value(sum( + cost[t] * m.tp_weight_in_year[t] + * m.bring_annual_costs_to_base_year[m.tp_period[t]] + for t in m.TIMEPOINTS + )) + write_table( + m, + cost_dict.keys(), + output_file=os.path.join(outdir, "cost_components.tab"), + headings=('component', 'npv_cost'), + values=lambda m, c: (c, cost_dict[c]), + digits=16 + ) + + +def save_results(instance, outdir): """ - _save_generic_results(instance, outdir, instance.options.sorted_output) - _save_total_cost_value(instance, outdir) - _save_results(instance, outdir) + Save model solution for later reuse. + + Note that this pickles a solver results object because the instance itself + cannot be pickled -- see + https://stackoverflow.com/questions/39941520/pyomo-ipopt-does-not-return-solution + """ + # First, save the full solution data to the results object, because recent + # versions of Pyomo only store execution metadata there by default. 
+ instance.solutions.store_to(instance.last_results) + with open(os.path.join(outdir, 'results.pickle'), 'wb') as fh: + pickle.dump(instance.last_results, fh, protocol=-1) + # remove the solution from the results object, to minimize long-term memory use + instance.last_results.solution.clear() diff --git a/switch_model/solve.py b/switch_model/solve.py old mode 100644 new mode 100755 index 2e6e4d607..283c76484 --- a/switch_model/solve.py +++ b/switch_model/solve.py @@ -8,21 +8,23 @@ from pyomo.opt import SolverFactory, SolverStatus, TerminationCondition import pyomo.version -from switch_model.utilities import create_model, _ArgumentParser, Logging +from switch_model.utilities import ( + create_model, _ArgumentParser, Logging, StepTimer, make_iterable +) from switch_model.upgrade import do_inputs_need_upgrade, upgrade_inputs def main(args=None, return_model=False, return_instance=False): - start_time = time.time() + timer = StepTimer() if args is None: - # combine default arguments read from options.txt file with + # combine default arguments read from options.txt file with # additional arguments specified on the command line args = get_option_file_args(extra_args=sys.argv[1:]) # Get options needed before any modules are loaded pre_module_options = parse_pre_module_options(args) - + # turn on post-mortem debugging mode if requested # (from http://stackoverflow.com/a/1237407 ; more options available there) if pre_module_options.debug: @@ -63,7 +65,7 @@ def debug(type, value, tb): # build a module list based on configuration options, and add # the current module (to register define_arguments callback) modules = get_module_list(args) - + # Patch pyomo if needed, to allow reconstruction of expressions. # This must be done before the model is constructed. patch_pyomo() @@ -73,7 +75,7 @@ def debug(type, value, tb): # Add any suffixes specified on the command line (usually only iis) add_extra_suffixes(model) - + # return the model as-is if requested if return_model and not return_instance: return model @@ -84,11 +86,10 @@ def debug(type, value, tb): # get a list of modules to iterate through iterate_modules = get_iteration_list(model) - + if model.options.verbose: - creation_time = time.time() print "\n=======================================================================" - print "SWITCH model created in {:.2f} s.\nArguments:".format(creation_time - start_time) + print "SWITCH model created in {:.2f} s.\nArguments:".format(timer.step_time()) print ", ".join(k+"="+repr(v) for k, v in model.options.__dict__.items() if v) print "Modules:\n"+", ".join(m for m in modules) if iterate_modules: @@ -96,13 +97,15 @@ def debug(type, value, tb): print "=======================================================================\n" print "Loading inputs..." 
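The StepTimer class used above is added to switch_model.utilities later in this diff; it replaces the scattered time.time() arithmetic. A minimal usage sketch, where do_long_step() is a hypothetical stand-in for any slow operation:

import time
from switch_model.utilities import StepTimer

def do_long_step():
    # hypothetical placeholder for model construction, solving, etc.
    time.sleep(0.5)

timer = StepTimer()  # records the current time
do_long_step()
print("step 1 took {:.2f} s".format(timer.step_time()))  # returns elapsed time and resets the timer
do_long_step()
print("step 2 took {:.2f} s".format(timer.step_time()))  # measured from the previous reset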
- # create an instance + # create an instance (also reports time spent reading data and loading into model) instance = model.load_inputs() + + #### Below here, we refer to instance instead of model #### + instance.pre_solve() - instantiation_time = time.time() - if model.options.verbose: - print "Inputs loaded in {:.2f} s.\n".format(instantiation_time - creation_time) - + if instance.options.verbose: + print "Total time spent constructing model: {:.2f} s.\n".format(timer.step_time()) + # return the instance as-is if requested if return_instance: if return_model: @@ -110,43 +113,48 @@ def debug(type, value, tb): else: return instance - if model.options.reload_prior_solution: - reload_prior_solution_from_pickle(instance, model.options.outputs_dir) + if instance.options.reload_prior_solution: + reload_prior_solution_from_pickle(instance, instance.options.outputs_dir) + if instance.options.verbose: + print( + 'Loaded previous results into model instance in {:.2f} s.' + .format(timer.step_time()) + ) else: # make sure the outputs_dir exists (used by some modules during iterate) # use a race-safe approach in case this code is run in parallel try: - os.makedirs(model.options.outputs_dir) + os.makedirs(instance.options.outputs_dir) except OSError: # directory probably exists already, but double-check - if not os.path.isdir(model.options.outputs_dir): + if not os.path.isdir(instance.options.outputs_dir): raise - # solve the model + # solve the model (reports time for each step as it goes) if iterate_modules: - if model.options.verbose: + if instance.options.verbose: print "Iterating model..." iterate(instance, iterate_modules) else: results = solve(instance) - if model.options.verbose: + if instance.options.verbose: print "Optimization termination condition was {}.\n".format( results.solver.termination_condition) + if instance.options.verbose: + timer.step_time() # restart counter for next step + # report/save results - if model.options.verbose: - post_solve_start_time = time.time() + if instance.options.verbose: print "Executing post solve functions..." instance.post_solve() - if model.options.verbose: - post_solve_end_time = time.time() - print "Post solve processing completed in {:.2f} s.".format( - post_solve_end_time - post_solve_start_time) + if instance.options.verbose: + print "Post solve processing completed in {:.2f} s.".format(timer.step_time()) # return stdout to original sys.stdout = stdout_copy - if model.options.interact or model.options.reload_prior_solution: + if instance.options.interact or instance.options.reload_prior_solution: m = instance # present the solved model as 'm' for convenience banner = ( "\n" @@ -165,7 +173,7 @@ def reload_prior_solution_from_pickle(instance, outdir): with open(os.path.join(outdir, 'results.pickle'), 'rb') as fh: results = pickle.load(fh) instance.solutions.load_from(results) - return instance + return instance patched_pyomo = False @@ -192,24 +200,66 @@ def new_construct(self, *args, **kwargs): pyomo.environ.Expression.construct = new_construct del m +def reload_prior_solution_from_tabs(instance): + """ + Assign values to all model variables from .tab files saved after + previous solution. (Not currently used.) + """ + import csv + var_objects = instance.component_objects(Var) + for var in var_objects: + var_file = os.path.join(instance.options.outputs_dir, '{}.tab'.format(var.name)) + if not os.path.isfile(var_file): + raise RuntimeError( + "Tab output file for variable {} cannot be found in outputs " + "directory. 
Exiting.".format(var.name) + ) + try: + # check types of the first tuple of keys for this variable + key_types = [type(i) for i in make_iterable(next(var.iterkeys()))] + except StopIteration: + key_types = [] # no keys + with open(var_file,'r') as f: + reader = csv.reader(f, delimiter='\t') + next(reader) # skip headers + for row in reader: + index = tuple(t(k) for t, k in zip(key_types, row[:-1])) + try: + v = var[index] + except KeyError: + raise KeyError( + "Unable to set value for {}[{}]; index is invalid." + .format(var.name, index) + ) + if row[-1] == '': + # Variables that are not used in the model end up with no + # value after the solve and get saved as blanks; we skip those. + continue + val = float(row[-1]) + if v.is_integer() or v.is_binary(): + val = int(val) + v.value = val + if instance.options.verbose: + print 'Loaded variable {} values into instance.'.format(var.name) + def iterate(m, iterate_modules, depth=0): """Iterate through all modules listed in the iterate_list (usually iterate.txt), if any. If there is no iterate_list, then this will just solve the model once. - + If it exists, the iterate_list contains one row per level of iteration, and each row contains a list of modules to test for iteration at that level (these can be separated with commas, spaces or tabs). - The model will run through the levels like nested loops, running the lowest level + The model will run through the levels like nested loops, running the lowest level till it converges, then advancing the next higher level by one step, then running the lowest level to convergence/completion again, repeating until all levels are complete. During each iteration, the pre_iterate() and post_iterate() functions of each specified - module (if they exist) will be called before and after solving. When a module is - converged or completed, its post_iterate() function should return True. + module (if they exist) will be called before and after solving. When a module is + converged or completed, its post_iterate() function should return True. All modules specified in the iterate_list should also be loaded via the module_list or include_module(s) arguments. """ - + # create or truncate the iteration tree if depth == 0: m.iteration_node = tuple() @@ -222,11 +272,11 @@ def iterate(m, iterate_modules, depth=0): else: # iterate until converged at the current level - # note: the modules in iterate_modules were also specified in the model's + # note: the modules in iterate_modules were also specified in the model's # module list, and have already been loaded, so they are accessible via sys.modules # This prepends 'switch_model.' if needed, to be consistent with modules.txt. current_modules = [ - sys.modules[module_name if module_name in sys.modules else 'switch_model.' + module_name] + sys.modules[module_name if module_name in sys.modules else 'switch_model.' + module_name] for module_name in iterate_modules[depth]] j = 0 @@ -246,7 +296,7 @@ def iterate(m, iterate_modules, depth=0): # converge the deeper-level modules, if any (inner loop) iterate(m, iterate_modules, depth=depth+1) - + # post-iterate modules at this level m.iteration_number = j # may have been changed during iterate() m.iteration_node = m.iteration_node[:depth] + (j,) @@ -261,8 +311,8 @@ def iterate(m, iterate_modules, depth=0): return def iterate_module_func(m, module, func, converged): - """Call function func() in specified module (if available) and use the result to 
If func doesn't exist or returns None, convergence + """Call function func() in specified module (if available) and use the result to + adjust model convergence status. If func doesn't exist or returns None, convergence status will not be changed.""" module_converged = None iter_func = getattr(module, func, None) @@ -273,7 +323,7 @@ def iterate_module_func(m, module, func, converged): return converged else: return converged and module_converged - + def define_arguments(argparser): # callback function to define model configuration arguments while the model is built @@ -309,23 +359,23 @@ def define_arguments(argparser): # Define solver-related arguments # These are a subset of the arguments offered by "pyomo solve --solver=cplex --help" - argparser.add_argument("--solver", default="glpk", + argparser.add_argument("--solver", default="glpk", help='Name of Pyomo solver to use for the model (default is "glpk")') argparser.add_argument("--solver-manager", default="serial", help='Name of Pyomo solver manager to use for the model ("neos" to use remote NEOS server)') argparser.add_argument("--solver-io", default=None, help="Method for Pyomo to use to communicate with solver") # note: pyomo has a --solver-options option but it is not clear # whether that does the same thing as --solver-options-string so we don't reuse the same name. - argparser.add_argument("--solver-options-string", default=None, + argparser.add_argument("--solver-options-string", default=None, help='A quoted string of options to pass to the model solver. Each option must be of the form option=value. ' - '(e.g., --solver-options-string "mipgap=0.001 primalopt advance=2 threads=1")') + '(e.g., --solver-options-string "mipgap=0.001 primalopt=\'\' advance=2 threads=1")') argparser.add_argument("--keepfiles", action='store_true', default=None, help="Keep temporary files produced by the solver (may be useful with --symbolic-solver-labels)") argparser.add_argument( "--stream-output", "--stream-solver", action='store_true', dest="tee", default=None, - help="Display information from the solver about its progress (usually combined with a suitable --solver-options string)") + help="Display information from the solver about its progress (usually combined with a suitable --solver-options-string)") argparser.add_argument( - "--symbolic-solver-labels", action='store_true', default=None, + "--symbolic-solver-labels", action='store_true', default=None, help='Use symbol names derived from the model when interfacing with the solver. 
' 'See "pyomo solve --solver=x --help" for more details.') argparser.add_argument("--tempdir", default=None, @@ -347,6 +397,9 @@ def define_arguments(argparser): argparser.add_argument( '--verbose', '-v', default=False, action='store_true', help='Show information about model preparation and solution') + # argparser.add_argument( + # '--quiet', '-q', dest='verbose', action='store_false', + # help="Don't show information about model preparation and solution (cancels --verbose setting)") argparser.add_argument( '--interact', default=False, action='store_true', help='Enter interactive shell after solving the instance to enable inspection of the solved model.') @@ -357,7 +410,7 @@ def define_arguments(argparser): def add_module_args(parser): parser.add_argument( - "--module-list", default=None, + "--module-list", default=None, help='Text file with a list of modules to include in the model (default is "modules.txt")' ) parser.add_argument( @@ -368,9 +421,9 @@ def add_module_args(parser): "--exclude-modules", "--exclude-module", dest="exclude_modules", nargs='+', default=[], help="Module(s) to remove from the model after processing --module-list and --include-modules" ) - # note: we define --inputs-dir here because it may be used to specify the location of + # note: we define --inputs-dir here because it may be used to specify the location of # the module list, which is needed before it is loaded. - parser.add_argument("--inputs-dir", default="inputs", + parser.add_argument("--inputs-dir", default="inputs", help='Directory containing input files (default is "inputs")') @@ -434,7 +487,7 @@ def get_module_list(args): # remove modules requested by the user for module_name in module_options.exclude_modules: modules.remove(module_name) - + # add the current module, since it has callbacks, e.g. define_arguments for iteration and suffixes modules.append("switch_model.solve") @@ -499,7 +552,7 @@ def solve(model): model.solver = SolverFactory(model.options.solver, solver_io=model.options.solver_io) # patch for Pyomo < 4.2 - # note: Pyomo added an options_string argument to solver.solve() in Pyomo 4.2 rev 10587. + # note: Pyomo added an options_string argument to solver.solve() in Pyomo 4.2 rev 10587. # (See https://software.sandia.gov/trac/pyomo/browser/pyomo/trunk/pyomo/opt/base/solvers.py?rev=10587 ) # This is misreported in the documentation as options=, but options= actually accepts a dictionary. if model.options.solver_options_string and not hasattr(model.solver, "_options_string_to_dict"): @@ -533,16 +586,16 @@ def solve(model): # while i is not None: # c, i = m._decl_order[i] # solver_args[suffixes].append(c.name) - + # patch for Pyomo < 4.2 if not hasattr(model.solver, "_options_string_to_dict"): solver_args.pop("options_string", "") # solve the model if model.options.verbose: - solve_start_time = time.time() + timer = StepTimer() print "\nSolving model..." - + if model.options.tempdir is not None: # from https://software.sandia.gov/downloads/pub/pyomo/PyomoOnlineDocs.html#_changing_the_temporary_directory from pyutilib.services import TempfileManager @@ -550,31 +603,25 @@ def solve(model): results = model.solver_manager.solve(model, opt=model.solver, **solver_args) - # Load the solution data into the results object (it only has execution - # metadata by default in recent versions of Pyomo). This will enable us to - # save and restore model solutions; the results object can be pickled to a - # file on disk, but the instance cannot. 
- # https://stackoverflow.com/questions/39941520/pyomo-ipopt-does-not-return-solution - # - model.solutions.store_to(results) - model.last_results = results - if model.options.verbose: - solve_end_time = time.time() - print "Solved model. Total time spent in solver: {:2f} s.".format(solve_end_time - solve_start_time) - + print "Solved model. Total time spent in solver: {:.2f} s.".format(timer.step_time()) # Only return if the model solved correctly, otherwise throw a useful error - if(results.solver.status == SolverStatus.ok and + if(results.solver.status in {SolverStatus.ok, SolverStatus.warning} and results.solver.termination_condition == TerminationCondition.optimal): - return results + # Cache a copy of the results object, to allow saving and restoring model + # solutions later. + model.last_results = results + # Successful solution, return results + return results elif (results.solver.termination_condition == TerminationCondition.infeasible): if hasattr(model, "iis"): - print "Model was infeasible; irreducible infeasible set (IIS) returned by solver:" - print "\n".join(c.name for c in model.iis) + print "Model was infeasible; irreducibly inconsistent set (IIS) returned by solver:" + print "\n".join(sorted(c.name for c in model.iis)) else: - print "Model was infeasible; if the solver can generate an irreducible infeasible set," - print "more information may be available by calling this script with --suffixes iis ..." + print "Model was infeasible; if the solver can generate an irreducibly inconsistent set (IIS)," + print "more information may be available by setting the appropriate flags in the " + print 'solver_options_string and calling this script with "--suffixes iis".' raise RuntimeError("Infeasible model") else: print "Solver terminated abnormally." @@ -584,6 +631,7 @@ def solve(model): print "Hint: glpk has been known to classify infeasible problems as 'other'." raise RuntimeError("Solver failed to find an optimal solution.") + # no default return, because we'll never reach here # taken from https://software.sandia.gov/trac/pyomo/browser/pyomo/trunk/pyomo/opt/base/solvers.py?rev=10784 # This can be removed when all users are on Pyomo 4.2 diff --git a/switch_model/solve_scenarios.py b/switch_model/solve_scenarios.py old mode 100644 new mode 100755 index f2ea718b4..25fb17585 --- a/switch_model/solve_scenarios.py +++ b/switch_model/solve_scenarios.py @@ -2,19 +2,19 @@ # Copyright (c) 2015-2017 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. -"""Scenario management module. -Reads scenario-related arguments from the command line and the same options -file that solve.py would read, and uses them to setup scenarios_to_run(). -For each scenario, this generator yields a tokenized list of arguments that -define that scenario (similar to sys.argv, but based on a line from a scenario -definition file, followed by any options specified on the command line). +"""Scenario management module. +Reads scenario-related arguments from the command line and the same options +file that solve.py would read, and uses them to set up scenarios_to_run(). +For each scenario, this generator yields a tokenized list of arguments that +define that scenario (similar to sys.argv, but based on a line from a scenario +definition file, followed by any options specified on the command line). Then it calls solve.main() with this list of arguments (once for each scenario). 
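For reference, here is roughly how one line of a scenario list file becomes the tokenized argument list described above; the scenario line and the pass-through options are invented examples, but the ordering matches the scenario_args assembly shown later in this diff:

import shlex

# a hypothetical line from scenario_list.txt
line = "--scenario-name high_demand --inputs-dir inputs_high_demand"
base_args = shlex.split(line)
# ['--scenario-name', 'high_demand', '--inputs-dir', 'inputs_high_demand']

scenario_option_file_args = ['--solver', 'cplex']  # example pass-throughs from options.txt
scenario_cmd_line_args = ['--verbose']             # example pass-throughs from the command line
scenario_args = scenario_option_file_args + base_args + scenario_cmd_line_args
# solve.main(args=scenario_args) is then called once for this scenario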
A queueing system (based on lock directories within a queue directory) is used to -ensure that scenarios_to_run() will always return the next unsolved +ensure that scenarios_to_run() will always return the next unsolved scenario from the scenario list file, even if the file is edited while this script is running. This makes it possible to amend the scenario list while -long solver jobs are running. Multiple solver scripts can also use +long solver jobs are running. Multiple solver scripts can also use scenarios_to_run() in separate processes to select the next job to run. """ @@ -35,7 +35,7 @@ # Parse scenario-manager-related command-line arguments. # Other command-line arguments will be passed through to solve.py via scenario_cmd_line_args parser = _ArgumentParser( - allow_abbrev=False, description='Solve one or more SWITCH scenarios.' + allow_abbrev=False, description='Solve one or more Switch scenarios.' ) parser.add_argument('--scenario', '--scenarios', nargs='+', dest='scenarios', default=[]) #parser.add_argument('--scenarios', nargs='+', default=[]) @@ -43,8 +43,10 @@ parser.add_argument("--scenario-queue", default="scenario_queue") parser.add_argument("--job-id", default=None) -#import pdb; pdb.set_trace() +# import pdb; pdb.set_trace() +# get a namespace object with successfully parsed scenario manager arguments scenario_manager_args = parser.parse_known_args(args=option_file_args + cmd_line_args)[0] +# get lists of other arguments to pass through to standard solve routine scenario_option_file_args = parser.parse_known_args(args=option_file_args)[1] scenario_cmd_line_args = parser.parse_known_args(args=cmd_line_args)[1] @@ -53,39 +55,48 @@ scenario_queue_dir = scenario_manager_args.scenario_queue job_id = scenario_manager_args.job_id -# Make a best effort to get a unique, persistent job_id for each job. -# This is used to clear the queue of running tasks if a task is stopped and -# restarted. (would be better if other jobs could do this when this job dies -# but it's hard to see how they can detect when this job fails.) -# (The idea is that the user will run multiple jobs in parallel, with one -# thread per job, to process all the scenarios. These might be run in separate -# terminal windows, or in separate instances of gnu screen, or as numbered -# jobs on an HPC system. Sometimes a job will get interrupted, e.g., if the -# user presses ctrl-c in a terminal window or if the job is launched on an -# interruptible queue. This script attempts to detect when that job gets -# relaunched, and re-run the interrupted scenario.) -if job_id is None: - job_id = os.environ.get('JOB_ID') # could be set by user -if job_id is None: - job_id = os.environ.get('JOBID') # could be set by user -if job_id is None: - job_id = os.environ.get('SLURM_JOBID') -if job_id is None: - job_id = os.environ.get('OMPI_MCA_ess_base_jobid') -if job_id is None: - # construct one from hostname and parent's pid - # this way, each job launched from a different terminal window - # or different instance of gnu screen will have a persistent ID - # (This won't work on Windows before Python 3.2; in that case, - # users should specify a --job-id or set an environment variable - # when running multiple jobs in parallel. Without that, all - # jobs will think they have the same ID, and at startup they will - # try to re-run the scenario currently being run by some other job.) 
- if hasattr(os, 'getppid'): - job_id = socket.gethostname() + '_' + str(os.getppid()) - else: - # won't be able to automatically clear previously interrupted job - job_id = socket.gethostname() + '_' + str(os.getpid()) +# Get a unique job id. Note: in the past we have tried to get a +# persistent ID for each parallel task, so that it could requeue any +# jobs it was previously working on when it restarted. However, this +# is kludgy to begin with (only works if restarted under similar conditions) +# and tends to fail. It also cannot work with a bare "srun" or "mpirun" +# command, which might launch 20+ tasks that end up thinking they're the +# same job and race to reset the queue. +job_id = socket.gethostname() + '_' + str(os.getpid()) + +# # Make a best effort to get a unique, persistent job_id for each job. +# # This is used to clear the queue of running tasks if a task is stopped and +# # restarted. (would be better if other jobs could do this when this job dies +# # but it's hard to see how they can detect when this job fails.) +# # (The idea is that the user will run multiple jobs in parallel, with one +# # thread per job, to process all the scenarios. These might be run in separate +# # terminal windows, or in separate instances of gnu screen, or as numbered +# # jobs on an HPC system. Sometimes a job will get interrupted, e.g., if the +# # user presses ctrl-c in a terminal window or if the job is launched on an +# # interruptible queue. This script attempts to detect when that job gets +# # relaunched, and re-run the interrupted scenario.) +# if job_id is None: +# job_id = os.environ.get('JOB_ID') # could be set by user +# if job_id is None: +# job_id = os.environ.get('JOBID') # could be set by user +# if job_id is None: +# job_id = os.environ.get('SLURM_JOBID') +# if job_id is None: +# job_id = os.environ.get('OMPI_MCA_ess_base_jobid') +# if job_id is None: +# # construct one from hostname and parent's pid +# # this way, each job launched from a different terminal window +# # or different instance of gnu screen will have a persistent ID +# # (This won't work on Windows before Python 3.2; in that case, +# # users should specify a --job-id or set an environment variable +# # when running multiple jobs in parallel. Without that, all +# # jobs will think they have the same ID, and at startup they will +# # try to re-run the scenario currently being run by some other job.) +# if hasattr(os, 'getppid'): +# job_id = socket.gethostname() + '_' + str(os.getppid()) +# else: +# # won't be able to automatically clear previously interrupted job +# job_id = socket.gethostname() + '_' + str(os.getpid()) running_scenarios_file = os.path.join(scenario_queue_dir, job_id+"_running.txt") @@ -101,11 +112,11 @@ def main(args=None): os.makedirs(scenario_queue_dir) except OSError: pass # directory probably exists already - + # remove lock directories for any scenarios that were # previously being solved by this job but were interrupted unlock_running_scenarios() - + for (scenario_name, args) in scenarios_to_run(): print( "\n\n=======================================================================\n" @@ -126,15 +137,15 @@ def main(args=None): def scenarios_to_run(): """Generator function which returns argument lists for each scenario that should be run. - + Note: each time a new scenario is required, this re-reads the scenario_list file and then returns the first scenario that hasn't already started running. 
- This allows multiple copies of the script to be run and allocate scenarios among + This allows multiple copies of the script to be run and allocate scenarios among themselves.""" - + skipped = [] ran = [] - + if requested_scenarios: # user requested one or more scenarios # just run them in the order specified, with no queue-management @@ -150,35 +161,38 @@ def scenarios_to_run(): # Run every scenario in the list, with queue management # This is done by repeatedly scanning the scenario list and choosing # the first scenario that hasn't been run. This way, users can edit the - # list and this script will adapt to the changes as soon as it finishes + # list and this script will adapt to the changes as soon as it finishes # the current scenario. all_done = False while not all_done: all_done = True - # cache a list of scenarios that have been run, to avoid trying to checkout every one. + # cache a list of scenarios that have been run, to avoid trying to checkout every one. # This list is found by retrieving the names of the lock-directories. already_run = filter(os.path.isdir, os.listdir(".")) for scenario_name, base_args in get_scenario_dict().items(): + scenario_args = scenario_option_file_args + base_args + scenario_cmd_line_args if scenario_name not in already_run and checkout(scenario_name): # run this scenario, then start again at the top of the list ran.append(scenario_name) - scenario_args = scenario_option_file_args + base_args + scenario_cmd_line_args yield (scenario_name, scenario_args) all_done = False break else: if scenario_name not in skipped and scenario_name not in ran: skipped.append(scenario_name) - print("Skipping {} because it was already run.".format(scenario_name)) + if is_verbose(scenario_args): + print("Skipping {} because it was already run.".format(scenario_name)) # move on to the next candidate # no more scenarios to run if skipped and not ran: print( - "Please remove the {sq} directory or its contents if you would like to " - "run these scenarios again. (rm -rf {sq})".format(sq=scenario_queue_dir) + "Skipping all scenarios because they have already been solved. " + "If you would like to run these scenarios again, " + "please remove the {sq} directory or its contents. (rm -rf {sq})" + .format(sq=scenario_queue_dir) ) return - + def parse_arg(arg, args=sys.argv[1:], **parse_kw): """Parse one argument from the argument list, using options as specified for argparse""" @@ -196,13 +210,18 @@ def get_scenario_name(scenario_args): # use ad-hoc parsing to extract the scenario name from a scenario-definition string return parse_arg("--scenario-name", default=None, args=scenario_args) +def is_verbose(scenario_args): + # check options settings for --verbose flag + # note: this duplicates settings in switch_model.solve, so it may fall out of date + return parse_arg("--verbose", action='store_true', default=False, args=scenario_args) + def get_scenario_dict(): # note: we read the list from the disk each time so that we get a fresher version # if the standard list is changed during a long solution effort. 
with open(scenario_list_file, 'r') as f: scenario_list_text = [r.strip() for r in f.read().splitlines()] scenario_list_text = [r for r in scenario_list_text if r and not r.startswith("#")] - + # note: text.splitlines() omits newlines and ignores presence/absence of \n at end of the text # shlex.split() breaks a command-line-style argument string into a list like sys.argv scenario_list = [shlex.split(r) for r in scenario_list_text] @@ -236,9 +255,9 @@ def mark_completed(scenario_name): # remove the scenario from the list of running scenarios (since it's been completed now) running_scenarios.remove(scenario_name) write_running_scenarios_file() - # note: the scenario lock directory is left in place so the scenario won't get checked + # note: the scenario lock directory is left in place so the scenario won't get checked # out again - + def write_running_scenarios_file(): # write the list of scenarios currently being run by this job to disk # so they can be released back to the queue if the job is interrupted and restarted @@ -255,7 +274,7 @@ def write_running_scenarios_file(): f.truncate() else: # remove the running_scenarios_file entirely if it would be empty - try: + try: os.remove(running_scenarios_file) except OSError as e: if e.errno != 2: # no such file @@ -268,8 +287,8 @@ def unlock_running_scenarios(): with open(running_scenarios_file) as f: interrupted = f.read().splitlines() for scenario_name in interrupted: - try: - os.rmdir(scenario_name) + try: + os.rmdir(os.path.join(scenario_queue_dir, scenario_name)) except OSError as e: if e.errno != 2: # no such file raise diff --git a/switch_model/transmission/transport/build.py b/switch_model/transmission/transport/build.py index c031b2281..e493748fd 100644 --- a/switch_model/transmission/transport/build.py +++ b/switch_model/transmission/transport/build.py @@ -134,7 +134,7 @@ def define_components(mod): describes which transmission builds will be operational in a given period. Currently, transmission lines are kept online indefinitely, with parts being replaced as they wear out. - + TX_BUILDS_IN_PERIOD[p] will return a subset of (tx, bld_yr) in BLD_YRS_FOR_TX. @@ -175,13 +175,16 @@ def define_components(mod): mod.TRANSMISSION_LINES = Set() mod.trans_lz1 = Param(mod.TRANSMISSION_LINES, within=mod.LOAD_ZONES) mod.trans_lz2 = Param(mod.TRANSMISSION_LINES, within=mod.LOAD_ZONES) - mod.min_data_check('TRANSMISSION_LINES', 'trans_lz1', 'trans_lz2') + # we don't do a min_data_check for TRANSMISSION_LINES, because it may be empty for model + # configurations that are sometimes run with interzonal transmission and sometimes not + # (e.g., island interconnect scenarios). However, presence of this column will still be + # checked by load_aug. + mod.min_data_check('trans_lz1', 'trans_lz2') mod.trans_dbid = Param(mod.TRANSMISSION_LINES, default=lambda m, tx: tx) - mod.trans_length_km = Param(mod.TRANSMISSION_LINES, within=PositiveReals) + mod.trans_length_km = Param(mod.TRANSMISSION_LINES, within=NonNegativeReals) mod.trans_efficiency = Param( mod.TRANSMISSION_LINES, - within=PositiveReals, - validate=lambda m, val, tx: val <= 1) + within=PercentFraction) mod.BLD_YRS_FOR_EXISTING_TX = Set( dimen=2, initialize=lambda m: set( @@ -189,9 +192,10 @@ def define_components(mod): mod.existing_trans_cap = Param( mod.TRANSMISSION_LINES, within=NonNegativeReals) + # Note: we don't do a min_data_check for BLD_YRS_FOR_EXISTING_TX, because it may be empty for + # models that start with no pre-existing transmission (e.g., island interconnect scenarios). 
mod.min_data_check( - 'trans_length_km', 'trans_efficiency', 'BLD_YRS_FOR_EXISTING_TX', - 'existing_trans_cap') + 'trans_length_km', 'trans_efficiency', 'existing_trans_cap') mod.trans_new_build_allowed = Param( mod.TRANSMISSION_LINES, within=Boolean, default=True) mod.NEW_TRANS_BLD_YRS = Set( @@ -226,9 +230,8 @@ def bounds_BuildTx(model, tx, bld_yr): if tx2 == tx and (bld_yr == 'Legacy' or bld_yr <= period))) mod.trans_derating_factor = Param( mod.TRANSMISSION_LINES, - within=NonNegativeReals, - default=1, - validate=lambda m, val, tx: val <= 1) + within=PercentFraction, + default=1) mod.TxCapacityNameplateAvailable = Expression( mod.TRANSMISSION_LINES, mod.PERIODS, rule=lambda m, tx, period: ( @@ -239,13 +242,13 @@ def bounds_BuildTx(model, tx, bld_yr): default=1, validate=lambda m, val, tx: val >= 0.5 and val <= 3) mod.trans_capital_cost_per_mw_km = Param( - within=PositiveReals, + within=NonNegativeReals, default=1000) mod.trans_lifetime_yrs = Param( - within=PositiveReals, + within=NonNegativeReals, default=20) mod.trans_fixed_om_fraction = Param( - within=PositiveReals, + within=NonNegativeReals, default=0.03) # Total annual fixed costs for building new transmission lines... # Multiply capital costs by capital recover factor to get annual @@ -253,7 +256,7 @@ def bounds_BuildTx(model, tx, bld_yr): # overnight costs. mod.trans_cost_annual = Param( mod.TRANSMISSION_LINES, - within=PositiveReals, + within=NonNegativeReals, initialize=lambda m, tx: ( m.trans_capital_cost_per_mw_km * m.trans_terrain_multiplier[tx] * m.trans_length_km[tx] * (crf(m.interest_rate, m.trans_lifetime_yrs) + @@ -315,7 +318,7 @@ def load_inputs(mod, switch_data, inputs_dir): trans_terrain_multiplier, trans_new_build_allowed Note that the next file is formatted as .dat, not as .tab. The - distribution_loss_rate parameter should only be inputted if the + distribution_loss_rate parameter should only be inputted if the local_td module is loaded in the simulation. If this parameter is specified a value in trans_params.dat and local_td is not included in the module list, then an error will be raised. 
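As a sanity check on the trans_cost_annual rule above, the annualized cost can be computed by hand. The numbers below are invented, the crf() definition is assumed to match switch_model.financials.crf, and the tail of the rule (truncated by the hunk context above) is assumed to add trans_fixed_om_fraction inside the parentheses:

def crf(i, n):
    # capital recovery factor: spreads an overnight cost over n equal annual payments at interest rate i
    return i / (1.0 - (1.0 + i) ** -n)

cost_per_mw_km = 1000.0    # trans_capital_cost_per_mw_km default
terrain_multiplier = 1.0
length_km = 100.0
interest_rate = 0.07       # example value
lifetime_yrs = 20.0        # trans_lifetime_yrs default
fixed_om_fraction = 0.03   # trans_fixed_om_fraction default

annual_cost_per_mw = (cost_per_mw_km * terrain_multiplier * length_km
                      * (crf(interest_rate, lifetime_yrs) + fixed_om_fraction))
print("{:.0f} $/MW-yr".format(annual_cost_per_mw))  # about 12439 $/MW-yr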
@@ -327,20 +330,28 @@ def load_inputs(mod, switch_data, inputs_dir): """ + # TODO: send issue / pull request to Pyomo to allow .tab files with + # no rows after header (fix bugs in pyomo.core.plugins.data.text) switch_data.load_aug( filename=os.path.join(inputs_dir, 'transmission_lines.tab'), - select=('TRANSMISSION_LINE', 'trans_lz1', 'trans_lz2', - 'trans_length_km', 'trans_efficiency', 'existing_trans_cap'), + select=( + 'TRANSMISSION_LINE', 'trans_lz1', 'trans_lz2', + 'trans_length_km', 'trans_efficiency', 'existing_trans_cap', + 'trans_dbid', 'trans_derating_factor', + 'trans_terrain_multiplier', 'trans_new_build_allowed' + ), index=mod.TRANSMISSION_LINES, - param=(mod.trans_lz1, mod.trans_lz2, mod.trans_length_km, - mod.trans_efficiency, mod.existing_trans_cap)) - switch_data.load_aug( - filename=os.path.join(inputs_dir, 'trans_optional_params.tab'), - optional=True, - select=('TRANSMISSION_LINE', 'trans_dbid', 'trans_derating_factor', - 'trans_terrain_multiplier', 'trans_new_build_allowed'), - param=(mod.trans_dbid, mod.trans_derating_factor, - mod.trans_terrain_multiplier, mod.trans_new_build_allowed)) + optional_params=( + 'trans_dbid', 'trans_derating_factor', + 'trans_terrain_multiplier', 'trans_new_build_allowed' + ), + param=( + mod.trans_lz1, mod.trans_lz2, + mod.trans_length_km, mod.trans_efficiency, mod.existing_trans_cap, + mod.trans_dbid, mod.trans_derating_factor, + mod.trans_terrain_multiplier, mod.trans_new_build_allowed + ) + ) trans_params_path = os.path.join(inputs_dir, 'trans_params.dat') if os.path.isfile(trans_params_path): switch_data.load(filename=trans_params_path) diff --git a/switch_model/upgrade/manager.py b/switch_model/upgrade/manager.py index e7983b912..8af391d31 100644 --- a/switch_model/upgrade/manager.py +++ b/switch_model/upgrade/manager.py @@ -12,6 +12,7 @@ import upgrade_2_0_0b1 import upgrade_2_0_0b2 +import upgrade_2_0_0b4 # Available upgrade code. This needs to be in consecutive order so # upgrade_inputs can incrementally apply the upgrades. @@ -22,20 +23,27 @@ (upgrade_2_0_0b2, upgrade_2_0_0b2.upgrades_from, upgrade_2_0_0b2.upgrades_to), + (upgrade_2_0_0b4, + upgrade_2_0_0b4.upgrades_from, + upgrade_2_0_0b4.upgrades_to), ] +# Not every code revision requires an update, so we hard-code the last +# revision that required an update. +last_required_update = '2.0.0b4' code_version = StrictVersion(switch_model.__version__) version_file = 'switch_inputs_version.txt' #verbose = False verbose = True -def scan_and_upgrade(top_dir, input_dir_name = 'inputs'): +def scan_and_upgrade(top_dir, inputs_dir_name='inputs', backup=True): for dirpath, dirnames, filenames in os.walk(top_dir): for dirname in dirnames: path = os.path.join(dirpath, dirname) - if os.path.exists(os.path.join(path, input_dir_name, 'modules.txt')): - upgrade_inputs(os.path.join(path, input_dir_name), verbose) + if os.path.exists(os.path.join(path, inputs_dir_name, 'modules.txt')): + # print_verbose('upgrading {}'.format(os.path.join(path, inputs_dir_name))) + upgrade_inputs(os.path.join(path, inputs_dir_name), backup) def get_input_version(inputs_dir): @@ -70,7 +78,6 @@ def _write_input_version(inputs_dir, new_version): version_path = os.path.join(inputs_dir, version_file) with open(version_path, 'w') as f: f.write(new_version + "\n") - def do_inputs_need_upgrade(inputs_dir): """ @@ -83,7 +90,6 @@ def do_inputs_need_upgrade(inputs_dir): # Not every code revision requires an update, so just hard-code the last # revision that required an update. 
inputs_version = get_input_version(inputs_dir) - last_required_update = '2.0.0b2' return StrictVersion(inputs_version) < StrictVersion(last_required_update) @@ -113,36 +119,44 @@ def upgrade_inputs(inputs_dir, backup=True): # Successively apply the upgrade scripts as needed. for (upgrader, v_from, v_to) in upgrade_plugins: inputs_v = StrictVersion(get_input_version(inputs_dir)) - if inputs_v == StrictVersion(v_from): + # note: the next line catches datasets created by/for versions of Switch that + # didn't require input directory upgrades + if StrictVersion(v_from) <= inputs_v < StrictVersion(v_to): print_verbose('\tUpgrading from ' + v_from + ' to ' + v_to) upgrader.upgrade_input_dir(inputs_dir) print_verbose('\tFinished upgrading ' + inputs_dir + '\n') else: - print_verbose('Skipped ' + inputs_dir + ' it does not need upgrade.') + print_verbose('Skipped ' + inputs_dir + '; it does not need upgrade.') def main(args=None): if args is None: + # note: we don't pass the args object directly to scan_and_upgrade or upgrade_inputs + # because those may be called from elsewhere with custom arguments parser = argparse.ArgumentParser() add_parser_args(parser) args = parser.parse_args() set_verbose(args.verbose) - if args.recusive: - scan_and_upgrade(args.path) + if args.recursive: + scan_and_upgrade('.', args.inputs_dir_name, args.backup) else: - if not os.path.isdir(args.path): - print("Error: Input directory {} does not exist.".format(args.path)) + if not os.path.isdir(args.inputs_dir_name): + print("Error: Input directory {} does not exist.".format(args.inputs_dir_name)) return -1 - upgrade_inputs(os.path.normpath(args.path)) + upgrade_inputs(os.path.normpath(args.inputs_dir_name), args.backup) def set_verbose(verbosity): global verbose verbose = verbosity def add_parser_args(parser): - parser.add_argument("--path", type=str, default="inputs", - help='Input directory path (default is "inputs")') - parser.add_argument("--recursive", dest="recusive", + parser.add_argument("--inputs-dir-name", type=str, default="inputs", + help='Input directory name (default is "inputs")') + parser.add_argument("--backup", action='store_true', default=True, + help='Make backup of inputs directory before upgrading (set true by default)') + parser.add_argument("--no-backup", action='store_false', dest='backup', + help='Do not make backup of inputs directory before upgrading') + parser.add_argument("--recursive", dest="recursive", default=False, action='store_true', help=('Recursively scan the provided path for inputs directories ' 'named "inputs", and upgrade each directory found. Note, this ' diff --git a/switch_model/upgrade/upgrade_2_0_0b4.py b/switch_model/upgrade/upgrade_2_0_0b4.py new file mode 100644 index 000000000..f3048bac8 --- /dev/null +++ b/switch_model/upgrade/upgrade_2_0_0b4.py @@ -0,0 +1,58 @@ +# Copyright (c) 2015-2017 The Switch Authors. All rights reserved. +# Licensed under the Apache License, Version 2.0, which is in the LICENSE file. + +""" +Upgrade input directories from 2.0.0b2 to 2.0.0b4. (There were no changes for 2.0.0b3) +Changes are: +* rename 'project' column to 'GENERATION_PROJECT' in 'gen_inc_heat_rates.tab' file. +""" + +import os, shutil, argparse +import pandas +import switch_model.upgrade + +upgrades_from = '2.0.0b2' +upgrades_to = '2.0.0b4' + +def upgrade_input_dir(inputs_dir): + """ + Upgrade an input directory. 
+ """ + + def rename_file(old_name, new_name, optional_file=True): + old_path = os.path.join(inputs_dir, old_name) + new_path = os.path.join(inputs_dir, new_name) + if optional_file and not os.path.isfile(old_path): + return + shutil.move(old_path, new_path) + + def rename_column(file_name, old_col_name, new_col_name, optional_file=True): + path = os.path.join(inputs_dir, file_name) + if optional_file and not os.path.isfile(path): + return + df = pandas.read_csv(path, na_values=['.'], sep='\t') + df.rename(columns={old_col_name: new_col_name}, inplace=True) + df.to_csv(path, sep='\t', na_rep='.', index=False) + + old_new_column_names_in_file = { + 'gen_inc_heat_rates.tab': [('project', 'GENERATION_PROJECT')] + } + + for fname, old_new_pairs in old_new_column_names_in_file.iteritems(): + for old, new in old_new_pairs: + rename_column(fname, old_col_name=old, new_col_name=new) + + # merge trans_optional_params.tab with transmission_lines.tab + trans_lines_path = os.path.join(inputs_dir, 'transmission_lines.tab') + trans_opt_path = os.path.join(inputs_dir, 'trans_optional_params.tab') + if os.path.isfile(trans_lines_path): + trans_lines = pandas.read_csv(trans_lines_path, na_values=['.'], sep='\t') + if os.path.isfile(trans_opt_path): + trans_opt = pandas.read_csv(trans_opt_path, na_values=['.'], sep='\t') + trans_lines = trans_lines.merge(trans_opt, on='TRANSMISSION_LINE', how='left') + trans_lines.to_csv(trans_lines_path, sep='\t', na_rep='.', index=False) + if os.path.isfile(trans_opt_path): + os.remove(trans_opt_path) + + # Write a new version text file. + switch_model.upgrade._write_input_version(inputs_dir, upgrades_to) diff --git a/switch_model/utilities.py b/switch_model/utilities.py index a7364fa5f..0f5374d51 100644 --- a/switch_model/utilities.py +++ b/switch_model/utilities.py @@ -5,12 +5,7 @@ Utility functions for SWITCH-pyomo. """ -import os -import types -import importlib -import re -import sys -import argparse +import os, types, importlib, re, sys, argparse, time import __main__ as main from pyomo.environ import * import pyomo.opt @@ -36,7 +31,7 @@ def create_model(module_list=None, args=sys.argv[1:]): This is implemented as calling the following functions for each module that has them defined: - + define_dynamic_lists(model): Add lists to the model that other modules can register with. Used for power balance equations, cost components of the objective function, etc. define_components(model): Add components to the model object (parameters, sets, decision variables, expressions, and/or constraints). Also register with relevant dynamic_lists. - + define_dynamic_components(model): Add dynamic components to the model that depend on the contents of dynamic lists. Power balance constraints and the objective function are defined in this manner. - + See financials and balancing.load_zones for examples of dynamic definitions. - + All modules can request access to command line parameters and set their default values for those options. If this codebase is being used more like a library than a stand-alone executable, this behavior can cause problems. 
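To make the trans_optional_params merge in the upgrade script above concrete, here is the same pandas operation on tiny in-memory tables; the line names and values are invented:

import pandas

trans_lines = pandas.DataFrame({
    'TRANSMISSION_LINE': ['oahu_maui', 'maui_hawaii'],
    'trans_length_km': [150, 120]})
trans_opt = pandas.DataFrame({
    'TRANSMISSION_LINE': ['oahu_maui'],
    'trans_derating_factor': [0.9]})

# left join keeps every transmission line, with or without optional params
merged = trans_lines.merge(trans_opt, on='TRANSMISSION_LINE', how='left')
# 'maui_hawaii' gets NaN for trans_derating_factor; to_csv(na_rep='.') writes
# that as '.', the marker the input loaders treat as missing/default
print(merged)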
@@ -86,7 +81,7 @@ def create_model(module_list=None, args=sys.argv[1:]): if hasattr(module, 'define_arguments'): module.define_arguments(argparser) model.options = argparser.parse_args(args) - + # Define model components for module in model.get_modules(): if hasattr(module, 'define_dynamic_lists'): @@ -107,33 +102,63 @@ def get_modules(model): yield sys.modules[m] -def load_inputs(model, inputs_dir=None, attachDataPortal=True): +def make_iterable(item): + """Return an iterable for the one or more items passed.""" + if isinstance(item, basestring): + i = iter([item]) + else: + try: + # check if it's iterable + i = iter(item) + except TypeError: + i = iter([item]) + return i + +class StepTimer(object): + """ + Keep track of elapsed time for steps of a process. + Use timer = StepTimer() to create a timer, then retrieve elapsed time and/or + reset the timer at each step by calling timer.step_time() """ + def __init__(self): + self.start_time = time.time() + def step_time(self): + """ + Reset timer to current time and return time elapsed since last step. + """ + last_start = self.start_time + self.start_time = now = time.time() + return now - last_start +def load_inputs(model, inputs_dir=None, attach_data_portal=True): + """ Load input data for an AbstractModel using the modules in the given list and return a model instance. This is implemented as calling the load_inputs() function of each module, if the module has that function. - """ if inputs_dir is None: inputs_dir = getattr(model.options, "inputs_dir", "inputs") # Load data; add a fancier load function to the data portal + timer = StepTimer() data = DataPortal(model=model) data.load_aug = types.MethodType(load_aug, data) for module in model.get_modules(): if hasattr(module, 'load_inputs'): module.load_inputs(model, data, inputs_dir) + if model.options.verbose: + print "Data read in {:.2f} s.\n".format(timer.step_time()) - # At some point, pyomo deprecated 'create' in favor of - # 'create_instance'. Determine which option is available - # and use that. + # At some point, pyomo deprecated 'create' in favor of 'create_instance'. + # Determine which option is available and use that. if hasattr(model, 'create_instance'): instance = model.create_instance(data) else: instance = model.create(data) + if model.options.verbose: + print "Instance created from data in {:.2f} s.\n".format(timer.step_time()) - if attachDataPortal: + if attach_data_portal: instance.DataPortal = data return instance @@ -145,52 +170,46 @@ def save_inputs_as_dat(model, instance, save_path="inputs/complete_inputs.dat", tools that have not been fully integrated with DataPortal. 
SYNOPSIS: save_inputs_as_dat(model, instance, save_path) - - """ # helper function to convert values to strings, # putting quotes around values that start as strings quote_str = lambda v: '"{}"'.format(v) if isinstance(v, basestring) else '{}'.format(str(v)) - + # helper function to create delimited lists from single items or iterables of any data type + from switch_model.reporting import make_iterable + join_space = lambda items: ' '.join(map(str, make_iterable(items))) # space-separated list + join_comma = lambda items: ','.join(map(str, make_iterable(items))) # comma-separated list + with open(save_path, "w") as f: for component_name in instance.DataPortal.data(): if component_name in exclude: - continue # don't write data for components in exclude list + continue # don't write data for components in exclude list # (they're in scenario-specific files) component = getattr(model, component_name) comp_class = type(component).__name__ component_data = instance.DataPortal.data(name=component_name) if comp_class == 'SimpleSet' or comp_class == 'OrderedSimpleSet': - f.write("set " + component_name + " := ") - f.write(' '.join(map(str, component_data))) # space-separated list - f.write(";\n") + f.write( + "set {} := {};\n" + .format(component_name, join_space(component_data)) + ) elif comp_class == 'IndexedParam': - if len(component_data) > 0: # omit components for which no data were provided - f.write("param " + component_name + " := ") - if component.index_set().dimen == 1: - f.write(' '.join(str(key) + " " + quote_str(value) - for key,value in component_data.iteritems())) - else: - f.write("\n") - for key,value in (sorted(component_data.iteritems()) - if sorted_output - else component_data.iteritems()): - f.write(" " + - ' '.join(map(str, key)) + " " + - quote_str(value) + "\n") + if component_data: # omit components for which no data were provided + f.write("param {} := \n".format(component_name)) + for key, value in ( + sorted(iteritems(component_data)) + if sorted_output + else iteritems(component_data) + ): + f.write(" {} {}\n".format(join_space(key), quote_str(value))) f.write(";\n") elif comp_class == 'SimpleParam': - f.write("param " + component_name + " := " + str(component_data) + ";\n") + f.write("param {} := {};\n".format(component_name, component_data)) elif comp_class == 'IndexedSet': - # raise RuntimeError( - # "Error with IndexedSet {}. Support for .dat export is not tested.". - # format(component_name)) - # print "Warning: exporting IndexedSet {}, but code has not been tested.".format( - # component_name) - for key in component_data: # note: key is always a tuple - f.write("set " + component_name + "[" + ",".join(map(str, key)) + "] := ") - f.write(' '.join(map(str, component_data[key]))) # space-separated list - f.write(";\n") + for key, vals in iteritems(component_data): + f.write( + "set {}[{}] := {};\n" + .format(component_name, join_comma(key), join_space(vals)) + ) else: raise ValueError( "Error! Component type {} not recognized for model element '{}'.". @@ -207,18 +226,18 @@ def pre_solve(instance, outputs_dir=None): def post_solve(instance, outputs_dir=None): """ - Call post-solve function (if present) in all modules used to compose this model. + Call post-solve function (if present) in all modules used to compose this model. This function can be used to report or save results from the solved model. 
""" if outputs_dir is None: outputs_dir = getattr(instance.options, "outputs_dir", "outputs") if not os.path.exists(outputs_dir): os.makedirs(outputs_dir) - + # TODO: implement a check to call post solve functions only if # solver termination condition is not 'infeasible' or 'unknown' # (the latter may occur when there are problems with licenses, etc) - + for module in instance.get_modules(): if hasattr(module, 'post_solve'): module.post_solve(instance, outputs_dir) @@ -273,16 +292,12 @@ def _add_min_data_check(model): def has_discrete_variables(model): - for variable in model.component_objects(Var, active=True): - if variable.is_indexed(): - for v in variable.itervalues(): - if v.is_binary() or v.is_integer(): - return True - else: - if v.is_binary() or v.is_integer(): - return True - return False - + all_elements = lambda v: v.itervalues() if v.is_indexed() else [v] + return any( + v.is_binary() or v.is_integer() + for variable in model.component_objects(Var, active=True) + for v in all_elements(variable) + ) def check_mandatory_components(model, *mandatory_model_components): """ @@ -363,17 +378,26 @@ def load_aug(switch_data, optional=False, auto_select=False, The name load_aug() is not great and may be changed. """ + # TODO: + # Allow user to specify filename when defining parameters and sets. + # Also allow user to specify the name(s) of the column(s) in each set. + # Then use those automatically to pull data from the right file (and to + # write correct index column names in the generic output files). + # This will simplify code and ease comprehension (user can see + # immediately where the data come from for each component). This can + # also support auto-documenting of parameters and input files. + path = kwds['filename'] # Skip if the file is missing if optional and not os.path.isfile(path): return # If this is a .dat file, then skip the rest of this fancy business; we'll - # only check if the file is missing and optional for .dat files. + # only check if the file is missing and optional for .tab files. filename, extension = os.path.splitext(path) if extension == '.dat': switch_data.load(**kwds) return - + # copy the optional_params to avoid side-effects when the list is altered below optional_params=list(optional_params) # Parse header and first row @@ -459,6 +483,7 @@ def load_aug(switch_data, optional=False, auto_select=False, for (i, p_i) in del_items: del kwds['select'][i] del kwds['param'][p_i] + if optional and file_has_no_data_rows: # Skip the file. Note that we are only doing this after having # validated the file's column headings. @@ -468,10 +493,10 @@ def load_aug(switch_data, optional=False, auto_select=False, switch_data.load(**kwds) -# Define an argument parser that accepts the allow_abbrev flag to +# Define an argument parser that accepts the allow_abbrev flag to # prevent partial matches, even on versions of Python before 3.5. 
@@ -468,10 +493,10 @@
     switch_data.load(**kwds)
 
 
-# Define an argument parser that accepts the allow_abbrev flag to 
+# Define an argument parser that accepts the allow_abbrev flag to
 # prevent partial matches, even on versions of Python before 3.5.
 # See https://bugs.python.org/issue14910
-# This is needed because the parser may sometimes be called with only a subset 
+# This is needed because the parser may sometimes be called with only a subset
 # of the eventual argument list (e.g., to parse module-related arguments before
 # loading the modules and adding their arguments to the list), and without this
 # flag, the parser could match arguments that are meant to be used later
@@ -481,7 +506,7 @@
 if sys.version_info >= (3, 5):
     _ArgumentParser = argparse.ArgumentParser
 else:
-    # patch ArgumentParser to accept the allow_abbrev flag 
+    # patch ArgumentParser to accept the allow_abbrev flag
     # (works on Python 2.7 and maybe others)
     class _ArgumentParser(argparse.ArgumentParser):
         def __init__(self, *args, **kwargs):
@@ -519,7 +544,7 @@ class Logging:
     def __init__(self, logs_dir):
         # Make logs directory if class is initialized
         if not os.path.exists(logs_dir):
-            os.mkdir(logs_dir)
+            os.makedirs(logs_dir)
 
         # Assign sys.stdout and a log file as locations to write to
         self.terminal = sys.stdout
@@ -542,3 +567,11 @@ def flush(self):
         self.terminal.flush()
         self.log_file.flush()
+
+def iteritems(obj):
+    """ Iterator of key, value pairs for obj;
+    equivalent to obj.items() on Python 3+ and obj.iteritems() on Python 2 """
+    try:
+        return obj.iteritems()
+    except AttributeError: # Python 3+
+        return obj.items()
diff --git a/switch_model/version.py b/switch_model/version.py
index 228d73dad..3d80c09b1 100644
--- a/switch_model/version.py
+++ b/switch_model/version.py
@@ -2,8 +2,8 @@
 # Licensed under the Apache License, Version 2.0, which is in the LICENSE file.
 """
 This file should only include the version. Do not import any packages or
-modules in here because this file needs to be executed before SWITCH is
+modules here because this file needs to be executed before SWITCH is
 installed and executed in environments that don't have any dependencies
 installed.
 """
-__version__='2.0.0b3'
\ No newline at end of file
+__version__='2.0.0'
diff --git a/tests/upgrade_test.py b/tests/upgrade_test.py
index dced11df1..7ca609673 100644
--- a/tests/upgrade_test.py
+++ b/tests/upgrade_test.py
@@ -62,7 +62,8 @@ def test_upgrade():
                                 '--outputs-dir', upgrade_dir_outputs])
         total_cost = read_file(os.path.join(upgrade_dir_outputs, 'total_cost.txt'))
     finally:
-        sys.path.remove(upgrade_dir)
+        if upgrade_dir in sys.path: # code above may have failed before appending
+            sys.path.remove(upgrade_dir)
     _remove_temp_dir(temp_dir)
     expectation_file = get_expectation_path(example_dir)
     if UPDATE_EXPECTATIONS:
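Illustration (not part of the patch): the guard added to test_upgrade() follows the usual try/finally rule that cleanup should only undo what actually happened. In miniature, with a hypothetical path:

    import sys

    example_dir = '/tmp/upgrade_example'  # hypothetical path, for illustration
    try:
        sys.path.append(example_dir)
        # ... work that may raise before or after the append ...
    finally:
        if example_dir in sys.path:  # same guard as the fix above
            sys.path.remove(example_dir)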