Nomopyomo (#116)

* play around, add new nomopyomo feature
* fix constraints
* fix constraints
* set solver to cbc due to error with gurobi900
* update environments
* shift lv and lc constraints to prepare_network
* start modifying opt constraints
* correct BAU
* correct environment
* fix import
* fix SAFE constraint
* fix battery charger ratio constraint
* code cleaning
* restructure solve scripts
* fix CCL constraint
* adjust doc
* solve_network: update doc string
* revert unwanted changes
* update environment.yaml
* update environment.yaml II
* force conda update, revert last commit
* revert last change, use other channel priority
* remove trace_solve_network
* add skip_iterating and track_iterations options to config file
* revert last commit, fix environment to current pypsa master
* line break, trigger CI with updated pypsa
* nomopyomo: PR review

Co-authored-by: Fabian Neumann <fabian.neumann@outlook.de>

This commit is contained in:
parent ef9e64d457
commit 1ecca802f3
2  .gitignore (vendored)
@@ -18,3 +18,5 @@ gurobi.log
 doc/_build
 
 config.yaml
+
+dconf
.travis.yml

@@ -1,4 +1,4 @@
 os:
 - windows
 - linux
 - osx

@@ -9,11 +9,11 @@ before_install:
   # install conda
   - wget https://raw.githubusercontent.com/trichter/conda4travis/latest/conda4travis.sh -O conda4travis.sh
   - source conda4travis.sh
 
   # install conda environment
   - conda env create -f ./environment.yaml
   - conda activate pypsa-eur
 
   # install open-source solver
   - conda install -c conda-forge ipopt glpk
 
Snakefile

@@ -297,15 +297,6 @@ rule solve_network:
     # group: "solve" # with group, threads is ignored https://bitbucket.org/snakemake/snakemake/issues/971/group-job-description-does-not-contain
     script: "scripts/solve_network.py"
 
-rule trace_solve_network:
-    input: "networks/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"
-    output: "results/networks/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}_trace.nc"
-    shadow: "shallow"
-    log: python="logs/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}_python_trace.log",
-    threads: 4
-    resources: mem=memory
-    script: "scripts/trace_solve_network.py"
-
 rule solve_operations_network:
     input:
         unprepared="networks/{network}_s{simpl}_{clusters}_ec.nc",
config.default.yaml

@@ -184,6 +184,8 @@ solving:
     min_iterations: 3
     max_iterations: 5
     clip_p_max_pu: 0.01
+    skip_iterations: false
+    track_iterations: false
     #nhours: 10
   solver:
     name: gurobi
config.tutorial.yaml

@@ -161,6 +161,8 @@ solving:
     min_iterations: 1
     max_iterations: 1
     clip_p_max_pu: 0.01
+    skip_iterations: false
+    track_iterations: false
     #nhours: 10
   solver:
     name: cbc
doc/configtables/solving.csv

@@ -5,4 +5,6 @@ noisy_costs,bool,"{'true','false'}","Add random noise to marginal cost of genera
 min_iterations,--,int,"Minimum number of solving iterations in between which resistance and reactance (``x/r``) are updated for branches according to ``s_nom_opt`` of the previous run."
 max_iterations,--,int,"Maximum number of solving iterations in between which resistance and reactance (``x/r``) are updated for branches according to ``s_nom_opt`` of the previous run."
 nhours,--,int,"Specifies the :math:`n` first snapshots to take into account. Must be less than the total number of snapshots. Rather recommended only for debugging."
 clip_p_max_pu,p.u.,float,"To avoid too small values in the renewables' per-unit availability time series, values below this threshold are set to zero."
+skip_iterations,bool,"{'true','false'}","Skip iterating, do not update impedances of branches."
+track_iterations,bool,"{'true','false'}","Flag whether to store the intermediate branch capacities and objective function values for each iteration in ``network.lines['s_nom_opt_X']`` (where ``X`` labels the iteration)."
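Taken together, ``min_iterations``/``max_iterations`` bound the impedance-updating loop, while the two new flags either bypass that loop or make it record intermediate results. Below is a minimal sketch, not the repository code, of how these options plausibly select the solve path, assuming the pypsa master pinned in ``environment.yaml`` (``run_solve`` is a hypothetical helper; note also the spelling mismatch in this commit: the config files say ``skip_iterations`` while ``solve_network.py`` reads ``skip_iterating``):

    from pypsa.linopf import network_lopf, ilopf

    def run_solve(n, solver_name, solver_options, solve_opts):
        if solve_opts.get('skip_iterations', False):
            # single LOPF; branch impedances are never updated
            network_lopf(n, solver_name=solver_name, solver_options=solver_options)
        else:
            # re-solve until line capacities converge, updating x/r from s_nom_opt;
            # track_iterations stores each step in n.lines['s_nom_opt_X']
            ilopf(n, solver_name=solver_name, solver_options=solver_options,
                  min_iterations=solve_opts.get('min_iterations', 1),
                  max_iterations=solve_opts.get('max_iterations', 6),
                  track_iterations=solve_opts.get('track_iterations', False))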
doc/release_notes.rst

@@ -2,6 +2,12 @@
 Release Notes
 ##########################################
 
+PyPSA-Eur 0.2.0 (TBD)
+==================================
+
+* The optimization is now performed using the ``pyomo=False`` setting in the :func:`pypsa.lopf.network_lopf`. This speeds up the solving process significantly and consumes much less memory. The inclusion of additional constraints was adjusted to the new implementation. They are all passed to the :func:`network_lopf` function via the ``extra_functionality`` argument. The rule ``trace_solve_network`` was integrated into the rule :mod:`solve_network` and can be activated via configuration with ``solving: options: track_iterations: true``. The charging and discharging capacities of batteries modelled as store-link combination are now coupled (`#116 <https://github.com/PyPSA/pypsa-eur/pull/116>`_).
+
+
 PyPSA-Eur 0.1.0 (9th January 2020)
 ==================================
 
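The constraint mechanics behind this release note: instead of mutating a pyomo model, custom constraints are now linear expressions over solver variables, registered inside an ``extra_functionality`` callback. A hedged sketch, with the carrier, threshold and file name invented for illustration:

    import pypsa
    from pypsa.linopf import get_var, linexpr, define_constraints

    def extra_functionality(n, snapshots):
        # hypothetical example: require at least 10 GW of extendable solar in total
        solar_i = n.generators.query("carrier == 'solar' and p_nom_extendable").index
        lhs = linexpr((1, get_var(n, 'Generator', 'p_nom')[solar_i])).sum()
        define_constraints(n, lhs, '>=', 10e3, 'Generator', 'min_solar')

    n = pypsa.Network("elec_s_5.nc")  # placeholder file name
    n.lopf(pyomo=False, solver_name='cbc', extra_functionality=extra_functionality)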
doc/solving.rst

@@ -2,11 +2,10 @@
 Solving Networks
 ##########################################
 
-After generating and simplifying the networks they can be solved through the rule :mod:`solve_network` by using the collection rule :mod:`solve_all_elec_networks`. Moreover, networks can be solved for another focus with the derivative rules :mod:`trace_solve_network` to log changes during iterations and :mod:`solve_operations_network` for dispatch-only analyses on an already solved network.
+After generating and simplifying the networks they can be solved through the rule :mod:`solve_network` by using the collection rule :mod:`solve_all_elec_networks`. Moreover, networks can be solved for another focus with the derivative rule :mod:`solve_operations_network` for dispatch-only analyses on an already solved network.
 
 .. toctree::
    :caption: Overview
 
    solving/solve_network
-   solving/trace_solve_network
    solving/solve_operations_network
doc/solving/trace_solve_network.rst (deleted)

@@ -1,34 +0,0 @@
-.. _trace_solve:
-
-Rule ``trace_solve_network``
-===============================
-
-.. graphviz::
-    :align: center
-
-    digraph snakemake_dag {
-        graph [bgcolor=white,
-               margin=0,
-               size="8,5"
-              ];
-        node [fontname=sans,
-              fontsize=10,
-              penwidth=2,
-              shape=box,
-              style=rounded
-             ];
-        edge [color=grey,
-              penwidth=2
-             ];
-        0 [color="0.17 0.6 0.85",
-           fillcolor=gray,
-           label=trace_solve_network,
-           style=filled];
-        1 [color="0.58 0.6 0.85",
-           label=prepare_network];
-        1 -> 0;
-    }
-
-
-
-.. automodule:: trace_solve_network
3  environment.docs.yaml (Normal file → Executable file)
@@ -5,7 +5,7 @@ channels:
 dependencies:
   #- python
   - pip
-  - pypsa>=0.16
+  # - pypsa>=0.16 # until PyPSA/a3c0991 released
   - atlite
 
   # Dependencies of the workflow itself

@@ -47,6 +47,7 @@ dependencies:
     - git+https://github.com/FRESNA/vresutils.git#egg=vresutils
     - git+https://github.com/PyPSA/glaes.git#egg=glaes
    - git+https://github.com/PyPSA/geokit.git#egg=geokit
+    - git+https://github.com/PyPSA/pypsa.git#egg=pypsa # until PyPSA/a3c0991 released
     - cdsapi
     - sphinx
     - sphinx_rtd_theme
environment.yaml

@@ -1,12 +1,13 @@
 name: pypsa-eur
 channels:
+  - defaults
   - conda-forge
   - bioconda
 dependencies:
   - python
   - pip
 
-  - pypsa>=0.16
+  # - pypsa>=0.16 # until PyPSA/a3c0991 released
   - atlite
 
   # Dependencies of the workflow itself

@@ -33,18 +34,19 @@ dependencies:
   # environment by calling ipython
   - ipython
 
-  # GIS dependencies have to come all from conda-forge
-  - conda-forge::cartopy
-  - conda-forge::fiona
-  - conda-forge::proj
-  - conda-forge::pyshp
-  - conda-forge::geopandas
-  - conda-forge::rasterio
-  - conda-forge::shapely
-  - conda-forge::libgdal
+  # GIS dependencies:
+  - cartopy
+  - fiona
+  - proj
+  - pyshp
+  - geopandas
+  - rasterio
+  - shapely
+  - libgdal
 
   - pip:
     - git+https://github.com/FRESNA/vresutils.git#egg=vresutils
     - git+https://github.com/PyPSA/glaes.git#egg=glaes
     - git+https://github.com/PyPSA/geokit.git#egg=geokit
+    - git+https://github.com/PyPSA/pypsa.git#egg=pypsa # until PyPSA/a3c0991 released
     - cdsapi
0  scripts/plot_network.py (Normal file → Executable file)
12 scripts/prepare_network.py (Normal file → Executable file)
@@ -122,8 +122,10 @@ def set_line_cost_limit(n, lc, Nyears=1.):
     n.links.loc[links_dc_b, 'p_nom_extendable'] = True
 
     if lc != 'opt':
-        n.line_cost_limit = float(lc) * total_line_cost
+        line_cost = float(lc) * total_line_cost
+        n.add('GlobalConstraint', 'lc_limit',
+              type='transmission_expansion_cost_limit',
+              sense='<=', constant=line_cost, carrier_attribute='AC, DC')
     return n
 
 def set_line_volume_limit(n, lv, Nyears=1.):

@@ -156,8 +158,10 @@ def set_line_volume_limit(n, lv, Nyears=1.):
     n.links.loc[links_dc_b, 'p_nom_extendable'] = True
 
     if lv != 'opt':
-        n.line_volume_limit = float(lv) * total_line_volume
+        line_volume = float(lv) * total_line_volume
+        n.add('GlobalConstraint', 'lv_limit',
+              type='transmission_volume_expansion_limit',
+              sense='<=', constant=line_volume, carrier_attribute='AC, DC')
     return n
 
 def average_every_nhours(n, offset):
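The practical difference of this change: the limits are no longer loose network attributes (``n.line_volume_limit``) that get lost on export, but ``GlobalConstraint`` components stored with the network. A sketch of inspecting them (path hypothetical; the ``mu`` column is an assumption that PyPSA writes the constraint's shadow price there after a ``pyomo=False`` solve):

    import pypsa

    n = pypsa.Network("networks/elec_s_5_ec_lv1.25_Co2L-24H.nc")  # placeholder path
    # the limit set by set_line_volume_limit shows up as a component row
    print(n.global_constraints.loc['lv_limit', ['type', 'sense', 'constant']])
    # after solving, its dual value should appear in n.global_constraints.mu['lv_limit']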
346 scripts/solve_network.py (Normal file → Executable file)
@@ -20,6 +20,8 @@ Relevant Settings
             nhours:
             min_iterations:
             max_iterations:
+            skip_iterating:
+            track_iterations:
         solver:
             name:
             (solveroptions):
@@ -51,7 +53,8 @@ Total annual system costs are minimised with PyPSA. The full formulation of the
 linear optimal power flow (plus investment planning
 is provided in the
 `documentation of PyPSA <https://pypsa.readthedocs.io/en/latest/optimal_power_flow.html#linear-optimal-power-flow>`_.
-Additionaly some extra constraints from :mod:`prepare_network` are added.
+The optimization is based on the ``pyomo=False`` setting in the :func:`network.lopf` and :func:`pypsa.linopf.ilopf` function.
+Additionally, some extra constraints specified in :mod:`prepare_network` are added.
 
 Solving the network in multiple iterations is motivated through the dependence of transmission line capacities and impedances.
 As lines are expanded their electrical parameters change, which renders the optimisation bilinear even if the power flow
@@ -83,27 +86,14 @@ from _helpers import configure_logging
 
 import numpy as np
 import pandas as pd
-import gc
 
 import pypsa
-from pypsa.descriptors import free_output_series_dataframes
-# Suppress logging of the slack bus choices
-pypsa.pf.logger.setLevel(logging.WARNING)
+from pypsa.linopf import (get_var, define_constraints, linexpr, join_exprs,
+                          network_lopf, ilopf)
+from pathlib import Path
 
 from vresutils.benchmark import memory_logger
 
-def patch_pyomo_tmpdir(tmpdir):
-    # PYOMO should write its lp files into tmp here
-    import os
-    if not os.path.isdir(tmpdir):
-        os.mkdir(tmpdir)
-    from pyutilib.services import TempfileManager
-    TempfileManager.tempdir = tmpdir
-
-
-def prepare_network(n, solve_opts=None):
-    if solve_opts is None:
-        solve_opts = snakemake.config['solving']['options']
-
+def prepare_network(n, solve_opts):
     if 'clip_p_max_pu' in solve_opts:
         for df in (n.generators_t.p_max_pu, n.storage_units_t.inflow):
@@ -120,17 +110,19 @@ def prepare_network(n, solve_opts=None):
             # willingness to pay
             # http://journal.frontiersin.org/article/10.3389/fenrg.2015.00055/full
             p_nom=1e9 # kW
             )
 
     if solve_opts.get('noisy_costs'):
         for t in n.iterate_components(n.one_port_components):
             #if 'capital_cost' in t.df:
             #    t.df['capital_cost'] += 1e1 + 2.*(np.random.random(len(t.df)) - 0.5)
             if 'marginal_cost' in t.df:
-                t.df['marginal_cost'] += 1e-2 + 2e-3*(np.random.random(len(t.df)) - 0.5)
+                t.df['marginal_cost'] += (1e-2 + 2e-3 *
+                                          (np.random.random(len(t.df)) - 0.5))
 
         for t in n.iterate_components(['Line', 'Link']):
-            t.df['capital_cost'] += (1e-1 + 2e-2*(np.random.random(len(t.df)) - 0.5)) * t.df['length']
+            t.df['capital_cost'] += (1e-1 +
+                2e-2*(np.random.random(len(t.df)) - 0.5)) * t.df['length']
 
     if solve_opts.get('nhours'):
         nhours = solve_opts['nhours']
@@ -139,253 +131,125 @@
 
     return n
 
-def add_opts_constraints(n, opts=None):
-    if opts is None:
-        opts = snakemake.wildcards.opts.split('-')
-
-    if 'BAU' in opts:
-        mincaps = snakemake.config['electricity']['BAU_mincapacities']
-        def bau_mincapacities_rule(model, carrier):
-            gens = n.generators.index[n.generators.p_nom_extendable & (n.generators.carrier == carrier)]
-            return sum(model.generator_p_nom[gen] for gen in gens) >= mincaps[carrier]
-        n.model.bau_mincapacities = pypsa.opt.Constraint(list(mincaps), rule=bau_mincapacities_rule)
+def add_CCL_constraints(n, config):
+    agg_p_nom_limits = config['electricity'].get('agg_p_nom_limits')
 
-    if 'SAFE' in opts:
-        peakdemand = (1. + snakemake.config['electricity']['SAFE_reservemargin']) * n.loads_t.p_set.sum(axis=1).max()
-        conv_techs = snakemake.config['plotting']['conv_techs']
-        exist_conv_caps = n.generators.loc[n.generators.carrier.isin(conv_techs) & ~n.generators.p_nom_extendable, 'p_nom'].sum()
-        ext_gens_i = n.generators.index[n.generators.carrier.isin(conv_techs) & n.generators.p_nom_extendable]
-        n.model.safe_peakdemand = pypsa.opt.Constraint(expr=sum(n.model.generator_p_nom[gen] for gen in ext_gens_i) >= peakdemand - exist_conv_caps)
+    try:
+        agg_p_nom_minmax = pd.read_csv(agg_p_nom_limits,
+                                       index_col=list(range(2)))
+    except IOError:
+        logger.exception("Need to specify the path to a .csv file containing "
+                         "aggregate capacity limits per country in "
+                         "config['electricity']['agg_p_nom_limit'].")
+    logger.info("Adding per carrier generation capacity constraints for "
+                "individual countries")
 
-    # Add constraints on the per-carrier capacity in each country
-    if 'CCL' in opts:
-        agg_p_nom_limits = snakemake.config['electricity'].get('agg_p_nom_limits')
+    gen_country = n.generators.bus.map(n.buses.country)
+    # cc means country and carrier
+    p_nom_per_cc = (pd.DataFrame(
+                    {'p_nom': linexpr((1, get_var(n, 'Generator', 'p_nom'))),
+                     'country': gen_country, 'carrier': n.generators.carrier})
+                    .dropna(subset=['p_nom'])
+                    .groupby(['country', 'carrier']).p_nom
+                    .apply(join_exprs))
+    minimum = agg_p_nom_minmax['min'].dropna()
+    if not minimum.empty:
+        minconstraint = define_constraints(n, p_nom_per_cc[minimum.index],
+                                           '>=', minimum, 'agg_p_nom', 'min')
+    maximum = agg_p_nom_minmax['max'].dropna()
+    if not maximum.empty:
+        maxconstraint = define_constraints(n, p_nom_per_cc[maximum.index],
+                                           '<=', maximum, 'agg_p_nom', 'max')
 
-        try:
-            agg_p_nom_minmax = pd.read_csv(agg_p_nom_limits, index_col=list(range(2)))
-        except IOError:
-            logger.exception("Need to specify the path to a .csv file containing aggregate capacity limits per country in config['electricity']['agg_p_nom_limit'].")
-
-        logger.info("Adding per carrier generation capacity constraints for individual countries")
+def add_BAU_constraints(n, config):
+    mincaps = pd.Series(config['electricity']['BAU_mincapacities'])
+    lhs = (linexpr((1, get_var(n, 'Generator', 'p_nom')))
+           .groupby(n.generators.carrier).apply(join_exprs))
+    define_constraints(n, lhs, '>=', mincaps[lhs.index], 'Carrier', 'bau_mincaps')
 
-        gen_country = n.generators.bus.map(n.buses.country)
-
-        def agg_p_nom_min_rule(model, country, carrier):
-            min = agg_p_nom_minmax.at[(country, carrier), 'min']
-            return ((sum(model.generator_p_nom[gen]
-                         for gen in n.generators.index[(gen_country == country) & (n.generators.carrier == carrier)])
-                     >= min)
-                    if np.isfinite(min) else pypsa.opt.Constraint.Skip)
-
-        def agg_p_nom_max_rule(model, country, carrier):
-            max = agg_p_nom_minmax.at[(country, carrier), 'max']
-            return ((sum(model.generator_p_nom[gen]
-                         for gen in n.generators.index[(gen_country == country) & (n.generators.carrier == carrier)])
-                     <= max)
-                    if np.isfinite(max) else pypsa.opt.Constraint.Skip)
-
-        n.model.agg_p_nom_min = pypsa.opt.Constraint(list(agg_p_nom_minmax.index), rule=agg_p_nom_min_rule)
-        n.model.agg_p_nom_max = pypsa.opt.Constraint(list(agg_p_nom_minmax.index), rule=agg_p_nom_max_rule)
+def add_SAFE_constraints(n, config):
+    peakdemand = (1. + config['electricity']['SAFE_reservemargin']) *\
+                 n.loads_t.p_set.sum(axis=1).max()
+    conv_techs = config['plotting']['conv_techs']
+    exist_conv_caps = n.generators.query('~p_nom_extendable & carrier in @conv_techs')\
+                      .p_nom.sum()
+    ext_gens_i = n.generators.query('carrier in @conv_techs & p_nom_extendable').index
+    lhs = linexpr((1, get_var(n, 'Generator', 'p_nom')[ext_gens_i])).sum()
+    rhs = peakdemand - exist_conv_caps
+    define_constraints(n, lhs, '>=', rhs, 'Safe', 'mintotalcap')
 
-def add_lv_constraint(n):
-    line_volume = getattr(n, 'line_volume_limit', None)
-    if line_volume is not None and not np.isinf(line_volume):
-        links_dc_ext_i = n.links.index[(n.links.carrier == 'DC') & n.links.p_nom_extendable] if not n.links.empty else pd.Index([])
-        n.model.line_volume_constraint = pypsa.opt.Constraint(
-            expr=((sum(n.model.passive_branch_s_nom["Line",line]*n.lines.at[line,"length"]
-                       for line in n.lines.index[n.lines.s_nom_extendable]) +
-                   sum(n.model.link_p_nom[link]*n.links.at[link,"length"]
-                       for link in links_dc_ext_i))
-                  <= line_volume)
-        )
+def add_battery_constraints(n):
+    nodes = n.buses.index[n.buses.carrier == "battery"]
+    if nodes.empty or ('Link', 'p_nom') not in n.variables.index:
+        return
+    link_p_nom = get_var(n, "Link", "p_nom")
+    lhs = linexpr((1,link_p_nom[nodes + " charger"]),
+                  (-n.links.loc[nodes + " discharger", "efficiency"].values,
+                   link_p_nom[nodes + " discharger"].values))
+    define_constraints(n, lhs, "=", 0, 'Link', 'charger_ratio')
 
-def add_lc_constraint(n):
-    line_cost = getattr(n, 'line_cost_limit', None)
-    if line_cost is not None and not np.isinf(line_cost):
-        links_dc_ext_i = n.links.index[(n.links.carrier == 'DC') & n.links.p_nom_extendable] if not n.links.empty else pd.Index([])
-        n.model.line_cost_constraint = pypsa.opt.Constraint(
-            expr=((sum(n.model.passive_branch_s_nom["Line",line]*n.lines.at[line,"capital_cost_lc"]
-                       for line in n.lines.index[n.lines.s_nom_extendable]) +
-                   sum(n.model.link_p_nom[link]*n.links.at[link,"capital_cost_lc"]
-                       for link in links_dc_ext_i))
-                  <= line_cost)
-        )
+def extra_functionality(n, snapshots):
+    """
+    Collects supplementary constraints which will be passed to ``pypsa.linopf.network_lopf``.
+    If you want to enforce additional custom constraints, this is a good location to add them.
+    The arguments ``opts`` and ``snakemake.config`` are expected to be attached to the network.
+    """
+    opts = n.opts
+    config = n.config
+    if 'BAU' in opts and n.generators.p_nom_extendable.any():
+        add_BAU_constraints(n, config)
+    if 'SAFE' in opts and n.generators.p_nom_extendable.any():
+        add_SAFE_constraints(n, config)
+    if 'CCL' in opts and n.generators.p_nom_extendable.any():
+        add_CCL_constraints(n, config)
+    add_battery_constraints(n)
 
-def add_eps_storage_constraint(n):
-    if not hasattr(n, 'epsilon'):
-        n.epsilon = 1e-5
-    fix_sus_i = n.storage_units.index[~ n.storage_units.p_nom_extendable]
-    n.model.objective.expr += sum(n.epsilon * n.model.state_of_charge[su, n.snapshots[0]] for su in fix_sus_i)
-
-def fix_branches(n, lines_s_nom=None, links_p_nom=None):
-    if lines_s_nom is not None and len(lines_s_nom) > 0:
-        for l, s_nom in lines_s_nom.iteritems():
-            n.model.passive_branch_s_nom["Line", l].fix(s_nom)
-        if isinstance(n.opt, pypsa.opf.PersistentSolver):
-            n.opt.update_var(n.model.passive_branch_s_nom)
-
-    if links_p_nom is not None and len(links_p_nom) > 0:
-        for l, p_nom in links_p_nom.iteritems():
-            n.model.link_p_nom[l].fix(p_nom)
-        if isinstance(n.opt, pypsa.opf.PersistentSolver):
-            n.opt.update_var(n.model.link_p_nom)
-
-def solve_network(n, config=None, solver_log=None, opts=None, callback=None,
-                  skip_iterating=False,
-                  extra_functionality=None, extra_functionality_args=None,
-                  extra_postprocessing=None):
-    if config is None:
-        config = snakemake.config['solving']
-    solve_opts = config['options']
-
-    solver_options = config['solver'].copy()
-    if solver_log is None:
-        solver_log = snakemake.log.solver
+def solve_network(n, config=None, solver_log=None, opts=None, **kwargs):
+    solve_opts = snakemake.config['solving']['options']
+    solver_options = config['solving']['solver'].copy()
     solver_name = solver_options.pop('name')
+    skip_iterating = solve_opts.get('skip_iterating', False)
+    track_iterations = solve_opts.get('track_iterations', False)
 
-    if extra_postprocessing is None:
-        def get_line_limit_duals(n, snapshots, duals):
-            if hasattr(n, 'line_volume_limit') and hasattr(n.model, 'line_volume_constraint'):
-                cdata = pd.Series(list(n.model.line_volume_constraint.values()),
-                                  index=list(n.model.line_volume_constraint.keys()))
-                n.line_volume_limit_dual = -cdata.map(duals).sum()
-
-            if hasattr(n, 'line_cost_limit') and hasattr(n.model, 'line_cost_constraint'):
-                cdata = pd.Series(list(n.model.line_cost_constraint.values()),
-                                  index=list(n.model.line_cost_constraint.keys()))
-                n.line_cost_limit_dual = -cdata.map(duals).sum()
-
-        extra_postprocessing = get_line_limit_duals
-
-    def run_lopf(n, allow_warning_status=False, fix_ext_lines=False):
-        free_output_series_dataframes(n)
-
-        pypsa.opf.network_lopf_build_model(n, formulation=solve_opts['formulation'])
-        add_opts_constraints(n, opts)
-        if not fix_ext_lines:
-            add_lv_constraint(n)
-            add_lc_constraint(n)
-        if extra_functionality is not None:
-            extra_functionality(n, n.snapshots, *extra_functionality_args)
-
-        pypsa.opf.network_lopf_prepare_solver(n, solver_name=solver_name)
-
-        if fix_ext_lines:
-            fix_branches(n,
-                         lines_s_nom=n.lines.loc[n.lines.s_nom_extendable, 's_nom_opt'],
-                         links_p_nom=n.links.loc[n.links.p_nom_extendable, 'p_nom_opt'])
-
-        # Firing up solve will increase memory consumption tremendously, so
-        # make sure we freed everything we can
-        gc.collect()
-        status, termination_condition = \
-            pypsa.opf.network_lopf_solve(n,
-                                         solver_logfile=solver_log,
-                                         solver_options=solver_options,
-                                         formulation=solve_opts['formulation'],
-                                         extra_postprocessing=extra_postprocessing
-                                         #free_memory={'pypsa'}
-                                         )
-
-        assert status == "ok" or allow_warning_status and status == 'warning', \
-            ("network_lopf did abort with status={} "
-             "and termination_condition={}"
-             .format(status, termination_condition))
-
-        return status, termination_condition
-
-    if not skip_iterating:
-        iteration = 0
-        lines_ext_b = n.lines.s_nom_extendable
-        if lines_ext_b.any():
-            # puh: ok, we need to iterate, since there is a relation
-            # between s/p_nom and r, x for branches.
-            msq_threshold = 0.01
-            lines = pd.DataFrame(n.lines[['r', 'x', 'type', 'num_parallel']])
-
-            lines['s_nom'] = (
-                np.sqrt(3) * n.lines['type'].map(n.line_types.i_nom) *
-                n.lines.bus0.map(n.buses.v_nom)
-            ).where(n.lines.type != '', n.lines['s_nom'])
-
-            lines_ext_typed_b = (n.lines.type != '') & lines_ext_b
-            lines_ext_untyped_b = (n.lines.type == '') & lines_ext_b
-
-            def update_line_parameters(n, zero_lines_below=10):
-                if zero_lines_below > 0:
-                    n.lines.loc[n.lines.s_nom_opt < zero_lines_below, 's_nom_opt'] = 0.
-                    n.links.loc[n.links.p_nom_opt < zero_lines_below, 'p_nom_opt'] = 0.
-
-                if lines_ext_untyped_b.any():
-                    for attr in ('r', 'x'):
-                        n.lines.loc[lines_ext_untyped_b, attr] = (
-                            lines[attr].multiply(lines['s_nom']/n.lines['s_nom_opt'])
-                        )
-
-                if lines_ext_typed_b.any():
-                    n.lines.loc[lines_ext_typed_b, 'num_parallel'] = (
-                        n.lines['s_nom_opt']/lines['s_nom']
-                    )
-                    logger.debug("lines.num_parallel={}".format(n.lines.loc[lines_ext_typed_b, 'num_parallel']))
-
-            iteration += 1
-            lines['s_nom_opt'] = lines['s_nom'] * n.lines['num_parallel'].where(n.lines.type != '', 1.)
-            status, termination_condition = run_lopf(n, allow_warning_status=True)
-            if callback is not None: callback(n, iteration, status)
-
-            def msq_diff(n):
-                lines_err = np.sqrt(((n.lines['s_nom_opt'] - lines['s_nom_opt'])**2).mean())/lines['s_nom_opt'].mean()
-                logger.info("Mean square difference after iteration {} is {}".format(iteration, lines_err))
-                return lines_err
-
-            min_iterations = solve_opts.get('min_iterations', 2)
-            max_iterations = solve_opts.get('max_iterations', 999)
-            while msq_diff(n) > msq_threshold or iteration < min_iterations:
-                if iteration >= max_iterations:
-                    logger.info("Iteration {} beyond max_iterations {}. Stopping ...".format(iteration, max_iterations))
-                    break
-
-                update_line_parameters(n)
-                lines['s_nom_opt'] = n.lines['s_nom_opt']
-                iteration += 1
-
-                status, termination_condition = run_lopf(n, allow_warning_status=True)
-                if callback is not None: callback(n, iteration, status)
-
-        update_line_parameters(n, zero_lines_below=100)
-
-        logger.info("Starting last run with fixed extendable lines")
-
-        iteration += 1
-        status, termination_condition = run_lopf(n, fix_ext_lines=True)
+    # add to network for extra_functionality
+    n.config = config
+    n.opts = opts
+
+    if skip_iterating:
+        network_lopf(n, solver_name=solver_name, solver_options=solver_options,
+                     extra_functionality=extra_functionality, **kwargs)
     else:
-        status, termination_condition = run_lopf(n, fix_ext_lines=False)
-        if callback is not None: callback(n, iteration, status)
+        ilopf(n, solver_name=solver_name, solver_options=solver_options,
+              track_iterations=track_iterations,
+              extra_functionality=extra_functionality, **kwargs)
     return n
 
 
 if __name__ == "__main__":
     if 'snakemake' not in globals():
         from _helpers import mock_snakemake
         snakemake = mock_snakemake('solve_network', network='elec', simpl='',
-                                  clusters='5', ll='copt', opts='Co2L-24H')
+                                  clusters='5', ll='copt', opts='Co2L-BAU-CCL-24H')
     configure_logging(snakemake)
 
     tmpdir = snakemake.config['solving'].get('tmpdir')
     if tmpdir is not None:
-        patch_pyomo_tmpdir(tmpdir)
+        Path(tmpdir).mkdir(parents=True, exist_ok=True)
+    opts = snakemake.wildcards.opts.split('-')
+    solve_opts = snakemake.config['solving']['options']
 
-    with memory_logger(filename=getattr(snakemake.log, 'memory', None), interval=30.) as mem:
+    with memory_logger(filename=getattr(snakemake.log, 'memory', None),
+                       interval=30.) as mem:
         n = pypsa.Network(snakemake.input[0])
-        n = prepare_network(n)
-        n = solve_network(n)
+        n = prepare_network(n, solve_opts)
+        n = solve_network(n, config=snakemake.config, solver_dir=tmpdir,
+                          solver_log=snakemake.log.solver, opts=opts)
 
         n.export_to_netcdf(snakemake.output[0])
 
     logger.info("Maximum memory usage: {}".format(mem.mem_usage))
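One detail worth spelling out from the rewrite above: ``add_battery_constraints`` couples each battery's two links so that p_nom(charger) = efficiency × p_nom(discharger), i.e. both directions of the store-link battery are sized consistently. A post-solve sanity check along the same lines (assuming the ``<bus> charger``/``<bus> discharger`` naming used by the script):

    import numpy as np

    nodes = n.buses.index[n.buses.carrier == "battery"]
    charger = n.links.loc[nodes + " charger", "p_nom_opt"].values
    eff = n.links.loc[nodes + " discharger", "efficiency"].values
    discharger = n.links.loc[nodes + " discharger", "p_nom_opt"].values
    # equality constraint from add_battery_constraints, up to solver tolerance
    assert np.allclose(charger, eff * discharger)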
scripts/solve_operations_network.py

@@ -49,31 +49,37 @@ import pypsa
 import numpy as np
 import re
 
+from pathlib import Path
 from vresutils.benchmark import memory_logger
-from solve_network import patch_pyomo_tmpdir, solve_network, prepare_network
+from solve_network import solve_network, prepare_network
 
 def set_parameters_from_optimized(n, n_optim):
     lines_typed_i = n.lines.index[n.lines.type != '']
-    n.lines.loc[lines_typed_i, 'num_parallel'] = n_optim.lines['num_parallel'].reindex(lines_typed_i, fill_value=0.)
+    n.lines.loc[lines_typed_i, 'num_parallel'] = \
+        n_optim.lines['num_parallel'].reindex(lines_typed_i, fill_value=0.)
     n.lines.loc[lines_typed_i, 's_nom'] = (
         np.sqrt(3) * n.lines['type'].map(n.line_types.i_nom) *
-        n.lines.bus0.map(n.buses.v_nom) * n.lines.num_parallel
-    )
+        n.lines.bus0.map(n.buses.v_nom) * n.lines.num_parallel)
     lines_untyped_i = n.lines.index[n.lines.type == '']
     for attr in ('s_nom', 'r', 'x'):
-        n.lines.loc[lines_untyped_i, attr] = n_optim.lines[attr].reindex(lines_untyped_i, fill_value=0.)
+        n.lines.loc[lines_untyped_i, attr] = \
+            n_optim.lines[attr].reindex(lines_untyped_i, fill_value=0.)
     n.lines['s_nom_extendable'] = False
 
     links_dc_i = n.links.index[n.links.carrier == 'DC']
-    n.links.loc[links_dc_i, 'p_nom'] = n_optim.links['p_nom_opt'].reindex(links_dc_i, fill_value=0.)
+    n.links.loc[links_dc_i, 'p_nom'] = \
+        n_optim.links['p_nom_opt'].reindex(links_dc_i, fill_value=0.)
     n.links.loc[links_dc_i, 'p_nom_extendable'] = False
 
     gen_extend_i = n.generators.index[n.generators.p_nom_extendable]
-    n.generators.loc[gen_extend_i, 'p_nom'] = n_optim.generators['p_nom_opt'].reindex(gen_extend_i, fill_value=0.)
+    n.generators.loc[gen_extend_i, 'p_nom'] = \
+        n_optim.generators['p_nom_opt'].reindex(gen_extend_i, fill_value=0.)
     n.generators.loc[gen_extend_i, 'p_nom_extendable'] = False
 
     stor_extend_i = n.storage_units.index[n.storage_units.p_nom_extendable]
-    n.storage_units.loc[stor_extend_i, 'p_nom'] = n_optim.storage_units['p_nom_opt'].reindex(stor_extend_i, fill_value=0.)
+    n.storage_units.loc[stor_extend_i, 'p_nom'] = \
+        n_optim.storage_units['p_nom_opt'].reindex(stor_extend_i, fill_value=0.)
     n.storage_units.loc[stor_extend_i, 'p_nom_extendable'] = False
 
     return n

@@ -82,27 +88,25 @@ if __name__ == "__main__":
     if 'snakemake' not in globals():
         from _helpers import mock_snakemake
         snakemake = mock_snakemake('solve_operations_network', network='elec',
-                                   simpl='', clusters='5', ll='copt', opts='Co2L-24H')
+                                   simpl='', clusters='5', ll='copt', opts='Co2L-BAU-24H')
     configure_logging(snakemake)
 
     tmpdir = snakemake.config['solving'].get('tmpdir')
     if tmpdir is not None:
-        patch_pyomo_tmpdir(tmpdir)
+        Path(tmpdir).mkdir(parents=True, exist_ok=True)
 
 
     n = pypsa.Network(snakemake.input.unprepared)
 
     n_optim = pypsa.Network(snakemake.input.optimized)
     n = set_parameters_from_optimized(n, n_optim)
     del n_optim
 
-    opts = [o
-            for o in snakemake.wildcards.opts.split('-')
-            if not re.match(r'^\d+h$', o, re.IGNORECASE)]
+    opts = snakemake.wildcards.opts.split('-')
 
     with memory_logger(filename=getattr(snakemake.log, 'memory', None), interval=30.) as mem:
         n = prepare_network(n, solve_opts=snakemake.config['solving']['options'])
-        n = solve_network(n, config=snakemake.config['solving'], solver_log=snakemake.log.solver, opts=opts)
+        n = solve_network(n, config=snakemake.config, solver_dir=tmpdir,
+                          solver_log=snakemake.log.solver, opts=opts,
+                          skip_iterating=True)
         n.export_to_netcdf(snakemake.output[0])
 
     logger.info("Maximum memory usage: {}".format(mem.mem_usage))
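Both solver scripts keep the ``mock_snakemake`` fallback, so either rule can still be stepped through interactively without invoking Snakemake. A usage sketch mirroring the ``__main__`` block (assuming it is run from the ``scripts/`` directory so that ``_helpers`` is importable):

    from _helpers import mock_snakemake

    snakemake = mock_snakemake('solve_operations_network', network='elec',
                               simpl='', clusters='5', ll='copt', opts='Co2L-BAU-24H')
    # snakemake.input, snakemake.output, snakemake.config and snakemake.wildcards
    # are now filled in as Snakemake would provide them, and the script body
    # can be executed line by line for debugging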
scripts/trace_solve_network.py (deleted)

@@ -1,81 +0,0 @@
-"""
-Iteratively solves expansion problem like the rule :mod:`solve_network`, but additionally
-records intermediate branch capacity steps and values of the objective function.
-
-Relevant Settings
------------------
-
-.. code:: yaml
-
-    solving:
-        tmpdir:
-        options:
-            formulation:
-            clip_p_max_pu:
-            load_shedding:
-            noisy_costs:
-            nhours:
-            min_iterations:
-            max_iterations:
-        solver:
-            name:
-            (solveroptions):
-
-.. seealso::
-    Documentation of the configuration file ``config.yaml`` at
-    :ref:`solving_cf`
-
-Inputs
-------
-
-- ``networks/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc``: confer :ref:`prepare`
-
-Outputs
--------
-
-- ``results/networks/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}_trace.nc``: Solved PyPSA network including optimisation results (with trace)
-
-Description
------------
-
-"""
-
-import logging
-logger = logging.getLogger(__name__)
-from _helpers import configure_logging
-
-from solve_network import patch_pyomo_tmpdir, prepare_network, solve_network
-
-import pypsa
-
-if __name__ == "__main__":
-    if 'snakemake' not in globals():
-        from _helpers import mock_snakemake
-        snakemake = mock_snakemake('trace_solve_network', network='elec', simpl='',
-                                   clusters='5', ll='copt', opts='Co2L-24H')
-    configure_logging(snakemake)
-
-    tmpdir = snakemake.config['solving'].get('tmpdir')
-    if tmpdir is not None:
-        patch_pyomo_tmpdir(tmpdir)
-
-
-    n = pypsa.Network(snakemake.input[0])
-
-    solver_log = 'solver.log'
-    config = snakemake.config['solving']
-    opts = snakemake.wildcards.opts.split('-')
-
-    def save_optimal_capacities(net, iteration, status):
-        net.lines[f"s_nom_opt_{iteration}"] = net.lines["s_nom_opt"]
-        net.links[f"p_nom_opt_{iteration}"] = net.links["p_nom_opt"]
-        setattr(net, f"status_{iteration}", status)
-        setattr(net, f"objective_{iteration}", net.objective)
-        net.iteration = iteration
-
-        net.export_to_netcdf(snakemake.output[0])
-
-    config['options']['max_iterations'] = 12
-    n = prepare_network(n, config['options'])
-    n = solve_network(n, config, solver_log, opts, save_optimal_capacities)
-    n.export_to_netcdf(snakemake.output[0])