Merge branch 'master' into simplify_to_substations

Martha Frysztacki 2021-08-26 15:22:02 +02:00 committed by GitHub
commit f30ca8a18a
18 changed files with 131 additions and 97 deletions

View File

@@ -4,5 +4,8 @@
 version: 2

-conda:
-  environment: envs/environment.docs.yaml
+python:
+  version: 3.8
+  install:
+    - requirements: doc/requirements.txt
+  system_packages: true

View File

@@ -239,7 +239,8 @@ rule simplify_network:
         network='networks/elec_s{simpl}.nc',
         regions_onshore="resources/regions_onshore_elec_s{simpl}.geojson",
         regions_offshore="resources/regions_offshore_elec_s{simpl}.geojson",
-        busmap='resources/busmap_elec_s{simpl}.csv'
+        busmap='resources/busmap_elec_s{simpl}.csv',
+        connection_costs='resources/connection_costs_s{simpl}.csv'
     log: "logs/simplify_network/elec_s{simpl}.log"
     benchmark: "benchmarks/simplify_network/elec_s{simpl}"
     threads: 1
@@ -360,7 +361,6 @@ def input_make_summary(w):
         ll = w.ll
     return ([COSTS] +
             expand("results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
-                   network=w.network,
                    ll=ll,
                    **{k: config["scenario"][k] if getattr(w, k) == "all" else getattr(w, k)
                       for k in ["simpl", "clusters", "opts"]}))

View File

@@ -50,7 +50,8 @@ An exemplary dependency graph (starting from the simplification rules) then look
 .. literalinclude:: ../config.default.yaml
    :language: yaml
-   :lines: 14-18
+   :start-at: scenario:
+   :end-before: countries:

 .. csv-table::
    :header-rows: 1
@@ -66,7 +67,8 @@ Specifies the temporal range to build an energy system model for as arguments to
 .. literalinclude:: ../config.default.yaml
    :language: yaml
-   :lines: 22-25
+   :start-at: snapshots:
+   :end-before: enable:

 .. csv-table::
    :header-rows: 1
@@ -80,7 +82,8 @@ Specifies the temporal range to build an energy system model for as arguments to
 .. literalinclude:: ../config.default.yaml
    :language: yaml
-   :lines: 36-60
+   :start-at: electricity:
+   :end-before: atlite:

 .. csv-table::
    :header-rows: 1
@@ -117,7 +120,8 @@ Define and specify the ``atlite.Cutout`` used for calculating renewable potentia
 .. literalinclude:: ../config.default.yaml
    :language: yaml
-   :lines: 77-94
+   :start-at: renewable:
+   :end-before: offwind-ac:

 .. csv-table::
    :header-rows: 1
@@ -129,7 +133,8 @@ Define and specify the ``atlite.Cutout`` used for calculating renewable potentia
 .. literalinclude:: ../config.default.yaml
    :language: yaml
-   :lines: 77,95-107
+   :start-at: offwind-ac:
+   :end-before: offwind-dc:

 .. csv-table::
    :header-rows: 1
@@ -141,7 +146,8 @@ Define and specify the ``atlite.Cutout`` used for calculating renewable potentia
 .. literalinclude:: ../config.default.yaml
    :language: yaml
-   :lines: 77,108-121
+   :start-at: offwind-dc:
+   :end-before: solar:

 .. csv-table::
    :header-rows: 1
@@ -153,7 +159,8 @@ Define and specify the ``atlite.Cutout`` used for calculating renewable potentia
 .. literalinclude:: ../config.default.yaml
    :language: yaml
-   :lines: 77,122-141
+   :start-at: solar:
+   :end-before: hydro:

 .. csv-table::
    :header-rows: 1
@@ -165,7 +172,8 @@ Define and specify the ``atlite.Cutout`` used for calculating renewable potentia
 .. literalinclude:: ../config.default.yaml
    :language: yaml
-   :lines: 77,142-147
+   :start-at: hydro:
+   :end-before: lines:

 .. csv-table::
    :header-rows: 1
@@ -179,7 +187,8 @@ Define and specify the ``atlite.Cutout`` used for calculating renewable potentia
 .. literalinclude:: ../config.default.yaml
    :language: yaml
-   :lines: 149-157
+   :start-at: lines:
+   :end-before: links:

 .. csv-table::
    :header-rows: 1
@@ -193,7 +202,8 @@ Define and specify the ``atlite.Cutout`` used for calculating renewable potentia
 .. literalinclude:: ../config.default.yaml
    :language: yaml
-   :lines: 159-163
+   :start-at: links:
+   :end-before: transformers:

 .. csv-table::
    :header-rows: 1
@@ -207,7 +217,8 @@ Define and specify the ``atlite.Cutout`` used for calculating renewable potentia
 .. literalinclude:: ../config.default.yaml
    :language: yaml
-   :lines: 165-168
+   :start-at: transformers:
+   :end-before: load:

 .. csv-table::
    :header-rows: 1
@@ -221,7 +232,8 @@ Define and specify the ``atlite.Cutout`` used for calculating renewable potentia
 .. literalinclude:: ../config.default.yaml
    :language: yaml
-   :lines: 170-176
+   :start-at: load:
+   :end-before: costs:

 .. csv-table::
    :header-rows: 1
@@ -235,7 +247,8 @@ Define and specify the ``atlite.Cutout`` used for calculating renewable potentia
 .. literalinclude:: ../config.default.yaml
    :language: yaml
-   :lines: 178-190
+   :start-after: scaling_factor:
+   :end-before: solving:

 .. csv-table::
    :header-rows: 1
@@ -256,7 +269,8 @@ Define and specify the ``atlite.Cutout`` used for calculating renewable potentia
 .. literalinclude:: ../config.default.yaml
    :language: yaml
-   :lines: 208-217
+   :start-at: solving:
+   :end-before: solver:

 .. csv-table::
    :header-rows: 1
@@ -268,7 +282,8 @@ Define and specify the ``atlite.Cutout`` used for calculating renewable potentia
 .. literalinclude:: ../config.default.yaml
    :language: yaml
-   :lines: 218-227
+   :start-at: solver:
+   :end-before: plotting:

 .. csv-table::
    :header-rows: 1
@@ -282,7 +297,7 @@ Define and specify the ``atlite.Cutout`` used for calculating renewable potentia
 .. literalinclude:: ../config.default.yaml
    :language: yaml
-   :lines: 236-314
+   :start-at: plotting:

 .. csv-table::
    :header-rows: 1

View File

@@ -25,7 +25,10 @@ Upcoming Release
 * The ``focus_weights`` are now also considered when pre-clustering in the :mod:`simplify_network` rule [`#241 <https://github.com/PyPSA/pypsa-eur/pull/241>`_].
 * Continuous integration testing switches to GitHub Actions from Travis CI [`#252 <https://github.com/PyPSA/pypsa-eur/pull/252>`_].
 * Bugfix in :mod:`build_renewable_profile` where offshore wind profiles could no longer be created [`#249 <https://github.com/PyPSA/pypsa-eur/pull/249>`_].
 * Implements changes to ``n.snapshot_weightings`` in the upcoming PyPSA version (cf. `PyPSA/PyPSA#227 <https://github.com/PyPSA/PyPSA/pull/227>`_) [`#259 <https://github.com/PyPSA/pypsa-eur/pull/259>`_].
+* Bugfix: The lower expansion limit of extendable carriers is now set to the existing capacity, i.e. ``p_nom_min = p_nom`` (previously 0). At the same time, the upper limit (``p_nom_max``) is now the maximum of the installed capacity (``p_nom``) and the previous estimate based on land availability [`#260 <https://github.com/PyPSA/pypsa-eur/pull/260>`_].
+* Bugfix: Solving an operations network now also includes optimized store capacities. Previously, only lines, links, generators and storage units were considered.
+* Bugfix: With ``load_shedding: true`` in the solving options of ``config.yaml``, load-shedding generators are only added at the AC buses, excluding buses for H2 and battery stores.

 PyPSA-Eur 0.3.0 (7th December 2020)
 ===================================

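Throughout this merge, occurrences of ``n.snapshot_weightings.sum()`` become ``n.snapshot_weightings.objective.sum()``. A minimal sketch of the underlying API change, assuming an already built network (the file path is illustrative):

    import pypsa

    n = pypsa.Network("networks/elec_s.nc")  # illustrative path

    # Before PyPSA/PyPSA#227, snapshot_weightings was a single pandas Series.
    # It is now a DataFrame with one weighting column per purpose:
    print(n.snapshot_weightings.columns)  # ['objective', 'generators', 'stores']

    # Annuity scaling therefore selects the objective weightings explicitly ...
    Nyears = n.snapshot_weightings.objective.sum() / 8760.

    # ... while dispatch-related aggregations use the matching column:
    energy_per_generator = n.generators_t.p.multiply(
        n.snapshot_weightings.generators, axis=0).sum()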
doc/requirements.txt Normal file
View File

@@ -0,0 +1,21 @@
+# SPDX-FileCopyrightText: : 2019-2021 The PyPSA-Eur Authors
+#
+# SPDX-License-Identifier: CC0-1.0
+
+sphinx
+sphinx_rtd_theme
+pypsa
+vresutils>=0.3.1
+powerplantmatching>=0.4.8
+atlite>=0.2.2
+dask<=2021.3.1
+# cartopy
+scikit-learn
+pycountry
+pyyaml
+seaborn
+memory_profiler
+tables
+descartes

View File

@@ -1,32 +0,0 @@
-# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-name: pypsa-eur-docs
-channels:
-  - conda-forge
-dependencies:
-  - python<=3.7
-  - pip
-  - pypsa>=0.17.1
-  - atlite>=0.2.2
-  - dask<=2021.3.1 # until https://github.com/dask/dask/issues/7583 is solved
-  - pre-commit
-  # Dependencies of the workflow itself
-  - scikit-learn
-  - pycountry
-  - seaborn
-  - memory_profiler
-  - yaml
-  - pytables
-  - powerplantmatching>=0.4.8
-  # GIS dependencies have to come all from conda-forge
-  - cartopy
-  - descartes
-  - pip:
-    - vresutils==0.3.1
-    - sphinx
-    - sphinx_rtd_theme

View File

@@ -12,7 +12,7 @@ dependencies:
   - pip
   - mamba # esp for windows build
-  - pypsa>=0.17.1
+  #- pypsa>=0.17.1
   - atlite>=0.2.4
   - dask<=2021.3.1 # until https://github.com/dask/dask/issues/7583 is solved
@@ -52,7 +52,9 @@ dependencies:
   - tqdm
   - pytz
   - country_converter
+  - tabula-py
   - pip:
+    - git+https://github.com/pypsa/pypsa.git#egg=pypsa
     - vresutils==0.3.1
     - tsam>=1.1.0

View File

@@ -119,7 +119,7 @@ def load_network_for_plots(fn, tech_costs, config, combine_hydro_ps=True):
     # bus_carrier = n.storage_units.bus.map(n.buses.carrier)
     # n.storage_units.loc[bus_carrier == "heat","carrier"] = "water tanks"

-    Nyears = n.snapshot_weightings.sum() / 8760.
+    Nyears = n.snapshot_weightings.objective.sum() / 8760.

     costs = load_costs(Nyears, tech_costs, config['costs'], config['electricity'])
     update_transmission_costs(n, costs)

View File

@@ -561,7 +561,7 @@ if __name__ == "__main__":
     configure_logging(snakemake)

     n = pypsa.Network(snakemake.input.base_network)
-    Nyears = n.snapshot_weightings.sum() / 8760.
+    Nyears = n.snapshot_weightings.objective.sum() / 8760.

     costs = load_costs(Nyears)
     ppl = load_powerplants()

View File

@@ -197,7 +197,7 @@ if __name__ == "__main__":
     configure_logging(snakemake)

     n = pypsa.Network(snakemake.input.network)
-    Nyears = n.snapshot_weightings.sum() / 8760.
+    Nyears = n.snapshot_weightings.objective.sum() / 8760.

     costs = load_costs(Nyears, tech_costs=snakemake.input.tech_costs,
                        config=snakemake.config['costs'],
                        elec_config=snakemake.config['electricity'])

View File

@@ -114,7 +114,7 @@ def _find_closest_links(links, new_links, distance_upper_bound=1.5):
 def _load_buses_from_eg():
     buses = (pd.read_csv(snakemake.input.eg_buses, quotechar="'",
-                         true_values='t', false_values='f',
+                         true_values=['t'], false_values=['f'],
                          dtype=dict(bus_id="str"))
              .set_index("bus_id")
              .drop(['station_id'], axis=1)
@@ -136,7 +136,7 @@ def _load_buses_from_eg():
 def _load_transformers_from_eg(buses):
     transformers = (pd.read_csv(snakemake.input.eg_transformers, quotechar="'",
-                                true_values='t', false_values='f',
+                                true_values=['t'], false_values=['f'],
                                 dtype=dict(transformer_id='str', bus0='str', bus1='str'))
                     .set_index('transformer_id'))
@@ -147,7 +147,7 @@ def _load_transformers_from_eg(buses):
 def _load_converters_from_eg(buses):
     converters = (pd.read_csv(snakemake.input.eg_converters, quotechar="'",
-                              true_values='t', false_values='f',
+                              true_values=['t'], false_values=['f'],
                               dtype=dict(converter_id='str', bus0='str', bus1='str'))
                   .set_index('converter_id'))
@@ -159,7 +159,7 @@ def _load_converters_from_eg(buses):
 def _load_links_from_eg(buses):
-    links = (pd.read_csv(snakemake.input.eg_links, quotechar="'", true_values='t', false_values='f',
+    links = (pd.read_csv(snakemake.input.eg_links, quotechar="'", true_values=['t'], false_values=['f'],
                          dtype=dict(link_id='str', bus0='str', bus1='str', under_construction="bool"))
             .set_index('link_id'))
@@ -249,7 +249,7 @@ def _add_links_from_tyndp(buses, links):
 def _load_lines_from_eg(buses):
-    lines = (pd.read_csv(snakemake.input.eg_lines, quotechar="'", true_values='t', false_values='f',
+    lines = (pd.read_csv(snakemake.input.eg_lines, quotechar="'", true_values=['t'], false_values=['f'],
                          dtype=dict(line_id='str', bus0='str', bus1='str',
                                     underground="bool", under_construction="bool"))
             .set_index('line_id')

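The ``true_values``/``false_values`` changes above matter because pandas documents both parameters as lists of strings; the bare-string form stopped parsing the 't'/'f' flags reliably. A self-contained check with synthetic data (not the actual GridKit inputs):

    import io
    import pandas as pd

    csv = io.StringIO("bus_id,under_construction\n'5231',t\n'5232',f\n")

    buses = pd.read_csv(csv, quotechar="'",
                        true_values=['t'], false_values=['f'],
                        dtype=dict(bus_id="str"))

    # With the list form, a column containing only 't'/'f' parses to booleans:
    assert buses.under_construction.dtype == bool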
View File

@@ -357,7 +357,8 @@ if __name__ == "__main__":
         clustering = pypsa.networkclustering.Clustering(n, busmap, linemap, linemap, pd.Series(dtype='O'))
     else:
         line_length_factor = snakemake.config['lines']['length_factor']
-        hvac_overhead_cost = (load_costs(n.snapshot_weightings.sum()/8760,
+        Nyears = n.snapshot_weightings.objective.sum()/8760
+        hvac_overhead_cost = (load_costs(Nyears,
                                          tech_costs=snakemake.input.tech_costs,
                                          config=snakemake.config['costs'],
                                          elec_config=snakemake.config['electricity'])

View File

@@ -111,15 +111,15 @@ def calculate_costs(n, label, costs):
         costs.loc[idx[raw_index],label] = capital_costs_grouped.values

         if c.name == "Link":
-            p = c.pnl.p0.multiply(n.snapshot_weightings,axis=0).sum()
+            p = c.pnl.p0.multiply(n.snapshot_weightings.generators,axis=0).sum()
         elif c.name == "Line":
             continue
         elif c.name == "StorageUnit":
-            p_all = c.pnl.p.multiply(n.snapshot_weightings,axis=0)
+            p_all = c.pnl.p.multiply(n.snapshot_weightings.generators,axis=0)
             p_all[p_all < 0.] = 0.
             p = p_all.sum()
         else:
-            p = c.pnl.p.multiply(n.snapshot_weightings,axis=0).sum()
+            p = c.pnl.p.multiply(n.snapshot_weightings.generators,axis=0).sum()

         marginal_costs = p*c.df.marginal_cost
@@ -144,10 +144,12 @@ def calculate_energy(n, label, energy):
     for c in n.iterate_components(n.one_port_components|n.branch_components):

-        if c.name in n.one_port_components:
-            c_energies = c.pnl.p.multiply(n.snapshot_weightings,axis=0).sum().multiply(c.df.sign).groupby(c.df.carrier).sum()
+        if c.name in {'Generator', 'Load', 'ShuntImpedance'}:
+            c_energies = c.pnl.p.multiply(n.snapshot_weightings.generators,axis=0).sum().multiply(c.df.sign).groupby(c.df.carrier).sum()
+        elif c.name in {'StorageUnit', 'Store'}:
+            c_energies = c.pnl.p.multiply(n.snapshot_weightings.stores,axis=0).sum().multiply(c.df.sign).groupby(c.df.carrier).sum()
         else:
-            c_energies = (-c.pnl.p1.multiply(n.snapshot_weightings,axis=0).sum() - c.pnl.p0.multiply(n.snapshot_weightings,axis=0).sum()).groupby(c.df.carrier).sum()
+            c_energies = (-c.pnl.p1.multiply(n.snapshot_weightings.generators,axis=0).sum() - c.pnl.p0.multiply(n.snapshot_weightings.generators,axis=0).sum()).groupby(c.df.carrier).sum()

         energy = include_in_summary(energy, [c.list_name], label, c_energies)
@@ -400,7 +402,7 @@ def make_summaries(networks_dict, country='all'):
         if country != 'all':
             n = n[n.buses.country == country]

-        Nyears = n.snapshot_weightings.sum() / 8760.
+        Nyears = n.snapshot_weightings.objective.sum() / 8760.
         costs = load_costs(Nyears, snakemake.input[0],
                            snakemake.config['costs'], snakemake.config['electricity'])
         update_transmission_costs(n, costs, simple_hvdc_costs=False)
@@ -442,7 +444,7 @@ if __name__ == "__main__":
         ll = [snakemake.wildcards.ll]

     networks_dict = {(simpl,clusters,l,opts) :
-                     os.path.join(network_dir, f'{snakemake.wildcards.network}_s{simpl}_'
+                     os.path.join(network_dir, f'elec_s{simpl}_'
                                   f'{clusters}_ec_l{l}_{opts}.nc')
                      for simpl in expand_from_wildcard("simpl")
                      for clusters in expand_from_wildcard("clusters")

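A condensed sketch of the weighting rule that the ``calculate_energy`` hunk above implements, assuming ``n`` is a solved PyPSA network and ``c`` a component object from ``n.iterate_components``; the mapping dict and helper name are made up for illustration:

    # Energy balances of storage components integrate state-of-charge terms,
    # so they use the 'stores' weightings; everything else uses 'generators'.
    WEIGHTING_COLUMN = {
        'Generator': 'generators',
        'Load': 'generators',
        'ShuntImpedance': 'generators',
        'StorageUnit': 'stores',
        'Store': 'stores',
    }

    def one_port_energy(n, c):
        """Carrier-aggregated energy of one-port component ``c``."""
        w = n.snapshot_weightings[WEIGHTING_COLUMN[c.name]]
        return (c.pnl.p.multiply(w, axis=0).sum()
                .multiply(c.df.sign)
                .groupby(c.df.carrier).sum())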
View File

@@ -88,36 +88,43 @@ def plot_map(n, ax=None, attribute='p_nom', opts={}):
         # bus_sizes = n.generators_t.p.sum().loc[n.generators.carrier == "load"].groupby(n.generators.bus).sum()
         bus_sizes = pd.concat((n.generators.query('carrier != "load"').groupby(['bus', 'carrier']).p_nom_opt.sum(),
                                n.storage_units.groupby(['bus', 'carrier']).p_nom_opt.sum()))
-        line_widths_exp = dict(Line=n.lines.s_nom_opt, Link=n.links.p_nom_opt)
-        line_widths_cur = dict(Line=n.lines.s_nom_min, Link=n.links.p_nom_min)
+        line_widths_exp = n.lines.s_nom_opt
+        line_widths_cur = n.lines.s_nom_min
+        link_widths_exp = n.links.p_nom_opt
+        link_widths_cur = n.links.p_nom_min
     else:
         raise 'plotting of {} has not been implemented yet'.format(attribute)

     line_colors_with_alpha = \
-        dict(Line=(line_widths_cur['Line'] / n.lines.s_nom > 1e-3)
-             .map({True: line_colors['cur'], False: to_rgba(line_colors['cur'], 0.)}),
-             Link=(line_widths_cur['Link'] / n.links.p_nom > 1e-3)
-             .map({True: line_colors['cur'], False: to_rgba(line_colors['cur'], 0.)}))
+        ((line_widths_cur / n.lines.s_nom > 1e-3)
+         .map({True: line_colors['cur'], False: to_rgba(line_colors['cur'], 0.)}))
+    link_colors_with_alpha = \
+        ((link_widths_cur / n.links.p_nom > 1e-3)
+         .map({True: line_colors['cur'], False: to_rgba(line_colors['cur'], 0.)}))

     ## FORMAT
     linewidth_factor = opts['map'][attribute]['linewidth_factor']
     bus_size_factor = opts['map'][attribute]['bus_size_factor']

     ## PLOT
-    n.plot(line_widths=pd.concat(line_widths_exp)/linewidth_factor,
-           line_colors=dict(Line=line_colors['exp'], Link=line_colors['exp']),
+    n.plot(line_widths=line_widths_exp/linewidth_factor,
+           link_widths=link_widths_exp/linewidth_factor,
+           line_colors=line_colors['exp'],
+           link_colors=line_colors['exp'],
            bus_sizes=bus_sizes/bus_size_factor,
            bus_colors=tech_colors,
            boundaries=map_boundaries,
-           geomap=True,
+           color_geomap=True, geomap=True,
            ax=ax)
-    n.plot(line_widths=pd.concat(line_widths_cur)/linewidth_factor,
-           line_colors=pd.concat(line_colors_with_alpha),
+    n.plot(line_widths=line_widths_cur/linewidth_factor,
+           link_widths=link_widths_cur/linewidth_factor,
+           line_colors=line_colors_with_alpha,
+           link_colors=link_colors_with_alpha,
            bus_sizes=0,
            bus_colors=tech_colors,
            boundaries=map_boundaries,
-           geomap=False,
+           color_geomap=True, geomap=False,
            ax=ax)

     ax.set_aspect('equal')
     ax.axis('off')
@@ -138,7 +145,7 @@ def plot_map(n, ax=None, attribute='p_nom', opts={}):
                        loc="upper left", bbox_to_anchor=(0.24, 1.01),
                        frameon=False,
                        labelspacing=0.8, handletextpad=1.5,
-                       title='Transmission Exist./Exp. ')
+                       title='Transmission Exp./Exist. ')
     ax.add_artist(l1_1)

     handles = []
@@ -196,7 +203,7 @@ def plot_total_energy_pie(n, ax=None):
 def plot_total_cost_bar(n, ax=None):
     if ax is None: ax = plt.gca()

-    total_load = (n.snapshot_weightings * n.loads_t.p.sum(axis=1)).sum()
+    total_load = (n.snapshot_weightings.generators * n.loads_t.p.sum(axis=1)).sum()
     tech_colors = opts['tech_colors']

     def split_costs(n):

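The plotting hunks track the newer PyPSA ``Network.plot`` API, which takes separate per-component arguments (``line_widths``/``link_widths``, ``line_colors``/``link_colors``) instead of dicts keyed by 'Line' and 'Link'. A minimal sketch; the network path, colours and scaling factor are illustrative:

    import pypsa

    n = pypsa.Network("results/networks/elec_s_6_ec_lcopt_Co2L-24H.nc")  # illustrative path
    linewidth_factor = 3e3  # illustrative scaling

    n.plot(line_widths=n.lines.s_nom_opt / linewidth_factor,
           link_widths=n.links.p_nom_opt / linewidth_factor,
           line_colors='rosybrown',
           link_colors='darkseagreen',
           bus_sizes=0,
           color_geomap=True, geomap=True)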
View File

@@ -150,6 +150,7 @@ def average_every_nhours(n, offset):
     return m

+
 def apply_time_segmentation(n, segments):
     logger.info(f"Aggregating time series to {segments} segments.")

     try:
@@ -223,7 +224,7 @@ if __name__ == "__main__":
         opts = snakemake.wildcards.opts.split('-')

     n = pypsa.Network(snakemake.input[0])
-    Nyears = n.snapshot_weightings.sum() / 8760.
+    Nyears = n.snapshot_weightings.objective.sum() / 8760.

     set_line_s_max_pu(n)

View File

@@ -141,7 +141,8 @@ def simplify_network_to_380(n):
 def _prepare_connection_costs_per_link(n):
     if n.links.empty: return {}

-    costs = load_costs(n.snapshot_weightings.sum() / 8760, snakemake.input.tech_costs,
+    Nyears = n.snapshot_weightings.objective.sum() / 8760
+    costs = load_costs(Nyears, snakemake.input.tech_costs,
                        snakemake.config['costs'], snakemake.config['electricity'])

     connection_costs_per_link = {}
@@ -178,6 +179,7 @@ def _compute_connection_costs_to_bus(n, busmap, connection_costs_per_link=None,
 def _adjust_capital_costs_using_connection_costs(n, connection_costs_to_bus):
+    connection_costs = {}
     for tech in connection_costs_to_bus:
         tech_b = n.generators.carrier == tech
         costs = n.generators.loc[tech_b, "bus"].map(connection_costs_to_bus[tech]).loc[lambda s: s>0]
@@ -185,6 +187,9 @@ def _adjust_capital_costs_using_connection_costs(n, connection_costs_to_bus):
             n.generators.loc[costs.index, "capital_cost"] += costs
             logger.info("Displacing {} generator(s) and adding connection costs to capital_costs: {} "
                         .format(tech, ", ".join("{:.0f} Eur/MW/a for `{}`".format(d, b) for b, d in costs.iteritems())))
+            connection_costs[tech] = costs
+    pd.DataFrame(connection_costs).to_csv(snakemake.output.connection_costs)
+

 def _aggregate_and_move_components(n, busmap, connection_costs_to_bus, aggregate_one_ports={"Load", "StorageUnit"}):

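The new ``connection_costs`` output records, per technology, the connection cost (Eur/MW/a) added to the capital cost of each generator whose bus is displaced during simplification. A toy sketch of the exported table; bus labels and values are made up:

    import pandas as pd

    connection_costs = {
        "onwind": pd.Series({"5231": 2134.0, "5233": 1823.0}),
        "solar":  pd.Series({"5231": 934.0}),
    }

    # One column per technology, indexed by bus; missing entries become NaN.
    pd.DataFrame(connection_costs).to_csv("resources/connection_costs_s.csv")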
View File

@@ -101,8 +101,9 @@ def prepare_network(n, solve_opts):
     if solve_opts.get('load_shedding'):
         n.add("Carrier", "Load")
-        n.madd("Generator", n.buses.index, " load",
-               bus=n.buses.index,
+        buses_i = n.buses.query("carrier == 'AC'").index
+        n.madd("Generator", buses_i, " load",
+               bus=buses_i,
                carrier='load',
                sign=1e-3, # Adjust sign to measure p and p_nom in kW instead of MW
                marginal_cost=1e2, # Eur/kWh
@@ -127,7 +128,7 @@ def prepare_network(n, solve_opts):
     if solve_opts.get('nhours'):
         nhours = solve_opts['nhours']
         n.set_snapshots(n.snapshots[:nhours])
-        n.snapshot_weightings[:] = 8760./nhours
+        n.snapshot_weightings[:] = 8760. / nhours

     return n
@@ -174,16 +175,16 @@ def add_EQ_constraints(n, o, scaling=1e-1):
     ggrouper = n.generators.bus
     lgrouper = n.loads.bus
     sgrouper = n.storage_units.bus
-    load = n.snapshot_weightings @ \
+    load = n.snapshot_weightings.generators @ \
            n.loads_t.p_set.groupby(lgrouper, axis=1).sum()
-    inflow = n.snapshot_weightings @ \
+    inflow = n.snapshot_weightings.stores @ \
              n.storage_units_t.inflow.groupby(sgrouper, axis=1).sum()
     inflow = inflow.reindex(load.index).fillna(0.)
     rhs = scaling * ( level * load - inflow )
-    lhs_gen = linexpr((n.snapshot_weightings * scaling,
+    lhs_gen = linexpr((n.snapshot_weightings.generators * scaling,
                        get_var(n, "Generator", "p").T)
               ).T.groupby(ggrouper, axis=1).apply(join_exprs)
-    lhs_spill = linexpr((-n.snapshot_weightings * scaling,
+    lhs_spill = linexpr((-n.snapshot_weightings.stores * scaling,
                          get_var(n, "StorageUnit", "spill").T)
                 ).T.groupby(sgrouper, axis=1).apply(join_exprs)
     lhs_spill = lhs_spill.reindex(lhs_gen.index).fillna("")

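The load-shedding hunk above limits shedding generators to electrical buses. A sketch of the selection, assuming a PyPSA network ``n`` that also contains H2 or battery store buses (those carry a different ``carrier`` and are skipped by the query); the parameter values follow the script:

    buses_i = n.buses.query("carrier == 'AC'").index

    n.add("Carrier", "Load")
    n.madd("Generator", buses_i, " load",
           bus=buses_i,
           carrier='load',
           sign=1e-3,          # measure p and p_nom in kW instead of MW
           marginal_cost=1e2,  # Eur/kWh
           p_nom=1e9)          # kW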
View File

@@ -81,10 +81,15 @@ def set_parameters_from_optimized(n, n_optim):
         n_optim.generators['p_nom_opt'].reindex(gen_extend_i, fill_value=0.)
     n.generators.loc[gen_extend_i, 'p_nom_extendable'] = False

-    stor_extend_i = n.storage_units.index[n.storage_units.p_nom_extendable]
-    n.storage_units.loc[stor_extend_i, 'p_nom'] = \
-        n_optim.storage_units['p_nom_opt'].reindex(stor_extend_i, fill_value=0.)
-    n.storage_units.loc[stor_extend_i, 'p_nom_extendable'] = False
+    stor_units_extend_i = n.storage_units.index[n.storage_units.p_nom_extendable]
+    n.storage_units.loc[stor_units_extend_i, 'p_nom'] = \
+        n_optim.storage_units['p_nom_opt'].reindex(stor_units_extend_i, fill_value=0.)
+    n.storage_units.loc[stor_units_extend_i, 'p_nom_extendable'] = False
+
+    stor_extend_i = n.stores.index[n.stores.e_nom_extendable]
+    n.stores.loc[stor_extend_i, 'e_nom'] = \
+        n_optim.stores['e_nom_opt'].reindex(stor_extend_i, fill_value=0.)
+    n.stores.loc[stor_extend_i, 'e_nom_extendable'] = False

     return n
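A hypothetical usage of the extended ``set_parameters_from_optimized``; the file paths are illustrative:

    import pypsa

    n = pypsa.Network("networks/elec_s_6_ec.nc")                            # network to operate
    n_optim = pypsa.Network("results/networks/elec_s_6_ec_lcopt_Co2L.nc")   # investment result

    n = set_parameters_from_optimized(n, n_optim)
    # Lines, links, generators, storage units and (with this fix) stores are
    # now fixed at their optimized capacities, so a subsequent solve only
    # dispatches the system.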