This commit is contained in:
lisazeyen 2023-08-30 11:50:49 +02:00
parent 498b22565c
commit 3247fb59e0
5 changed files with 49 additions and 18 deletions

View File

@ -549,6 +549,7 @@ costs:
year: 2030 year: 2030
version: v0.6.0 version: v0.6.0
rooftop_share: 0.14 # based on the potentials, assuming (0.1 kW/m2 and 10 m2/person) rooftop_share: 0.14 # based on the potentials, assuming (0.1 kW/m2 and 10 m2/person)
social_discountrate: 0.02
fill_values: fill_values:
FOM: 0 FOM: 0
VOM: 0 VOM: 0
@ -893,6 +894,7 @@ plotting:
H2 for shipping: "#ebaee0" H2 for shipping: "#ebaee0"
H2: '#bf13a0' H2: '#bf13a0'
hydrogen: '#bf13a0' hydrogen: '#bf13a0'
retrofitted H2 boiler: '#e5a0d9'
SMR: '#870c71' SMR: '#870c71'
SMR CC: '#4f1745' SMR CC: '#4f1745'
H2 liquefaction: '#d647bd' H2 liquefaction: '#d647bd'

View File

@ -41,10 +41,10 @@ Perfect foresight scenarios
.. warning:: .. warning::
Perfect foresight is currently under development and not yet implemented. Perfect foresight is currently implemented as a first test version.
For running perfect foresight scenarios, in future versions you will be able to For running perfect foresight scenarios, you can adjust the
set in the ``config/config.yaml``: ``config/config.perfect.yaml``:
.. code:: yaml .. code:: yaml

View File

@ -14,6 +14,8 @@ Upcoming Release
* For industry distribution, use EPRTR as fallback if ETS data is not available. * For industry distribution, use EPRTR as fallback if ETS data is not available.
* New feature: multi-decade optimisation with perfect foresight.
PyPSA-Eur 0.8.1 (27th July 2023) PyPSA-Eur 0.8.1 (27th July 2023)
================================ ================================

View File

@ -8,7 +8,6 @@ Concats pypsa networks of single investment periods to one network.
import logging import logging
import re import re
import pandas as pd import pandas as pd
import numpy as np import numpy as np
import pypsa import pypsa
@ -39,7 +38,8 @@ def get_missing(df, n, c):
def get_social_discount(t, r=0.01): def get_social_discount(t, r=0.01):
""" """
Calculate for a given time t the social discount. Calculate for a given time t and social discount rate r [per unit]
the social discount.
""" """
return 1 / (1 + r) ** t return 1 / (1 + r) ** t
@ -61,6 +61,8 @@ def get_investment_weighting(time_weighting, r=0.01):
def add_year_to_constraints(n, baseyear): def add_year_to_constraints(n, baseyear):
""" """
Add investment period to global constraints and rename index.
Parameters Parameters
---------- ----------
n : pypsa.Network n : pypsa.Network
@ -74,6 +76,10 @@ def add_year_to_constraints(n, baseyear):
def hvdc_transport_model(n): def hvdc_transport_model(n):
"""
Convert AC lines to DC links for multi-decade optimisation with
line expansion. Losses of DC links are assumed to be 3% per 1000 km.
"""
logger.info("Convert AC lines to DC links to perform multi-decade optimisation.") logger.info("Convert AC lines to DC links to perform multi-decade optimisation.")
@ -102,6 +108,18 @@ def hvdc_transport_model(n):
def adjust_electricity_grid(n, year, years): def adjust_electricity_grid(n, year, years):
"""
Add carrier to lines. Replace AC lines with DC links in case of line
expansion. Add lifetime to DC links in case of line expansion.
Parameters
----------
n : pypsa.Network
year : int
year in which optimized assets are built
years: list
investment periods
"""
n.lines["carrier"] = "AC" n.lines["carrier"] = "AC"
links_i = n.links[n.links.carrier=="DC"].index links_i = n.links[n.links.carrier=="DC"].index
if n.lines.s_nom_extendable.any() or n.links.loc[links_i, "p_nom_extendable"].any(): if n.lines.s_nom_extendable.any() or n.links.loc[links_i, "p_nom_extendable"].any():
@ -175,7 +193,8 @@ def concat_networks(years):
pnl[k].loc[pnl_year.index, pnl_year.columns] = pnl_year pnl[k].loc[pnl_year.index, pnl_year.columns] = pnl_year
else: else:
# this is to avoid adding multiple times assets with infinite lifetime as ror # this is to avoid adding multiple times assets with
# infinite lifetime as ror
cols = pnl_year.columns.difference(pnl[k].columns) cols = pnl_year.columns.difference(pnl[k].columns)
pnl[k] = pd.concat([pnl[k], pnl_year[cols]], axis=1) pnl[k] = pd.concat([pnl[k], pnl_year[cols]], axis=1)
@ -250,11 +269,19 @@ def set_all_phase_outs(n):
(["nuclear"], "DE", 2022), (["nuclear"], "DE", 2022),
(["nuclear"], "BE", 2025), (["nuclear"], "BE", 2025),
(["nuclear"], "ES", 2027), (["nuclear"], "ES", 2027),
(["coal", "lignite"], "DE", 2038), (["coal", "lignite"], "DE", 2030),
(["coal", "lignite"], "ES", 2027), (["coal", "lignite"], "ES", 2027),
(["coal", "lignite"], "FR", 2022), (["coal", "lignite"], "FR", 2022),
(["coal", "lignite"], "GB", 2024), (["coal", "lignite"], "GB", 2024),
(["coal", "lignite"], "IT", 2025), (["coal", "lignite"], "IT", 2025),
(["coal", "lignite"], "DK", 2030),
(["coal", "lignite"], "FI", 2030),
(["coal", "lignite"], "HU", 2030),
(["coal", "lignite"], "SK", 2030),
(["coal", "lignite"], "GR", 2030),
(["coal", "lignite"], "IE", 2030),
(["coal", "lignite"], "NL", 2030),
(["coal", "lignite"], "RS", 2030),
] ]
for carrier, ct, phase_out_year in planned: for carrier, ct, phase_out_year in planned:
set_phase_out(n, carrier, ct, phase_out_year) set_phase_out(n, carrier, ct, phase_out_year)
@ -326,6 +353,10 @@ def set_carbon_constraints(n, opts):
def adjust_lvlimit(n): def adjust_lvlimit(n):
"""
Convert global constraints for single investment period to one uniform
if all attributes stay the same.
"""
c = "GlobalConstraint" c = "GlobalConstraint"
cols = ['carrier_attribute', 'sense', "constant", "type"] cols = ['carrier_attribute', 'sense', "constant", "type"]
glc_type = "transmission_volume_expansion_limit" glc_type = "transmission_volume_expansion_limit"
@ -350,6 +381,10 @@ def adjust_CO2_glc(n):
def add_H2_boilers(n): def add_H2_boilers(n):
"""
Gas boilers can be retrofitted to run with H2. Add H2 boilers for heating
for all existing gas boilers.
"""
c = "Link" c = "Link"
logger.info("Add H2 boilers.") logger.info("Add H2 boilers.")
# existing gas boilers # existing gas boilers
@ -369,6 +404,7 @@ def add_H2_boilers(n):
# add H2 boilers to network # add H2 boilers to network
import_components_from_dataframe(n, df, c) import_components_from_dataframe(n, df, c)
def apply_time_segmentation_perfect( def apply_time_segmentation_perfect(
n, segments, solver_name="cbc", overwrite_time_dependent=True n, segments, solver_name="cbc", overwrite_time_dependent=True
): ):
@ -432,12 +468,6 @@ def apply_time_segmentation_perfect(
n.set_snapshots(sn_weightings.index) n.set_snapshots(sn_weightings.index)
n.snapshot_weightings = n.snapshot_weightings.mul(sn_weightings, axis=0) n.snapshot_weightings = n.snapshot_weightings.mul(sn_weightings, axis=0)
# overwrite time-dependent data with timeseries created by tsam
# if overwrite_time_dependent:
# values_t = segmented.mul(annual_max).set_index(snapshots)
# for component, key in values_t.columns.droplevel(2).unique():
# n.pnl(component)[key] = values_t[component, key]
return n return n
def set_temporal_aggregation_SEG(n, opts, solver_name): def set_temporal_aggregation_SEG(n, opts, solver_name):

View File

@ -32,7 +32,7 @@ import re
import numpy as np import numpy as np
import pandas as pd import pandas as pd
import pypsa import pypsa
from pypsa.descriptors import nominal_attrs, get_activity_mask from pypsa.descriptors import get_activity_mask
import xarray as xr import xarray as xr
from _helpers import configure_logging, update_config_with_sector_opts from _helpers import configure_logging, update_config_with_sector_opts
@ -41,9 +41,7 @@ from vresutils.benchmark import memory_logger
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
pypsa.pf.logger.setLevel(logging.WARNING) pypsa.pf.logger.setLevel(logging.WARNING)
from pypsa.descriptors import get_switchable_as_dense as get_as_dense from pypsa.descriptors import get_switchable_as_dense as get_as_dense
from pypsa.io import import_components_from_dataframe
from linopy.expressions import merge
from numpy import isnan
def add_land_use_constraint(n, planning_horizons, config): def add_land_use_constraint(n, planning_horizons, config):
if "m" in snakemake.wildcards.clusters: if "m" in snakemake.wildcards.clusters:
@ -140,7 +138,6 @@ def _add_land_use_constraint(n):
def _add_land_use_constraint_m(n, planning_horizons, config): def _add_land_use_constraint_m(n, planning_horizons, config):
# if generators clustering is lower than network clustering, land_use accounting is at generators clusters # if generators clustering is lower than network clustering, land_use accounting is at generators clusters
planning_horizons = param["planning_horizons"]
grouping_years = config["existing_capacities"]["grouping_years"] grouping_years = config["existing_capacities"]["grouping_years"]
current_horizon = snakemake.wildcards.planning_horizons current_horizon = snakemake.wildcards.planning_horizons