Merge branch 'master' into add-documentation-hackathon

Fabian Neumann 2024-04-14 20:08:49 +02:00 committed by GitHub
commit 1eb598079e
9 changed files with 70 additions and 18 deletions

config/config.default.yaml

@@ -20,6 +20,7 @@ remote:
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#run
run:
prefix: ""
name: ""
scenarios:
enable: false
@@ -359,8 +360,8 @@ solar_thermal:
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#existing-capacities
existing_capacities:
grouping_years_power: [1960, 1965, 1970, 1975, 1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2020, 2025, 2030]
grouping_years_heat: [1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2019] # these should not extend 2020
grouping_years_power: [1895, 1920, 1950, 1955, 1960, 1965, 1970, 1975, 1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2020, 2025, 2030]
grouping_years_heat: [1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2020] # heat grouping years >= baseyear will be ignored
threshold_capacity: 10
default_heating_lifetime: 20
conventional_carriers:
@@ -515,7 +516,11 @@ sector:
regional_coal_demand: false
regional_co2_sequestration_potential:
enable: false
attribute: 'conservative estimate Mt'
attribute:
- conservative estimate Mt
- conservative estimate GAS Mt
- conservative estimate OIL Mt
- conservative estimate aquifer Mt
include_onshore: false
min_size: 3
max_size: 25

doc/configtables/run.csv

@@ -1,5 +1,6 @@
,Unit,Values,Description
name,--,str/list,"Specify a name for your run. Results will be stored under this name. If ``scenarios: enable:`` is set to ``true``, the name must contain a subset of scenario names defined in ``scenarios: file:``. If the name is 'all', all defined scenarios will be run."
prefix,--,str,"Prefix for the run name which is used as a top-layer directory name in the results and resources folders."
scenarios,,,
-- enable,bool,"{true, false}","Switch to select whether the workflow should generate scenarios based on ``file``."
-- file,str,,"Path to the scenario yaml file. The scenario file contains config overrides for each scenario. In order to be taken into account, ``run: scenarios: enable:`` has to be set to ``true`` and ``run: name`` has to be a subset of the top-level keys given in the scenario file. In order to automatically create a ``scenario.yaml`` file based on a combination of settings, alter and use the ``config/create_scenarios.py`` script."
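For illustration, a minimal sketch of a scenario file that would satisfy these rules. The scenario names and overrides are invented (the override keys are options documented elsewhere in this commit); this is not the actual output of ``config/create_scenarios.py``:

# Hypothetical scenario file: the top-level keys are the names that
# ``run: name`` may reference once ``run: scenarios: enable:`` is true.
import yaml

scenarios = {
    "regional-oil": {"sector": {"regional_oil_demand": True}},
    "regional-co2": {
        "sector": {"regional_co2_sequestration_potential": {"enable": True}}
    },
}

# assumes the config/ directory exists; path chosen to match the docs above
with open("config/scenarios.yaml", "w") as f:
    yaml.safe_dump(scenarios, f)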


doc/configtables/sector.csv

@@ -90,7 +90,7 @@ regional_methanol_demand,--,"{true, false}",Spatially resolve methanol demand. S
regional_oil_demand,--,"{true, false}",Spatially resolve oil demand. Set to true if regional CO2 constraints needed.
regional_co2_sequestration_potential,,,
-- enable,--,"{true, false}",Add option for regionally-resolved geological carbon dioxide sequestration potentials based on `CO2StoP <https://setis.ec.europa.eu/european-co2-storage-database_en>`_.
-- attribute,--,string,Name of the attribute for the sequestration potential
-- attribute,--,string or list,Name (or list of names) of the attribute(s) for the sequestration potential
-- include_onshore,--,"{true, false}",Add options for including onshore sequestration potentials
-- min_size,Gt ,float,Any sites with lower potential than this value will be excluded
-- max_size,Gt ,float,The maximum sequestration potential for any one site.
@@ -143,5 +143,5 @@ limit_max_growth,,,
-- factor,p.u.,float,The maximum growth factor of a carrier (e.g. 1.3 allows 30% larger than max historic growth)
-- max_growth,,,
-- -- {carrier},GW,float,The historic maximum growth of a carrier
-- max_relative_growth,
-- max_relative_growth,,,
-- -- {carrier},p.u.,float,The historic maximum relative growth of a carrier


doc/release_notes.rst

@@ -9,6 +9,11 @@ Release Notes
Upcoming Release
================
* Group existing capacities to the next earlier grouping year, for consistency with optimized capacities.
* Bugfix: installed heating capacities were 5% lower than existing heating capacities.
* Include gas and oil fields and saline aquifers in the estimation of CO2 sequestration potential.
* Bugfix: convert strings to ``pathlib.Path`` objects as input to ``ConfigSettings``.
@@ -152,6 +157,9 @@ Upcoming Release
- Collection rules get a new wildcard ``run=config["run"]["name"]`` so they
can collect outputs across different scenarios.
- It is further possible to encapsulate your scenarios in a directory using
the setting ``run: prefix:``.
- **Warning:** One caveat remains for the scenario management with myopic or
perfect foresight pathway optimisation. The first investment period must be
shared across all scenarios. The reason is that the ``wildcard_constraints``

scripts/_helpers.py

@@ -59,6 +59,11 @@ def get_rdir(run):
RDIR = run["name"] + "/"
else:
RDIR = ""
prefix = run.get("prefix", "")
if prefix:
RDIR = f"{prefix}/{RDIR}"
return RDIR
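A quick sanity check of the new prefix handling. This is a sketch: the function body is reconstructed from the hunk above (the unshown scenario branch is omitted) and the run dictionaries are invented:

def get_rdir(run):
    # reconstruction of the branches visible in the diff above
    RDIR = run["name"] + "/" if run["name"] else ""
    prefix = run.get("prefix", "")
    if prefix:
        RDIR = f"{prefix}/{RDIR}"
    return RDIR

# prefix is prepended as a top-level directory; without it, behaviour is unchanged
assert get_rdir({"name": ""}) == ""
assert get_rdir({"name": "baseline"}) == "baseline/"
assert get_rdir({"name": "baseline", "prefix": "hackathon"}) == "hackathon/baseline/"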

scripts/add_brownfield.py

@@ -40,8 +40,8 @@ def add_brownfield(n, n_p, year):
# CO2 or global EU values since these are already in n
n_p.mremove(c.name, c.df.index[c.df.lifetime == np.inf])
# remove assets whose build_year + lifetime < year
n_p.mremove(c.name, c.df.index[c.df.build_year + c.df.lifetime < year])
# remove assets whose build_year + lifetime <= year
n_p.mremove(c.name, c.df.index[c.df.build_year + c.df.lifetime <= year])
# remove assets if their optimized nominal capacity is lower than a threshold
# since CHP heat Link is proportional to CHP electric Link, make sure threshold is compatible
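The boundary change is easiest to see on a toy frame (hypothetical assets): an asset built in 2005 with a 25-year lifetime reaches the end of its life exactly at the start of 2030, so it must be removed from the 2030 network; the old strict "<" kept it alive:

import pandas as pd

df = pd.DataFrame(
    {"build_year": [2005, 2010], "lifetime": [25.0, 25.0]},
    index=["retiring", "remaining"],
)
year = 2030
# "<=" removes the asset whose lifetime ends exactly in the planning year
assert list(df.index[df.build_year + df.lifetime <= year]) == ["retiring"]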

scripts/add_existing_baseyear.py

@@ -189,8 +189,19 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas
phased_out = df_agg[df_agg["DateOut"] < baseyear].index
df_agg.drop(phased_out, inplace=True)
older_assets = (df_agg.DateIn < min(grouping_years)).sum()
if older_assets:
logger.warning(
f"There are {older_assets} assets with build year "
f"before first power grouping year {min(grouping_years)}. "
"These assets are dropped and not considered."
"Consider to redefine the grouping years to keep them."
)
to_drop = df_agg[df_agg.DateIn < min(grouping_years)].index
df_agg.drop(to_drop, inplace=True)
df_agg["grouping_year"] = np.take(
grouping_years, np.digitize(df_agg.DateIn, grouping_years, right=True)
grouping_years[::-1], np.digitize(df_agg.DateIn, grouping_years[::-1])
)
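Reversing the bins flips which side of an interval a build year snaps to. A self-contained check (years invented; ``np.digitize`` accepts decreasing bin edges):

import numpy as np

grouping_years = [1980, 1990, 2000]
date_in = np.array([1985, 1995])

old = np.take(grouping_years, np.digitize(date_in, grouping_years, right=True))
new = np.take(grouping_years[::-1], np.digitize(date_in, grouping_years[::-1]))

print(old)  # [1990 2000]: snapped to the later grouping year
print(new)  # [1980 1990]: snapped to the earlier grouping year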
# calculate (adjusted) remaining lifetime before phase-out (+1 because assuming
@@ -362,13 +373,20 @@
)
else:
key = "central solid biomass CHP"
central_heat = n.buses.query(
"carrier == 'urban central heat'"
).location.unique()
heat_buses = new_capacity.index.map(
lambda i: i + " urban central heat" if i in central_heat else ""
)
n.madd(
"Link",
new_capacity.index,
suffix=name_suffix,
bus0=spatial.biomass.df.loc[new_capacity.index]["nodes"].values,
bus1=new_capacity.index,
bus2=new_capacity.index + " urban central heat",
bus2=heat_buses,
carrier=generator,
p_nom=new_capacity / costs.at[key, "efficiency"],
capital_cost=costs.at[key, "fixed"]
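The ``heat_buses`` mapping can be checked in isolation (node names invented): an empty string leaves the link's second output port unconnected instead of pointing at a non-existent urban central heat bus:

import pandas as pd

central_heat = ["DE0 1"]
new_capacity_index = pd.Index(["DE0 1", "DE0 2"])
heat_buses = new_capacity_index.map(
    lambda i: i + " urban central heat" if i in central_heat else ""
)
assert list(heat_buses) == ["DE0 1 urban central heat", ""]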
@@ -444,12 +462,25 @@ def add_heating_capacities_installed_before_baseyear(
else:
efficiency = costs.at[costs_name, "efficiency"]
for i, grouping_year in enumerate(grouping_years):
if int(grouping_year) + default_lifetime <= int(baseyear):
continue
valid_grouping_years = pd.Series(
[
int(grouping_year)
for grouping_year in grouping_years
if int(grouping_year) + default_lifetime > int(baseyear)
and int(grouping_year) < int(baseyear)
]
)
# installation is assumed to be linear for the past default_lifetime years
ratio = (int(grouping_year) - int(grouping_years[i - 1])) / default_lifetime
# get number of years of each interval
_years = (
valid_grouping_years.diff()
.shift(-1)
.fillna(baseyear - valid_grouping_years.iloc[-1])
)
# Installation is assumed to be linear for the past
ratios = _years / _years.sum()
for ratio, grouping_year in zip(ratios, valid_grouping_years):
n.madd(
"Link",

scripts/build_sequestration_potentials.py

@@ -23,13 +23,15 @@ def area(gdf):
def allocate_sequestration_potential(
gdf, regions, attr="conservative estimate Mt", threshold=3
):
gdf = gdf.loc[gdf[attr] > threshold, [attr, "geometry"]]
if isinstance(attr, str):
attr = [attr]
gdf = gdf.loc[gdf[attr].sum(axis=1) > threshold, attr + ["geometry"]]
gdf["area_sqkm"] = area(gdf)
overlay = gpd.overlay(regions, gdf, keep_geom_type=True)
overlay["share"] = area(overlay) / overlay["area_sqkm"]
adjust_cols = overlay.columns.difference({"name", "area_sqkm", "geometry", "share"})
overlay[adjust_cols] = overlay[adjust_cols].multiply(overlay["share"], axis=0)
return overlay.dissolve("name", aggfunc="sum")[attr]
return overlay.dissolve("name", aggfunc="sum")[attr].sum(axis=1)
if __name__ == "__main__":
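The list-valued ``attr`` can be exercised on a toy GeoDataFrame (column names follow the new config options; the geometries and numbers are made up):

import geopandas as gpd
from shapely.geometry import box

gdf = gpd.GeoDataFrame(
    {
        "conservative estimate Mt": [2.0, 1.0],
        "conservative estimate GAS Mt": [0.5, 3.0],
    },
    geometry=[box(0, 0, 1, 1), box(1, 0, 2, 1)],
)
attr = ["conservative estimate Mt", "conservative estimate GAS Mt"]
# sites are now filtered on the potential *summed* across attributes
kept = gdf.loc[gdf[attr].sum(axis=1) > 3]
assert len(kept) == 1  # 2.0 + 0.5 = 2.5 dropped; 1.0 + 3.0 = 4.0 kept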
@@ -37,7 +39,7 @@ if __name__ == "__main__":
from _helpers import mock_snakemake
snakemake = mock_snakemake(
"build_sequestration_potentials", simpl="", clusters="181"
"build_sequestration_potentials", simpl="", clusters="128"
)
set_scenario_config(snakemake)

scripts/solve_network.py

@@ -543,7 +543,7 @@ def add_BAU_constraints(n, config):
ext_carrier_i = xr.DataArray(ext_i.carrier.rename_axis("Generator-ext"))
lhs = p_nom.groupby(ext_carrier_i).sum()
index = mincaps.index.intersection(lhs.indexes["carrier"])
rhs = mincaps[index].rename_axis("carrier")
rhs = mincaps[lhs.indexes["carrier"]].rename_axis("carrier")
n.model.add_constraints(lhs >= rhs, name="bau_mincaps")
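One plausible reading of this fix, shown on toy data (carrier names invented): indexing ``mincaps`` directly by the carriers present in ``lhs`` keeps the right-hand side aligned with the left in length and order, whereas the intersection could silently drop or reorder carriers:

import pandas as pd

mincaps = pd.Series({"onwind": 10.0, "solar": 5.0, "OCGT": 2.0})
lhs_carriers = pd.Index(["solar", "onwind"], name="carrier")
rhs = mincaps[lhs_carriers].rename_axis("carrier")
print(rhs)  # solar 5.0, onwind 10.0: ordered like the lhs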