Merge branch 'master' into add-documentation-hackathon

Fabian Neumann, 2024-04-14 20:08:49 +02:00, committed by GitHub
commit 1eb598079e
9 changed files with 70 additions and 18 deletions

View File

@@ -20,6 +20,7 @@ remote:
 # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#run
 run:
+  prefix: ""
   name: ""
   scenarios:
     enable: false
@@ -359,8 +360,8 @@ solar_thermal:
 # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#existing-capacities
 existing_capacities:
-  grouping_years_power: [1960, 1965, 1970, 1975, 1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2020, 2025, 2030]
-  grouping_years_heat: [1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2019] # these should not extend 2020
+  grouping_years_power: [1895, 1920, 1950, 1955, 1960, 1965, 1970, 1975, 1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2020, 2025, 2030]
+  grouping_years_heat: [1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2020] # heat grouping years >= baseyear will be ignored
   threshold_capacity: 10
   default_heating_lifetime: 20
   conventional_carriers:
@@ -515,7 +516,11 @@ sector:
   regional_coal_demand: false
   regional_co2_sequestration_potential:
     enable: false
-    attribute: 'conservative estimate Mt'
+    attribute:
+    - conservative estimate Mt
+    - conservative estimate GAS Mt
+    - conservative estimate OIL Mt
+    - conservative estimate aquifer Mt
     include_onshore: false
     min_size: 3
     max_size: 25

View File

@@ -1,5 +1,6 @@
 ,Unit,Values,Description
 name,--,str/list,"Specify a name for your run. Results will be stored under this name. If ``scenario: enable:`` is set to ``true``, the name must contain a subset of scenario names defined in ``scenario: file:``. If the name is 'all', all defined scenarios will be run."
+prefix,--,str,"Prefix for the run name which is used as a top-layer directory name in the results and resources folders."
 scenarios,,,
 -- enable,bool,"{true, false}","Switch to select whether workflow should generate scenarios based on ``file``."
 -- file,str,,"Path to the scenario yaml file. The scenario file contains config overrides for each scenario. In order to be taken into account, ``run: scenarios`` has to be set to ``true`` and ``run: name`` has to be a subset of top-level keys given in the scenario file. In order to automatically create a `scenario.yaml` file based on a combination of settings, alter and use the ``config/create_scenarios.py`` script in the ``config`` directory."


View File

@@ -90,7 +90,7 @@ regional_methanol_demand,--,"{true, false}",Spatially resolve methanol demand. S
 regional_oil_demand,--,"{true, false}",Spatially resolve oil demand. Set to true if regional CO2 constraints needed.
 regional_co2 _sequestration_potential,,,
 -- enable,--,"{true, false}",Add option for regionally-resolved geological carbon dioxide sequestration potentials based on `CO2StoP <https://setis.ec.europa.eu/european-co2-storage-database_en>`_.
--- attribute,--,string,Name of the attribute for the sequestration potential
+-- attribute,--,string or list,Name (or list of names) of the attribute(s) for the sequestration potential
 -- include_onshore,--,"{true, false}",Add options for including onshore sequestration potentials
 -- min_size,Gt ,float,Any sites with lower potential than this value will be excluded
 -- max_size,Gt ,float,The maximum sequestration potential for any one site.
@@ -143,5 +143,5 @@ limit_max_growth,,,
 -- factor,p.u.,float,The maximum growth factor of a carrier (e.g. 1.3 allows 30% larger than max historic growth)
 -- max_growth,,,
 -- -- {carrier},GW,float,The historic maximum growth of a carrier
--- max_relative_growth,
+-- max_relative_growth,,,
 -- -- {carrier},p.u.,float,The historic maximum relative growth of a carrier


View File

@@ -9,6 +9,11 @@ Release Notes
 Upcoming Release
 ================

+* Group existing capacities to the earlier grouping_year for consistency with optimized capacities.
+
+* bugfix: installed heating capacities were 5% lower than existing heating capacities.
+
+* Include gas and oil fields and saline aquifers in the estimation of CO2 sequestration potential.
 * bugfix: convert Strings to pathlib.Path objects as input to ConfigSettings
@@ -152,6 +157,9 @@ Upcoming Release
 - Collection rules get a new wildcard ``run=config["run"]["name"]`` so they
   can collect outputs across different scenarios.

+- It is further possible to encapsulate your scenarios in a directory using
+  the setting ``run: prefix:``.
+
 - **Warning:** One caveat remains for the scenario management with myopic or
   perfect foresight pathway optimisation. The first investment period must be
   shared across all scenarios. The reason is that the ``wildcard_constraints``

View File

@@ -59,6 +59,11 @@ def get_rdir(run):
         RDIR = run["name"] + "/"
     else:
         RDIR = ""
+
+    prefix = run.get("prefix", "")
+    if prefix:
+        RDIR = f"{prefix}/{RDIR}"
+
     return RDIR
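
For orientation, a minimal self-contained sketch of the amended directory logic (hypothetical helper name; simplified, since the real function also handles the scenario-wildcard branch not shown in this hunk):

    def get_rdir_sketch(run):
        # run["name"] becomes the run directory, as in the hunk above
        RDIR = run["name"] + "/" if run.get("name") else ""
        # new: an optional prefix nests all run directories one level deeper
        prefix = run.get("prefix", "")
        if prefix:
            RDIR = f"{prefix}/{RDIR}"
        return RDIR

    assert get_rdir_sketch({"name": "myrun", "prefix": "hackathon"}) == "hackathon/myrun/"
    assert get_rdir_sketch({"name": "", "prefix": ""}) == ""

With ``run: prefix:`` set, results and resources of all runs are collected under that top-level directory.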

View File

@@ -40,8 +40,8 @@ def add_brownfield(n, n_p, year):
         # CO2 or global EU values since these are already in n
         n_p.mremove(c.name, c.df.index[c.df.lifetime == np.inf])

-        # remove assets whose build_year + lifetime < year
-        n_p.mremove(c.name, c.df.index[c.df.build_year + c.df.lifetime < year])
+        # remove assets whose build_year + lifetime <= year
+        n_p.mremove(c.name, c.df.index[c.df.build_year + c.df.lifetime <= year])

         # remove assets if their optimized nominal capacity is lower than a threshold
         # since CHP heat Link is proportional to CHP electric Link, make sure threshold is compatible
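
The tightened comparison retires assets exactly at the end of their lifetime instead of keeping them one investment period too long. A toy illustration with hypothetical data:

    import pandas as pd

    # hypothetical asset: built in 2020 with a 30-year lifetime, checked in 2050
    df = pd.DataFrame({"build_year": [2020], "lifetime": [30.0]})
    year = 2050
    print(df.index[df.build_year + df.lifetime < year])   # old: empty -> asset kept
    print(df.index[df.build_year + df.lifetime <= year])  # new: [0] -> asset retired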

View File

@@ -189,8 +189,19 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas
     phased_out = df_agg[df_agg["DateOut"] < baseyear].index
     df_agg.drop(phased_out, inplace=True)

+    older_assets = (df_agg.DateIn < min(grouping_years)).sum()
+    if older_assets:
+        logger.warning(
+            f"There are {older_assets} assets with build year "
+            f"before first power grouping year {min(grouping_years)}. "
+            "These assets are dropped and not considered. "
+            "Consider redefining the grouping years to keep them."
+        )
+    to_drop = df_agg[df_agg.DateIn < min(grouping_years)].index
+    df_agg.drop(to_drop, inplace=True)
+
     df_agg["grouping_year"] = np.take(
-        grouping_years, np.digitize(df_agg.DateIn, grouping_years, right=True)
+        grouping_years[::-1], np.digitize(df_agg.DateIn, grouping_years[::-1])
     )

     # calculate (adjusted) remaining lifetime before phase-out (+1 because assuming
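
Reversing the bins flips the rounding direction of ``np.digitize``: build years are now assigned to the next-earlier grouping year rather than the next-later one, matching how optimized capacities are grouped. A small sketch with hypothetical years:

    import numpy as np

    grouping_years = [1995, 2000, 2005, 2010]
    date_in = np.array([1998, 2003, 2010])
    old = np.take(grouping_years, np.digitize(date_in, grouping_years, right=True))
    new = np.take(grouping_years[::-1], np.digitize(date_in, grouping_years[::-1]))
    print(old)  # [2000 2005 2010] -> rounded up to the later grouping year
    print(new)  # [1995 2000 2010] -> rounded down to the earlier grouping year

Build years below the first grouping year would index past the end of the reversed bins, which is why the new warning block above drops them beforehand.
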
@@ -362,13 +373,20 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas
             )
         else:
             key = "central solid biomass CHP"
+        central_heat = n.buses.query(
+            "carrier == 'urban central heat'"
+        ).location.unique()
+        heat_buses = new_capacity.index.map(
+            lambda i: i + " urban central heat" if i in central_heat else ""
+        )
+
         n.madd(
             "Link",
             new_capacity.index,
             suffix=name_suffix,
             bus0=spatial.biomass.df.loc[new_capacity.index]["nodes"].values,
             bus1=new_capacity.index,
-            bus2=new_capacity.index + " urban central heat",
+            bus2=heat_buses,
             carrier=generator,
             p_nom=new_capacity / costs.at[key, "efficiency"],
             capital_cost=costs.at[key, "fixed"]
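
The lookup connects ``bus2`` only where an urban central heat bus actually exists; all other nodes get an empty string, which PyPSA interprets as an unused optional link bus. A toy version of the mapping with hypothetical node names:

    import pandas as pd

    central_heat = pd.Index(["DE0 1"])  # nodes that have an urban central heat bus
    new_capacity_index = pd.Index(["DE0 1", "DE0 2"])
    heat_buses = new_capacity_index.map(
        lambda i: i + " urban central heat" if i in central_heat else ""
    )
    print(list(heat_buses))  # ['DE0 1 urban central heat', '']
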
@@ -444,12 +462,25 @@ def add_heating_capacities_installed_before_baseyear(
         else:
             efficiency = costs.at[costs_name, "efficiency"]

-        for i, grouping_year in enumerate(grouping_years):
-            if int(grouping_year) + default_lifetime <= int(baseyear):
-                continue
-
-            # installation is assumed to be linear for the past default_lifetime years
-            ratio = (int(grouping_year) - int(grouping_years[i - 1])) / default_lifetime
+        valid_grouping_years = pd.Series(
+            [
+                int(grouping_year)
+                for grouping_year in grouping_years
+                if int(grouping_year) + default_lifetime > int(baseyear)
+                and int(grouping_year) < int(baseyear)
+            ]
+        )
+
+        # get the number of years covered by each grouping interval
+        _years = (
+            valid_grouping_years.diff()
+            .shift(-1)
+            .fillna(baseyear - valid_grouping_years.iloc[-1])
+        )
+        # installation is assumed to be linear over the valid grouping years
+        ratios = _years / _years.sum()
+
+        for ratio, grouping_year in zip(ratios, valid_grouping_years):
             n.madd(
                 "Link",

View File

@@ -23,13 +23,15 @@ def area(gdf):
 def allocate_sequestration_potential(
     gdf, regions, attr="conservative estimate Mt", threshold=3
 ):
-    gdf = gdf.loc[gdf[attr] > threshold, [attr, "geometry"]]
+    if isinstance(attr, str):
+        attr = [attr]
+    gdf = gdf.loc[gdf[attr].sum(axis=1) > threshold, attr + ["geometry"]]
     gdf["area_sqkm"] = area(gdf)
     overlay = gpd.overlay(regions, gdf, keep_geom_type=True)
     overlay["share"] = area(overlay) / overlay["area_sqkm"]
     adjust_cols = overlay.columns.difference({"name", "area_sqkm", "geometry", "share"})
     overlay[adjust_cols] = overlay[adjust_cols].multiply(overlay["share"], axis=0)
-    return overlay.dissolve("name", aggfunc="sum")[attr]
+    return overlay.dissolve("name", aggfunc="sum")[attr].sum(axis=1)


 if __name__ == "__main__":
@@ -37,7 +39,7 @@ if __name__ == "__main__":
         from _helpers import mock_snakemake

         snakemake = mock_snakemake(
-            "build_sequestration_potentials", simpl="", clusters="181"
+            "build_sequestration_potentials", simpl="", clusters="128"
         )
     set_scenario_config(snakemake)

View File

@@ -543,7 +543,7 @@ def add_BAU_constraints(n, config):
     ext_carrier_i = xr.DataArray(ext_i.carrier.rename_axis("Generator-ext"))
     lhs = p_nom.groupby(ext_carrier_i).sum()
     index = mincaps.index.intersection(lhs.indexes["carrier"])
-    rhs = mincaps[index].rename_axis("carrier")
+    rhs = mincaps[lhs.indexes["carrier"]].rename_axis("carrier")
    n.model.add_constraints(lhs >= rhs, name="bau_mincaps")
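
Selecting the right-hand side directly with the carriers present on the left-hand side keeps both sides of ``lhs >= rhs`` aligned on the same ``carrier`` coordinate. A small sketch with made-up capacities:

    import pandas as pd

    mincaps = pd.Series({"onwind": 10.0, "solar": 5.0, "OCGT": 1.0})
    lhs_carriers = pd.Index(["solar", "onwind"], name="carrier")  # as in lhs.indexes["carrier"]
    rhs = mincaps[lhs_carriers].rename_axis("carrier")
    print(rhs.to_dict())  # {'solar': 5.0, 'onwind': 10.0}

Unlike the previous intersection, this assumes every carrier appearing on the left-hand side has an entry in ``mincaps``; a missing carrier would now raise a ``KeyError`` instead of being silently dropped from the constraint.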