Merge branch 'master' into fix_env

This commit is contained in:
Martha Frysztacki 2024-07-01 15:10:18 +02:00 committed by GitHub
commit 061fd00bd1
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
37 changed files with 1665 additions and 95 deletions

View File

@ -87,6 +87,6 @@ repos:
# Check for FSFE REUSE compliance (licensing)
- repo: https://github.com/fsfe/reuse-tool
rev: v3.0.2
rev: v3.1.0a1
hooks:
- id: reuse

View File

@ -73,7 +73,7 @@ if config["foresight"] == "perfect":
rule all:
input:
expand(RESULTS + "graphs/costs.pdf", run=config["run"]["name"]),
expand(RESULTS + "graphs/costs.svg", run=config["run"]["name"]),
default_target: True

View File

@ -390,8 +390,8 @@ solar_thermal:
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#existing-capacities
existing_capacities:
grouping_years_power: [1895, 1920, 1950, 1955, 1960, 1965, 1970, 1975, 1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2020, 2025, 2030]
grouping_years_heat: [1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2020] # heat grouping years >= baseyear will be ignored
grouping_years_power: [1920, 1950, 1955, 1960, 1965, 1970, 1975, 1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2020, 2025]
grouping_years_heat: [1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2019] # heat grouping years >= baseyear will be ignored
threshold_capacity: 10
default_heating_lifetime: 20
conventional_carriers:
@ -621,6 +621,13 @@ sector:
solar: 3
offwind-ac: 3
offwind-dc: 3
enhanced_geothermal:
enable: false
flexible: true
max_hours: 240
max_boost: 0.25
var_cf: true
sustainability_factor: 0.0025
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#industry
industry:
@ -1184,6 +1191,9 @@ plotting:
waste: '#e3d37d'
other: '#000000'
geothermal: '#ba91b1'
geothermal heat: '#ba91b1'
geothermal district heat: '#d19D00'
geothermal organic rankine cycle: '#ffbf00'
AC: "#70af1d"
AC-AC: "#70af1d"
AC line: "#70af1d"

1
data/egs_costs.json Normal file

File diff suppressed because one or more lines are too long

View File

@ -145,3 +145,11 @@ limit_max_growth,,,
-- -- {carrier},GW,float,The historic maximum growth of a carrier
-- max_relative_growth,,,
-- -- {carrier},p.u.,float,The historic maximum relative growth of a carrier
,,,
enhanced_geothermal,,,
-- enable,--,"{true, false}",Add option to include Enhanced Geothermal Systems
-- flexible,--,"{true, false}",Add option for flexible operation (see Ricks et al. 2024)
-- max_hours,--,int,The maximum hours the reservoir can be charged under flexible operation
-- max_boost,--,float,The maximum boost in power output under flexible operation
-- var_cf,--,"{true, false}",Add option for variable capacity factor (see Ricks et al. 2024)
-- sustainability_factor,--,float,Share of sourced heat that is replenished by the earth's core (see details in `build_egs_potentials.py <https://github.com/PyPSA/pypsa-eur-sec/blob/master/scripts/build_egs_potentials.py>`_)

1 Unit Values Description
145 -- -- {carrier} GW float The historic maximum growth of a carrier
146 -- max_relative_growth
147 -- -- {carrier} p.u. float The historic maximum relative growth of a carrier
148
149 enhanced_geothermal
150 -- enable -- {true, false} Add option to include Enhanced Geothermal Systems
151 -- flexible -- {true, false} Add option for flexible operation (see Ricks et al. 2024)
152 -- max_hours -- int The maximum hours the reservoir can be charged under flexible operation
153 -- max_boost -- float The maximum boost in power output under flexible operation
154 -- var_cf -- {true, false} Add option for variable capacity factor (see Ricks et al. 2024)
155 -- sustainability_factor -- float Share of sourced heat that is replenished by the earth's core (see details in `build_egs_potentials.py <https://github.com/PyPSA/pypsa-eur-sec/blob/master/scripts/build_egs_potentials.py>`_)

View File

@ -7,8 +7,24 @@
Release Notes
##########################################
.. Upcoming Release
.. ================
Upcoming Release
================
* Set non-zero capital_cost for methanol stores to avoid unrealistic storage sizes
* Set p_nom = p_nom_min for generators with baseyear == grouping_year in add_existing_baseyear. This has no effect on the optimization but helps n.statistics to correctly report already installed capacities.
* Reverted outdated hotfix for doubled renewable capacity in myopic optimization.
* Added Enhanced Geothermal Systems for generation of electricity and district heat.
Cost and available capacity assumptions based on `Aghahosseini et al. (2020)
<https://www.sciencedirect.com/science/article/pii/S0306261920312551>`__.
See configuration ``sector: enhanced_geothermal`` for details; by default switched off.
* Partially revert https://github.com/PyPSA/pypsa-eur/pull/967 to return to old grouping year logic (which was mostly correct)
* Bugfix: Correctly read in threshold capacity below which to remove components from previous planning horizons in :mod:`add_brownfield`.
PyPSA-Eur 0.11.0 (25th May 2024)
=====================================
@ -808,7 +824,7 @@ PyPSA-Eur 0.9.0 (5th January 2024)
* The minimum PyPSA version is now 0.26.1.
* Update to ``tsam>=0.2.3`` for performance improvents in temporal clustering.
* Update to ``tsam>=0.2.3`` for performance improvements in temporal clustering.
* Pin ``snakemake`` version to below 8.0.0, as the new version is not yet
supported. The next release will switch to the requirement ``snakemake>=8``.

View File

@ -20,7 +20,7 @@ dependencies:
- openpyxl!=3.1.1
- pycountry
- seaborn
- snakemake-minimal>=8.11
- snakemake-minimal>=8.14
- memory_profiler
- yaml
- pytables

View File

@ -58,7 +58,7 @@ rule build_powerplants:
logs("build_powerplants.log"),
threads: 1
resources:
mem_mb=5000,
mem_mb=7000,
conda:
"../envs/environment.yaml"
script:

View File

@ -902,6 +902,34 @@ def input_profile_offwind(w):
}
rule build_egs_potentials:
params:
snapshots=config_provider("snapshots"),
sector=config_provider("sector"),
costs=config_provider("costs"),
input:
egs_cost="data/egs_costs.json",
regions=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"),
air_temperature=(
resources("temp_air_total_elec_s{simpl}_{clusters}.nc")
if config_provider("sector", "enhanced_geothermal", "var_cf")
else []
),
output:
egs_potentials=resources("egs_potentials_s{simpl}_{clusters}.csv"),
egs_overlap=resources("egs_overlap_s{simpl}_{clusters}.csv"),
egs_capacity_factors=resources("egs_capacity_factors_s{simpl}_{clusters}.csv"),
threads: 1
resources:
mem_mb=2000,
log:
logs("build_egs_potentials_s{simpl}_{clusters}.log"),
conda:
"../envs/environment.yaml"
script:
"../scripts/build_egs_potentials.py"
rule prepare_sector_network:
params:
time_resolution=config_provider("clustering", "temporal", "resolution_sector"),
@ -1022,6 +1050,21 @@ rule prepare_sector_network:
if config_provider("sector", "solar_thermal")(w)
else []
),
egs_potentials=lambda w: (
resources("egs_potentials_s{simpl}_{clusters}.csv")
if config_provider("sector", "enhanced_geothermal", "enable")(w)
else []
),
egs_overlap=lambda w: (
resources("egs_overlap_s{simpl}_{clusters}.csv")
if config_provider("sector", "enhanced_geothermal", "enable")(w)
else []
),
egs_capacity_factors=lambda w: (
resources("egs_capacity_factors_s{simpl}_{clusters}.csv")
if config_provider("sector", "enhanced_geothermal", "enable")(w)
else []
),
output:
RESULTS
+ "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",

View File

@ -233,9 +233,9 @@ rule plot_summary:
eurostat="data/eurostat/Balances-April2023",
co2="data/bundle/eea/UNFCCC_v23.csv",
output:
costs=RESULTS + "graphs/costs.pdf",
energy=RESULTS + "graphs/energy.pdf",
balances=RESULTS + "graphs/balances-energy.pdf",
costs=RESULTS + "graphs/costs.svg",
energy=RESULTS + "graphs/energy.svg",
balances=RESULTS + "graphs/balances-energy.svg",
threads: 2
resources:
mem_mb=10000,

View File

@ -65,7 +65,7 @@ rule add_brownfield:
H2_retrofit_capacity_per_CH4=config_provider(
"sector", "H2_retrofit_capacity_per_CH4"
),
threshold_capacity=config_provider("existing_capacities", " threshold_capacity"),
threshold_capacity=config_provider("existing_capacities", "threshold_capacity"),
snapshots=config_provider("snapshots"),
drop_leap_day=config_provider("enable", "drop_leap_day"),
carriers=config_provider("electricity", "renewable_carriers"),

View File

@ -406,7 +406,7 @@ def mock_snakemake(
from snakemake.api import Workflow
from snakemake.common import SNAKEFILE_CHOICES
from snakemake.script import Snakemake
from snakemake.settings import (
from snakemake.settings.types import (
ConfigSettings,
DAGSettings,
ResourceSettings,

View File

@ -61,7 +61,7 @@ def add_existing_renewables(df_agg, costs):
Append existing renewables to the df_agg pd.DataFrame with the conventional
power plants.
"""
tech_map = {"solar": "PV", "onwind": "Onshore", "offwind": "Offshore"}
tech_map = {"solar": "PV", "onwind": "Onshore", "offwind-ac": "Offshore"}
countries = snakemake.config["countries"] # noqa: F841
irena = pm.data.IRENASTAT().powerplant.convert_country_to_alpha2()
@ -109,12 +109,13 @@ def add_existing_renewables(df_agg, costs):
name = f"{node}-{carrier}-{year}"
capacity = nodal_df.loc[node, year]
if capacity > 0.0:
cost_key = carrier.split("-")[0]
df_agg.at[name, "Fueltype"] = carrier
df_agg.at[name, "Capacity"] = capacity
df_agg.at[name, "DateIn"] = year
df_agg.at[name, "lifetime"] = costs.at[carrier, "lifetime"]
df_agg.at[name, "lifetime"] = costs.at[cost_key, "lifetime"]
df_agg.at[name, "DateOut"] = (
year + costs.at[carrier, "lifetime"] - 1
year + costs.at[cost_key, "lifetime"] - 1
)
df_agg.at[name, "cluster_bus"] = node
@ -200,19 +201,19 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas
phased_out = df_agg[df_agg["DateOut"] < baseyear].index
df_agg.drop(phased_out, inplace=True)
older_assets = (df_agg.DateIn < min(grouping_years)).sum()
if older_assets:
newer_assets = (df_agg.DateIn > max(grouping_years)).sum()
if newer_assets:
logger.warning(
f"There are {older_assets} assets with build year "
f"before first power grouping year {min(grouping_years)}. "
f"There are {newer_assets} assets with build year "
f"after last power grouping year {max(grouping_years)}. "
"These assets are dropped and not considered."
"Consider to redefine the grouping years to keep them."
)
to_drop = df_agg[df_agg.DateIn < min(grouping_years)].index
to_drop = df_agg[df_agg.DateIn > max(grouping_years)].index
df_agg.drop(to_drop, inplace=True)
df_agg["grouping_year"] = np.take(
grouping_years[::-1], np.digitize(df_agg.DateIn, grouping_years[::-1])
grouping_years, np.digitize(df_agg.DateIn, grouping_years, right=True)
)
# calculate (adjusted) remaining lifetime before phase-out (+1 because assuming
@ -254,7 +255,8 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas
name_suffix = f" {generator}{suffix}-{grouping_year}"
name_suffix_by = f" {generator}{suffix}-{baseyear}"
asset_i = capacity.index + name_suffix
if generator in ["solar", "onwind", "offwind"]:
if generator in ["solar", "onwind", "offwind-ac"]:
cost_key = generator.split("-")[0]
# to consider electricity grid connection costs or a split between
# solar utility and rooftop as well, rather take cost assumptions
# from existing network than from the cost database
@ -270,9 +272,9 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas
# this is for the year 2020
if not already_build.empty:
n.generators.loc[already_build, "p_nom_min"] = capacity.loc[
already_build.str.replace(name_suffix, "")
].values
n.generators.loc[already_build, "p_nom"] = n.generators.loc[
already_build, "p_nom_min"
] = capacity.loc[already_build.str.replace(name_suffix, "")].values
new_capacity = capacity.loc[new_build.str.replace(name_suffix, "")]
if "m" in snakemake.wildcards.clusters:
@ -299,10 +301,10 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas
/ len(inv_ind), # split among regions in a country
marginal_cost=marginal_cost,
capital_cost=capital_cost,
efficiency=costs.at[generator, "efficiency"],
efficiency=costs.at[cost_key, "efficiency"],
p_max_pu=p_max_pu,
build_year=grouping_year,
lifetime=costs.at[generator, "lifetime"],
lifetime=costs.at[cost_key, "lifetime"],
)
else:
@ -318,10 +320,10 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas
p_nom=new_capacity,
marginal_cost=marginal_cost,
capital_cost=capital_cost,
efficiency=costs.at[generator, "efficiency"],
efficiency=costs.at[cost_key, "efficiency"],
p_max_pu=p_max_pu.rename(columns=n.generators.bus),
build_year=grouping_year,
lifetime=costs.at[generator, "lifetime"],
lifetime=costs.at[cost_key, "lifetime"],
)
else:
@ -462,6 +464,11 @@ def add_heating_capacities_installed_before_baseyear(
else:
efficiency = costs.at[costs_name, "efficiency"]
too_large_grouping_years = [gy for gy in grouping_years if gy >= int(baseyear)]
if too_large_grouping_years:
logger.warning(
f"Grouping years >= baseyear are ignored. Dropping {too_large_grouping_years}."
)
valid_grouping_years = pd.Series(
[
int(grouping_year)
@ -471,12 +478,12 @@ def add_heating_capacities_installed_before_baseyear(
]
)
assert valid_grouping_years.is_monotonic_increasing
# get number of years of each interval
_years = (
valid_grouping_years.diff()
.shift(-1)
.fillna(baseyear - valid_grouping_years.iloc[-1])
)
_years = valid_grouping_years.diff()
# Fill NA from .diff() with value for the first interval
_years[0] = valid_grouping_years[0] - baseyear + default_lifetime
# Installation is assumed to be linear for the past
ratios = _years / _years.sum()
@ -594,13 +601,13 @@ if __name__ == "__main__":
snakemake = mock_snakemake(
"add_existing_baseyear",
configfiles="config/test/config.myopic.yaml",
configfiles="config/config.yaml",
simpl="",
clusters="37",
ll="v1.0",
clusters="20",
ll="v1.5",
opts="",
sector_opts="8760-T-H-B-I-A-dist1",
planning_horizons=2020,
sector_opts="none",
planning_horizons=2030,
)
configure_logging(snakemake)

View File

@ -4,6 +4,21 @@
# SPDX-License-Identifier: MIT
"""
Build historical annual ammonia production per country in ktonNH3/a.
Inputs
-------
- ``data/bundle-sector/myb1-2017-nitro.xls``
Outputs
-------
- ``resources/ammonia_production.csv``
Description
-------
This function takes data from the `Minerals Yearbook <https://www.usgs.gov/centers/national-minerals-information-center/nitrogen-statistics-and-information>`_ (June 2024) published by the US Geological Survey (USGS) and the National Minerals Information Center and extracts the annual ammonia production per country in ktonN/a. The data is converted to ktonNH3/a.
"""
import country_converter as coco

View File

@ -6,11 +6,40 @@
Build coefficient of performance (COP) time series for air- or ground-sourced
heat pumps.
The COP is a function of the temperature difference between source and
sink.
The COP is approximated as a quadratic function of the temperature difference between source and
sink, based on Staffell et al. 2012.
The quadratic regression used is based on Staffell et al. (2012)
https://doi.org/10.1039/C2EE22653G.
This rule is executed in ``build_sector.smk``.
Relevant Settings
-----------------
.. code:: yaml
heat_pump_sink_T:
Inputs:
-------
- ``resources/<run_name>/temp_soil_total_elec_s<simpl>_<clusters>.nc``: Soil temperature (total) time series.
- ``resources/<run_name>/temp_soil_rural_elec_s<simpl>_<clusters>.nc``: Soil temperature (rural) time series.
- ``resources/<run_name>/temp_soil_urban_elec_s<simpl>_<clusters>.nc``: Soil temperature (urban) time series.
- ``resources/<run_name>/temp_air_total_elec_s<simpl>_<clusters>.nc``: Ambient air temperature (total) time series.
- ``resources/<run_name>/temp_air_rural_elec_s<simpl>_<clusters>.nc``: Ambient air temperature (rural) time series.
- ``resources/<run_name>/temp_air_urban_elec_s<simpl>_<clusters>.nc``: Ambient air temperature (urban) time series.
Outputs:
--------
- ``resources/cop_soil_total_elec_s<simpl>_<clusters>.nc``: COP (ground-sourced) time series (total).
- ``resources/cop_soil_rural_elec_s<simpl>_<clusters>.nc``: COP (ground-sourced) time series (rural).
- ``resources/cop_soil_urban_elec_s<simpl>_<clusters>.nc``: COP (ground-sourced) time series (urban).
- ``resources/cop_air_total_elec_s<simpl>_<clusters>.nc``: COP (air-sourced) time series (total).
- ``resources/cop_air_rural_elec_s<simpl>_<clusters>.nc``: COP (air-sourced) time series (rural).
- ``resources/cop_air_urban_elec_s<simpl>_<clusters>.nc``: COP (air-sourced) time series (urban).
References
----------
[1] Staffell et al., Energy & Environmental Science 11 (2012): A review of domestic heat pumps, https://doi.org/10.1039/C2EE22653G.
"""
import xarray as xr

View File

@ -3,7 +3,45 @@
#
# SPDX-License-Identifier: MIT
"""
Build heat demand time series using heating degree day (HDD) approximation.
This rule builds heat demand time series using heating degree day (HDD)
approximation.
Snapshots are resampled to daily time resolution and ``Atlite.convert.heat_demand`` is used to convert ambient temperature from the default weather cutout to heat demand time series for the respective cutout.
Heat demand is distributed by population to clustered onshore regions.
The rule is executed in ``build_sector.smk``.
.. seealso::
`Atlite.Cutout.heat_demand <https://atlite.readthedocs.io/en/master/ref_api.html#module-atlite.convert>`_
Relevant Settings
-----------------
.. code:: yaml
snapshots:
drop_leap_day:
Inputs
------
- ``resources/<run_name>/pop_layout_<scope>.nc``: Population layout (spatial population distribution).
- ``resources/<run_name>/regions_onshore_elec_s<simpl>_<clusters>.geojson``: Onshore region shapes.
- ``cutout``: Weather data cutout, as specified in config
Outputs
-------
- ``resources/daily_heat_demand_<scope>_elec_s<simpl>_<clusters>.nc``:
Relevant settings
-----------------
.. code:: yaml
atlite:
default_cutout:
"""
import atlite

View File

@ -4,6 +4,29 @@
# SPDX-License-Identifier: MIT
"""
Build district heat shares at each node, depending on investment year.
Inputs:
-------
- `resources/<run_name>/pop_layout.csv`: Population layout for each node: Total, urban and rural population.
- `resources/<run_name>/district_heat_share.csv`: Historical district heat share at each country. Output of `scripts/build_energy_totals.py`.
Outputs:
--------
- `resources/<run_name>/district_heat_share.csv`: District heat share at each node, potential for each investment year.
Relevant settings:
------------------
.. code:: yaml
sector:
district_heating:
energy:
energy_totals_year:
Notes:
------
- The district heat share is calculated as the share of urban population at each node, multiplied by the share of district heating in the respective country.
- The `sector.district_heating.potential` setting defines the max. district heating share.
- The max. share of district heating is increased by a progress factor, depending on the investment year (See `sector.district_heating.progress` setting).
"""
import logging

View File

@ -0,0 +1,249 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2023 @LukasFranken, The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
This rule extracts potential and cost for electricity generation through
enhanced geothermal systems.
For this, we use data from "From hot rock to useful energy..." by Aghahosseini, Breyer (2020)
'https://doi.org/10.1016/j.apenergy.2020.115769'
Note that the input data used here is not the same as in the paper, but was passed on by the authors.
The data provides a lon-lat gridded map of Europe (1° x 1°), with each grid cell assigned
a heat potential (in GWh) and a cost (in EUR/MW).
This scripts overlays that map with the network's regions, and builds a csv with CAPEX, OPEX and p_nom_max
"""
import logging
logger = logging.getLogger(__name__)
import json
import geopandas as gpd
import numpy as np
import pandas as pd
import xarray as xr
from shapely.geometry import Polygon
def prepare_egs_data(egs_file):
    """
    Process the original .json EGS data file into a more usable format.

    Parameters
    ----------
    egs_file : str
        Path to the JSON file with gridded EGS cost/potential data
        (1° x 1° lon-lat grid over Europe).

    Returns
    -------
    dict
        Mapping year -> GeoDataFrame with columns "CAPEX", "HeatSust",
        "PowerSust" and, per grid cell, a square polygon geometry centred
        on the original grid point.
    """
    with open(egs_file) as f:
        jsondata = json.load(f)

    def point_to_square(p, lon_extent=1.0, lat_extent=1.0):
        # Expand a lon/lat point into the rectangular grid cell centred on it.
        try:
            x, y = p.coords.xy[0][0], p.coords.xy[1][0]
        except IndexError:
            # Geometry without coordinates (e.g. empty); return unchanged.
            return p

        return Polygon(
            [
                [x - lon_extent / 2, y - lat_extent / 2],
                [x - lon_extent / 2, y + lat_extent / 2],
                [x + lon_extent / 2, y + lat_extent / 2],
                [x + lon_extent / 2, y - lat_extent / 2],
            ]
        )

    # years and LCOE brackets present in the input data
    years = [2015, 2020, 2025, 2030, 2035, 2040, 2045, 2050]
    lcoes = ["LCOE50", "LCOE100", "LCOE150"]

    egs_data = dict()

    for year in years:
        df = pd.DataFrame(columns=["Lon", "Lat", "CAPEX", "HeatSust", "PowerSust"])

        for lcoe in lcoes:
            for country_data in jsondata[lcoe]:
                try:
                    # NOTE(review): assumes country_data[0] is indexed by year
                    # and holds per-column lists of equal length — confirm
                    # against the data file layout.
                    country_df = pd.DataFrame(
                        columns=df.columns,
                        index=range(len(country_data[0][years.index(year)]["Lon"])),
                    )
                except TypeError:
                    # Entry not subscriptable for this year; use an empty frame.
                    country_df = pd.DataFrame(columns=df.columns, index=range(0))

                for col in df.columns:
                    country_df[col] = country_data[0][years.index(year)][col]

                if country_df.dropna().empty:
                    continue
                elif df.empty:
                    df = country_df.dropna()
                else:
                    df = pd.concat((df, country_df.dropna()), ignore_index=True)

        # build point geometries from lon/lat, then expand each to its grid cell
        gdf = gpd.GeoDataFrame(
            df.drop(columns=["Lon", "Lat"]), geometry=gpd.points_from_xy(df.Lon, df.Lat)
        ).reset_index(drop=True)
        gdf["geometry"] = gdf.geometry.apply(lambda geom: point_to_square(geom))

        egs_data[year] = gdf

    return egs_data
def prepare_capex(prepared_data):
    """
    Reverse-engineer 2020 CAPEX for all regions.

    The source paper provides data only for years and regions where LCOE <
    100 EUR/MWh. This implementation, however, starts with the costs for 2020
    for all regions and then adjusts the costs according to the user's chosen
    setting in the config file. For regions where cost data is available only
    from a later year (say 2035), the 2020 cost is back-filled using the
    average cost-reduction factor observed in regions where both years are
    available.

    Parameters
    ----------
    prepared_data : dict
        Mapping year -> GeoDataFrame, as returned by ``prepare_egs_data``.

    Returns
    -------
    gpd.GeoDataFrame
        One row per grid cell with columns "CAPEX" (for 2020) and
        "PowerSust" plus the cell geometry; only cells with positive
        sustainable power potential are kept.
    """
    default_year = 2020

    # gather all available CAPEX data into a (geometry x year) table
    capex_df = pd.DataFrame(columns=prepared_data.keys())

    for year in capex_df.columns:
        year_data = prepared_data[year].groupby("geometry").mean().reset_index()

        for g in year_data.geometry:
            # NOTE(review): g is drawn from year_data.geometry, so this guard
            # looks tautological; kept for safety ("weird but apparently
            # necessary" per original author).
            if g not in year_data.geometry.tolist():
                continue

            capex_df.loc[g, year] = year_data.loc[
                year_data.geometry == g, "CAPEX"
            ].values[0]

    capex_df = capex_df.loc[:, default_year:]

    # fill up missing values assuming cost reduction factors similar to existing values
    for sooner, later in zip(capex_df.columns[::-1][1:], capex_df.columns[::-1]):
        missing_mask = capex_df[sooner].isna()
        cr_factor = (
            capex_df.loc[~missing_mask, later] / capex_df.loc[~missing_mask, sooner]
        )

        capex_df.loc[missing_mask, sooner] = (
            capex_df.loc[missing_mask, later] / cr_factor.mean()
        )

    # harmonize capacity and CAPEX: keep only cells with positive potential
    p_nom_max = prepared_data[2050].groupby("geometry")["PowerSust"].mean()
    p_nom_max = p_nom_max.loc[p_nom_max > 0]

    capex_df = capex_df.loc[p_nom_max.index]

    data = (
        pd.concat((capex_df[default_year], p_nom_max), axis=1)
        .reset_index()
        .rename(columns={2020: "CAPEX"})
    )
    return gpd.GeoDataFrame(data, geometry=data.geometry)
def get_capacity_factors(network_regions_file, air_temperatures_file):
    """
    Compute EGS capacity-factor time series for each bus.

    EGS performance is higher at lower ambient temperatures because air
    cooling becomes more efficient. The relation between the deviation from
    the mean ambient temperature and the capacity factor is taken from
    Ricks et al.: The Role of Flexible Geothermal Power in Decarbonized
    Elec Systems (Supplementary Figure 20, https://zenodo.org/records/7093330);
    values outside the tabulated range are extrapolated linearly.
    """
    # tabulated temperature deviations and associated capacity factors
    temp_deviation = [-15, -10, -5, 0, 5, 10, 15, 20]
    factor = [1.17, 1.13, 1.07, 1, 0.925, 0.84, 0.75, 0.65]

    # dense interpolation over the tabulated range
    base_x = np.linspace(-15, 20, 200)
    base_y = np.interp(base_x, temp_deviation, factor)

    # linear extrapolation beyond the upper end of the range
    right_x = np.linspace(20, 25, 50)
    right_slope = (base_y[-1] - base_y[-2]) / (base_x[-1] - base_x[-2])
    right_y = right_x * right_slope - base_x[-1] * right_slope + base_y[-1]

    # linear extrapolation below the lower end of the range
    left_x = np.linspace(-20, -15, 50)
    left_slope = (base_y[1] - base_y[0]) / (base_x[1] - base_x[0])
    left_y = left_x * left_slope - base_x[0] * left_slope + base_y[0]

    grid_x = np.hstack((left_x, base_x, right_x))
    grid_y = np.hstack((left_y, base_y, right_y))

    regions = gpd.read_file(network_regions_file).set_crs(epsg=4326)
    bus_names = regions["name"]

    air_temp = xr.open_dataset(air_temperatures_file)

    snapshots = pd.date_range(freq="h", **snakemake.params.snapshots)
    capacity_factors = pd.DataFrame(index=snapshots)

    # Per bus: map the deviation from the bus's mean temperature to a
    # capacity factor using the lookup built above.
    for bus in bus_names:
        temp = air_temp.sel(name=bus).to_dataframe()["temperature"]
        capacity_factors[bus] = np.interp((temp - temp.mean()).values, grid_x, grid_y)

    return capacity_factors
if __name__ == "__main__":
    # When run outside Snakemake (e.g. interactively), fabricate the
    # snakemake object with default wildcards.
    if "snakemake" not in globals():
        from _helpers import mock_snakemake

        snakemake = mock_snakemake(
            "build_egs_potentials",
            simpl="",
            clusters=37,
        )

    egs_config = snakemake.params["sector"]["enhanced_geothermal"]
    costs_config = snakemake.params["costs"]

    # gridded EGS data -> per-cell 2020 CAPEX and sustainable potential
    egs_data = prepare_egs_data(snakemake.input.egs_cost)
    egs_data = prepare_capex(egs_data)

    egs_regions = egs_data.geometry

    network_regions = (
        gpd.read_file(snakemake.input.regions)
        .set_index("name", drop=True)
        .set_crs(epsg=4326)
    )

    # share of each EGS grid cell's area that falls into each network region
    overlap_matrix = pd.DataFrame(
        index=network_regions.index,
        columns=egs_data.index,
    )

    for name, polygon in network_regions.geometry.items():
        overlap_matrix.loc[name] = (
            egs_regions.intersection(polygon).area
        ) / egs_regions.area

    overlap_matrix.to_csv(snakemake.output["egs_overlap"])

    # the share of heat that is replenished from the earth's core.
    # we are not constraining ourselves to the sustainable share, but
    # inversely apply it to our underlying data, which refers to the
    # sustainable heat. Source: Relative magnitude of sustainable heat vs
    # nonsustainable heat in the paper "From hot rock to useful energy..."
    sustainability_factor = egs_config["sustainability_factor"]

    egs_data["p_nom_max"] = egs_data["PowerSust"] / sustainability_factor
    egs_data[["p_nom_max", "CAPEX"]].to_csv(snakemake.output["egs_potentials"])

    capacity_factors = get_capacity_factors(
        snakemake.input["regions"],
        snakemake.input["air_temperature"],
    )

    capacity_factors.to_csv(snakemake.output["egs_capacity_factors"])

View File

@ -312,7 +312,7 @@ if __name__ == "__main__":
fn = snakemake.input.synthetic
synthetic_load = pd.read_csv(fn, index_col=0, parse_dates=True)
# "UA" does not appear in synthetic load data
countries = list(set(countries) - set(["UA"]))
countries = list(set(countries) - set(["UA", "MD"]))
synthetic_load = synthetic_load.loc[snapshots, countries]
load = load.combine_first(synthetic_load)

View File

@ -3,12 +3,45 @@
#
# SPDX-License-Identifier: MIT
"""
Build total energy demands per country using JRC IDEES, eurostat, and EEA data.
Build total energy demands and carbon emissions per country using JRC IDEES,
eurostat, and EEA data.
- Country-specific data is read in :func:`build_eurostat`, :func:`build_idees` and `build_swiss`.
- :func:`build_energy_totals` then combines energy data from Eurostat, Swiss, and IDEES data and :func:`rescale_idees_from_eurostat` rescales IDEES data to match Eurostat data.
- :func:`build_district_heat_share` calculates the share of district heating for each country from IDEES data.
- Historical CO2 emissions are calculated in :func:`build_eea_co2` and :func:`build_eurostat_co2` and combined in :func:`build_co2_totals`.
Relevant Settings
-----------------
.. code:: yaml
countries:
energy:
Inputs
------
- `resources/<run_name>/nuts3_shapes.geojson`: NUTS3 shapes.
- `data/bundle/eea_UNFCCC_v23.csv`: CO2 emissions data from EEA.
- `data/switzerland-new_format-all_years.csv`: Swiss energy data.
- `data/gr-e-11.03.02.01.01-cc.csv`: Swiss transport data
- `data/bundle/jrc-idees`: JRC IDEES data.
- `data/district_heat_share.csv`: District heating shares.
- `data/eurostat/Balances-April2023`: Eurostat energy balances.
- `data/eurostat/eurostat-household_energy_balances-february_2024.csv`: Eurostat household energy balances.
Outputs
-------
- `resources/<run_name>/energy_totals.csv`: Energy totals per country, sector and year.
- `resources/<run_name>/co2_totals.csv`: CO2 emissions per country, sector and year.
- `resources/<run_name>/transport_data.csv`: Transport data per country and year.
- `resources/<run_name>/district_heat_share.csv`: District heating share by country and year.
"""
import logging
import multiprocessing as mp
from functools import partial
from typing import List
import country_converter as coco
import geopandas as gpd
@ -22,16 +55,54 @@ logger = logging.getLogger(__name__)
idx = pd.IndexSlice
def cartesian(s1, s2):
def cartesian(s1: pd.Series, s2: pd.Series) -> pd.DataFrame:
"""
Cartesian product of two pd.Series.
Compute the Cartesian product of two pandas Series.
Parameters
----------
s1: pd.Series
The first pandas Series
s2: pd.Series:
The second pandas Series.
Returns
----------
pd.DataFrame
A DataFrame representing the Cartesian product of s1 and s2.
Examples
--------
>>> s1 = pd.Series([1, 2, 3], index=["a", "b", "c"])
>>> s2 = pd.Series([4, 5, 6], index=["d", "e", "f"])
>>> cartesian(s1, s2)
d e f
a 4 5 6
b 8 10 12
c 12 15 18
"""
return pd.DataFrame(np.outer(s1, s2), index=s1.index, columns=s2.index)
def reverse(dictionary):
def reverse(dictionary: dict) -> dict:
"""
Reverses a keys and values of a dictionary.
Reverses the keys and values of a dictionary.
Parameters
----------
dictionary : dict
The dictionary to be reversed.
Returns
-------
dict
A new dictionary with the keys and values reversed.
Examples
--------
>>> d = {"a": 1, "b": 2, "c": 3}
>>> reverse(d)
{1: 'a', 2: 'b', 3: 'c'}
"""
return {v: k for k, v in dictionary.items()}
@ -68,7 +139,28 @@ to_ipcc = {
}
def eurostat_per_country(input_eurostat, country):
def eurostat_per_country(input_eurostat: str, country: str) -> pd.DataFrame:
"""
Read energy balance data for a specific country from Eurostat.
Parameters
----------
input_eurostat : str
Path to the directory containing Eurostat data files.
country : str
Country code for the specific country.
Returns
-------
pd.DataFrame
Concatenated energy balance data for the specified country.
Notes
-----
- The function reads `<input_eurostat>/<country>.-Energy-balance-sheets-April-2023-edition.xlsb`
- It removes the "Cover" sheet from the data and concatenates all the remaining sheets into a single DataFrame.
"""
filename = (
f"{input_eurostat}/{country}-Energy-balance-sheets-April-2023-edition.xlsb"
)
@ -83,10 +175,38 @@ def eurostat_per_country(input_eurostat, country):
return pd.concat(sheet)
def build_eurostat(input_eurostat, countries, nprocesses=1, disable_progressbar=False):
def build_eurostat(
input_eurostat: str,
countries: List[str],
nprocesses: int = 1,
disable_progressbar: bool = False,
) -> pd.DataFrame:
"""
Return multi-index for all countries' energy data in TWh/a.
Parameters:
-----------
input_eurostat : str
Path to the Eurostat database.
countries : List[str]
List of countries for which energy data is to be retrieved.
nprocesses : int, optional
Number of processes to use for parallel execution, by default 1.
disable_progressbar : bool, optional
Whether to disable the progress bar, by default False.
Returns:
--------
pd.DataFrame
Multi-index DataFrame containing energy data for all countries in TWh/a.
Notes:
------
- The function first renames the countries in the input list using the `idees_rename` mapping and removes "CH".
- It then reads country-wise data using :func:`eurostat_per_country` into a single DataFrame.
- The data is reordered, converted to TWh/a, and missing values are filled.
"""
countries = {idees_rename.get(country, country) for country in countries} - {"CH"}
func = partial(eurostat_per_country, input_eurostat)
@ -152,9 +272,20 @@ def build_eurostat(input_eurostat, countries, nprocesses=1, disable_progressbar=
return df
def build_swiss():
def build_swiss() -> pd.DataFrame:
"""
Return a pd.DataFrame of Swiss energy data in TWh/a.
Returns
--------
pd.DataFrame
Swiss energy data in TWh/a.
Notes
-----
- Reads Swiss energy data from `data/switzerland-new_format-all_years.csv`.
- Reshapes and renames data.
- Converts energy units from PJ/a to TWh/a.
"""
fn = snakemake.input.swiss
@ -174,7 +305,29 @@ def build_swiss():
return df
def idees_per_country(ct, base_dir):
def idees_per_country(ct: str, base_dir: str) -> pd.DataFrame:
"""
Calculate energy totals per country using JRC-IDEES data.
Parameters
----------
ct : str
The country code.
base_dir : str
The base directory where the JRC-IDEES data files are located.
Returns
-------
pd.DataFrame
A DataFrame containing the energy totals per country. Columns are energy uses.
Notes
-----
- Retrieves JRC-IDEES data for the specified country from `base_dir` for residential, tertiary, and transport sectors.
- Calculates energy totals for each sector, stores them in a dictionary and returns them as a DataFrame.
- Assertions ensure indices of JRC-IDEES data are as expected.
"""
ct_idees = idees_rename.get(ct, ct)
fn_residential = f"{base_dir}/JRC-IDEES-2015_Residential_{ct_idees}.xlsx"
fn_tertiary = f"{base_dir}/JRC-IDEES-2015_Tertiary_{ct_idees}.xlsx"
@ -372,7 +525,27 @@ def idees_per_country(ct, base_dir):
return pd.DataFrame(ct_totals)
def build_idees(countries):
def build_idees(countries: List[str]) -> pd.DataFrame:
"""
Build energy totals from IDEES database for the given list of countries
using :func:`idees_per_country`.
Parameters
----------
countries : List[str]
List of country names for which energy totals need to be built.
Returns
-------
pd.DataFrame
Energy totals for the given countries.
Notes
-----
- Retrieves energy totals per country and year using :func:`idees_per_country`.
- Returns a DataFrame with columns: country, year, and energy totals for different categories.
"""
nprocesses = snakemake.threads
disable_progress = snakemake.config["run"].get("disable_progressbar", False)
@ -403,7 +576,42 @@ def build_idees(countries):
return totals
def build_energy_totals(countries, eurostat, swiss, idees):
def build_energy_totals(
countries: List[str],
eurostat: pd.DataFrame,
swiss: pd.DataFrame,
idees: pd.DataFrame,
) -> pd.DataFrame:
"""
Combine energy totals for the specified countries from Eurostat, Swiss, and
IDEES data.
Parameters
----------
countries : List[str]
List of country codes for which energy totals are to be calculated.
eurostat : pd.DataFrame
Eurostat energy balances dataframe.
swiss : pd.DataFrame
Swiss energy data dataframe.
idees : pd.DataFrame
IDEES energy data dataframe.
Returns
-------
pd.DataFrame
Energy totals dataframe for the given countries.
Notes
-----
- Missing values are filled based on Eurostat energy balances and average values in EU28.
- The function also performs specific calculations for Norway and splits road, rail, and aviation traffic for non-IDEES data.
References
----------
- `Norway heating data <http://www.ssb.no/en/energi-og-industri/statistikker/husenergi/hvert-3-aar/2014-07-14>`_
"""
eurostat_fuels = {"electricity": "Electricity", "total": "Total all products"}
eurostat_countries = eurostat.index.levels[0]
eurostat_years = eurostat.index.levels[1]
@ -591,7 +799,30 @@ def build_energy_totals(countries, eurostat, swiss, idees):
return df
def build_district_heat_share(countries, idees):
def build_district_heat_share(countries: List[str], idees: pd.DataFrame) -> pd.Series:
"""
Calculate the share of district heating for each country.
Parameters
----------
countries : List[str]
List of country codes for which to calculate district heating share.
idees : pd.DataFrame
IDEES energy data dataframe.
Returns
-------
pd.Series
Series with the district heating share for each country.
Notes
-----
- The function calculates the district heating share as the sum of residential and services derived heat, divided by the sum of residential and services thermal uses.
- The district heating share is then reindexed to match the provided list of countries.
- Missing district heating shares are filled from `data/district_heat_share.csv`.
- The function makes a conservative assumption and takes the minimum district heating share from both the IDEES data and `data/district_heat_share.csv`.
"""
# district heating share
district_heat = idees[["derived heat residential", "derived heat services"]].sum(
axis=1
@ -625,9 +856,37 @@ def build_district_heat_share(countries, idees):
return district_heat_share
def build_eea_co2(input_co2, year=1990, emissions_scope="CO2"):
# https://www.eea.europa.eu/data-and-maps/data/national-emissions-reported-to-the-unfccc-and-to-the-eu-greenhouse-gas-monitoring-mechanism-16
# downloaded 201228 (modified by EEA last on 201221)
def build_eea_co2(
input_co2: str, year: int = 1990, emissions_scope: str = "CO2"
) -> pd.DataFrame:
"""
Calculate CO2 emissions for a given year based on EEA data in Mt.
Parameters
----------
input_co2 : str
Path to the input CSV file with CO2 data.
year : int, optional
Year for which to calculate emissions, by default 1990.
emissions_scope : str, optional
Scope of the emissions to consider, by default "CO2".
Returns
-------
pd.DataFrame
DataFrame with CO2 emissions for the given year.
Notes
-----
- The function reads the `input_co2` data and for a specific `year` and `emission scope`
- It calculates "industrial non-elec" and "agriculture" emissions from that data
- It drops unneeded columns and converts the emissions to Mt.
References
----------
- `EEA CO2 data <https://www.eea.europa.eu/data-and-maps/data/national-emissions-reported-to-the-unfccc-and-to-the-eu-greenhouse-gas-monitoring-mechanism-16>`_ (downloaded 201228, modified by EEA last on 201221)
"""
df = pd.read_csv(input_co2, encoding="latin-1", low_memory=False)
df.replace(dict(Year="1985-1987"), 1986, inplace=True)
@ -673,11 +932,43 @@ def build_eea_co2(input_co2, year=1990, emissions_scope="CO2"):
]
emissions.drop(columns=to_drop, inplace=True)
# convert from Gg to Mt
# convert from Gg (kt) to Mt
return emissions / 1e3
def build_eurostat_co2(eurostat, year=1990):
def build_eurostat_co2(eurostat: pd.DataFrame, year: int = 1990) -> pd.Series:
"""
Calculate CO2 emissions for a given year based on Eurostat fuel consumption
data and fuel-specific emissions.
Parameters
----------
eurostat : pd.DataFrame
DataFrame with Eurostat data.
year : int, optional
Year for which to calculate emissions, by default 1990.
Returns
-------
pd.Series
Series with CO2 emissions for the given year.
Notes
-----
- The function hard-sets fuel-specific emissions:
- solid fuels: 0.36 tCO2_equi/MW_th (approximates coal)
- oil: 0.285 tCO2_equi/MW_th (average of distillate and residue)
- natural gas: 0.2 tCO2_equi/MW_th
- It then multiplies the Eurostat fuel consumption data for `year` by the specific emissions and sums the result.
References
----------
- Oil values from `EIA <https://www.eia.gov/tools/faqs/faq.cfm?id=74&t=11>`_
- Distillate oil (No. 2) 0.276
- Residual oil (No. 6) 0.298
- `EIA Electricity Annual <https://www.eia.gov/electricity/annual/html/epa_a_03.html>`_
"""
eurostat_year = eurostat.xs(year, level="year")
specific_emissions = pd.Series(index=eurostat.columns, dtype=float)
@ -687,15 +978,34 @@ def build_eurostat_co2(eurostat, year=1990):
specific_emissions["Oil (total)"] = 0.285 # Average of distillate and residue
specific_emissions["Gas"] = 0.2 # For natural gas
# oil values from https://www.eia.gov/tools/faqs/faq.cfm?id=74&t=11
# Distillate oil (No. 2) 0.276
# Residual oil (No. 6) 0.298
# https://www.eia.gov/electricity/annual/html/epa_a_03.html
return eurostat_year.multiply(specific_emissions).sum(axis=1)
def build_co2_totals(countries, eea_co2, eurostat_co2):
def build_co2_totals(
countries: List[str], eea_co2: pd.DataFrame, eurostat_co2: pd.DataFrame
) -> pd.DataFrame:
"""
Combine CO2 emissions data from EEA and Eurostat for a list of countries.
Parameters
----------
countries : List[str]
List of country codes for which CO2 totals need to be built.
eea_co2 : pd.DataFrame
DataFrame with EEA CO2 emissions data.
eurostat_co2 : pd.DataFrame
DataFrame with Eurostat CO2 emissions data.
Returns
-------
pd.DataFrame
Combined CO2 emissions data for the given countries.
Notes
-----
- The function combines the CO2 emissions from EEA and Eurostat into a single DataFrame for the given countries.
"""
co2 = eea_co2.reindex(countries)
for ct in pd.Index(countries).intersection(["BA", "RS", "AL", "ME", "MK"]):
@ -722,9 +1032,38 @@ def build_co2_totals(countries, eea_co2, eurostat_co2):
return co2
def build_transport_data(countries, population, idees):
# first collect number of cars
def build_transport_data(
countries: List[str], population: pd.DataFrame, idees: pd.DataFrame
) -> pd.DataFrame:
"""
Build transport data for a set of countries based on IDEES data.
Parameters
----------
countries : List[str]
List of country codes.
population : pd.DataFrame
DataFrame with population data.
idees : pd.DataFrame
DataFrame with IDEES data.
Returns
-------
pd.DataFrame
DataFrame with transport data.
Notes
-----
- The function first collects the number of passenger cars.
- For Switzerland, it reads the data from `data/gr-e-11.03.02.01.01-cc.csv`.
- It fills missing data on the number of cars and fuel efficiency with average data.
References
----------
- Swiss transport data: `BFS <https://www.bfs.admin.ch/bfs/en/home/statistics/mobility-transport/transport-infrastructure-vehicles/vehicles/road-vehicles-stock-level-motorisation.html>`_
"""
# first collect number of cars
transport_data = pd.DataFrame(idees["passenger cars"])
countries_without_ch = set(countries) - {"CH"}
@ -735,7 +1074,6 @@ def build_transport_data(countries, population, idees):
transport_data = transport_data.reindex(index=new_index)
# https://www.bfs.admin.ch/bfs/en/home/statistics/mobility-transport/transport-infrastructure-vehicles/vehicles/road-vehicles-stock-level-motorisation.html
if "CH" in countries:
fn = snakemake.input.swiss_transport
swiss_cars = pd.read_csv(fn, index_col=0).loc[2000:2015, ["passenger cars"]]
@ -782,16 +1120,38 @@ def build_transport_data(countries, population, idees):
def rescale_idees_from_eurostat(
idees_countries,
energy,
eurostat,
):
idees_countries: List[str], energy: pd.DataFrame, eurostat: pd.DataFrame
) -> pd.DataFrame:
"""
Takes JRC IDEES data from 2015 and rescales it by the ratio of the eurostat
data and the 2015 eurostat data.
Takes JRC IDEES data from 2015 and rescales it by the ratio of the Eurostat
data and the 2015 Eurostat data.
Missing data: ['passenger car efficiency', 'passenger cars']
missing data: ['passenger car efficiency', 'passenger cars']
Parameters
----------
idees_countries : List[str]
List of IDEES country codes.
energy : pd.DataFrame
DataFrame with JRC IDEES data.
eurostat : pd.DataFrame
DataFrame with Eurostat data.
Returns
-------
pd.DataFrame
DataFrame with rescaled IDEES data.
Notes
-----
- The function first reads in the Eurostat data for 2015 and calculates the ratio of that data with other Eurostat data.
- This ratio is mapped to the IDEES data.
References
----------
- JRC IDEES data: `JRC IDEES <https://ec.europa.eu/jrc/en/publication/eur-scientific-and-technical-research-reports/jrc-idees>`_
- Eurostat data: `Eurostat <https://ec.europa.eu/eurostat/data/database>`_
"""
main_cols = ["Total all products", "Electricity"]
# read in the eurostat data for 2015
eurostat_2015 = eurostat.xs(2015, level="year")[main_cols]
@ -959,10 +1319,25 @@ def rescale_idees_from_eurostat(
return energy
def update_residential_from_eurostat(energy):
def update_residential_from_eurostat(energy: pd.DataFrame) -> pd.DataFrame:
"""
Updates energy balances for residential from disaggregated data from
Eurostat.
Eurostat, mutating the input DataFrame in place.
Parameters
----------
energy : pd.DataFrame
DataFrame with energy data.
Returns
-------
pd.DataFrame
DataFrame with updated energy balances.
Notes
-----
- The function first reads in the Eurostat data for households and maps the energy types to the corresponding Eurostat codes.
- For each energy type, it selects the corresponding data, converts units, and drops unnecessary data.
"""
eurostat_households = pd.read_csv(snakemake.input.eurostat_households)

View File

@ -5,6 +5,38 @@
"""
Builds table of existing heat generation capacities for initial planning
horizon.
Existing heat generation capacities are distributed to nodes based on population.
Within the nodes, the capacities are distributed to sectors (residential and services) based on sectoral consumption and urban/rural based population distribution.
Inputs:
-------
- Existing heating generators: `data/existing_heating_raw.csv` per country
- Population layout: `resources/{run_name}/pop_layout_s<simpl>_<clusters>.csv`. Output of `scripts/build_clustered_population_layout.py`
- Population layout with energy demands: `resources/<run_name>/pop_weighted_energy_totals_s<simpl>_<clusters>.csv`
- District heating share: `resources/<run_name>/district_heat_share_elec_s<simpl>_<clusters>_<planning_horizons>.csv`
Outputs:
--------
- Existing heat generation capacities distributed to nodes: `resources/{run_name}/existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv`
Relevant settings:
------------------
.. code:: yaml
scenario:
planning_horizons
sector:
existing_capacities:
Notes:
------
- Data for Albania, Montenegro and Macedonia is not included in input database and assumed 0.
- Coal and oil boilers are assimilated to oil boilers.
- All ground-source heat pumps are assumed in rural areas and all air-source heat pumps are assumed to be in urban areas.
References:
-----------
- "Mapping and analyses of the current and future (2020 - 2030) heating/cooling fuel deployment (fossil/renewables)" (https://energy.ec.europa.eu/publications/mapping-and-analyses-current-and-future-2020-2030-heatingcooling-fuel-deployment-fossilrenewables-1_en)
"""
import country_converter as coco
import numpy as np

View File

@ -4,6 +4,17 @@
# SPDX-License-Identifier: MIT
"""
Approximate heat demand for all weather years.
:func:`approximate_heat_demand` approximates annual heat demand based on energy totals and heating degree days (HDD) using a regression of heat demand on HDDs.
Inputs
------
- `resources/<run_name>/energy_totals.csv`: Energy consumption by sector (columns), country and year. Output of :func:`scripts.build_energy_totals.py`.
- `data/era5-annual-HDD-per-country.csv`: Number of heating degree days by year (columns) and country (index).
Outputs
-------
- `resources/<run_name>/heat_totals.csv`: Approximated annual heat demand for each country.
"""
from itertools import product
@ -14,7 +25,30 @@ from numpy.polynomial import Polynomial
idx = pd.IndexSlice
def approximate_heat_demand(energy_totals, hdd):
def approximate_heat_demand(energy_totals: pd.DataFrame, hdd: pd.DataFrame):
"""
Approximate heat demand for a set of countries based on energy totals and
heating degree days (HDD). A polynomial regression of heat demand on HDDs
is performed on the data from 2007 to 2021. Then, for 2022 and 2023, the
heat demand is estimated from known HDDs based on the regression.
Parameters
----------
energy_totals : pd.DataFrame
DataFrame with energy consumption by sector (columns), country and year. Output of :func:`scripts.build_energy_totals.py`.
hdd : pd.DataFrame
DataFrame with number of heating degree days by year (columns) and country (index).
Returns
-------
pd.DataFrame
DataFrame with approximated heat demand for each country.
Notes
-----
- Missing data is filled forward for GB in 2020 and backward for CH from 2007 to 2009.
- If only one year of heating data is available for a country, a point (0, 0) is added to make the polynomial fit work.
"""
countries = hdd.columns.intersection(energy_totals.index.levels[0])

View File

@ -3,7 +3,31 @@
#
# SPDX-License-Identifier: MIT
"""
Build hourly heat demand time series from daily ones.
Build hourly heat demand time series from daily heat demand.
Water and space heating demand profiles are generated using intraday profiles from BDEW. Different profiles are used for the residential and services sectors as well as weekdays and weekend.
The daily heat demand is multiplied by the intraday profile to obtain the hourly heat demand time series. The rule is executed in ``build_sector.smk``.
Relevant Settings
-----------------
.. code:: yaml
snapshots:
drop_leap_day:
Inputs
------
- ``data/heat_load_profile_BDEW.csv``: Intraday heat profile for water and space heating demand for the residential and services sectors for weekends and weekdays.
- ``resources/daily_heat_demand_<scope>_elec_s<simpl>_<clusters>.nc``: Daily heat demand per cluster.
Outputs
-------
- ``resources/hourly_heat_demand_<scope>_elec_s<simpl>_<clusters>.nc``:
"""
from itertools import product

View File

@ -4,6 +4,36 @@
# SPDX-License-Identifier: MIT
"""
Build spatial distribution of industries from Hotmaps database.
Inputs
-------
- ``resources/regions_onshore_elec_s{simpl}_{clusters}.geojson``
- ``resources/pop_layout_elec_s{simpl}_{clusters}.csv``
Outputs
-------
- ``resources/industrial_distribution_key_elec_s{simpl}_{clusters}.csv``
Description
-----------
This rule uses the `Hotmaps database <https://gitlab.com/hotmaps/industrial_sites/industrial_sites_Industrial_Database>`_. After removing entries without valid locations, it assigns each industrial site to a bus region based on its location.
Then, it calculates the nodal distribution key for each sector based on the emissions of the industrial sites in each region. This leads to a distribution key of 1 if there is only one bus per country and <1 if there are multiple buses per country. The sum over buses of one country is 1.
The following subcategories of industry are considered:
- Iron and steel
- Cement
- Refineries
- Paper and printing
- Chemical industry
- Glass
- Non-ferrous metals
- Non-metallic mineral products
- Other non-classified
Furthermore, the population distribution is added
- Population
"""
import logging

View File

@ -4,6 +4,60 @@
# SPDX-License-Identifier: MIT
"""
Build industrial energy demand per country.
Inputs
-------
- ``data/bundle/jrc-idees-2015``
- ``industrial_production_per_country.csv``
Outputs
-------
- ``resources/industrial_energy_demand_per_country_today.csv``
Description
-----------
This rule uses the industrial_production_per_country.csv file and the JRC-IDEES data to derive an energy demand per country and sector. If the country is not in the EU28, an average energy demand depending on the production volume is derived.
For each country and each subcategory of
- Alumina production
- Aluminium - primary production
- Aluminium - secondary production
- Ammonia
- Cement
- Ceramics & other NMM
- Chlorine
- Electric arc
- Food, beverages and tobacco
- Glass production
- HVC
- Integrated steelworks
- Machinery Equipment
- Methanol
- Other Industrial Sectors
- Other chemicals
- Other non-ferrous metals
- Paper production
- Pharmaceutical products etc.
- Printing and media reproduction
- Pulp production
- Textiles and leather
- Transport Equipment
- Wood and wood products
the output file contains the energy demand in TWh/a for the following carriers
- biomass
- electricity
- gas
- heat
- hydrogen
- liquid
- other
- solid
- waste
"""
import multiprocessing as mp

View File

@ -4,6 +4,36 @@
# SPDX-License-Identifier: MIT
"""
Build industrial energy demand per model region.
Inputs
------
- ``resources/industrial_energy_demand_today_elec_s{simpl}_{clusters}.csv``
- ``resources/industry_sector_ratios_{planning_horizons}.csv``
- ``resources/industrial_production_elec_s{simpl}_{clusters}_{planning_horizons}.csv``
Outputs
-------
- ``resources/industrial_energy_demand_elec_s{simpl}_{clusters}_{planning_horizons}.csv``
Description
-----------
This rule aggregates the energy demand of the industrial sectors per model region.
For each bus, the following carriers are considered:
- electricity
- coal
- coke
- solid biomass
- methane
- hydrogen
- low-temperature heat
- naphtha
- ammonia
- process emission
- process emission from feedstock
which can later be used as values for the industry load.
"""
import pandas as pd

View File

@ -4,6 +4,25 @@
# SPDX-License-Identifier: MIT
"""
Build industrial energy demand per model region.
Inputs
-------
- ``resources/industrial_distribution_key_elec_s{simpl}_{clusters}.csv``
- ``resources/industrial_energy_demand_per_country_today.csv``
Outputs
-------
- ``resources/industrial_energy_demand_per_node_today_elec_s{simpl}_{clusters}.csv``
Description
-----------
This rule maps the industrial energy demand per country `industrial_energy_demand_per_country_today.csv` to each bus region.
The energy demand per country is multiplied by the mapping value from the file ``industrial_distribution_key_elec_s{simpl}_{clusters}.csv`` between 0 and 1 to get the industrial energy demand per bus.
The unit of the energy demand is TWh/a.
"""
from itertools import product

View File

@ -3,7 +3,62 @@
#
# SPDX-License-Identifier: MIT
"""
Build industrial production per country.
This rule builds the historical industrial production per country.
Relevant Settings
-----------------
.. code:: yaml
countries:
..
Inputs
-------
- ``resources/ammonia_production.csv``
- ``data/bundle-sector/jrc-idees-2015``
- ``data/eurostat``
Outputs
-------
- ``resources/industrial_production_per_country.csv``
Description
-----------
The industrial production is taken from the `JRC-IDEES <https://joint-research-centre.ec.europa.eu/potencia-policy-oriented-tool-energy-and-climate-change-impact-assessment/jrc-idees_en>`_.
This dataset provides detailed information about the consumption of energy for various processes.
If the country is not part of the EU28, the energy consumption in the industrial sectors is taken from the `Eurostat <https://ec.europa.eu/eurostat/de/data/database>` dataset. The industrial production is calculated for the year specified in the config["industry"]["reference_year"].
The ammonia production is provided by the rule `build_ammonia_production <https://pypsa-eur.readthedocs.io/en/latest/sector.html#module-build_ammonia_production>`_. Since Switzerland is not part of the EU28 nor reported by Eurostat, the energy consumption in the industrial sectors is taken from the `BFE <https://www.bfe.admin.ch/bfe/de/home/versorgung/statistik-und-geodaten/energiestatistiken/energieverbrauch-nach-verwendungszweck.html>`_ dataset.
After the industrial production is calculated, the basic chemicals are separated into ammonia, chlorine, methanol and HVC. The production of these chemicals is assumed to be proportional to the production of basic chemicals without ammonia.
The following subcategories [kton/a] are considered:
- Electric arc
- Integrated steelworks
- Other chemicals
- Pharmaceutical products etc.
- Cement
- Ceramics & other NMM
- Glass production
- Pulp production
- Paper production
- Printing and media reproduction
- Food, beverages and tobacco
- Alumina production
- Aluminium - primary production
- Aluminium - secondary production
- Other non-ferrous metals
- Transport Equipment
- Machinery Equipment
- Textiles and leather
- Wood and wood products
- Other Industrial Sectors
- Ammonia
- HVC
- Chlorine
- Methanol
"""
import logging

View File

@ -4,6 +4,59 @@
# SPDX-License-Identifier: MIT
"""
Build future industrial production per country.
Relevant Settings
-----------------
.. code:: yaml
industry:
St_primary_fraction:
DRI_fraction:
Al_primary_fraction:
HVC_primary_fraction:
HVC_mechanical_recycling_fraction:
HVC_chemical_recycling_fraction:
.. seealso::
Documentation of the configuration file ``config/config.yaml`` at
:ref:`industry`
Inputs
-------
- ``resources/industrial_production_per_country.csv``
Outputs
-------
- ``resources/industrial_production_per_country_tomorrow_{planning_horizons}.csv``
Description
-----------
This rule uses the ``industrial_production_per_country.csv`` file and the expected recycling rates to calculate the future production of the industrial sectors.
**St_primary_fraction**
The fraction of steel that is coming from primary production. This is more energy intensive than recycling steel (secondary production).
**DRI_fraction**
The fraction of primary steel that is produced in DRI plants.
**Al_primary_fraction**
The fraction of aluminium that is coming from primary production. This is more energy intensive than recycling aluminium (secondary production).
**HVC_primary_fraction**
The fraction of high value chemicals that are coming from primary production (crude oil or Fischer Tropsch).
**HVC_mechanical_recycling_fraction**
The fraction of high value chemicals that are coming from mechanical recycling.
**HVC_chemical_recycling_fraction**
The fraction of high value chemicals that are coming from chemical recycling.
If not already present, the information is added as new column in the output file.
The unit of the production is kt/a.
"""
import pandas as pd

View File

@ -4,6 +4,25 @@
# SPDX-License-Identifier: MIT
"""
Build industrial production per model region.
Inputs
-------
- ``resources/industrial_distribution_key_elec_s{simpl}_{clusters}.csv``
- ``resources/industrial_production_per_country_tomorrow_{planning_horizons}.csv``
Outputs
-------
- ``resources/industrial_production_per_node_elec_s{simpl}_{clusters}_{planning_horizons}.csv``
Description
-----------
This rule maps the industrial production per country from a certain time horizon to each bus region.
The mapping file provides a value between 0 and 1 for each bus and industry subcategory, indicating the share of the country's production of that sector in that bus.
The industrial production per country is multiplied by the mapping value to get the industrial production per bus.
The unit of the production is kt/a.
"""
from itertools import product

View File

@ -3,7 +3,49 @@
#
# SPDX-License-Identifier: MIT
"""
Build specific energy consumption by carrier and industries.
Build best case specific energy consumption by carrier and category.
Relevant Settings
-----------------
.. code:: yaml
industry:
ammonia:
..
Inputs
-------
- ``resources/ammonia_production.csv``
- ``data/bundle-sector/jrc-idees-2015``
Outputs
-------
- ``resources/industry_sector_ratios.csv``
Description
-----------
This script uses the `JRC-IDEES <https://joint-research-centre.ec.europa.eu/potencia-policy-oriented-tool-energy-and-climate-change-impact-assessment/jrc-idees_en>`_ data to calculate an EU28 average specific energy consumption by carrier and industries.
The industries are according to the rule `industrial_production_per_country <https://pypsa-eur.readthedocs.io/en/latest/sector.html#module-build_industrial_production_per_country>`_.
The following carriers are considered:
- elec
- coal
- coke
- biomass
- methane
- hydrogen
- heat
- naphtha
- process emission
- process emission from feedstock
- (ammonia)
If the `config["industry"]["ammonia"] <https://pypsa-eur.readthedocs.io/en/latest/configuration.html#industry>`_ option is set to true, the ammonia demand is not converted to hydrogen and electricity but is considered as a separate carrier.
The unit of the specific energy consumption is MWh/t material and tCO2/t material for process emissions.
"""
import pandas as pd

View File

@ -6,6 +6,77 @@
Build specific energy consumption by carrier and industries and by country,
that interpolates between the current average energy consumption (from
2015-2020) and the ideal future best-in-class consumption.
Relevant Settings
-----------------
.. code:: yaml
industry:
sector_ratios_fraction_future:
ammonia:
Inputs
------
- ``resources/industry_sector_ratios.csv``
- ``resources/industrial_energy_demand_per_country_today.csv``
- ``resources/industrial_production_per_country.csv``
Outputs
-------
- ``resources/industry_sector_ratios_{planning_horizons}.csv``
Description
-----------
The config["industry"]["sector_ratios_fraction_future"] parameter determines the progress towards the future best-in-class consumption.
For each bus, the following industry subcategories
- Electric arc
- DRI + Electric arc
- Integrated steelworks
- HVC
- HVC (mechanical recycling)
- HVC (chemical recycling)
- Ammonia
- Chlorine
- Methanol
- Other chemicals
- Pharmaceutical products etc.
- Cement
- Ceramics & other NMM
- Glass production
- Pulp production
- Paper production
- Printing and media reproduction
- Food, beverages and tobacco
- Alumina production
- Aluminium - primary production
- Aluminium - secondary production
- Other non-ferrous metals
- Transport Equipment
- Machinery Equipment
- Textiles and leather
- Wood and wood products
- Other Industrial Sectors
with the following carriers are considered:
- elec
- coal
- coke
- biomass
- methane
- hydrogen
- heat
- naphtha
- process emission
- process emission from feedstock
- (ammonia)
Unit of the output file is MWh/t.
"""
import pandas as pd

View File

@ -3,7 +3,36 @@
#
# SPDX-License-Identifier: MIT
"""
Build solar thermal collector time series.
Build solar thermal collector profile time series.
Uses ``atlite.Cutout.solar_thermal`` to compute heat generation for clustered onshore regions from population layout and weather data cutout.
The rule is executed in ``build_sector.smk``.
.. seealso::
`Atlite.Cutout.solar_thermal <https://atlite.readthedocs.io/en/master/ref_api.html#module-atlite.convert>`_
Relevant Settings
-----------------
.. code:: yaml
snapshots:
drop_leap_day:
solar_thermal:
atlite:
default_cutout:
Inputs
------
- ``resources/<run_name>/pop_layout_<scope>.nc``:
- ``resources/<run_name>/regions_onshore_elec_s<simpl>_<clusters>.geojson``:
- ``cutout``: Weather data cutout, as specified in config
Outputs
-------
- ``resources/solar_thermal_<scope>_elec_s<simpl>_<clusters>.nc``:
"""
import atlite

View File

@ -4,6 +4,36 @@
# SPDX-License-Identifier: MIT
"""
Build time series for air and soil temperatures per clustered model region.
Uses ``atlite.Cutout.temperature`` and ``atlite.Cutout.soil_temperature`` to compute ambient air and soil temperature for the respective cutout. The rule is executed in ``build_sector.smk``.
.. seealso::
`Atlite.Cutout.temperature <https://atlite.readthedocs.io/en/master/ref_api.html#module-atlite.convert>`_
`Atlite.Cutout.soil_temperature <https://atlite.readthedocs.io/en/master/ref_api.html#module-atlite.convert>`_
Relevant Settings
-----------------
.. code:: yaml
snapshots:
drop_leap_day:
atlite:
default_cutout:
Inputs
------
- ``resources/<run_name>/pop_layout_<scope>.nc``:
- ``resources/<run_name>/regions_onshore_elec_s<simpl>_<clusters>.geojson``:
- ``cutout``: Weather data cutout, as specified in config
Outputs
-------
- ``resources/temp_soil_<scope>_elec_s<simpl>_<clusters>.nc``:
- ``resources/temp_air_<scope>_elec_s<simpl>_<clusters>.nc``
"""
import atlite

View File

@ -353,7 +353,7 @@ def plot_balances():
frameon=False,
)
fig.savefig(snakemake.output.balances[:-10] + k + ".pdf", bbox_inches="tight")
fig.savefig(snakemake.output.balances[:-10] + k + ".svg", bbox_inches="tight")
def historical_emissions(countries):
@ -563,7 +563,7 @@ def plot_carbon_budget_distribution(input_eurostat, options):
)
plt.grid(axis="y")
path = snakemake.output.balances.split("balances")[0] + "carbon_budget.pdf"
path = snakemake.output.balances.split("balances")[0] + "carbon_budget.svg"
plt.savefig(path, bbox_inches="tight")

220
scripts/prepare_sector_network.py Executable file → Normal file
View File

@ -196,6 +196,11 @@ def define_spatial(nodes, options):
spatial.lignite.nodes = ["EU lignite"]
spatial.lignite.locations = ["EU"]
# deep geothermal
spatial.geothermal_heat = SimpleNamespace()
spatial.geothermal_heat.nodes = ["EU enhanced geothermal systems"]
spatial.geothermal_heat.locations = ["EU"]
return spatial
@ -451,7 +456,7 @@ def update_wind_solar_costs(n, costs):
clustermaps = busmap_s.map(busmap)
# code adapted from pypsa-eur/scripts/add_electricity.py
for connection in ["dc", "ac"]:
for connection in ["dc", "ac", "float"]:
tech = "offwind-" + connection
if tech not in n.generators.carrier.values:
continue
@ -976,7 +981,7 @@ def insert_electricity_distribution_grid(n, costs):
.get("efficiency_static")
):
logger.info(
f"Deducting distribution losses from electricity demand: {100*(1-efficiency)}%"
f"Deducting distribution losses from electricity demand: {np.around(100*(1-efficiency), decimals=2)}%"
)
n.loads_t.p_set.loc[:, n.loads.carrier == "electricity"] *= efficiency
@ -2697,6 +2702,7 @@ def add_industry(n, costs):
e_nom_extendable=True,
e_cyclic=True,
carrier="methanol",
capital_cost=0.02,
)
n.madd(
@ -3726,6 +3732,210 @@ def lossy_bidirectional_links(n, carrier, efficiencies={}):
)
def add_enhanced_geothermal(n, egs_potentials, egs_overlap, costs):
    """
    Add Enhanced Geothermal Systems (EGS) potential to the model.

    The gridded potentials are built in scripts/build_egs_potentials.py.
    Drilling is modelled as links from a single EU-level "geothermal heat"
    bus to per-region surface buses (one link per supply-curve step), with
    an Organic Rankine Cycle (ORC) link converting surface heat to
    electricity and, where an urban central heat bus exists, a district
    heat link (CHP mode). Optionally a StorageUnit represents flexible
    reservoir operation.

    Parameters
    ----------
    n : pypsa.Network
        Network to which the EGS components are added (modified in place).
    egs_potentials : str
        Path to CSV with gridded EGS potentials (columns incl. CAPEX
        [Euro/kW] and p_nom_max [GW]).
    egs_overlap : str
        Path to CSV matrix of overlaps between the gridded potential cells
        and the network's bus regions (rows: buses, columns: cells).
    costs : pd.DataFrame
        Technology cost assumptions (needs "geothermal" and
        "organic rankine cycle" entries).
    """
    if len(spatial.geothermal_heat.nodes) > 1:
        logger.warning(
            "'add_enhanced_geothermal' not implemented for multiple geothermal nodes."
        )
    logger.info(
        "[EGS] implemented with 2020 CAPEX from Aghahosseini et al 2021: 'From hot rock to...'."
    )
    logger.info(
        "[EGS] Recommended usage scales CAPEX to future cost expectations using config 'adjustments'."
    )
    logger.info("[EGS] During this the relevant carriers are:")
    logger.info("[EGS] drilling part -> 'geothermal heat'")
    logger.info(
        "[EGS] electricity generation part -> 'geothermal organic rankine cycle'"
    )
    logger.info("[EGS] district heat distribution part -> 'geothermal district heat'")

    egs_config = snakemake.params["sector"]["enhanced_geothermal"]
    costs_config = snakemake.config["costs"]

    # matrix defining the overlap between gridded geothermal potential estimation, and bus regions
    overlap = pd.read_csv(egs_overlap, index_col=0)
    overlap.columns = overlap.columns.astype(int)
    egs_potentials = pd.read_csv(egs_potentials, index_col=0)

    Nyears = n.snapshot_weightings.generators.sum() / 8760
    dr = costs_config["fill_values"]["discount rate"]
    lt = costs.at["geothermal", "lifetime"]
    FOM = costs.at["geothermal", "FOM"]

    egs_annuity = calculate_annuity(lt, dr)

    # under egs optimism, the expected cost reductions also cover costs for ORC
    # hence, the ORC costs are no longer taken from technology-data
    orc_capex = costs.at["organic rankine cycle", "investment"]

    # cost for ORC is subtracted, as it is already included in the geothermal cost.
    # The orc cost are attributed to a separate link representing the ORC.
    # also capital_cost conversion Euro/kW -> Euro/MW
    egs_potentials["capital_cost"] = (
        (egs_annuity + FOM / (1.0 + FOM))
        * (egs_potentials["CAPEX"] * 1e3 - orc_capex)
        * Nyears
    )
    assert (
        egs_potentials["capital_cost"] > 0
    ).all(), "Error in EGS cost, negative values found."

    orc_annuity = calculate_annuity(costs.at["organic rankine cycle", "lifetime"], dr)
    orc_capital_cost = (orc_annuity + FOM / (1 + FOM)) * orc_capex * Nyears

    efficiency_orc = costs.at["organic rankine cycle", "efficiency"]
    efficiency_dh = costs.at["geothermal", "district heat-input"]

    # p_nom_max conversion GW -> MW
    egs_potentials["p_nom_max"] = egs_potentials["p_nom_max"] * 1000.0

    # not using add_carrier_buses, as we are not interested in a Store
    n.add("Carrier", "geothermal heat")

    n.madd(
        "Bus",
        spatial.geothermal_heat.nodes,
        carrier="geothermal heat",
        unit="MWh_th",
    )

    n.madd(
        "Generator",
        spatial.geothermal_heat.nodes,
        bus=spatial.geothermal_heat.nodes,
        carrier="geothermal heat",
        p_nom_extendable=True,
    )

    if egs_config["var_cf"]:
        # time-varying capacity factors per bus, indexed by snapshot
        efficiency = pd.read_csv(
            snakemake.input.egs_capacity_factors, parse_dates=True, index_col=0
        )
        logger.info("Adding Enhanced Geothermal with time-varying capacity factors.")
    else:
        efficiency = 1.0

    # if urban central heat exists, adds geothermal as CHP
    as_chp = "urban central heat" in n.loads.carrier.unique()

    if as_chp:
        logger.info("Adding EGS as Combined Heat and Power.")
    else:
        logger.info("Adding EGS for Electricity Only.")

    for bus, bus_overlap in overlap.iterrows():
        if not bus_overlap.sum():
            continue

        # FIX: previously this rebound the name `overlap` (the DataFrame
        # being iterated), shadowing it mid-loop; use a distinct name for
        # the non-zero overlap entries of the current bus.
        nonzero_overlap = bus_overlap.loc[bus_overlap > 0.0]
        bus_egs = egs_potentials.loc[nonzero_overlap.index]

        if not len(bus_egs):
            continue

        # scale cell potentials by the share overlapping this bus region
        bus_egs["p_nom_max"] = bus_egs["p_nom_max"].multiply(bus_overlap)
        bus_egs = bus_egs.loc[bus_egs.p_nom_max > 0.0]

        appendix = " " + pd.Index(np.arange(len(bus_egs)).astype(str))

        # add surface bus
        n.madd(
            "Bus",
            pd.Index([f"{bus} geothermal heat surface"]),
            location=bus,
            unit="MWh_th",
            carrier="geothermal heat",
        )

        bus_egs.index = np.arange(len(bus_egs)).astype(str)
        well_name = f"{bus} enhanced geothermal" + appendix

        if egs_config["var_cf"]:
            # replicate the bus' capacity-factor series for each well
            bus_eta = pd.concat(
                (efficiency[bus].rename(idx) for idx in well_name),
                axis=1,
            )
        else:
            bus_eta = efficiency

        p_nom_max = bus_egs["p_nom_max"]
        capital_cost = bus_egs["capital_cost"]
        bus1 = pd.Series(f"{bus} geothermal heat surface", well_name)

        # adding geothermal wells as multiple generators to represent supply curve
        n.madd(
            "Link",
            well_name,
            bus0=spatial.geothermal_heat.nodes,
            bus1=bus1,
            carrier="geothermal heat",
            p_nom_extendable=True,
            p_nom_max=p_nom_max.set_axis(well_name) / efficiency_orc,
            capital_cost=capital_cost.set_axis(well_name) * efficiency_orc,
            efficiency=bus_eta,
        )

        # adding Organic Rankine Cycle as a single link
        n.add(
            "Link",
            bus + " geothermal organic rankine cycle",
            bus0=f"{bus} geothermal heat surface",
            bus1=bus,
            p_nom_extendable=True,
            carrier="geothermal organic rankine cycle",
            capital_cost=orc_capital_cost * efficiency_orc,
            efficiency=efficiency_orc,
        )

        if as_chp and bus + " urban central heat" in n.buses.index:
            # CHP mode: add a district heat delivery link with a surcharge
            # (in percent) on the ORC capital cost
            n.add(
                "Link",
                bus + " geothermal heat district heat",
                bus0=f"{bus} geothermal heat surface",
                bus1=bus + " urban central heat",
                carrier="geothermal district heat",
                capital_cost=orc_capital_cost
                * efficiency_orc
                * costs.at["geothermal", "district heat surcharge"]
                / 100.0,
                efficiency=efficiency_dh,
                p_nom_extendable=True,
            )
        elif as_chp and bus + " urban central heat" not in n.buses.index:
            n.links.at[bus + " geothermal organic rankine cycle", "efficiency"] = (
                efficiency_orc
            )

        if egs_config["flexible"]:
            # this StorageUnit represents flexible operation using the geothermal reservoir.
            # Hence, it is counter-intuitive to install it at the surface bus,
            # this is however the more lean and computationally efficient solution.
            max_hours = egs_config["max_hours"]
            boost = egs_config["max_boost"]

            n.add(
                "StorageUnit",
                bus + " geothermal reservoir",
                bus=f"{bus} geothermal heat surface",
                carrier="geothermal heat",
                p_nom_extendable=True,
                p_min_pu=-boost,
                max_hours=max_hours,
                cyclic_state_of_charge=True,
            )
# %%
if __name__ == "__main__":
if "snakemake" not in globals():
@ -3857,6 +4067,12 @@ if __name__ == "__main__":
if options["electricity_distribution_grid"]:
insert_electricity_distribution_grid(n, costs)
if options["enhanced_geothermal"].get("enable", False):
logger.info("Adding Enhanced Geothermal Systems (EGS).")
add_enhanced_geothermal(
n, snakemake.input["egs_potentials"], snakemake.input["egs_overlap"], costs
)
maybe_adjust_costs_and_potentials(n, snakemake.params["adjustments"])
if options["gas_distribution_grid"]:

View File

@ -132,8 +132,6 @@ def _add_land_use_constraint(n):
"offwind-dc",
"offwind-float",
]:
extendable_i = (n.generators.carrier == carrier) & n.generators.p_nom_extendable
n.generators.loc[extendable_i, "p_nom_min"] = 0
ext_i = (n.generators.carrier == carrier) & ~n.generators.p_nom_extendable
existing = (
@ -174,8 +172,6 @@ def _add_land_use_constraint_m(n, planning_horizons, config):
"offwind-ac",
"offwind-dc",
]:
extendable_i = (n.generators.carrier == carrier) & n.generators.p_nom_extendable
n.generators.loc[extendable_i, "p_nom_min"] = 0
existing = n.generators.loc[n.generators.carrier == carrier, "p_nom"]
ind = list(
@ -948,6 +944,25 @@ def add_pipe_retrofit_constraint(n):
n.model.add_constraints(lhs == rhs, name="Link-pipe_retrofit")
def add_flexible_egs_constraint(n):
    """
    Upper bounds the charging capacity of the geothermal reservoir according to
    the well capacity.
    """
    # extendable capacities of the drilling links and reservoir storage units
    wells = n.links.index[n.links.carrier == "geothermal heat"]
    reservoirs = n.storage_units.index[n.storage_units.carrier == "geothermal heat"]

    well_capacity = n.model["Link-p_nom"].loc[wells]
    reservoir_capacity = n.model["StorageUnit-p_nom"].loc[reservoirs]

    # reservoir charging power must not exceed the installed well capacity
    n.model.add_constraints(
        reservoir_capacity <= well_capacity,
        name="upper_bound_charging_capacity_of_geothermal_reservoir",
    )
def add_co2_atmosphere_constraint(n, snapshots):
glcs = n.global_constraints[n.global_constraints.type == "co2_atmosphere"]
@ -1013,6 +1028,9 @@ def extra_functionality(n, snapshots):
else:
add_co2_atmosphere_constraint(n, snapshots)
if config["sector"]["enhanced_geothermal"]["enable"]:
add_flexible_egs_constraint(n)
if snakemake.params.custom_extra_functionality:
source_path = snakemake.params.custom_extra_functionality
assert os.path.exists(source_path), f"{source_path} does not exist"