Merge remote-tracking branch 'upstream/master' into add_methanol_techs

Philipp Glaum committed 2024-08-26 09:12:55 +02:00
commit 0b0de7ba77
31 changed files with 3690 additions and 143 deletions


@ -54,6 +54,7 @@ include: "rules/build_sector.smk"
include: "rules/solve_electricity.smk"
include: "rules/postprocess.smk"
include: "rules/validate.smk"
include: "rules/development.smk"
if config["foresight"] == "overnight":


@ -42,7 +42,7 @@ scenario:
ll:
- vopt
clusters:
- 37
- 38
- 128
- 256
opts:
@ -74,7 +74,6 @@ enable:
custom_busmap: false
drop_leap_day: true
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#co2-budget
co2_budget:
2020: 0.701
@ -87,7 +86,8 @@ co2_budget:
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#electricity
electricity:
voltages: [220., 300., 380., 500., 750.]
voltages: [200., 220., 300., 380., 500., 750.]
base_network: entsoegridkit
gaslimit_enable: false
gaslimit: false
co2limit_enable: false
@ -276,6 +276,7 @@ conventional:
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#lines
lines:
types:
200.: "Al/St 240/40 2-bundle 200.0"
220.: "Al/St 240/40 2-bundle 220.0"
300.: "Al/St 240/40 3-bundle 300.0"
380.: "Al/St 240/40 4-bundle 380.0"
@ -448,8 +449,19 @@ sector:
2045: 0.8
2050: 1.0
district_heating_loss: 0.15
forward_temperature: 90 #C
return_temperature: 50 #C
# check these numbers!
forward_temperature:
default: 90
DK: 70
SE: 70
NO: 70
FI: 70
return_temperature:
default: 50
DK: 40
SE: 40
NO: 40
FI: 40
heat_source_cooling: 6 #K
heat_pump_cop_approximation:
refrigerant: ammonia
@ -600,7 +612,14 @@ sector:
min_size: 3
max_size: 25
years_of_storage: 25
co2_sequestration_potential: 200
co2_sequestration_potential:
2020: 0
2025: 0
2030: 50
2035: 100
2040: 200
2045: 200
2050: 200
co2_sequestration_cost: 10
co2_sequestration_lifetime: 50
co2_spatial: false
@ -666,6 +685,7 @@ sector:
biomass_to_liquid: false
electrobiofuels: false
biosng: false
bioH2: false
municipal_solid_waste: false
limit_max_growth:
enable: false
@ -1109,6 +1129,7 @@ plotting:
unsustainable bioliquids: '#32CD32'
electrobiofuels: 'red'
BioSNG: '#123456'
solid biomass to hydrogen: '#654321'
# power transmission
lines: '#6c9459'
transmission lines: '#6c9459'
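
The new ``co2_sequestration_potential`` entries above are keyed by planning horizon. A minimal sketch of resolving such a dictionary for a given investment year (helper name hypothetical; PyPSA-Eur ships a helper with similar intent, ``prepare_sector_network.get``, imported elsewhere in this diff, whose exact semantics may differ):

def lookup_by_horizon(value, investment_year):
    # Hypothetical helper: scalar configs pass through unchanged; dict
    # configs return the entry for the latest planning horizon that
    # does not exceed the investment year.
    if not isinstance(value, dict):
        return value
    horizons = sorted(value)
    eligible = [h for h in horizons if h <= investment_year]
    return value[eligible[-1] if eligible else horizons[0]]

assert lookup_by_horizon({2020: 0, 2025: 0, 2030: 50, 2040: 200}, 2035) == 50
assert lookup_by_horizon(200, 2035) == 200
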


@ -1,5 +1,6 @@
,Unit,Values,Description
voltages,kV,"Any subset of {220., 300., 380.}",Voltage levels to consider
voltages,kV,"Any subset of {200., 220., 300., 380., 500., 750.}",Voltage levels to consider
base_network, --, "Any value in {'entsoegridkit', 'osm-prebuilt', 'osm-raw'}", "Specify the underlying base network, i.e. GridKit (based on ENTSO-E web map extract), OpenStreetMap (OSM) prebuilt, or OSM raw (built from raw OSM data; takes longer)."
gaslimit_enable,bool,true or false,Add an overall absolute gas limit configured in ``electricity: gaslimit``.
gaslimit,MWhth,float or false,Global gas usage limit
co2limit_enable,bool,true or false,Add an overall absolute carbon-dioxide emissions limit configured in ``electricity: co2limit`` in :mod:`prepare_network`. **Warning:** This option should currently only be used with electricity-only networks, not for sector-coupled networks.



@ -4,5 +4,5 @@ retrieve_databundle,bool,"{true, false}","Switch to retrieve databundle from zen
retrieve_cost_data,bool,"{true, false}","Switch to retrieve technology cost data from `technology-data repository <https://github.com/PyPSA/technology-data>`_."
build_cutout,bool,"{true, false}","Switch to enable the building of cutouts via the rule :mod:`build_cutout`."
retrieve_cutout,bool,"{true, false}","Switch to enable the retrieval of cutouts from zenodo with :mod:`retrieve_cutout`."
custom_busmap,bool,"{true, false}","Switch to enable the use of custom busmaps in rule :mod:`cluster_network`. If activated the rule looks for provided busmaps at ``data/custom_busmap_elec_s{simpl}_{clusters}.csv`` which should have the same format as ``resources/busmap_elec_s{simpl}_{clusters}.csv``, i.e. the index should contain the buses of ``networks/elec_s{simpl}.nc``."
custom_busmap,bool,"{true, false}","Switch to enable the use of custom busmaps in rule :mod:`cluster_network`. If activated the rule looks for provided busmaps at ``data/busmaps/elec_s{simpl}_{clusters}_{base_network}.csv`` which should have the same format as ``resources/busmap_elec_s{simpl}_{clusters}.csv``, i.e. the index should contain the buses of ``networks/elec_s{simpl}.nc``. {base_network} is the name of the selected base_network in electricity, e.g. ``entsoegridkit``, ``osm-prebuilt``, or ``osm-raw``."
drop_leap_day,bool,"{true, false}","Switch to drop February 29 from all time-dependent data in leap years"
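
For the relocated custom busmaps, a minimal sketch of writing a compatible file (bus IDs and cluster names illustrative, not taken from the actual data):

import pandas as pd

# Index must contain the buses of networks/elec_s{simpl}.nc; values assign
# each bus to a cluster.
busmap = pd.Series({"5231": "DE0 1", "5232": "DE0 1", "6021": "FR0 3"}, name="busmap")
busmap.index.name = "Bus"
# e.g. simpl="", clusters=37, base_network="osm-prebuilt":
busmap.to_csv("data/busmaps/elec_s_37_osm-prebuilt.csv")
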



@ -9,8 +9,8 @@ district_heating,--,,`prepare_sector_network.py <https://github.com/PyPSA/pypsa-
-- potential,--,float,maximum fraction of urban demand which can be supplied by district heating
-- progress,--,Dictionary with planning horizons as keys., Increase of today's district heating demand to potential maximum district heating share. Progress = 0 means today's district heating share. Progress = 1 means maximum fraction of urban demand is supplied by district heating
-- district_heating_loss,--,float,Share increase in district heat demand in urban central due to heat losses
-- forward_temperature,°C,float,Forward temperature in district heating
-- return_temperature,°C,float,Return temperature in district heating. Must be lower than forward temperature
-- forward_temperature,°C,Dictionary with country codes as keys. One key must be 'default'.,Forward temperature in district heating
-- return_temperature,°C,Dictionary with country codes as keys. One key must be 'default'.,Return temperature in district heating. Must be lower than forward temperature
-- heat_source_cooling,K,float,Cooling of heat source for heat pumps
-- heat_pump_cop_approximation,,,
-- -- refrigerant,--,"{ammonia, isobutane}",Heat pump refrigerant assumed for COP approximation
@ -107,7 +107,7 @@ regional_co2 _sequestration_potential,,,
-- min_size,Gt ,float,Any sites with lower potential than this value will be excluded
-- max_size,Gt ,float,The maximum sequestration potential for any one site.
-- years_of_storage,years,float,The years until the potential is exhausted at the optimised annual rate
co2_sequestration_potential,MtCO2/a,float,The potential of sequestering CO2 in Europe per year
co2_sequestration_potential,--,Dictionary with planning horizons as keys.,The potential of sequestering CO2 in Europe per year and investment period
co2_sequestration_cost,currency/tCO2,float,The cost of sequestering a ton of CO2
co2_sequestration_lifetime,years,int,The lifetime of a CO2 sequestration site
co2_spatial,--,"{true, false}","Add option to spatially resolve carrier representing stored carbon dioxide. This allows for more detailed modelling of CCUTS, e.g. regarding the capturing of industrial process emissions, usage as feedstock for electrofuels, transport of carbon dioxide, and geological sequestration sites."
@ -162,6 +162,7 @@ biogas_upgrading_cc,--,"{true, false}",Add option to capture CO2 from biomass up
conventional_generation,,,Add a more detailed description of conventional carriers. Any power generation requires the consumption of fuel from nodes representing that fuel.
biomass_to_liquid,--,"{true, false}",Add option for transforming solid biomass into liquid fuel with the same properties as oil
biosng,--,"{true, false}",Add option for transforming solid biomass into synthesis gas with the same properties as natural gas
bioH2,--,"{true, false}",Add option for transforming solid biomass into hydrogen with carbon capture
municipal_solid_waste,--,"{true, false}",Add option for municipal solid waste
limit_max_growth,,,
-- enable,--,"{true, false}",Add option to limit the maximum growth of a carrier
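
A minimal sketch of the lookup rule documented above for the country-keyed temperature dictionaries (country-specific value if present, otherwise the mandatory 'default'; function name hypothetical):

forward_temperature = {"default": 90, "DK": 70, "SE": 70, "NO": 70, "FI": 70}

def temperature_for(country, temperatures):
    # fall back to the mandatory 'default' key for unlisted countries
    return temperatures.get(country, temperatures["default"])

assert temperature_for("DK", forward_temperature) == 70
assert temperature_for("DE", forward_temperature) == 90
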



@ -11,12 +11,20 @@ Upcoming Release
================
* Add technology options for methanol, like electricity production from methanol, biomass to methanol, methanol to kerosene, ...
* Add investment period dependent CO2 sequestration potentials
* Add option to produce hydrogen from solid biomass (flag ``solid biomass to hydrogen``), combined with carbon capture
* Fixed PDF encoding in ``build_biomass_transport_costs`` by updating tabula-py and jpype1
* More modular and flexible handling of transmission projects. New transmission projects can now be added in a subfolder of ``data/transmission_projects``, following the format of the files in the template folder. After adding the new files and updating the config section ``transmission_projects:``, the projects are included unless they duplicate existing lines or other projects.
* Add option to apply a Gaussian kernel density smoothing to wind turbine power curves (see the sketch after this list).
* Update JRC-IDEES-2015 to `JRC-IDEES-2021 <https://publications.jrc.ec.europa.eu/repository/handle/JRC137809>`__. The reference year is changed from 2015 to 2019.
* Added option to use country-specific district heating forward and return temperatures. Defaults to lower temperatures in Scandinavia.
* Added unsustainable biomass potentials for solid, gaseous, and liquid biomass. The potentials can be phased out and/or
substituted by the phase-in of sustainable biomass types using the config parameters
``biomass: share_unsustainable_use_retained`` and ``biomass: share_sustainable_potential_available``.
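
As referenced above, a sketch of Gaussian kernel smoothing applied to a turbine power curve (toy curve and parameter values; not the exact PyPSA-Eur API):

import numpy as np
from scipy.ndimage import gaussian_filter1d

wind_speeds = np.linspace(0.0, 30.0, 121)              # m/s
power = np.clip((wind_speeds - 3.0) / 9.0, 0.0, 1.0)   # toy ramp from cut-in to rated
power[wind_speeds > 25.0] = 0.0                        # cut-out
smoothed = gaussian_filter1d(power, sigma=2.0)         # kernel width is a free parameter
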
@ -68,6 +76,8 @@ Upcoming Release
* Enable parallelism in :mod:`determine_availability_matrix_MD_UA.py` and remove plots. This requires the use of temporary files.
* Added new major feature to create the base_network from OpenStreetMap (OSM) data (PR https://github.com/PyPSA/pypsa-eur/pull/1079). Note that a heuristics-based cleaning process is used for lines and links where electrical parameters are incomplete, missing, or ambiguous. Through ``electricity["base_network"]``, the base network can be set to "entsoegridkit" (original default setting, deprecated soon), "osm-prebuilt" (which downloads the latest prebuilt snapshot based on OSM data from Zenodo), or "osm-raw" which retrieves (once) and cleans the raw OSM data and subsequently builds the network. Note that this process may take a few minutes.
* Updated pre-built `weather data cutouts
<https://zenodo.org/records/12791128>`__. These are now merged cutouts with
solar irradiation from the new SARAH-3 dataset while taking all other
@ -83,6 +93,8 @@ Upcoming Release
* In :mod:`base_network`, replace own voronoi polygon calculation function with
Geopandas `gdf.voronoi_polygons` method.
* Move custom busmaps to ``data/busmaps/elec_s{simpl}_{clusters}_{base_network}.csv`` (if enabled). This allows for different busmaps depending on the base network and scenario.
PyPSA-Eur 0.11.0 (25th May 2024)
=====================================


@ -391,7 +391,7 @@ dependencies:
- stack_data=0.6.2
- statsmodels=0.14.2
- stopit=1.1.2
- tabula-py=2.7.0
- jpype1=1.5.0
- tabulate=0.9.0
- tbb=2021.11.0
- tblib=3.0.0
@ -466,3 +466,4 @@ dependencies:
- snakemake-executor-plugin-slurm-jobstep==0.2.1
- snakemake-storage-plugin-http==0.2.3
- tsam==2.3.1
- tabula-py=2.9.3


@ -43,10 +43,11 @@ dependencies:
- geopy
- tqdm
- pytz
- tabula-py
- jpype1
- pyxlsb
- graphviz
- pre-commit
- geojson
# Keep in conda environment when calling ipython
- ipython
@ -63,3 +64,4 @@ dependencies:
- snakemake-executor-plugin-slurm
- snakemake-executor-plugin-cluster-generic
- highspy
- tabula-py


@ -50,6 +50,19 @@ rule build_powerplants:
"../scripts/build_powerplants.py"
def input_base_network(w):
base_network = config_provider("electricity", "base_network")(w)
components = {"buses", "lines", "links", "converters", "transformers"}
if base_network == "osm-raw":
inputs = {c: resources(f"osm-raw/build/{c}.csv") for c in components}
else:
inputs = {c: f"data/{base_network}/{c}.csv" for c in components}
if base_network == "entsoegridkit":
inputs["parameter_corrections"] = "data/parameter_corrections.yaml"
inputs["links_p_nom"] = "data/links_p_nom.csv"
return inputs
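
To make the branching above concrete, a standalone restatement of what the function resolves to, sketched without Snakemake's ``resources()`` helper (the ``resources/`` prefix is assumed for illustration):

def resolve_inputs(base_network):
    components = {"buses", "lines", "links", "converters", "transformers"}
    if base_network == "osm-raw":
        inputs = {c: f"resources/osm-raw/build/{c}.csv" for c in components}
    else:
        inputs = {c: f"data/{base_network}/{c}.csv" for c in components}
    if base_network == "entsoegridkit":
        inputs["parameter_corrections"] = "data/parameter_corrections.yaml"
        inputs["links_p_nom"] = "data/links_p_nom.csv"
    return inputs

assert resolve_inputs("osm-prebuilt")["buses"] == "data/osm-prebuilt/buses.csv"
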
rule base_network:
params:
countries=config_provider("countries"),
@ -58,13 +71,7 @@ rule base_network:
lines=config_provider("lines"),
transformers=config_provider("transformers"),
input:
eg_buses="data/entsoegridkit/buses.csv",
eg_lines="data/entsoegridkit/lines.csv",
eg_links="data/entsoegridkit/links.csv",
eg_converters="data/entsoegridkit/converters.csv",
eg_transformers="data/entsoegridkit/transformers.csv",
parameter_corrections="data/parameter_corrections.yaml",
links_p_nom="data/links_p_nom.csv",
unpack(input_base_network),
country_shapes=resources("country_shapes.geojson"),
offshore_shapes=resources("offshore_shapes.geojson"),
europe_shape=resources("europe_shape.geojson"),
@ -523,6 +530,15 @@ rule simplify_network:
"../scripts/simplify_network.py"
# Optional input when using custom busmaps - Needs to be tailored to selected base_network
def input_cluster_network(w):
if config_provider("enable", "custom_busmap", default=False)(w):
base_network = config_provider("electricity", "base_network")(w)
custom_busmap = f"data/busmaps/elec_s{w.simpl}_{w.clusters}_{base_network}.csv"
return {"custom_busmap": custom_busmap}
return {"custom_busmap": []}
rule cluster_network:
params:
cluster_network=config_provider("clustering", "cluster_network"),
@ -539,15 +555,11 @@ rule cluster_network:
length_factor=config_provider("lines", "length_factor"),
costs=config_provider("costs"),
input:
unpack(input_cluster_network),
network=resources("networks/elec_s{simpl}.nc"),
regions_onshore=resources("regions_onshore_elec_s{simpl}.geojson"),
regions_offshore=resources("regions_offshore_elec_s{simpl}.geojson"),
busmap=ancient(resources("busmap_elec_s{simpl}.csv")),
custom_busmap=lambda w: (
"data/custom_busmap_elec_s{simpl}_{clusters}.csv"
if config_provider("enable", "custom_busmap", default=False)(w)
else []
),
tech_costs=lambda w: resources(
f"costs_{config_provider('costs', 'year')(w)}.csv"
),
@ -629,3 +641,79 @@ rule prepare_network:
"../envs/environment.yaml"
script:
"../scripts/prepare_network.py"
if config["electricity"]["base_network"] == "osm-raw":
rule clean_osm_data:
input:
cables_way=expand(
"data/osm-raw/{country}/cables_way.json",
country=config_provider("countries"),
),
lines_way=expand(
"data/osm-raw/{country}/lines_way.json",
country=config_provider("countries"),
),
links_relation=expand(
"data/osm-raw/{country}/links_relation.json",
country=config_provider("countries"),
),
substations_way=expand(
"data/osm-raw/{country}/substations_way.json",
country=config_provider("countries"),
),
substations_relation=expand(
"data/osm-raw/{country}/substations_relation.json",
country=config_provider("countries"),
),
offshore_shapes=resources("offshore_shapes.geojson"),
country_shapes=resources("country_shapes.geojson"),
output:
substations=resources("osm-raw/clean/substations.geojson"),
substations_polygon=resources("osm-raw/clean/substations_polygon.geojson"),
lines=resources("osm-raw/clean/lines.geojson"),
links=resources("osm-raw/clean/links.geojson"),
log:
logs("clean_osm_data.log"),
benchmark:
benchmarks("clean_osm_data")
threads: 1
resources:
mem_mb=4000,
conda:
"../envs/environment.yaml"
script:
"../scripts/clean_osm_data.py"
if config["electricity"]["base_network"] == "osm-raw":
rule build_osm_network:
input:
substations=resources("osm-raw/clean/substations.geojson"),
lines=resources("osm-raw/clean/lines.geojson"),
links=resources("osm-raw/clean/links.geojson"),
country_shapes=resources("country_shapes.geojson"),
output:
lines=resources("osm-raw/build/lines.csv"),
links=resources("osm-raw/build/links.csv"),
converters=resources("osm-raw/build/converters.csv"),
transformers=resources("osm-raw/build/transformers.csv"),
substations=resources("osm-raw/build/buses.csv"),
lines_geojson=resources("osm-raw/build/geojson/lines.geojson"),
links_geojson=resources("osm-raw/build/geojson/links.geojson"),
converters_geojson=resources("osm-raw/build/geojson/converters.geojson"),
transformers_geojson=resources("osm-raw/build/geojson/transformers.geojson"),
substations_geojson=resources("osm-raw/build/geojson/buses.geojson"),
log:
logs("build_osm_network.log"),
benchmark:
benchmarks("build_osm_network")
threads: 1
resources:
mem_mb=4000,
conda:
"../envs/environment.yaml"
script:
"../scripts/build_osm_network.py"


@ -233,9 +233,11 @@ rule build_cop_profiles:
"sector", "district_heating", "heat_pump_cop_approximation"
),
heat_pump_sources=config_provider("sector", "heat_pump_sources"),
snapshots=config_provider("snapshots"),
input:
temp_soil_total=resources("temp_soil_total_elec_s{simpl}_{clusters}.nc"),
temp_air_total=resources("temp_air_total_elec_s{simpl}_{clusters}.nc"),
regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"),
output:
cop_profiles=resources("cop_profiles_elec_s{simpl}_{clusters}.nc"),
resources:

rules/development.smk (new file, 26 lines)

@ -0,0 +1,26 @@
# SPDX-FileCopyrightText: : 2023-2024 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
if config["electricity"]["base_network"] == "osm-raw":
rule prepare_osm_network_release:
input:
base_network=resources("networks/base.nc"),
output:
buses=resources("osm-raw/release/buses.csv"),
converters=resources("osm-raw/release/converters.csv"),
lines=resources("osm-raw/release/lines.csv"),
links=resources("osm-raw/release/links.csv"),
transformers=resources("osm-raw/release/transformers.csv"),
log:
logs("prepare_osm_network_release.log"),
benchmark:
benchmarks("prepare_osm_network_release")
threads: 1
resources:
mem_mb=1000,
conda:
"../envs/environment.yaml"
script:
"../scripts/prepare_osm_network_release.py"


@ -85,7 +85,7 @@ if config["enable"]["retrieve"] and config["enable"].get("retrieve_cutout", True
"https://zenodo.org/records/12791128/files/{cutout}.nc",
),
output:
protected("cutouts/" + CDIR + "{cutout}.nc"),
"cutouts/" + CDIR + "{cutout}.nc",
log:
"logs/" + CDIR + "retrieve_cutout_{cutout}.log",
resources:
@ -390,3 +390,85 @@ if config["enable"]["retrieve"]:
"../envs/retrieve.yaml"
script:
"../scripts/retrieve_monthly_fuel_prices.py"
if config["enable"]["retrieve"] and (
config["electricity"]["base_network"] == "osm-prebuilt"
):
rule retrieve_osm_prebuilt:
input:
buses=storage("https://zenodo.org/records/13358976/files/buses.csv"),
converters=storage(
"https://zenodo.org/records/13358976/files/converters.csv"
),
lines=storage("https://zenodo.org/records/13358976/files/lines.csv"),
links=storage("https://zenodo.org/records/13358976/files/links.csv"),
transformers=storage(
"https://zenodo.org/records/13358976/files/transformers.csv"
),
output:
buses="data/osm-prebuilt/buses.csv",
converters="data/osm-prebuilt/converters.csv",
lines="data/osm-prebuilt/lines.csv",
links="data/osm-prebuilt/links.csv",
transformers="data/osm-prebuilt/transformers.csv",
log:
"logs/retrieve_osm_prebuilt.log",
threads: 1
resources:
mem_mb=500,
retries: 2
run:
for key in input.keys():
move(input[key], output[key])
validate_checksum(output[key], input[key])
if config["enable"]["retrieve"] and (
config["electricity"]["base_network"] == "osm-raw"
):
rule retrieve_osm_data:
output:
cables_way="data/osm-raw/{country}/cables_way.json",
lines_way="data/osm-raw/{country}/lines_way.json",
links_relation="data/osm-raw/{country}/links_relation.json",
substations_way="data/osm-raw/{country}/substations_way.json",
substations_relation="data/osm-raw/{country}/substations_relation.json",
log:
"logs/retrieve_osm_data_{country}.log",
threads: 1
conda:
"../envs/retrieve.yaml"
script:
"../scripts/retrieve_osm_data.py"
if config["enable"]["retrieve"] and (
config["electricity"]["base_network"] == "osm-raw"
):
rule retrieve_osm_data_all:
input:
expand(
"data/osm-raw/{country}/cables_way.json",
country=config_provider("countries"),
),
expand(
"data/osm-raw/{country}/lines_way.json",
country=config_provider("countries"),
),
expand(
"data/osm-raw/{country}/links_relation.json",
country=config_provider("countries"),
),
expand(
"data/osm-raw/{country}/substations_way.json",
country=config_provider("countries"),
),
expand(
"data/osm-raw/{country}/substations_relation.json",
country=config_provider("countries"),
),


@ -22,12 +22,11 @@ from _helpers import (
update_config_from_wildcards,
)
from add_electricity import sanitize_carriers
from definitions.heat_sector import HeatSector
from definitions.heat_system import HeatSystem
from definitions.heat_system_type import HeatSystemType
from prepare_sector_network import cluster_heat_buses, define_spatial, prepare_costs
from scripts.definitions.heat_sector import HeatSector
from scripts.definitions.heat_system import HeatSystem
from scripts.definitions.heat_system_type import HeatSystemType
logger = logging.getLogger(__name__)
cc = coco.CountryConverter()
idx = pd.IndexSlice


@ -5,7 +5,11 @@
# coding: utf-8
"""
Creates the network topology from an `ENTSO-E map extract <https://github.com/PyPSA/GridKit/tree/master/entsoe>`_ (March 2022) as a PyPSA network.
Creates the network topology from an `ENTSO-E map extract
<https://github.com/PyPSA/GridKit/tree/master/entsoe>`_ (March 2022) or
`OpenStreetMap data <https://www.openstreetmap.org/>`_ (Aug 2024) as a PyPSA
network.
Relevant Settings
-----------------
@ -95,7 +99,7 @@ logger = logging.getLogger(__name__)
def _get_oid(df):
if "tags" in df.columns:
return df.tags.str.extract('"oid"=>"(\d+)"', expand=False)
return df.tags.str.extract(r'"oid"=>"(\d+)"', expand=False)
else:
return pd.Series(np.nan, df.index)
@ -131,45 +135,50 @@ def _find_closest_links(links, new_links, distance_upper_bound=1.5):
)
def _load_buses_from_eg(eg_buses, europe_shape, config_elec):
def _load_buses(buses, europe_shape, config):
buses = (
pd.read_csv(
eg_buses,
buses,
quotechar="'",
true_values=["t"],
false_values=["f"],
dtype=dict(bus_id="str"),
)
.set_index("bus_id")
.drop(["station_id"], axis=1)
.rename(columns=dict(voltage="v_nom"))
)
if "station_id" in buses.columns:
buses.drop("station_id", axis=1, inplace=True)
buses["carrier"] = buses.pop("dc").map({True: "DC", False: "AC"})
buses["under_construction"] = buses.under_construction.where(
lambda s: s.notnull(), False
).astype(bool)
# remove all buses outside of all countries including exclusive economic zones (offshore)
europe_shape = gpd.read_file(europe_shape).loc[0, "geometry"]
europe_shape_prepped = shapely.prepared.prep(europe_shape)
buses_in_europe_b = buses[["x", "y"]].apply(
lambda p: europe_shape_prepped.contains(Point(p)), axis=1
)
v_nom_min = min(config["electricity"]["voltages"])
v_nom_max = max(config["electricity"]["voltages"])
buses_with_v_nom_to_keep_b = (
buses.v_nom.isin(config_elec["voltages"]) | buses.v_nom.isnull()
)
logger.info(
f'Removing buses with voltages {pd.Index(buses.v_nom.unique()).dropna().difference(config_elec["voltages"])}'
(v_nom_min <= buses.v_nom) & (buses.v_nom <= v_nom_max)
| (buses.v_nom.isnull())
| (
buses.carrier == "DC"
) # Keeping all DC buses from the input dataset independent of voltage (e.g. 150 kV connections)
)
logger.info(f"Removing buses outside of range AC {v_nom_min} - {v_nom_max} V")
return pd.DataFrame(buses.loc[buses_in_europe_b & buses_with_v_nom_to_keep_b])
def _load_transformers_from_eg(buses, eg_transformers):
def _load_transformers(buses, transformers):
transformers = pd.read_csv(
eg_transformers,
transformers,
quotechar="'",
true_values=["t"],
false_values=["f"],
@ -181,9 +190,9 @@ def _load_transformers_from_eg(buses, eg_transformers):
return transformers
def _load_converters_from_eg(buses, eg_converters):
def _load_converters_from_eg(buses, converters):
converters = pd.read_csv(
eg_converters,
converters,
quotechar="'",
true_values=["t"],
false_values=["f"],
@ -197,9 +206,25 @@ def _load_converters_from_eg(buses, eg_converters):
return converters
def _load_links_from_eg(buses, eg_links):
def _load_converters_from_osm(buses, converters):
converters = pd.read_csv(
converters,
quotechar="'",
true_values=["t"],
false_values=["f"],
dtype=dict(converter_id="str", bus0="str", bus1="str"),
).set_index("converter_id")
converters = _remove_dangling_branches(converters, buses)
converters["carrier"] = ""
return converters
def _load_links_from_eg(buses, links):
links = pd.read_csv(
eg_links,
links,
quotechar="'",
true_values=["t"],
false_values=["f"],
@ -208,7 +233,7 @@ def _load_links_from_eg(buses, eg_links):
links["length"] /= 1e3
# Skagerrak Link is connected to 132kV bus which is removed in _load_buses_from_eg.
# Skagerrak Link is connected to 132kV bus which is removed in _load_buses.
# Connect to neighboring 380kV bus
links.loc[links.bus1 == "6396", "bus1"] = "6398"
@ -220,10 +245,35 @@ def _load_links_from_eg(buses, eg_links):
return links
def _load_lines_from_eg(buses, eg_lines):
def _load_links_from_osm(buses, links):
links = pd.read_csv(
links,
quotechar="'",
true_values=["t"],
false_values=["f"],
dtype=dict(
link_id="str",
bus0="str",
bus1="str",
voltage="int",
p_nom="float",
),
).set_index("link_id")
links["length"] /= 1e3
links = _remove_dangling_branches(links, buses)
# Add DC line parameters
links["carrier"] = "DC"
return links
def _load_lines(buses, lines):
lines = (
pd.read_csv(
eg_lines,
lines,
quotechar="'",
true_values=["t"],
false_values=["f"],
@ -240,6 +290,7 @@ def _load_lines_from_eg(buses, eg_lines):
)
lines["length"] /= 1e3
lines["carrier"] = "AC"
lines = _remove_dangling_branches(lines, buses)
@ -290,7 +341,7 @@ def _reconnect_crimea(lines):
return pd.concat([lines, lines_to_crimea])
def _set_electrical_parameters_lines(lines, config):
def _set_electrical_parameters_lines_eg(lines, config):
v_noms = config["electricity"]["voltages"]
linetypes = config["lines"]["types"]
@ -302,16 +353,36 @@ def _set_electrical_parameters_lines(lines, config):
return lines
def _set_electrical_parameters_lines_osm(lines, config):
if lines.empty:
lines["type"] = []
return lines
v_noms = config["electricity"]["voltages"]
linetypes = _get_linetypes_config(config["lines"]["types"], v_noms)
lines["carrier"] = "AC"
lines["dc"] = False
lines.loc[:, "type"] = lines.v_nom.apply(
lambda x: _get_linetype_by_voltage(x, linetypes)
)
lines["s_max_pu"] = config["lines"]["s_max_pu"]
return lines
def _set_lines_s_nom_from_linetypes(n):
n.lines["s_nom"] = (
np.sqrt(3)
* n.lines["type"].map(n.line_types.i_nom)
* n.lines["v_nom"]
* n.lines.num_parallel
* n.lines["num_parallel"]
)
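
A worked instance of the formula above, assuming PyPSA's standard line type "Al/St 240/40 4-bundle 380.0" carries i_nom ≈ 2.58 kA (4 × 0.645 kA per bundle; value quoted from memory, treat as indicative):

import numpy as np

i_nom_ka, v_nom_kv, num_parallel = 2.58, 380.0, 1.0
s_nom_mva = np.sqrt(3) * i_nom_ka * v_nom_kv * num_parallel
print(f"{s_nom_mva:.0f} MVA")  # ≈ 1698 MVA
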
def _set_electrical_parameters_links(links, config, links_p_nom):
def _set_electrical_parameters_links_eg(links, config, links_p_nom):
if links.empty:
return links
@ -343,12 +414,27 @@ def _set_electrical_parameters_links(links, config, links_p_nom):
return links
def _set_electrical_parameters_links_osm(links, config):
if links.empty:
return links
p_max_pu = config["links"].get("p_max_pu", 1.0)
links["p_max_pu"] = p_max_pu
links["p_min_pu"] = -p_max_pu
links["carrier"] = "DC"
links["dc"] = True
return links
def _set_electrical_parameters_converters(converters, config):
p_max_pu = config["links"].get("p_max_pu", 1.0)
converters["p_max_pu"] = p_max_pu
converters["p_min_pu"] = -p_max_pu
converters["p_nom"] = 2000
# if column "p_nom" does not exist, set to 2000
if "p_nom" not in converters:
converters["p_nom"] = 2000
# Converters are combined with links
converters["under_construction"] = False
@ -463,7 +549,7 @@ def _set_countries_and_substations(n, config, country_shapes, offshore_shapes):
buses["substation_lv"] = (
lv_b & onshore_b & (~buses["under_construction"]) & has_connections_b
)
buses["substation_off"] = ((hv_b & offshore_b) | (hv_b & onshore_b)) & (
buses["substation_off"] = (offshore_b | (hv_b & onshore_b)) & (
~buses["under_construction"]
)
@ -617,11 +703,11 @@ def _set_shapes(n, country_shapes, offshore_shapes):
def base_network(
eg_buses,
eg_converters,
eg_transformers,
eg_lines,
eg_links,
buses,
converters,
transformers,
lines,
links,
links_p_nom,
europe_shape,
country_shapes,
@ -629,29 +715,59 @@ def base_network(
parameter_corrections,
config,
):
buses = _load_buses_from_eg(eg_buses, europe_shape, config["electricity"])
links = _load_links_from_eg(buses, eg_links)
base_network = config["electricity"].get("base_network")
assert base_network in {
"entsoegridkit",
"osm-raw",
"osm-prebuilt",
}, f"base_network must be either 'entsoegridkit', 'osm-raw' or 'osm-prebuilt', but got '{base_network}'"
if base_network == "entsoegridkit":
warnings.warn(
"The 'entsoegridkit' base network is deprecated and will be removed in future versions. Please use 'osm-raw' or 'osm-prebuilt' instead.",
DeprecationWarning,
)
converters = _load_converters_from_eg(buses, eg_converters)
logger.info(f"Creating base network using {base_network}.")
lines = _load_lines_from_eg(buses, eg_lines)
transformers = _load_transformers_from_eg(buses, eg_transformers)
buses = _load_buses(buses, europe_shape, config)
transformers = _load_transformers(buses, transformers)
lines = _load_lines(buses, lines)
if config["lines"].get("reconnect_crimea", True) and "UA" in config["countries"]:
lines = _reconnect_crimea(lines)
if base_network == "entsoegridkit":
links = _load_links_from_eg(buses, links)
converters = _load_converters_from_eg(buses, converters)
lines = _set_electrical_parameters_lines(lines, config)
# Optionally reconnect Crimea
if (config["lines"].get("reconnect_crimea", True)) & (
"UA" in config["countries"]
):
lines = _reconnect_crimea(lines)
# Set electrical parameters of lines and links
lines = _set_electrical_parameters_lines_eg(lines, config)
links = _set_electrical_parameters_links_eg(links, config, links_p_nom)
elif base_network in {"osm-prebuilt", "osm-raw"}:
links = _load_links_from_osm(buses, links)
converters = _load_converters_from_osm(buses, converters)
# Set electrical parameters of lines and links
lines = _set_electrical_parameters_lines_osm(lines, config)
links = _set_electrical_parameters_links_osm(links, config)
else:
raise ValueError(
"base_network must be either 'entsoegridkit', 'osm-raw', or 'osm-prebuilt'"
)
# Set electrical parameters of transformers and converters
transformers = _set_electrical_parameters_transformers(transformers, config)
links = _set_electrical_parameters_links(links, config, links_p_nom)
converters = _set_electrical_parameters_converters(converters, config)
n = pypsa.Network()
n.name = "PyPSA-Eur"
n.name = f"PyPSA-Eur ({base_network})"
time = get_snapshots(snakemake.params.snapshots, snakemake.params.drop_leap_day)
n.set_snapshots(time)
n.madd("Carrier", ["AC", "DC"])
n.import_components_from_dataframe(buses, "Bus")
n.import_components_from_dataframe(lines, "Line")
@ -660,8 +776,8 @@ def base_network(
n.import_components_from_dataframe(converters, "Link")
_set_lines_s_nom_from_linetypes(n)
_apply_parameter_corrections(n, parameter_corrections)
if config["electricity"].get("base_network") == "entsoegridkit":
_apply_parameter_corrections(n, parameter_corrections)
n = _remove_unconnected_components(n)
@ -675,9 +791,64 @@ def base_network(
_set_shapes(n, country_shapes, offshore_shapes)
# Add carriers if they are present in buses.carrier
carriers_in_buses = set(n.buses.carrier.dropna().unique())
carriers = carriers_in_buses.intersection({"AC", "DC"})
if carriers:
n.madd("Carrier", carriers)
return n
def _get_linetypes_config(line_types, voltages):
"""
Return the dictionary of linetypes for selected voltages. The dictionary is
a subset of the dictionary line_types, whose keys match the selected
voltages.
Parameters
----------
line_types : dict
Dictionary of linetypes: keys are nominal voltages and values are linetypes.
voltages : list
List of selected voltages.
Returns
-------
Dictionary of linetypes for selected voltages.
"""
# get voltages value that are not available in the line types
vnoms_diff = set(voltages).symmetric_difference(set(line_types.keys()))
if vnoms_diff:
logger.warning(
f"Voltages {vnoms_diff} not in the {line_types} or {voltages} list."
)
return {k: v for k, v in line_types.items() if k in voltages}
def _get_linetype_by_voltage(v_nom, d_linetypes):
"""
Return the linetype of a specific line based on its voltage v_nom.
Parameters
----------
v_nom : float
The voltage of the line.
d_linetypes : dict
Dictionary of linetypes: keys are nominal voltages and values are linetypes.
Returns
-------
The linetype of the line whose nominal voltage is closest to the line voltage.
"""
v_nom_min, line_type_min = min(
d_linetypes.items(),
key=lambda x: abs(x[0] - v_nom),
)
return line_type_min
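
A quick usage sketch of the nearest-voltage matching, with the line types from the config above:

linetypes = {
    220.0: "Al/St 240/40 2-bundle 220.0",
    300.0: "Al/St 240/40 3-bundle 300.0",
    380.0: "Al/St 240/40 4-bundle 380.0",
}
# 330 kV is closer to 300 kV than to 380 kV:
assert _get_linetype_by_voltage(330.0, linetypes) == "Al/St 240/40 3-bundle 300.0"
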
def voronoi(points, outline, crs=4326):
"""
Create Voronoi polygons from a set of points within an outline.
@ -793,25 +964,47 @@ if __name__ == "__main__":
configure_logging(snakemake)
set_scenario_config(snakemake)
countries = snakemake.params.countries
buses = snakemake.input.buses
converters = snakemake.input.converters
transformers = snakemake.input.transformers
lines = snakemake.input.lines
links = snakemake.input.links
europe_shape = snakemake.input.europe_shape
country_shapes = snakemake.input.country_shapes
offshore_shapes = snakemake.input.offshore_shapes
config = snakemake.config
if "links_p_nom" in snakemake.input.keys():
links_p_nom = snakemake.input.links_p_nom
else:
links_p_nom = None
if "parameter_corrections" in snakemake.input.keys():
parameter_corrections = snakemake.input.parameter_corrections
else:
parameter_corrections = None
n = base_network(
snakemake.input.eg_buses,
snakemake.input.eg_converters,
snakemake.input.eg_transformers,
snakemake.input.eg_lines,
snakemake.input.eg_links,
snakemake.input.links_p_nom,
snakemake.input.europe_shape,
snakemake.input.country_shapes,
snakemake.input.offshore_shapes,
snakemake.input.parameter_corrections,
snakemake.config,
buses,
converters,
transformers,
lines,
links,
links_p_nom,
europe_shape,
country_shapes,
offshore_shapes,
parameter_corrections,
config,
)
onshore_regions, offshore_regions, shapes, offshore_shapes = build_bus_shapes(
n,
snakemake.input.country_shapes,
snakemake.input.offshore_shapes,
snakemake.params.countries,
country_shapes,
offshore_shapes,
countries,
)
shapes.to_file(snakemake.output.regions_onshore)


@ -311,7 +311,7 @@ def add_unsustainable_potentials(df):
# Phase out unsustainable biomass potentials linearly from 2020 to 2035 while phasing in sustainable potentials
share_unsus = params.get("share_unsustainable_use_retained").get(investment_year)
df_wo_ch = df.drop(df.filter(regex="CH\d", axis=0).index)
df_wo_ch = df.drop(df.filter(regex=r"CH\d", axis=0).index)
# Calculate unsustainable solid biomass
df_wo_ch["unsustainable solid biomass"] = _calc_unsustainable_potential(


@ -24,7 +24,7 @@ import tabula as tbl
ENERGY_CONTENT = 4.8 # unit MWh/t (wood pellets)
system = platform.system()
encoding = "cp1252" if system == "Windows" else None
encoding = "cp1252" if system == "Windows" else "utf-8"
def get_countries():


@ -2,9 +2,47 @@
# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Approximate heat pump coefficient-of-performance (COP) profiles for different
heat sources and systems.
For central heating, this is based on Jensen et al. (2018) (cf. `CentralHeatingCopApproximator <CentralHeatingCopApproximator.py>`_) and for decentral heating, the approximation is based on Staffell et al. (2012) (cf. `DecentralHeatingCopApproximator <DecentralHeatingCopApproximator.py>`_).
Relevant Settings
-----------------
.. code:: yaml
sector:
heat_pump_sink_T_decentral_heating:
district_heating:
forward_temperature:
return_temperature:
heat_source_cooling:
heat_pump_cop_approximation:
refrigerant:
heat_exchanger_pinch_point_temperature_difference:
isentropic_compressor_efficiency:
heat_loss:
heat_pump_sources:
urban central:
urban decentral:
rural:
snapshots:
Inputs
------
- `resources/<run_name>/regions_onshore.geojson`: Onshore regions
- `resources/<run_name>/temp_soil_total`: Ground temperature
- `resources/<run_name>/temp_air_total`: Air temperature
Outputs
-------
- `resources/<run_name>/cop_profiles.nc`: Heat pump coefficient-of-performance (COP) profiles
"""
import sys
import geopandas as gpd
import numpy as np
import pandas as pd
import xarray as xr
@ -14,13 +52,54 @@ from DecentralHeatingCopApproximator import DecentralHeatingCopApproximator
from scripts.definitions.heat_system_type import HeatSystemType
sys.path.append("..")
def map_temperature_dict_to_onshore_regions(
supply_temperature_by_country: dict,
regions_onshore: pd.Index,
snapshots: pd.DatetimeIndex,
) -> xr.DataArray:
"""
Map dictionary of temperatures to onshore regions.
Parameters:
----------
supply_temperature_by_country : dictionary
Dictionary with temperatures as values and country codes as keys. One key must be named "default".
regions_onshore : pd.Index
Names of onshore regions
snapshots : pd.DatetimeIndex
Time stamps
Returns:
-------
xr.DataArray
The dictionary values mapped to onshore regions with onshore regions as coordinates.
"""
return xr.DataArray(
[
[
(
supply_temperature_by_country[get_country_from_node_name(node_name)]
if get_country_from_node_name(node_name)
in supply_temperature_by_country.keys()
else supply_temperature_by_country["default"]
)
for node_name in regions_onshore.values
]
# pass both nodes and snapshots as dimensions to preserve correct data structure
for _ in snapshots
],
dims=["time", "name"],
coords={"time": snapshots, "name": regions_onshore},
)
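
A toy usage sketch of the mapping above (region names illustrative; assumes the module's helpers and imports are in scope):

regions = pd.Index(["DE0 1", "DK0 1"], name="name")
snaps = pd.date_range("2019-01-01", periods=2, freq="h")
da = map_temperature_dict_to_onshore_regions({"default": 90, "DK": 70}, regions, snaps)
assert (da.sel(name="DK0 1") == 70).all() and (da.sel(name="DE0 1") == 90).all()
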
def get_cop(
heat_system_type: str,
heat_source: str,
source_inlet_temperature_celsius: xr.DataArray,
forward_temperature_by_node_and_time: xr.DataArray = None,
return_temperature_by_node_and_time: xr.DataArray = None,
) -> xr.DataArray:
"""
Calculate the coefficient of performance (COP) for a heating system.
@ -41,8 +120,8 @@ def get_cop(
"""
if HeatSystemType(heat_system_type).is_central:
return CentralHeatingCopApproximator(
forward_temperature_celsius=snakemake.params.forward_temperature_central_heating,
return_temperature_celsius=snakemake.params.return_temperature_central_heating,
forward_temperature_celsius=forward_temperature_by_node_and_time,
return_temperature_celsius=return_temperature_by_node_and_time,
source_inlet_temperature_celsius=source_inlet_temperature_celsius,
source_outlet_temperature_celsius=source_inlet_temperature_celsius
- snakemake.params.heat_source_cooling_central_heating,
@ -56,6 +135,10 @@ def get_cop(
).approximate_cop()
def get_country_from_node_name(node_name: str) -> str:
return node_name[:2]
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
@ -68,6 +151,23 @@ if __name__ == "__main__":
set_scenario_config(snakemake)
# map forward and return temperatures specified on country-level to onshore regions
regions_onshore = gpd.read_file(snakemake.input.regions_onshore)["name"]
snapshots = pd.date_range(freq="h", **snakemake.params.snapshots)
forward_temperature_central_heating_by_node_and_time: xr.DataArray = (
map_temperature_dict_to_onshore_regions(
supply_temperature_by_country=snakemake.params.forward_temperature_central_heating,
regions_onshore=regions_onshore,
snapshots=snapshots,
)
)
return_temperature_central_heating_by_node_and_time: xr.DataArray = (
map_temperature_dict_to_onshore_regions(
supply_temperature_by_country=snakemake.params.return_temperature_central_heating,
regions_onshore=regions_onshore,
snapshots=snapshots,
)
)
cop_all_system_types = []
for heat_system_type, heat_sources in snakemake.params.heat_pump_sources.items():
cop_this_system_type = []
@ -79,6 +179,8 @@ if __name__ == "__main__":
heat_system_type=heat_system_type,
heat_source=heat_source,
source_inlet_temperature_celsius=source_inlet_temperature_celsius,
forward_temperature_by_node_and_time=forward_temperature_central_heating_by_node_and_time,
return_temperature_by_node_and_time=return_temperature_central_heating_by_node_and_time,
)
cop_this_system_type.append(cop_da)
cop_all_system_types.append(


@ -242,7 +242,7 @@ def build_eurostat(
temp.index = pd.MultiIndex.from_frame(
temp.index.to_frame().fillna("International aviation")
)
df = pd.concat([temp, df.loc[~int_avia]])
df = pd.concat([temp, df.loc[~int_avia]]).sort_index()
# Fill in missing data on "Domestic aviation" for each country.
for country in countries:
@ -651,8 +651,8 @@ def build_energy_totals(
"""
eurostat_fuels = {"electricity": "Electricity", "total": "Total all products"}
eurostat_countries = eurostat.index.levels[0]
eurostat_years = eurostat.index.levels[1]
eurostat_countries = eurostat.index.unique(0)
eurostat_years = eurostat.index.unique(1)
to_drop = ["passenger cars", "passenger car efficiency"]
new_index = pd.MultiIndex.from_product(
@ -1153,13 +1153,14 @@ def build_transport_data(
----------
- Swiss transport data: `BFS <https://www.bfs.admin.ch/bfs/en/home/statistics/mobility-transport/transport-infrastructure-vehicles/vehicles/road-vehicles-stock-level-motorisation.html>`_
"""
years = np.arange(2000, 2022)
# first collect number of cars
transport_data = pd.DataFrame(idees["passenger cars"])
countries_without_ch = set(countries) - {"CH"}
new_index = pd.MultiIndex.from_product(
[countries_without_ch, transport_data.index.levels[1]],
[countries_without_ch, transport_data.index.unique(1)],
names=["country", "year"],
)
@ -1167,7 +1168,7 @@ def build_transport_data(
if "CH" in countries:
fn = snakemake.input.swiss_transport
swiss_cars = pd.read_csv(fn, index_col=0).loc[2000:2015, ["passenger cars"]]
swiss_cars = pd.read_csv(fn, index_col=0).loc[years, ["passenger cars"]]
swiss_cars.index = pd.MultiIndex.from_product(
[["CH"], swiss_cars.index], names=["country", "year"]
@ -1175,10 +1176,8 @@ def build_transport_data(
transport_data = pd.concat([transport_data, swiss_cars]).sort_index()
transport_data.rename(columns={"passenger cars": "number cars"}, inplace=True)
transport_data = transport_data.rename(columns={"passenger cars": "number cars"})
# clean up dataframe
years = np.arange(2000, 2022)
transport_data = transport_data[
transport_data.index.get_level_values(1).isin(years)
]
@ -1188,11 +1187,10 @@ def build_transport_data(
logger.info(
f"Missing data on cars from:\n{list(missing)}\nFilling gaps with averaged data."
)
cars_pp = transport_data["number cars"] / population
fill_values = {
year: cars_pp.mean() * population for year in transport_data.index.levels[1]
year: cars_pp.mean() * population for year in transport_data.index.unique(1)
}
fill_values = pd.DataFrame(fill_values).stack()
fill_values = pd.DataFrame(fill_values, columns=["number cars"])
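
Several hunks in this file swap ``index.levels[...]`` for ``index.unique(...)``; a toy illustration of why the two differ after filtering:

import pandas as pd

idx = pd.MultiIndex.from_product([["AT", "BE"], [2019, 2020]], names=["country", "year"])
sub = pd.DataFrame(index=idx).loc[["AT"]]
print(list(sub.index.levels[0]))  # ['AT', 'BE'] -- stale, keeps unused level values
print(list(sub.index.unique(0)))  # ['AT']       -- only values actually present
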


@ -66,7 +66,7 @@ def build_nodal_industrial_energy_demand():
)
countries = keys.country.unique()
sectors = industrial_demand.columns.levels[1]
sectors = industrial_demand.columns.unique(1)
for country, sector in product(countries, sectors):
buses = keys.index[keys.country == country]


@ -445,7 +445,9 @@ def chemicals_industry():
# subtract ammonia energy demand (in ktNH3/a)
ammonia = pd.read_csv(snakemake.input.ammonia_production, index_col=0)
ammonia_total = ammonia.loc[ammonia.index.intersection(eu27), str(year)].sum()
ammonia_total = ammonia.loc[
ammonia.index.intersection(eu27), str(max(2018, year))
].sum()
df.loc["methane", sector] -= ammonia_total * params["MWh_CH4_per_tNH3_SMR"]
df.loc["elec", sector] -= ammonia_total * params["MWh_elec_per_tNH3_SMR"]


@ -79,6 +79,7 @@ with the following carriers are considered:
Unit of the output file is MWh/t.
"""
import numpy as np
import pandas as pd
from prepare_sector_network import get
@ -104,7 +105,7 @@ def build_industry_sector_ratios_intermediate():
snakemake.input.industry_sector_ratios, index_col=0
)
today_sector_ratios = demand.div(production, axis=1)
today_sector_ratios = demand.div(production, axis=1).replace([np.inf, -np.inf], 0)
today_sector_ratios.dropna(how="all", axis=1, inplace=True)
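
A toy illustration of the guard added above: zero production would otherwise leave inf ratios in place (sector name illustrative; uses this script's pandas/numpy imports):

demand = pd.DataFrame({"ceramics": [10.0]})
production = pd.Series({"ceramics": 0.0})
ratios = demand.div(production, axis=1).replace([np.inf, -np.inf], 0)
assert (ratios["ceramics"] == 0).all()
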


@ -0,0 +1,863 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur and PyPSA-Earth Authors
#
# SPDX-License-Identifier: MIT
import logging
import os
import string
import geopandas as gpd
import numpy as np
import pandas as pd
from _helpers import configure_logging, set_scenario_config
from shapely.geometry import LineString, Point
from shapely.ops import linemerge, nearest_points, split
from tqdm import tqdm
logger = logging.getLogger(__name__)
GEO_CRS = "EPSG:4326"
DISTANCE_CRS = "EPSG:3035"
BUS_TOL = (
5000 # unit: meters, default 5000 - Buses within this distance are grouped together
)
LINES_COLUMNS = [
"bus0",
"bus1",
"voltage",
"circuits",
"length",
"underground",
"under_construction",
"geometry",
]
LINKS_COLUMNS = [
"bus0",
"bus1",
"voltage",
"p_nom",
"length",
"underground",
"under_construction",
"geometry",
]
TRANSFORMERS_COLUMNS = [
"bus0",
"bus1",
"voltage_bus0",
"voltage_bus1",
"country",
"geometry",
]
def line_endings_to_bus_conversion(lines):
"""
Converts line endings to bus connections.
Takes a dataframe of lines and attaches the coordinates of each line's
start and end points, so that the endings can later be matched to buses
in the network.
Parameters:
lines (DataFrame)
Returns:
lines (DataFrame)
"""
# Assign to every line a start and end point
lines["bounds"] = lines["geometry"].boundary # create start and end point
lines["bus_0_coors"] = lines["bounds"].map(lambda p: p.geoms[0])
lines["bus_1_coors"] = lines["bounds"].map(lambda p: p.geoms[-1])
# splits into coordinates
lines["bus0_lon"] = lines["bus_0_coors"].x
lines["bus0_lat"] = lines["bus_0_coors"].y
lines["bus1_lon"] = lines["bus_1_coors"].x
lines["bus1_lat"] = lines["bus_1_coors"].y
return lines
# tol in m
def set_substations_ids(buses, distance_crs, tol=5000):
"""
Function to set substations ids to buses, accounting for location
tolerance.
The algorithm is as follows:
1. initialize all substation ids to -1
2. if the current substation has been already visited [substation_id < 0], then skip the calculation
3. otherwise:
1. identify the substations within the specified tolerance (tol)
2. when all the substations in tolerance have substation_id < 0, then specify a new substation_id
3. otherwise, if one of the substation in tolerance has a substation_id >= 0, then set that substation_id to all the others;
in case of multiple substations with substation_ids >= 0, the first value is picked for all
"""
buses["station_id"] = -1
# create temporary series to execute distance calculations using m as reference distances
temp_bus_geom = buses.geometry.to_crs(distance_crs)
# set tqdm options for substation ids
tqdm_kwargs_substation_ids = dict(
ascii=False,
unit=" buses",
total=buses.shape[0],
desc="Set substation ids ",
)
station_id = 0
for i, row in tqdm(buses.iterrows(), **tqdm_kwargs_substation_ids):
if buses.loc[i, "station_id"] >= 0:
continue
# get substations within tolerance
close_nodes = np.flatnonzero(
temp_bus_geom.distance(temp_bus_geom.loc[i]) <= tol
)
if len(close_nodes) == 1:
# if only one substation is within tolerance, then it is the current one
# Note that the node cannot be with substation_id >= 0, given the preliminary check
# at the beginning of the for loop
buses.loc[buses.index[i], "station_id"] = station_id
# update station id
station_id += 1
else:
# several substations in tolerance
# get their ids
subset_substation_ids = buses.loc[buses.index[close_nodes], "station_id"]
# check if all substation_ids are negative (<0)
all_neg = subset_substation_ids.max() < 0
# check if at least a substation_id is negative (<0)
some_neg = subset_substation_ids.min() < 0
if all_neg:
# when all substation_ids are negative, then this is a new substation id
# set the current station_id and increment the counter
buses.loc[buses.index[close_nodes], "station_id"] = station_id
station_id += 1
elif some_neg:
# otherwise, when at least a substation_id is non-negative, then pick the first value
# and set it to all the other substations within tolerance
sub_id = -1
for substation_id in subset_substation_ids:
if substation_id >= 0:
sub_id = substation_id
break
buses.loc[buses.index[close_nodes], "station_id"] = sub_id
def set_lines_ids(lines, buses, distance_crs):
"""
Function to set line buses ids to the closest bus in the list.
"""
# set tqdm options for set lines ids
tqdm_kwargs_line_ids = dict(
ascii=False,
unit=" lines",
total=lines.shape[0],
desc="Set line/link bus ids ",
)
# initialization
lines["bus0"] = -1
lines["bus1"] = -1
busesepsg = buses.to_crs(distance_crs)
linesepsg = lines.to_crs(distance_crs)
for i, row in tqdm(linesepsg.iterrows(), **tqdm_kwargs_line_ids):
# select buses having the voltage level of the current line
buses_sel = busesepsg[
(buses["voltage"] == row["voltage"]) & (buses["dc"] == row["dc"])
]
# find the closest node of the bus0 of the line
bus0_id = buses_sel.geometry.distance(row.geometry.boundary.geoms[0]).idxmin()
lines.loc[i, "bus0"] = buses.loc[bus0_id, "bus_id"]
# check if the line starts exactly in the node, otherwise modify the linestring
distance_bus0 = busesepsg.geometry.loc[bus0_id].distance(
row.geometry.boundary.geoms[0]
)
if distance_bus0 > 0:
# the line does not start in the node, thus modify the linestring
line_start_point = lines.geometry.loc[i].boundary.geoms[0]
new_segment = LineString([buses.geometry.loc[bus0_id], line_start_point])
modified_line = linemerge([new_segment, lines.geometry.loc[i]])
lines.loc[i, "geometry"] = modified_line
# find the closest node of the bus1 of the line
bus1_id = buses_sel.geometry.distance(row.geometry.boundary.geoms[1]).idxmin()
lines.loc[i, "bus1"] = buses.loc[bus1_id, "bus_id"]
# check if the line ends exactly in the node, otherwise modify the linestring
distance_bus1 = busesepsg.geometry.loc[bus1_id].distance(
row.geometry.boundary.geoms[1]
)
if distance_bus1 > 0:
# the line does not end in the node, thus modify the linestring
line_end_point = lines.geometry.loc[i].boundary.geoms[1]
new_segment = LineString([line_end_point, buses.geometry.loc[bus1_id]])
modified_line = linemerge([lines.geometry.loc[i], new_segment])
lines.loc[i, "geometry"] = modified_line
return lines, buses
def merge_stations_same_station_id(
buses, delta_lon=0.001, delta_lat=0.001, precision=4
):
"""
Function to merge buses with same voltage and station_id This function
iterates over all substation ids and creates a bus_id for every substation
and voltage level.
Therefore, a substation with multiple voltage levels is represented
with different buses, one per voltage level
"""
# initialize list of cleaned buses
buses_clean = []
# initialize the number of buses
n_buses = 0
for g_name, g_value in buses.groupby(by="station_id"):
# average location of the buses having the same station_id
station_point_x = np.round(g_value.geometry.x.mean(), precision)
station_point_y = np.round(g_value.geometry.y.mean(), precision)
# is_dclink_boundary_point = any(g_value["is_dclink_boundary_point"])
# loop for every voltage level in the bus
# The location of the buses is averaged; in the case of multiple voltage levels for the same station_id,
# each bus (one per voltage level and polarity) is offset by a distance regulated by delta_lon/delta_lat
v_it = 0
for v_name, bus_row in g_value.groupby(by=["voltage", "dc"]):
lon_bus = np.round(station_point_x + v_it * delta_lon, precision)
lat_bus = np.round(station_point_y + v_it * delta_lat, precision)
bus_data = [
n_buses, # "bus_id"
g_name, # "station_id"
v_name[0], # "voltage"
bus_row["dc"].all(), # "dc"
"|".join(bus_row["symbol"].unique()), # "symbol"
bus_row["under_construction"].any(), # "under_construction"
"|".join(bus_row["tag_substation"].unique()), # "tag_substation"
bus_row["tag_area"].sum(), # "tag_area"
lon_bus, # "lon"
lat_bus, # "lat"
bus_row["country"].iloc[0], # "country"
Point(lon_bus, lat_bus), # "geometry"
]
# add the bus
buses_clean.append(bus_data)
# increase counters
v_it += 1
n_buses += 1
# names of the columns
buses_clean_columns = [
"bus_id",
"station_id",
"voltage",
"dc",
"symbol",
"under_construction",
"tag_substation",
"tag_area",
"x",
"y",
"country",
# "is_dclink_boundary_point",
"geometry",
]
gdf_buses_clean = gpd.GeoDataFrame(
buses_clean, columns=buses_clean_columns
).set_crs(crs=buses.crs, inplace=True)
return gdf_buses_clean
def get_ac_frequency(df, fr_col="tag_frequency"):
"""
Function to define a default frequency value.

Attempts to find the most usual non-zero frequency across the
dataframe; 50 Hz is assumed as a back-up value.
"""
# Initialize a default frequency value
ac_freq_default = 50
grid_freq_levels = df[fr_col].value_counts(sort=True, dropna=True)
if not grid_freq_levels.empty:
# AC lines frequency shouldn't be 0Hz
ac_freq_levels = grid_freq_levels.loc[
grid_freq_levels.index.get_level_values(0) != "0"
]
ac_freq_default = ac_freq_levels.index.get_level_values(0)[0]
return ac_freq_default
def get_transformers(buses, lines):
"""
Function to create fake transformer lines that connect buses of the same
station_id at different voltage.
"""
ac_freq = get_ac_frequency(lines)
df_transformers = []
# Transformers should be added between AC buses only
buses_ac = buses[buses["dc"] != True]
for g_name, g_value in buses_ac.sort_values("voltage", ascending=True).groupby(
by="station_id"
):
# note: by construction there cannot be more than two buses with the same station_id and same voltage
n_voltages = len(g_value)
if n_voltages > 1:
for id in range(n_voltages - 1):
# when g_value has more than one node, there are multiple voltage levels at the same station
transformer_geometry = LineString(
[g_value.geometry.iloc[id], g_value.geometry.iloc[id + 1]]
)
transformer_data = [
f"transf_{g_name}_{id}", # "line_id"
g_value["bus_id"].iloc[id], # "bus0"
g_value["bus_id"].iloc[id + 1], # "bus1"
g_value.voltage.iloc[id], # "voltage_bus0"
g_value.voltage.iloc[id + 1], # "voltage_bus1"
g_value.country.iloc[id], # "country"
transformer_geometry, # "geometry"
]
df_transformers.append(transformer_data)
# name of the columns
transformers_columns = [
"transformer_id",
"bus0",
"bus1",
"voltage_bus0",
"voltage_bus1",
"country",
"geometry",
]
df_transformers = gpd.GeoDataFrame(df_transformers, columns=transformers_columns)
if not df_transformers.empty:
init_index = 0 if lines.empty else lines.index[-1] + 1
df_transformers.set_index(init_index + df_transformers.index, inplace=True)
# update line endings
df_transformers = line_endings_to_bus_conversion(df_transformers)
df_transformers.drop(columns=["bounds", "bus_0_coors", "bus_1_coors"], inplace=True)
gdf_transformers = gpd.GeoDataFrame(df_transformers)
gdf_transformers.crs = GEO_CRS
return gdf_transformers
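# Illustrative sketch (hypothetical helper and toy data): two AC buses of the
# same station at different voltage levels yield one fake transformer edge.
def _example_get_transformers():
    toy_buses = gpd.GeoDataFrame(
        {
            "bus_id": [0, 1],
            "station_id": [0, 0],
            "voltage": [220000, 380000],
            "dc": [False, False],
            "country": ["DE", "DE"],
        },
        geometry=[Point(10.0, 50.0), Point(10.001, 50.0)],
        crs=GEO_CRS,
    )
    toy_lines = pd.DataFrame({"tag_frequency": ["50"]})
    return get_transformers(toy_buses, toy_lines)  # one row, id "transf_0_0"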
def _find_closest_bus(row, buses, distance_crs, tol=5000):
"""
Find the closest bus to a given bus based on geographical distance and
country.
Parameters:
- row: The bus_id of the bus for which to find the closest AC bus.
- buses: A GeoDataFrame containing information about all the buses.
- distance_crs: The coordinate reference system to use for distance calculations.
- tol: The tolerance distance in meters within which a bus is considered closest (default: 5000).
Returns:
- closest_bus_id: The bus_id of the closest AC bus within the tolerance and in the same country, or None if there is none.
"""
gdf_buses = buses.copy()
gdf_buses = gdf_buses.to_crs(distance_crs)
# Get the geometry of the bus with bus_id = link_bus_id
bus = gdf_buses[gdf_buses["bus_id"] == row]
bus_geom = bus.geometry.values[0]
gdf_buses_filtered = gdf_buses[gdf_buses["dc"] == False]
# Find the closest point in the filtered buses
nearest_geom = nearest_points(bus_geom, gdf_buses_filtered.union_all())[1]
# Get the bus_id of the closest bus
closest_bus = gdf_buses_filtered.loc[gdf_buses_filtered["geometry"] == nearest_geom]
# check if closest_bus_id is within the distance
within_distance = (
closest_bus.to_crs(distance_crs).distance(bus.to_crs(distance_crs), align=False)
).values[0] <= tol
in_same_country = closest_bus.country.values[0] == bus.country.values[0]
if within_distance and in_same_country:
closest_bus_id = closest_bus.bus_id.values[0]
else:
closest_bus_id = None
return closest_bus_id
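# Sketch with toy coordinates (hypothetical helper, not part of the pipeline):
# the DC end at bus 2 snaps to AC bus 0 roughly 70 m away in the same country,
# with EPSG:3035 as the metric CRS.
def _example_find_closest_bus():
    toy_buses = gpd.GeoDataFrame(
        {
            "bus_id": [0, 1, 2],
            "dc": [False, False, True],
            "country": ["DE", "DE", "DE"],
        },
        geometry=[Point(10.0, 50.0), Point(11.0, 50.0), Point(10.001, 50.0)],
        crs="EPSG:4326",
    )
    return _find_closest_bus(2, toy_buses, "EPSG:3035")  # -> 0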
def _get_converters(buses, links, distance_crs):
"""
Get the converters for the given buses and links, connecting link endings
to the closest AC bus.
Parameters:
- buses (pandas.DataFrame): DataFrame containing information about buses.
- links (pandas.DataFrame): DataFrame containing information about links.
- distance_crs: The coordinate reference system to use for distance calculations.
Returns:
- gdf_converters (geopandas.GeoDataFrame): GeoDataFrame containing information about converters.
"""
converters = []
for idx, row in links.iterrows():
for conv in range(2):
link_end = row[f"bus{conv}"]
# HVDC Gotland is connected to 130 kV grid, closest HVAC bus is further away
closest_bus = _find_closest_bus(link_end, buses, distance_crs, tol=40000)
if closest_bus is None:
continue
converter_id = f"converter/{row['link_id']}_{conv}"
converter_geometry = LineString(
[
buses[buses["bus_id"] == link_end].geometry.values[0],
buses[buses["bus_id"] == closest_bus].geometry.values[0],
]
)
logger.info(
f"Added converter #{conv+1}/2 for link {row['link_id']}:{converter_id}."
)
converter_data = [
converter_id, # "converter_id"
link_end, # "bus0"
closest_bus, # "bus1"
row["voltage"], # "voltage"
row["p_nom"], # "p_nom"
False, # "underground"
False, # "under_construction"
buses[buses["bus_id"] == closest_bus].country.values[0], # "country"
converter_geometry, # "geometry"
]
# Create the converter
converters.append(converter_data)
conv_columns = [
"converter_id",
"bus0",
"bus1",
"voltage",
"p_nom",
"underground",
"under_construction",
"country",
"geometry",
]
gdf_converters = gpd.GeoDataFrame(
converters, columns=conv_columns, crs=GEO_CRS
).reset_index()
return gdf_converters
def connect_stations_same_station_id(lines, buses):
"""
Function to create fake links between substations with the same
station_id.
"""
ac_freq = get_ac_frequency(lines)
station_id_list = buses.station_id.unique()
add_lines = []
for s_id in station_id_list:
buses_station_id = buses[buses.station_id == s_id]
if len(buses_station_id) > 1:
for b_it in range(1, len(buses_station_id)):
line_geometry = LineString(
[
buses_station_id.geometry.iloc[0],
buses_station_id.geometry.iloc[b_it],
]
)
line_bounds = line_geometry.bounds
line_data = [
f"link{buses_station_id}_{b_it}", # "line_id"
buses_station_id.index[0], # "bus0"
buses_station_id.index[b_it], # "bus1"
400000, # "voltage"
1, # "circuits"
0.0, # "length"
False, # "underground"
False, # "under_construction"
"transmission", # "tag_type"
ac_freq, # "tag_frequency"
buses_station_id.country.iloc[0], # "country"
line_geometry, # "geometry"
line_bounds, # "bounds"
buses_station_id.geometry.iloc[0], # "bus_0_coors"
buses_station_id.geometry.iloc[b_it], # "bus_1_coors"
buses_station_id.lon.iloc[0], # "bus0_lon"
buses_station_id.lat.iloc[0], # "bus0_lat"
buses_station_id.lon.iloc[b_it], # "bus1_lon"
buses_station_id.lat.iloc[b_it], # "bus1_lat"
]
add_lines.append(line_data)
# name of the columns
add_lines_columns = [
"line_id",
"bus0",
"bus1",
"voltage",
"circuits",
"length",
"underground",
"under_construction",
"tag_type",
"tag_frequency",
"country",
"geometry",
"bounds",
"bus_0_coors",
"bus_1_coors",
"bus0_lon",
"bus0_lat",
"bus1_lon",
"bus1_lat",
]
df_add_lines = gpd.GeoDataFrame(add_lines, columns=add_lines_columns)
lines = pd.concat([lines, df_add_lines], ignore_index=True)
return lines
def set_lv_substations(buses):
"""
Function to set which nodes are LV, thereby setting substation_lv.
The current methodology flags as LV the lowest-voltage bus at stations
where multiple voltage levels are found, i.e. where the station_id is
duplicated.
"""
# initialize column substation_lv to true
buses["substation_lv"] = True
# For each station number with multiple buses make lowest voltage `substation_lv = TRUE`
bus_with_stations_duplicates = buses[
buses.station_id.duplicated(keep=False)
].sort_values(by=["station_id", "voltage"])
lv_bus_at_station_duplicates = (
buses[buses.station_id.duplicated(keep=False)]
.sort_values(by=["station_id", "voltage"])
.drop_duplicates(subset=["station_id"])
)
# Set all buses with station duplicates "False"
buses.loc[bus_with_stations_duplicates.index, "substation_lv"] = False
# Set lv_buses with station duplicates "True"
buses.loc[lv_bus_at_station_duplicates.index, "substation_lv"] = True
return buses
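# Sketch (hypothetical helper, not part of the pipeline): at station 0 only
# the 220 kV bus keeps substation_lv; the single bus of station 1 stays True
# by default.
def _example_set_lv_substations():
    toy_buses = pd.DataFrame(
        {"station_id": [0, 0, 1], "voltage": [220000, 380000, 380000]}
    )
    return set_lv_substations(toy_buses)  # substation_lv: [True, False, True]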
def merge_stations_lines_by_station_id_and_voltage(
lines, links, buses, distance_crs, tol=5000
):
"""
Function to merge close stations and adapt the line datasets to adhere to
the merged dataset.
"""
logger.info(" - Setting substation ids with tolerance of %.2f m" % (tol))
# bus types (AC != DC)
buses_ac = buses[buses["dc"] == False].reset_index()
buses_dc = buses[buses["dc"] == True].reset_index()
# set_substations_ids(buses, distance_crs, tol=tol)
set_substations_ids(buses_ac, distance_crs, tol=tol)
set_substations_ids(buses_dc, distance_crs, tol=tol)
logger.info(" - Merging substations with the same id")
# merge buses with same station id and voltage
if not buses.empty:
buses_ac = merge_stations_same_station_id(buses_ac)
buses_dc = merge_stations_same_station_id(buses_dc)
buses_dc["bus_id"] = buses_ac["bus_id"].max() + buses_dc["bus_id"] + 1
buses = pd.concat([buses_ac, buses_dc], ignore_index=True)
set_substations_ids(buses, distance_crs, tol=tol)
logger.info(" - Specifying the bus ids of the line endings")
# set the bus ids to the line dataset
lines, buses = set_lines_ids(lines, buses, distance_crs)
links, buses = set_lines_ids(links, buses, distance_crs)
# drop lines starting and ending in the same node
lines.drop(lines[lines["bus0"] == lines["bus1"]].index, inplace=True)
links.drop(links[links["bus0"] == links["bus1"]].index, inplace=True)
# update line endings
lines = line_endings_to_bus_conversion(lines)
links = line_endings_to_bus_conversion(links)
# set substation_lv
set_lv_substations(buses)
# reset index
lines.reset_index(drop=True, inplace=True)
links.reset_index(drop=True, inplace=True)
return lines, links, buses
def _split_linestring_by_point(linestring, points):
"""
Function to split a linestring geometry by multiple inner points.
Parameters
----------
linestring : LineString
Linestring of the line to be split
points : list
List of points to split the linestring
Return
------
list_linestrings : list
List of linestring segments resulting from the split
"""
list_linestrings = [linestring]
for p in points:
# execute split to all lines and store results
temp_list = [split(l, p) for l in list_linestrings]
# nest all geometries
list_linestrings = [lstring for tval in temp_list for lstring in tval.geoms]
return list_linestrings
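# Sketch (hypothetical helper, not part of the pipeline): a single inner
# point splits the line into its two halves.
def _example_split_linestring_by_point():
    line = LineString([(0, 0), (2, 0)])
    # -> [LINESTRING (0 0, 1 0), LINESTRING (1 0, 2 0)]
    return _split_linestring_by_point(line, [Point(1, 0)])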
def fix_overpassing_lines(lines, buses, distance_crs, tol=1):
"""
Fix overpassing lines by splitting them at nodes within a given tolerance,
to include the buses being overpassed.
Parameters:
- lines (GeoDataFrame): The lines to be fixed.
- buses (GeoDataFrame): The buses representing nodes.
- distance_crs (str): The coordinate reference system (CRS) for distance calculations.
- tol (float): The tolerance distance in meters for determining whether a bus lies on a line.
Returns:
- lines (GeoDataFrame): The fixed lines.
- buses (GeoDataFrame): The buses representing nodes.
"""
lines_to_add = [] # list of lines to be added
lines_to_split = [] # list of lines that have been split
lines_epsgmod = lines.to_crs(distance_crs)
buses_epsgmod = buses.to_crs(distance_crs)
# set tqdm options for substation ids
tqdm_kwargs_substation_ids = dict(
ascii=False,
unit=" lines",
total=lines.shape[0],
desc="Verify lines overpassing nodes ",
)
for l in tqdm(lines.index, **tqdm_kwargs_substation_ids):
# bus indices being within tolerance from the line
bus_in_tol_epsg = buses_epsgmod[
buses_epsgmod.geometry.distance(lines_epsgmod.geometry.loc[l]) <= tol
]
# Get boundary points
endpoint0 = lines_epsgmod.geometry.loc[l].boundary.geoms[0]
endpoint1 = lines_epsgmod.geometry.loc[l].boundary.geoms[1]
# Calculate distances
dist_to_ep0 = bus_in_tol_epsg.geometry.distance(endpoint0)
dist_to_ep1 = bus_in_tol_epsg.geometry.distance(endpoint1)
# exclude endings of the lines
bus_in_tol_epsg = bus_in_tol_epsg[(dist_to_ep0 > tol) & (dist_to_ep1 > tol)]
if not bus_in_tol_epsg.empty:
# add index of line to split
lines_to_split.append(l)
buses_locs = buses.geometry.loc[bus_in_tol_epsg.index]
# get new line geometries
new_geometries = _split_linestring_by_point(lines.geometry[l], buses_locs)
n_geoms = len(new_geometries)
# create temporary copies of the line
df_append = gpd.GeoDataFrame([lines.loc[l]] * n_geoms)
# update geometries
df_append["geometry"] = new_geometries
# update name of the line if there are multiple line segments
df_append["line_id"] = [
str(df_append["line_id"].iloc[0])
+ (f"-{letter}" if n_geoms > 1 else "")
for letter in string.ascii_lowercase[:n_geoms]
]
lines_to_add.append(df_append)
if not lines_to_add:
return lines, buses
df_to_add = gpd.GeoDataFrame(pd.concat(lines_to_add, ignore_index=True))
df_to_add.set_crs(lines.crs, inplace=True)
df_to_add.set_index(lines.index[-1] + df_to_add.index, inplace=True)
# update length
df_to_add["length"] = df_to_add.to_crs(distance_crs).geometry.length
# update line endings
df_to_add = line_endings_to_bus_conversion(df_to_add)
# remove original lines
lines.drop(lines_to_split, inplace=True)
lines = df_to_add if lines.empty else pd.concat([lines, df_to_add])
lines = gpd.GeoDataFrame(lines.reset_index(drop=True), crs=lines.crs)
return lines, buses
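# Illustrative sketch (hypothetical helper and toy data, not part of the
# pipeline): line "x" passes straight over a third bus and is split there
# into segments "x-a" and "x-b".
def _example_fix_overpassing_lines():
    toy_lines = gpd.GeoDataFrame(
        {"line_id": ["x"], "length": [0.0]},
        geometry=[LineString([(10.0, 50.0), (10.01, 50.0)])],
        crs="EPSG:4326",
    )
    toy_buses = gpd.GeoDataFrame(
        {"bus_id": [0, 1, 2]},
        geometry=[Point(10.0, 50.0), Point(10.005, 50.0), Point(10.01, 50.0)],
        crs="EPSG:4326",
    )
    return fix_overpassing_lines(toy_lines, toy_buses, "EPSG:3035", tol=1)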
def build_network(inputs, outputs):
logger.info("Reading input data.")
buses = gpd.read_file(inputs["substations"])
lines = gpd.read_file(inputs["lines"])
links = gpd.read_file(inputs["links"])
lines = line_endings_to_bus_conversion(lines)
links = line_endings_to_bus_conversion(links)
logger.info(
"Fixing lines overpassing nodes: Connecting nodes and splittling lines."
)
lines, buses = fix_overpassing_lines(lines, buses, DISTANCE_CRS, tol=1)
# Merge buses with same voltage and within tolerance
logger.info(f"Aggregating close substations with a tolerance of {BUS_TOL} m")
lines, links, buses = merge_stations_lines_by_station_id_and_voltage(
lines, links, buses, DISTANCE_CRS, BUS_TOL
)
# Recalculate lengths of lines
utm = lines.estimate_utm_crs(datum_name="WGS 84")
lines["length"] = lines.to_crs(utm).length
links["length"] = links.to_crs(utm).length
transformers = get_transformers(buses, lines)
converters = _get_converters(buses, links, DISTANCE_CRS)
logger.info("Saving outputs")
### Convert output to pypsa-eur friendly format
# Rename "substation" in buses["symbol"] to "Substation"
buses["symbol"] = buses["symbol"].replace({"substation": "Substation"})
# Drop unnecessary index column and set respective element ids as index
lines.set_index("line_id", inplace=True)
if not links.empty:
links.set_index("link_id", inplace=True)
converters.set_index("converter_id", inplace=True)
transformers.set_index("transformer_id", inplace=True)
buses.set_index("bus_id", inplace=True)
# Convert voltages from V to kV
lines["voltage"] = lines["voltage"] / 1000
if not links.empty:
links["voltage"] = links["voltage"] / 1000
if not converters.empty:
converters["voltage"] = converters["voltage"] / 1000
transformers["voltage_bus0"], transformers["voltage_bus1"] = (
transformers["voltage_bus0"] / 1000,
transformers["voltage_bus1"] / 1000,
)
buses["voltage"] = buses["voltage"] / 1000
# Convert 'true' and 'false' to 't' and 'f'
lines = lines.replace({True: "t", False: "f"})
links = links.replace({True: "t", False: "f"})
converters = converters.replace({True: "t", False: "f"})
buses = buses.replace({True: "t", False: "f"})
# Change column orders
lines = lines[LINES_COLUMNS]
if not links.empty:
links = links[LINKS_COLUMNS]
else:
links = pd.DataFrame(columns=["link_id"] + LINKS_COLUMNS)
links.set_index("link_id", inplace=True)
transformers = transformers[TRANSFORMERS_COLUMNS]
# Export to csv for base_network
buses.to_csv(outputs["substations"], quotechar="'")
lines.to_csv(outputs["lines"], quotechar="'")
links.to_csv(outputs["links"], quotechar="'")
converters.to_csv(outputs["converters"], quotechar="'")
transformers.to_csv(outputs["transformers"], quotechar="'")
# Export to GeoJSON for quick validations
buses.to_file(outputs["substations_geojson"])
lines.to_file(outputs["lines_geojson"])
links.to_file(outputs["links_geojson"])
converters.to_file(outputs["converters_geojson"])
transformers.to_file(outputs["transformers_geojson"])
return None
if __name__ == "__main__":
# Detect running outside of snakemake and mock snakemake for testing
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake("build_osm_network")
configure_logging(snakemake)
set_scenario_config(snakemake)
countries = snakemake.config["countries"]
build_network(snakemake.input, snakemake.output)

View File

@@ -254,7 +254,7 @@ def prepare_building_stock_data():
index = pd.MultiIndex.from_product([[ct], averaged_data.index.to_list()])
averaged_data.index = index
averaged_data["estimated"] = 1
if ct not in area_tot.index.levels[0]:
if ct not in area_tot.index.unique(0):
area_tot = pd.concat([area_tot, averaged_data], sort=True)
else:
area_tot.loc[averaged_data.index] = averaged_data

1807
scripts/clean_osm_data.py Normal file

File diff suppressed because it is too large

View File

@@ -36,7 +36,7 @@ Inputs
- ``resources/regions_offshore_elec_s{simpl}.geojson``: confer :ref:`simplify`
- ``resources/busmap_elec_s{simpl}.csv``: confer :ref:`simplify`
- ``networks/elec_s{simpl}.nc``: confer :ref:`simplify`
- ``data/custom_busmap_elec_s{simpl}_{clusters}.csv``: optional input
- ``data/custom_busmap_elec_s{simpl}_{clusters}_{base_network}.csv``: optional input
Outputs
-------
@@ -511,9 +511,7 @@ if __name__ == "__main__":
# Fast-path if no clustering is necessary
busmap = n.buses.index.to_series()
linemap = n.lines.index.to_series()
clustering = pypsa.clustering.spatial.Clustering(
n, busmap, linemap, linemap, pd.Series(dtype="O")
)
clustering = pypsa.clustering.spatial.Clustering(n, busmap, linemap)
else:
Nyears = n.snapshot_weightings.objective.sum() / 8760

View File

@@ -40,9 +40,9 @@ def calculate_costs(n, label, costs):
investments = n.investment_periods
cols = pd.MultiIndex.from_product(
[
costs.columns.levels[0],
costs.columns.levels[1],
costs.columns.levels[2],
costs.columns.unique(0),
costs.columns.unique(1),
costs.columns.unique(2),
investments,
],
names=costs.columns.names[:3] + ["year"],
@@ -339,9 +339,9 @@ def calculate_supply_energy(n, label, supply_energy):
investments = n.investment_periods
cols = pd.MultiIndex.from_product(
[
supply_energy.columns.levels[0],
supply_energy.columns.levels[1],
supply_energy.columns.levels[2],
supply_energy.columns.unique(0),
supply_energy.columns.unique(1),
supply_energy.columns.unique(2),
investments,
],
names=supply_energy.columns.names[:3] + ["year"],

View File

@@ -0,0 +1,146 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
import logging
import os
import pandas as pd
import pypsa
from _helpers import configure_logging, set_scenario_config
logger = logging.getLogger(__name__)
BUSES_COLUMNS = [
"bus_id",
"voltage",
"dc",
"symbol",
"under_construction",
"x",
"y",
"country",
"geometry",
]
LINES_COLUMNS = [
"line_id",
"bus0",
"bus1",
"voltage",
"circuits",
"length",
"underground",
"under_construction",
"geometry",
]
LINKS_COLUMNS = [
"link_id",
"bus0",
"bus1",
"voltage",
"p_nom",
"length",
"underground",
"under_construction",
"geometry",
]
TRANSFORMERS_COLUMNS = [
"transformer_id",
"bus0",
"bus1",
"voltage_bus0",
"voltage_bus1",
"geometry",
]
CONVERTERS_COLUMNS = [
"converter_id",
"bus0",
"bus1",
"voltage",
"geometry",
]
def export_clean_csv(df, columns, output_file):
"""
Export a cleaned DataFrame to a CSV file.
Args:
df (pandas.DataFrame): The DataFrame to be exported.
columns (list): A list of column names to include in the exported CSV file.
output_file (str): The path to the output CSV file.
Returns:
None
"""
rename_dict = {
"Bus": "bus_id",
"Line": "line_id",
"Link": "link_id",
"Transformer": "transformer_id",
"v_nom": "voltage",
"num_parallel": "circuits",
}
if "converter_id" in columns:
rename_dict["Link"] = "converter_id"
df.reset_index().rename(columns=rename_dict).loc[:, columns].replace(
{True: "t", False: "f"}
).to_csv(output_file, index=False, quotechar="'")
return None
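# Illustrative sketch (hypothetical file name and toy data, not part of the
# pipeline): a two-bus frame indexed by PyPSA "Bus" names is written with
# release column names and "t"/"f" booleans.
def _example_export_clean_csv(output_file="example_buses.csv"):
    toy = pd.DataFrame(
        {
            "v_nom": [380.0, 220.0],
            "dc": ["f", "f"],
            "symbol": ["Substation", "Substation"],
            "under_construction": [False, True],
            "x": [10.0, 11.0],
            "y": [50.0, 51.0],
            "country": ["DE", "DE"],
            "geometry": ["POINT (10 50)", "POINT (11 51)"],
        },
        index=pd.Index(["bus0", "bus1"], name="Bus"),
    )
    export_clean_csv(toy, BUSES_COLUMNS, output_file)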
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake("prepare_osm_network_release")
configure_logging(snakemake)
set_scenario_config(snakemake)
network = pypsa.Network(snakemake.input.base_network)
network.buses["dc"] = network.buses.pop("carrier").map({"DC": "t", "AC": "f"})
network.lines.length = network.lines.length * 1e3
network.links.length = network.links.length * 1e3
# Export to clean csv for release
logger.info(f"Exporting {len(network.buses)} buses to %s", snakemake.output.buses)
export_clean_csv(network.buses, BUSES_COLUMNS, snakemake.output.buses)
logger.info(
f"Exporting {len(network.transformers)} transformers to %s",
snakemake.output.transformers,
)
export_clean_csv(
network.transformers, TRANSFORMERS_COLUMNS, snakemake.output.transformers
)
logger.info(f"Exporting {len(network.lines)} lines to %s", snakemake.output.lines)
export_clean_csv(network.lines, LINES_COLUMNS, snakemake.output.lines)
# Boolean that specifies if link element is a converter
is_converter = network.links.index.str.startswith("conv")
logger.info(
f"Exporting {len(network.links[~is_converter])} links to %s",
snakemake.output.links,
)
export_clean_csv(
network.links[~is_converter], LINKS_COLUMNS, snakemake.output.links
)
logger.info(
f"Exporting {len(network.links[is_converter])} converters to %s",
snakemake.output.converters,
)
export_clean_csv(
network.links[is_converter], CONVERTERS_COLUMNS, snakemake.output.converters
)
logger.info("Export of OSM network for release complete.")

View File

@@ -30,6 +30,9 @@ from build_energy_totals import (
build_eurostat_co2,
)
from build_transport_demand import transport_degree_factor
from definitions.heat_sector import HeatSector
from definitions.heat_system import HeatSystem
from definitions.heat_system_type import HeatSystemType
from networkx.algorithms import complement
from networkx.algorithms.connectivity.edge_augmentation import k_edge_augmentation
from prepare_network import maybe_adjust_costs_and_potentials
@@ -37,10 +40,6 @@ from pypsa.geo import haversine_pts
from pypsa.io import import_components_from_dataframe
from scipy.stats import beta
from scripts.definitions.heat_sector import HeatSector
from scripts.definitions.heat_system import HeatSystem
from scripts.definitions.heat_system_type import HeatSystemType
spatial = SimpleNamespace()
logger = logging.getLogger(__name__)
@@ -3196,6 +3195,38 @@ def add_biomass(n, costs):
marginal_cost=costs.at["BioSNG", "VOM"] * costs.at["BioSNG", "efficiency"],
)
if options["bioH2"]:
name = (
pd.Index(spatial.biomass.nodes)
+ " "
+ pd.Index(spatial.h2.nodes.str.replace(" H2", ""))
)
n.madd(
"Link",
name,
suffix=" solid biomass to hydrogen CC",
bus0=spatial.biomass.nodes,
bus1=spatial.h2.nodes,
bus2=spatial.co2.nodes,
bus3="co2 atmosphere",
carrier="solid biomass to hydrogen",
efficiency=costs.at["solid biomass to hydrogen", "efficiency"],
efficiency2=costs.at["solid biomass", "CO2 intensity"]
* options["cc_fraction"],
efficiency3=-costs.at["solid biomass", "CO2 intensity"]
* options["cc_fraction"],
p_nom_extendable=True,
capital_cost=costs.at["solid biomass to hydrogen", "fixed"]
* costs.at["solid biomass to hydrogen", "efficiency"]
+ costs.at["biomass CHP capture", "fixed"]
* costs.at["solid biomass", "CO2 intensity"],
overnight_cost=costs.at["solid biomass to hydrogen", "investment"]
* costs.at["solid biomass to hydrogen", "efficiency"]
+ costs.at["biomass CHP capture", "investment"]
* costs.at["solid biomass", "CO2 intensity"],
marginal_cost=0.0,
)
def add_industry(n, costs):
logger.info("Add industrial demand")

View File

@@ -0,0 +1,152 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Retrieve OSM data for the specified country using the Overpass API and save
it to the specified output files.
Note that Overpass requests are subject to a fair use policy.
`retrieve_osm_data` is meant to be used in a way that respects this policy
by fetching the needed data only once.
"""
import json
import logging
import os
import time
import requests
from _helpers import configure_logging, set_scenario_config
logger = logging.getLogger(__name__)
def retrieve_osm_data(
country,
output,
features=[
"cables_way",
"lines_way",
"links_relation",
"substations_way",
"substations_relation",
],
):
"""
Retrieve OSM data for the specified country and save it to the specified
output files.
Parameters
----------
country : str
The country code for which the OSM data should be retrieved.
output : dict
A dictionary mapping feature names to the corresponding output file
paths. Saving the OSM data to .json files.
features : list, optional
A list of OSM features to retrieve. The default is [
"cables_way",
"lines_way",
"links_relation",
"substations_way",
"substations_relation",
].
"""
# Overpass API endpoint URL
overpass_url = "https://overpass-api.de/api/interpreter"
features_dict = {
"cables_way": 'way["power"="cable"]',
"lines_way": 'way["power"="line"]',
"links_relation": 'relation["route"="power"]["frequency"="0"]',
"substations_way": 'way["power"="substation"]',
"substations_relation": 'relation["power"="substation"]',
}
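# For illustration, with country="BE" and feature "lines_way" the query
# rendered below becomes:
#   [out:json];
#   area["ISO3166-1"="BE"]->.searchArea;
#   (
#     way["power"="line"](area.searchArea);
#   );
#   out body geom;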
wait_time = 5
for f in features:
if f not in features_dict:
logger.info(
f"Invalid feature: {f}. Supported features: {list(features_dict.keys())}"
)
raise ValueError(
f"Invalid feature: {f}. Supported features: {list(features_dict.keys())}"
)
retries = 3
for attempt in range(retries):
logger.info(
f" - Fetching OSM data for feature '{f}' in {country} (Attempt {attempt+1})..."
)
# Build the overpass query
op_area = f'area["ISO3166-1"="{country}"]'
op_query = f"""
[out:json];
{op_area}->.searchArea;
(
{features_dict[f]}(area.searchArea);
);
out body geom;
"""
try:
# Send the request
response = requests.post(overpass_url, data=op_query)
response.raise_for_status() # Raise HTTPError for bad responses
data = response.json()
filepath = output[f]
parentfolder = os.path.dirname(filepath)
if not os.path.exists(parentfolder):
os.makedirs(parentfolder)
# use a separate handle so the loop's feature name `f` is not shadowed
with open(filepath, mode="w") as fp:
    json.dump(data, fp, indent=2)
logger.info(" - Done.")
break # Exit the retry loop on success
except (json.JSONDecodeError, requests.exceptions.RequestException) as e:
logger.error(f"Error for feature '{f}' in country {country}: {e}")
logger.debug(
f"Response text: {response.text if response else 'No response'}"
)
if attempt < retries - 1:
wait_time += 15
logger.info(f"Waiting {wait_time} seconds before retrying...")
time.sleep(wait_time)
else:
logger.error(
f"Failed to retrieve data for feature '{f}' in country {country} after {retries} attempts."
)
except Exception as e:
# For now, catch any other exceptions and log them. Treat this the same
# as a RequestException and retry until the attempts are exhausted.
logger.error(
f"Unexpected error for feature '{f}' in country {country}: {e}"
)
if attempt < retries - 1:
wait_time += 10
logger.info(f"Waiting {wait_time} seconds before retrying...")
time.sleep(wait_time)
else:
logger.error(
f"Failed to retrieve data for feature '{f}' in country {country} after {retries} attempts."
)
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake("retrieve_osm_data", country="BE")
configure_logging(snakemake)
set_scenario_config(snakemake)
# Retrieve the OSM data
country = snakemake.wildcards.country
output = snakemake.output
retrieve_osm_data(country, output)

View File

@@ -108,7 +108,7 @@ from scipy.sparse.csgraph import connected_components, dijkstra
logger = logging.getLogger(__name__)
def simplify_network_to_380(n):
def simplify_network_to_380(n, linetype_380):
"""
Fix all lines to a voltage level of 380 kV and remove all transformers.
@@ -132,8 +132,8 @@ def simplify_network_to_380(n):
trafo_map = pd.Series(n.transformers.bus1.values, n.transformers.bus0.values)
trafo_map = trafo_map[~trafo_map.index.duplicated(keep="first")]
several_trafo_b = trafo_map.isin(trafo_map.index)
trafo_map[several_trafo_b] = trafo_map[several_trafo_b].map(trafo_map)
while (several_trafo_b := trafo_map.isin(trafo_map.index)).any():
trafo_map[several_trafo_b] = trafo_map[several_trafo_b].map(trafo_map)
missing_buses_i = n.buses.index.difference(trafo_map.index)
missing = pd.Series(missing_buses_i, missing_buses_i)
trafo_map = pd.concat([trafo_map, missing])
@@ -301,19 +301,11 @@ def simplify_links(
# Only span graph over the DC link components
G = n.graph(branch_components=["Link"])
def split_links(nodes):
def split_links(nodes, added_supernodes):
nodes = frozenset(nodes)
seen = set()
# Corsica substation
node_corsica = find_closest_bus(
n,
x=9.44802,
y=42.52842,
tol=2000, # Tolerance needed to only return the bus if the region is actually modelled
)
# Supernodes are endpoints of links, identified by having less than two neighbours or being an AC bus
# An example for the latter is if two different links are connected to the same AC bus.
supernodes = {
@@ -322,7 +314,7 @@ def simplify_links(
if (
(len(G.adj[m]) < 2 or (set(G.adj[m]) - nodes))
or (n.buses.loc[m, "carrier"] == "AC")
or (m == node_corsica)
or (m in added_supernodes)
)
}
@@ -359,8 +351,21 @@ def simplify_links(
0.0, index=n.buses.index, columns=list(connection_costs_per_link)
)
node_corsica = find_closest_bus(
n,
x=9.44802,
y=42.52842,
tol=2000, # Tolerance needed to only return the bus if the region is actually modelled
)
added_supernodes = []
if node_corsica is not None:
added_supernodes.append(node_corsica)
for lbl in labels.value_counts().loc[lambda s: s > 2].index:
for b, buses, links in split_links(labels.index[labels == lbl]):
for b, buses, links in split_links(
labels.index[labels == lbl], added_supernodes
):
if len(buses) <= 2:
continue
@@ -421,6 +426,9 @@ def simplify_links(
logger.debug("Collecting all components using the busmap")
# Change carrier type of all added super_nodes to "AC"
n.buses.loc[added_supernodes, "carrier"] = "AC"
_aggregate_and_move_components(
n,
busmap,
@@ -592,7 +600,8 @@ if __name__ == "__main__":
# remove integer outputs for compatibility with PyPSA v0.26.0
n.generators.drop("n_mod", axis=1, inplace=True, errors="ignore")
n, trafo_map = simplify_network_to_380(n)
linetype_380 = snakemake.config["lines"]["types"][380]
n, trafo_map = simplify_network_to_380(n, linetype_380)
technology_costs = load_costs(
snakemake.input.tech_costs,

View File

@@ -43,6 +43,7 @@ from _helpers import (
set_scenario_config,
update_config_from_wildcards,
)
from prepare_sector_network import get
from pypsa.descriptors import get_activity_mask
from pypsa.descriptors import get_switchable_as_dense as get_as_dense
@@ -287,15 +288,22 @@ def add_solar_potential_constraints(n, config):
n.model.add_constraints(lhs <= rhs, name="solar_potential")
def add_co2_sequestration_limit(n, limit=200):
def add_co2_sequestration_limit(n, limit_dict):
"""
Add a global constraint on the amount of Mt CO2 that can be sequestered.
"""
if not n.investment_periods.empty:
periods = n.investment_periods
names = pd.Index([f"co2_sequestration_limit-{period}" for period in periods])
limit = pd.Series(
{
f"co2_sequestration_limit-{period}": limit_dict.get(period, 200)
for period in periods
}
)
names = limit.index
else:
limit = get(limit_dict, int(snakemake.wildcards.planning_horizons))
periods = [np.nan]
names = pd.Index(["co2_sequestration_limit"])
@@ -515,8 +523,8 @@ def prepare_network(
n = add_max_growth(n)
if n.stores.carrier.eq("co2 sequestered").any():
limit = co2_sequestration_potential
add_co2_sequestration_limit(n, limit=limit)
limit_dict = co2_sequestration_potential
add_co2_sequestration_limit(n, limit_dict=limit_dict)
return n
@@ -1117,19 +1125,20 @@ def solve_network(n, config, solving, **kwargs):
return n
# %%
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake(
"solve_sector_network",
"solve_sector_network_perfect",
configfiles="../config/test/config.perfect.yaml",
simpl="",
opts="",
clusters="37",
clusters="5",
ll="v1.0",
sector_opts="CO2L0-1H-T-H-B-I-A-dist1",
planning_horizons="2030",
sector_opts="",
# planning_horizons="2030",
)
configure_logging(snakemake)
set_scenario_config(snakemake)