Clustering: build renewable profiles and add all assets after clustering (#1201)

* Cluster first: build renewable profiles and add all assets after clustering

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* correction: pass landfall_lengths through functions

* assign landfall_lengths correctly

* remove parameter add_land_use_constraint

* fix network_dict

* calculate distance to shoreline, remove underwater_fraction

* adjust simplification parameter to exclude Crete from offshore wind connections

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* remove unused geth2015 hydro capacities

* remove remaining traces of the {simpl} wildcard

* add release notes and update workflow graphics

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: lisazeyen <lisa.zeyen@web.de>
Fabian Neumann 2024-09-13 15:37:01 +02:00 committed by GitHub
parent 40351fbf9b
commit 013b705ee4
87 changed files with 2657 additions and 2794 deletions

.gitignore (vendored): 2 changes
View File

@ -73,3 +73,5 @@ d1gam3xoknrgr2.cloudfront.net/
*.ipynb
merger-todos.md
*.html

View File

@ -39,7 +39,6 @@ localrules:
wildcard_constraints:
simpl="[a-zA-Z0-9]*",
clusters="[0-9]+(m|c)?|all",
ll=r"(v|c)([0-9\.]+|opt)",
opts=r"[-+a-zA-Z0-9\.]*",

View File

@ -37,8 +37,6 @@ foresight: overnight
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#scenario
# Wildcard docs in https://pypsa-eur.readthedocs.io/en/latest/wildcards.html
scenario:
simpl:
- ''
ll:
- vopt
clusters:
@ -188,6 +186,7 @@ renewable:
max_shore_distance: 30000
excluder_resolution: 200
clip_p_max_pu: 1.e-2
landfall_length: 10
offwind-dc:
cutout: europe-2013-sarah3-era5
resource:
@ -205,6 +204,7 @@ renewable:
min_shore_distance: 30000
excluder_resolution: 200
clip_p_max_pu: 1.e-2
landfall_length: 10
offwind-float:
cutout: europe-2013-sarah3-era5
resource:
@ -225,6 +225,7 @@ renewable:
min_depth: 60
max_depth: 1000
clip_p_max_pu: 1.e-2
landfall_length: 10
solar:
cutout: europe-2013-sarah3-era5
resource:
@ -301,6 +302,7 @@ links:
p_max_pu: 1.0
p_nom_max: .inf
max_extension: 30000 #MW
length_factor: 1.25
under_construction: 'keep' # 'zero': set capacity to zero, 'remove': remove, 'keep': with full capacity for lines in grid extract
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#transmission_projects
@ -335,6 +337,9 @@ load:
scaling_factor: 1.0
fixed_year: false # false or year (e.g. 2013)
supplement_synthetic: true
distribution_key:
gdp: 0.6
population: 0.4
# docs
# TODO: PyPSA-Eur merge issue in prepare_sector_network.py
@ -849,16 +854,15 @@ clustering:
focus_weights: false
simplify_network:
to_substations: false
algorithm: kmeans # choose from: [hac, kmeans]
feature: solar+onwind-time
exclude_carriers: []
remove_stubs: true
remove_stubs_across_borders: true
remove_stubs_across_borders: false
cluster_network:
algorithm: kmeans
feature: solar+onwind-time
exclude_carriers: []
consider_efficiency_classes: false
hac_features:
- wnd100m
- influx_direct
exclude_carriers: []
consider_efficiency_classes: false
aggregation_strategies:
generators:
committable: any

View File

@ -10,8 +10,6 @@ run:
shared_cutouts: true
scenario:
simpl:
- ''
ll:
- vopt
clusters:

View File

@ -10,8 +10,6 @@ foresight: perfect
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#scenario
# Wildcard docs in https://pypsa-eur.readthedocs.io/en/latest/wildcards.html
scenario:
simpl:
- ''
ll:
- v1.0
clusters:

View File

@ -2,16 +2,14 @@
focus_weights,,,Optionally specify the focus weights for the clustering of countries. For instance: `DE: 0.8` will distribute 80% of all nodes to Germany and 20% to the rest of the countries.
simplify_network,,,
-- to_substations,bool,"{'true','false'}","Aggregates all nodes without power injection (positive or negative, i.e. demand or generation) to electrically closest ones"
-- algorithm,str,"One of {kmeans, hac, modularity}",
-- feature,str,"Str in the format carrier1+carrier2+...+carrierN-X, where CarrierI can be from {solar, onwind, offwind, ror} and X is one of {cap, time}.",
-- exclude_carriers,list,"List of Str like [ 'solar', 'onwind'] or empty list []","List of carriers which will not be aggregated. If empty, all carriers will be aggregated."
-- remove_stubs,bool,"{'true','false'}",Controls whether radial parts of the network should be recursively aggregated. Defaults to true.
-- remove_stubs_across_borders,bool,"{'true','false'}",Controls whether radial parts of the network should be recursively aggregated across borders. Defaults to true.
cluster_network,,,
-- algorithm,str,"One of {kmeans, hac}",
-- feature,str,"Str in the format carrier1+carrier2+...+carrierN-X, where CarrierI can be from {solar, onwind, offwind, ror} and X is one of {cap, time}.",
-- exclude_carriers,list,"List of Str like [ 'solar', 'onwind'] or empty list []","List of carriers which will not be aggregated. If empty, all carriers will be aggregated."
-- consider_efficiency_classes,bool,"{'true','false'}","Aggregates each carrier into the top 10-quantile (high), the bottom 90-quantile (low), and everything in between (medium)."
-- hac_features,list,"List of meteorological variables contained in the weather data cutout that should be considered for hierarchical clustering.",
exclude_carriers,list,"List of Str like [ 'solar', 'onwind'] or empty list []","List of carriers which will not be aggregated. If empty, all carriers will be aggregated."
consider_efficiency_classes,bool,"{'true','false'}","Aggregates each carrier into the top 10-quantile (high), the bottom 90-quantile (low), and everything in between (medium)."
aggregation_strategies,,,
-- generators,,,
-- -- {key},str,"{key} can be any attribute of the generator component (str). Its value can be any function name that can be applied to a pandas.Series via getattr(), for example one of {min, max, sum}.","Aggregates the component according to the given strategy. For example, if sum, then all values within each cluster are summed to represent the new generator."
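
To illustrate the ``getattr()`` mechanism described in the last row, here is a minimal, self-contained sketch; the generator names, busmap labels, and values are invented for the example.

.. code:: python

    import pandas as pd

    # Hypothetical component attribute (p_nom of three generators) and a busmap
    # assigning each generator's bus to a cluster.
    p_nom = pd.Series(
        [100.0, 50.0, 200.0],
        index=["DE0 0 onwind", "DE0 1 onwind", "FR0 0 onwind"],
    )
    busmap = pd.Series(["DE0", "DE0", "FR0"], index=p_nom.index)

    # The configured strategy string is resolved via getattr() on the groupby
    # object, e.g. "sum", "min" or "max".
    strategy = "sum"
    aggregated = getattr(p_nom.groupby(busmap), strategy)()
    print(aggregated)  # DE0: 150.0, FR0: 200.0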

View File

@ -27,7 +27,7 @@ custom_powerplants,--,"use `pandas.query <https://pandas.pydata.org/pandas-docs/
,,,
everywhere_powerplants,--,"Any subset of {nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass}","List of conventional power plants to add to every node in the model with zero initial capacity. To be used in combination with ``extendable_carriers`` to allow for building conventional powerplants irrespective of existing locations."
,,,
conventional_carriers,--,"Any subset of {nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass}","List of conventional power plants to include in the model from ``resources/powerplants.csv``. If an included carrier is also listed in ``extendable_carriers``, the capacity is taken as a lower bound."
conventional_carriers,--,"Any subset of {nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass}","List of conventional power plants to include in the model from ``resources/powerplants_s_{clusters}.csv``. If an included carrier is also listed in ``extendable_carriers``, the capacity is taken as a lower bound."
,,,
renewable_carriers,--,"Any subset of {solar, onwind, offwind-ac, offwind-dc, offwind-float, hydro}",List of renewable generators to include in the model.
estimate_renewable_capacities,,,

View File

@ -4,5 +4,5 @@ retrieve_databundle,bool,"{true, false}","Switch to retrieve databundle from zen
retrieve_cost_data,bool,"{true, false}","Switch to retrieve technology cost data from `technology-data repository <https://github.com/PyPSA/technology-data>`_."
build_cutout,bool,"{true, false}","Switch to enable the building of cutouts via the rule :mod:`build_cutout`."
retrieve_cutout,bool,"{true, false}","Switch to enable the retrieval of cutouts from zenodo with :mod:`retrieve_cutout`."
custom_busmap,bool,"{true, false}","Switch to enable the use of custom busmaps in rule :mod:`cluster_network`. If activated the rule looks for provided busmaps at ``data/busmaps/elec_s{simpl}_{clusters}_{base_network}.csv`` which should have the same format as ``resources/busmap_elec_s{simpl}_{clusters}.csv``, i.e. the index should contain the buses of ``networks/elec_s{simpl}.nc``. {base_network} is the name of the selected base_network in electricity, e.g. ``gridkit``, ``osm-prebuilt``, or ``osm-raw``."
custom_busmap,bool,"{true, false}","Switch to enable the use of custom busmaps in rule :mod:`cluster_network`. If activated the rule looks for provided busmaps at ``data/busmaps/base_s_{clusters}_{base_network}.csv`` which should have the same format as ``resources/busmap_base_s_{clusters}.csv``, i.e. the index should contain the buses of ``networks/base_s.nc``. {base_network} is the name of the selected base_network in electricity, e.g. ``gridkit``, ``osm-prebuilt``, or ``osm-raw``."
drop_leap_day,bool,"{true, false}","Switch to drop February 29 from all time-dependent data in leap years"
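
As a rough illustration of the expected busmap format described above, a conforming custom busmap can be loaded and sanity-checked as sketched below; the file name and cluster labels are hypothetical.

.. code:: python

    import pandas as pd

    # Hypothetical example: the index holds the buses of networks/base_s.nc,
    # the values hold the target cluster of each bus.
    busmap = pd.read_csv(
        "data/busmaps/base_s_6_osm-prebuilt.csv", index_col=0
    ).squeeze("columns")

    assert busmap.index.is_unique  # exactly one cluster assignment per bus
    print(busmap.value_counts())   # number of buses per cluster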

View File

@ -2,4 +2,5 @@
p_max_pu,--,"Value in [0.,1.]","Correction factor for link capacities ``p_nom``."
p_nom_max,MW,"float","Global upper limit for the maximum capacity of each extendable DC link."
max_extension,MW,"float","Upper limit for the extended capacity of each extendable DC link."
length_factor,--,float,"Correction factor to account for the fact that buses are *not* connected by links through air-line distance."
under_construction,--,"One of {'zero': set capacity to zero, 'remove': remove completely, 'keep': keep with full capacity}","Specifies how to handle lines which are currently under construction."

View File

@ -5,3 +5,6 @@ manual_adjustments,bool,"{true, false}","Whether to adjust the load data manuall
scaling_factor,--,float,"Global correction factor for the load time series."
fixed_year,--,Year or False,"To specify a fixed year for the load time series that deviates from the snapshots' year"
supplement_synthetic,bool,"{true, false}","Whether missing data for the selected time period should be supplemented by synthetic data from https://zenodo.org/records/10820928."
distribution_key,--,--,"Distribution key for spatially disaggregating the per-country electricity demand data."
-- gdp,float,"[0, 1]","Weighting factor for the GDP data in the distribution key."
-- population,float,"[0, 1]","Weighting factor for the population data in the distribution key."
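
A minimal sketch of how such a distribution key can be formed from the two weighting factors; the regional GDP, population, and demand figures below are invented for illustration.

.. code:: python

    import pandas as pd

    # Invented regional GDP and population figures for one country.
    gdp = pd.Series({"region A": 300.0, "region B": 100.0})
    population = pd.Series({"region A": 2.0e6, "region B": 6.0e6})

    # Combine normalised shares with the configured weights (gdp: 0.6,
    # population: 0.4); the key sums to 1 by construction.
    key = 0.6 * gdp / gdp.sum() + 0.4 * population / population.sum()

    national_load = 450.0  # example national demand in MWh
    regional_load = key * national_load
    print(regional_load)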

View File

@ -15,3 +15,4 @@ max_depth,m,float,"Maximum sea water depth at which wind turbines can be build.
min_shore_distance,m,float,"Minimum distance to the shore below which wind turbines cannot be built. Such areas close to the shore are excluded in the process of calculating the AC-connected offshore wind potential."
max_shore_distance,m,float,"Maximum distance to the shore above which wind turbines cannot be built. Such areas close to the shore are excluded in the process of calculating the AC-connected offshore wind potential."
clip_p_max_pu,p.u.,float,"To avoid too-small values in the renewables' per-unit availability time series, values below this threshold are set to zero."
landfall_length,km,float,"Fixed length of the onshore (landfall) cable connection in km. If 'centroid', the length is calculated as the distance to the centroid of the onshore bus."

View File

@ -15,3 +15,4 @@ max_depth,m,float,"Maximum sea water depth at which wind turbines can be build.
min_shore_distance,m,float,"Minimum distance to the shore below which wind turbines cannot be built."
max_shore_distance,m,float,"Maximum distance to the shore above which wind turbines cannot be built."
clip_p_max_pu,p.u.,float,"To avoid too-small values in the renewables' per-unit availability time series, values below this threshold are set to zero."
landfall_length,km,float,"Fixed length of the onshore (landfall) cable connection in km. If 'centroid', the length is calculated as the distance to the centroid of the onshore bus."

View File

@ -1,5 +1,4 @@
,Unit,Values,Description
simpl,--,cf. :ref:`simpl`,"List of ``{simpl}`` wildcards to run."
clusters,--,cf. :ref:`clusters`,"List of ``{clusters}`` wildcards to run."
ll,--,cf. :ref:`ll`,"List of ``{ll}`` wildcards to run."
opts,--,cf. :ref:`opts`,"List of ``{opts}`` wildcards to run."

View File

@ -87,7 +87,7 @@ facilitate running multiple scenarios through a single command
For each wildcard, a **list of values** is provided. The rule
``solve_all_elec_networks`` will trigger the rules for creating
``results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc`` for **all
``results/networks/base_s_{clusters}_elec_l{ll}_{opts}.nc`` for **all
combinations** of the provided wildcard values as defined by Python's
`itertools.product(...)
<https://docs.python.org/2/library/itertools.html#itertools.product>`__ function
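
For example, with illustrative scenario lists, the expansion into target files works as follows (the list values are made up; only the naming scheme follows the text above):

.. code:: python

    from itertools import product

    # Illustrative wildcard lists, mirroring the scenario section of the config.
    clusters = ["6", "128"]
    ll = ["vopt"]
    opts = ["", "Co2L0.1"]

    # One target file per combination, matching the new naming scheme.
    for c, l, o in product(clusters, ll, opts):
        print(f"results/networks/base_s_{c}_elec_l{l}_{o}.nc")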

Binary file not shown (image changed: 122 KiB before, 436 KiB after).

View File

@ -35,7 +35,7 @@ For instance, an invocation to
.. code:: bash
.../pypsa-eur % snakemake -call results/networks/elec_s_128_ec_lvopt_.nc
.../pypsa-eur % snakemake -call results/networks/base_s_128_elec_lvopt_.nc
follows this dependency graph
@ -50,7 +50,7 @@ preceding rules which another rule takes as input data.
.. note::
The dependency graph was generated using
``snakemake --dag results/networks/elec_s_128_ec_lvopt_.nc -F | sed -n "/digraph/,/}/p" | dot -Tpng -o doc/img/intro-workflow.png``
``snakemake --dag results/networks/base_s_128_elec_lvopt_.nc -F | sed -n "/digraph/,/}/p" | dot -Tpng -o doc/img/intro-workflow.png``
For the use of ``snakemake``, it makes sense to familiarize yourself quickly
with the `basic tutorial

View File

@ -27,11 +27,12 @@ Then the process continues by calculating conventional power plant capacities, p
- :mod:`build_powerplants` for today's thermal power plant capacities using `powerplantmatching <https://github.com/PyPSA/powerplantmatching>`__, allocating each power plant to its closest substation,
- :mod:`build_ship_raster` for building shipping traffic density,
- :mod:`determine_availability_matrix` for the land eligibility analysis of each cutout grid cell for PV, onshore and offshore wind,
- :mod:`build_renewable_profiles` for the hourly capacity factors and installation potentials constrained by land-use in each substation's Voronoi cell for PV, onshore and offshore wind, and
- :mod:`build_hydro_profile` for the hourly per-unit hydro power availability time series.
The central rule :mod:`add_electricity` then ties all the different data inputs
together into a detailed PyPSA network stored in ``networks/elec.nc``.
together into a detailed PyPSA network stored in ``networks/base_s_{clusters}_elec.nc``.
.. _cutout:
@ -115,6 +116,15 @@ Rule ``determine_availability_matrix_MD_UA``
.. automodule:: determine_availability_matrix_MD_UA
.. _renewableprofiles:
Rule ``determine_availability_matrix``
======================================
.. automodule:: determine_availability_matrix
.. _renewableprofiles:
Rule ``build_renewable_profiles``
@ -129,10 +139,3 @@ Rule ``build_hydro_profile``
===============================
.. automodule:: build_hydro_profile
.. _electricity:
Rule ``add_electricity``
=============================
.. automodule:: add_electricity

View File

@ -8,7 +8,8 @@
Release Notes
##########################################
.. Upcoming Release
Upcoming Release
================
PyPSA-Eur 0.13.0 (13th September 2024)
======================================
@ -115,6 +116,62 @@ PyPSA-Eur 0.13.0 (13th September 2024)
* The sources of nearly all data files are now listed in the documentation.
(https://github.com/PyPSA/pypsa-eur/pull/1284)
* Rearranged workflow to cluster the electricity network before calculating
renewable profiles and adding further electricity system components.
- Moved rules ``simplify_network`` and ``cluster_network`` before
``add_electricity`` and ``build_renewable_profiles``.
- Split rule ``build_renewable_profiles`` into two separate rules,
``determine_availability_matrix`` for land eligibility analysis and
``build_renewable_profiles``, which now only computes the profiles and total
potentials from the pre-computed availability matrix.
- Removed variables ``weight``, ``underwater_fraction``, and ``potential`` from the
output of ``build_renewable_profiles`` as they are no longer needed.
- HAC-clustering is now based on wind speeds and irradiation time series
rather than capacity factors of wind and solar power plants.
- Added new rule ``build_hac_features`` that aggregates cutout weather data to
base regions in preparation for ``cluster_network``.
- Removed the ``{simpl}`` wildcard, all associated code, and the ``m`` suffix of
the ``{clusters}`` wildcard. This means that the option to pre-cluster the
network in ``simplify_network`` was removed. It will be superseded by
clustering renewable profiles and potentials within clustered regions by
resource classes soon.
- Added new rule ``add_transmission_projects_and_dlr`` which adds the outputs
from ``build_line_rating`` and ``build_transmission_projects`` to the output
of ``base_network``.
- The rule ``add_extra_components`` was integrated into ``add_electricity``.
- Added new rule ``build_electricity_demand_base`` to determine the load
distribution of the substations in the base network (which was previously
done in ``add_electricity``). This time series is used as weights for
kmeans-clustering in ``cluster_network`` and is later added to the network in
``add_electricity`` in aggregated form.
- The weights of the kmeans clustering algorithm are now exclusively based on
the load distribution. Previously, they also included the distribution of
thermal capacity.
- Since the networks no longer include the whole electricity system before
clustering, the files have been renamed from ``elec...nc`` to ``base...nc`` to
identify them as derivatives of ``base.nc``.
- The scripts ``simplify_network.py`` and ``cluster_network.py`` were
simplified to be less nested and benefit from no longer needing to handle
cost data.
- New configuration options to calculate connection costs of offshore wind
plants. Offshore connection costs are now calculated based on the underwater
distance to the shoreline plus a configurable ``landfall_length``, which
defaults to 10 km. Previously, the distance to the region's centroid was
used, which is not practical when the regions are already aggregated.
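
A hedged sketch of the cost logic described in this item; the distances and per-km cost figures below are placeholders for illustration, not the project's actual cost data.

.. code:: python

    # Placeholder inputs: underwater distance from the plant to the shoreline
    # and the configurable landfall length (defaults to 10 km in the new config).
    underwater_distance_km = 42.0
    landfall_length_km = 10.0

    # Hypothetical per-km, per-MW cable costs.
    submarine_cost = 2685.0    # EUR/MW/km
    underground_cost = 1342.0  # EUR/MW/km

    connection_cost = (
        underwater_distance_km * submarine_cost
        + landfall_length_km * underground_cost
    )
    print(f"connection cost: {connection_cost:.0f} EUR/MW")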
PyPSA-Eur 0.12.0 (30th August 2024)
===================================

View File

@ -9,7 +9,7 @@
Simplifying Electricity Networks
##########################################
The simplification ``snakemake`` rules prepare **approximations** of the full model, for which it is computationally viable to co-optimize generation, storage and transmission capacities.
The simplification ``snakemake`` rules prepare **approximations** of the network model, for which it is computationally viable to co-optimize generation, storage and transmission capacities.
- :mod:`simplify_network` transforms the transmission grid to a 380 kV only equivalent network, while
- :mod:`cluster_network` uses a `k-means <https://en.wikipedia.org/wiki/K-means_clustering>`__ based clustering technique to partition the network into a given number of zones and then reduce the network to a representation with one bus per zone.
@ -18,7 +18,7 @@ The simplification and clustering steps are described in detail in the paper
- Jonas Hörsch and Tom Brown. `The role of spatial scale in joint optimisations of generation and transmission for European highly renewable scenarios <https://arxiv.org/abs/1705.07617>`__), *14th International Conference on the European Energy Market*, 2017. `arXiv:1705.07617 <https://arxiv.org/abs/1705.07617>`__, `doi:10.1109/EEM.2017.7982024 <https://doi.org/10.1109/EEM.2017.7982024>`__.
After simplification and clustering of the network, additional components may be appended in the rule :mod:`add_extra_components` and the network is prepared for solving in :mod:`prepare_network`.
After simplification and clustering of the network, further electricity network components may be appended in the rule :mod:`add_electricity` and the network is prepared for solving in :mod:`prepare_network`.
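
To make the clustering idea concrete, here is an illustrative load-weighted k-means sketch; the bus coordinates and load shares are invented, and the actual implementation lives in ``cluster_network.py`` (cf. the release notes: clustering weights are now based exclusively on the load distribution).

.. code:: python

    import numpy as np
    from sklearn.cluster import KMeans

    # Invented bus coordinates (lon, lat) and load shares used as weights.
    coords = np.array([[8.5, 52.0], [9.1, 52.3], [13.4, 52.5], [11.6, 48.1]])
    load_share = np.array([0.2, 0.1, 0.4, 0.3])

    kmeans = KMeans(n_clusters=2, n_init=10, random_state=0)
    labels = kmeans.fit_predict(coords, sample_weight=load_share)
    print(labels)  # cluster index for each bus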
.. _simplify:
@ -34,13 +34,12 @@ Rule ``cluster_network``
.. automodule:: cluster_network
.. _extra_components:
.. _electricity:
Rule ``add_extra_components``
Rule ``add_electricity``
=============================
.. automodule:: add_extra_components
.. automodule:: add_electricity
.. _prepare:

View File

@ -32,7 +32,7 @@ configuration, execute
.. code:: bash
:class: full-width
snakemake -call results/test-elec/networks/elec_s_6_ec_lcopt_.nc --configfile config/test/config.electricity.yaml
snakemake -call results/test-elec/networks/base_s_6_elec_lcopt_.nc --configfile config/test/config.electricity.yaml
This configuration is set to download a reduced cutout via the rule :mod:`retrieve_cutout`.
For more information on the data dependencies of PyPSA-Eur, continue reading :ref:`data`.
@ -114,9 +114,9 @@ clustered down to 6 buses and every 24 hours aggregated to one snapshot. The com
.. code:: bash
snakemake -call results/test-elec/networks/elec_s_6_ec_lcopt_.nc --configfile config/test/config.electricity.yaml
snakemake -call results/test-elec/networks/base_s_6_elec_lcopt_.nc --configfile config/test/config.electricity.yaml
orders ``snakemake`` to run the rule :mod:`solve_network` that produces the solved network and stores it in ``results/networks`` with the name ``elec_s_6_ec_lcopt_.nc``:
orders ``snakemake`` to run the rule :mod:`solve_network` that produces the solved network and stores it in ``results/networks`` with the name ``base_s_6_elec_lcopt_.nc``:
.. literalinclude:: ../rules/solve_electricity.smk
:start-at: rule solve_network:
@ -132,98 +132,129 @@ This triggers a workflow of multiple preceding jobs that depend on each rule's i
graph[bgcolor=white, margin=0];
node[shape=box, style=rounded, fontname=sans, fontsize=10, penwidth=2];
edge[penwidth=2, color=grey];
0[label = "solve_network", color = "0.16 0.6 0.85", style="rounded"];
1[label = "prepare_network\nll: copt\nopts: ", color = "0.40 0.6 0.85", style="rounded"];
2[label = "add_extra_components", color = "0.03 0.6 0.85", style="rounded"];
3[label = "cluster_network\nclusters: 6", color = "0.26 0.6 0.85", style="rounded"];
4[label = "simplify_network\nsimpl: ", color = "0.17 0.6 0.85", style="rounded"];
5[label = "add_electricity", color = "0.39 0.6 0.85", style="rounded"];
6[label = "build_renewable_profiles\ntechnology: solar", color = "0.13 0.6 0.85", style="rounded"];
7[label = "base_network", color = "0.01 0.6 0.85", style="rounded"];
8[label = "retrieve_osm_prebuilt", color = "0.27 0.6 0.85", style="rounded"];
9[label = "build_shapes", color = "0.18 0.6 0.85", style="rounded"];
10[label = "retrieve_naturalearth_countries", color = "0.41 0.6 0.85", style="rounded"];
11[label = "retrieve_eez", color = "0.14 0.6 0.85", style="rounded"];
12[label = "retrieve_databundle", color = "0.38 0.6 0.85", style="rounded"];
13[label = "retrieve_cutout\ncutout: be-03-2013-era5", color = "0.51 0.6 0.85", style="rounded"];
14[label = "build_renewable_profiles\ntechnology: solar-hsat", color = "0.13 0.6 0.85", style="rounded"];
15[label = "build_renewable_profiles\ntechnology: onwind", color = "0.13 0.6 0.85", style="rounded"];
16[label = "build_renewable_profiles\ntechnology: offwind-ac", color = "0.13 0.6 0.85", style="rounded"];
17[label = "build_ship_raster", color = "0.16 0.6 0.85", style="rounded"];
18[label = "retrieve_ship_raster", color = "0.53 0.6 0.85", style="rounded"];
19[label = "build_renewable_profiles\ntechnology: offwind-dc", color = "0.13 0.6 0.85", style="rounded"];
20[label = "build_renewable_profiles\ntechnology: offwind-float", color = "0.13 0.6 0.85", style="rounded"];
21[label = "build_line_rating", color = "0.46 0.6 0.85", style="rounded"];
22[label = "build_transmission_projects", color = "0.29 0.6 0.85", style="rounded"];
23[label = "retrieve_cost_data\nyear: 2030", color = "0.11 0.6 0.85", style="rounded"];
24[label = "build_powerplants", color = "0.18 0.6 0.85", style="rounded"];
25[label = "build_electricity_demand", color = "0.30 0.6 0.85", style="rounded"];
26[label = "retrieve_electricity_demand", color = "0.13 0.6 0.85", style="rounded"];
27[label = "retrieve_synthetic_electricity_demand", color = "0.43 0.6 0.85", style="rounded"];
0[label = "solve_network", color = "0.19 0.6 0.85", style="rounded"];
1[label = "prepare_network\nll: copt\nopts: ", color = "0.24 0.6 0.85", style="rounded"];
2[label = "add_electricity", color = "0.35 0.6 0.85", style="rounded"];
3[label = "build_renewable_profiles", color = "0.15 0.6 0.85", style="rounded"];
4[label = "determine_availability_matrix\ntechnology: solar", color = "0.39 0.6 0.85", style="rounded"];
5[label = "retrieve_databundle", color = "0.65 0.6 0.85", style="rounded"];
6[label = "build_shapes", color = "0.45 0.6 0.85", style="rounded"];
7[label = "retrieve_naturalearth_countries", color = "0.03 0.6 0.85", style="rounded"];
8[label = "retrieve_eez", color = "0.17 0.6 0.85", style="rounded"];
9[label = "cluster_network\nclusters: 6", color = "0.38 0.6 0.85", style="rounded"];
10[label = "simplify_network", color = "0.14 0.6 0.85", style="rounded"];
11[label = "add_transmission_projects_and_dlr", color = "0.61 0.6 0.85", style="rounded"];
12[label = "base_network", color = "0.36 0.6 0.85", style="rounded"];
13[label = "retrieve_osm_prebuilt", color = "0.22 0.6 0.85", style="rounded"];
14[label = "build_line_rating", color = "0.50 0.6 0.85", style="rounded"];
15[label = "retrieve_cutout\ncutout: be-03-2013-era5", color = "0.02 0.6 0.85", style="rounded"];
16[label = "build_transmission_projects", color = "0.08 0.6 0.85", style="rounded"];
17[label = "build_electricity_demand_base", color = "0.11 0.6 0.85", style="rounded"];
18[label = "build_electricity_demand", color = "0.60 0.6 0.85", style="rounded"];
19[label = "retrieve_electricity_demand", color = "0.60 0.6 0.85", style="rounded"];
20[label = "retrieve_synthetic_electricity_demand", color = "0.32 0.6 0.85", style="rounded"];
21[label = "build_renewable_profiles", color = "0.15 0.6 0.85", style="rounded"];
22[label = "determine_availability_matrix\ntechnology: solar-hsat", color = "0.39 0.6 0.85", style="rounded"];
23[label = "build_renewable_profiles", color = "0.15 0.6 0.85", style="rounded"];
24[label = "determine_availability_matrix\ntechnology: onwind", color = "0.39 0.6 0.85", style="rounded"];
25[label = "build_renewable_profiles", color = "0.15 0.6 0.85", style="rounded"];
26[label = "determine_availability_matrix\ntechnology: offwind-ac", color = "0.39 0.6 0.85", style="rounded"];
27[label = "build_ship_raster", color = "0.12 0.6 0.85", style="rounded"];
28[label = "retrieve_ship_raster", color = "0.44 0.6 0.85", style="rounded"];
29[label = "build_renewable_profiles", color = "0.15 0.6 0.85", style="rounded"];
30[label = "determine_availability_matrix\ntechnology: offwind-dc", color = "0.39 0.6 0.85", style="rounded"];
31[label = "build_renewable_profiles", color = "0.15 0.6 0.85", style="rounded"];
32[label = "determine_availability_matrix\ntechnology: offwind-float", color = "0.39 0.6 0.85", style="rounded"];
33[label = "retrieve_cost_data\nyear: 2030", color = "0.01 0.6 0.85", style="rounded"];
34[label = "build_powerplants", color = "0.52 0.6 0.85", style="rounded"];
1 -> 0
2 -> 1
23 -> 1
33 -> 1
3 -> 2
21 -> 2
23 -> 2
25 -> 2
29 -> 2
31 -> 2
9 -> 2
33 -> 2
34 -> 2
17 -> 2
4 -> 3
23 -> 3
6 -> 3
9 -> 3
15 -> 3
5 -> 4
23 -> 4
7 -> 4
6 -> 5
14 -> 5
15 -> 5
16 -> 5
19 -> 5
20 -> 5
7 -> 5
21 -> 5
22 -> 5
23 -> 5
24 -> 5
25 -> 5
9 -> 5
6 -> 4
9 -> 4
15 -> 4
7 -> 6
12 -> 6
9 -> 6
13 -> 6
8 -> 7
9 -> 7
8 -> 6
5 -> 6
10 -> 9
11 -> 9
12 -> 9
7 -> 14
17 -> 9
11 -> 10
12 -> 10
12 -> 11
14 -> 11
16 -> 11
13 -> 12
6 -> 12
12 -> 14
9 -> 14
13 -> 14
7 -> 15
12 -> 15
9 -> 15
13 -> 15
7 -> 16
15 -> 14
12 -> 16
17 -> 16
9 -> 16
13 -> 16
6 -> 16
10 -> 17
6 -> 17
18 -> 17
13 -> 17
7 -> 19
12 -> 19
17 -> 19
9 -> 19
13 -> 19
7 -> 20
12 -> 20
17 -> 20
9 -> 20
13 -> 20
7 -> 21
13 -> 21
7 -> 22
19 -> 18
20 -> 18
22 -> 21
6 -> 21
9 -> 21
15 -> 21
5 -> 22
6 -> 22
9 -> 22
7 -> 24
15 -> 22
24 -> 23
6 -> 23
9 -> 23
15 -> 23
5 -> 24
6 -> 24
9 -> 24
15 -> 24
26 -> 25
27 -> 25
6 -> 25
9 -> 25
15 -> 25
5 -> 26
27 -> 26
6 -> 26
9 -> 26
15 -> 26
28 -> 27
15 -> 27
30 -> 29
6 -> 29
9 -> 29
15 -> 29
5 -> 30
27 -> 30
6 -> 30
9 -> 30
15 -> 30
32 -> 31
6 -> 31
9 -> 31
15 -> 31
5 -> 32
27 -> 32
6 -> 32
9 -> 32
15 -> 32
9 -> 34
}
|
@ -237,9 +268,10 @@ In the terminal, this will show up as a list of jobs to be run:
job count
------------------------------------- -------
add_electricity 1
add_extra_components 1
add_transmission_projects_and_dlr 1
base_network 1
build_electricity_demand 1
build_electricity_demand_base 1
build_line_rating 1
build_powerplants 1
build_renewable_profiles 6
@ -247,6 +279,7 @@ In the terminal, this will show up as a list of jobs to be run:
build_ship_raster 1
build_transmission_projects 1
cluster_network 1
determine_availability_matrix 6
prepare_network 1
retrieve_cost_data 1
retrieve_cutout 1
@ -259,7 +292,7 @@ In the terminal, this will show up as a list of jobs to be run:
retrieve_synthetic_electricity_demand 1
simplify_network 1
solve_network 1
total 28
total 35
``snakemake`` then runs these jobs in the correct order.
@ -269,13 +302,12 @@ A job (here ``simplify_network``) will display its attributes and normally some
.. code:: bash
rule simplify_network:
input: resources/test/networks/elec.nc, resources/test/costs_2030.csv, resources/test/regions_onshore.geojson, resources/test/regions_offshore.geojson
output: resources/test/networks/elec_s.nc, resources/test/regions_onshore_elec_s.geojson, resources/test/regions_offshore_elec_s.geojson, resources/test/busmap_elec_s.csv
log: logs/test/simplify_network/elec_s.log
jobid: 4
benchmark: benchmarks/test/simplify_network/elec_s
input: resources/test/networks/base_extended.nc, resources/test/regions_onshore.geojson, resources/test/regions_offshore.geojson
output: resources/test/networks/base_s.nc, resources/test/regions_onshore_base_s.geojson, resources/test/regions_offshore_base_s.geojson, resources/test/busmap_base_s.csv
log: logs/test/simplify_network.log
jobid: 10
benchmark: benchmarks/test/simplify_network_b
reason: Forced execution
wildcards: simpl=
resources: tmpdir=<TBD>, mem_mb=12000, mem_mib=11445
Once the whole workflow is finished, it should state so in the terminal.
@ -291,10 +323,9 @@ You can produce any output file occurring in the ``Snakefile`` by running
For example, you can explore the evolution of the PyPSA networks by running
#. ``snakemake resources/networks/base.nc -call --configfile config/test/config.electricity.yaml``
#. ``snakemake resources/networks/elec.nc -call --configfile config/test/config.electricity.yaml``
#. ``snakemake resources/networks/elec_s.nc -call --configfile config/test/config.electricity.yaml``
#. ``snakemake resources/networks/elec_s_6.nc -call --configfile config/test/config.electricity.yaml``
#. ``snakemake resources/networks/elec_s_6_ec_lcopt_.nc -call --configfile config/test/config.electricity.yaml``
#. ``snakemake resources/networks/base_s.nc -call --configfile config/test/config.electricity.yaml``
#. ``snakemake resources/networks/base_s_6.nc -call --configfile config/test/config.electricity.yaml``
#. ``snakemake resources/networks/base_s_6_elec_lcopt_.nc -call --configfile config/test/config.electricity.yaml``
To run all combinations of wildcard values provided in the ``config/config.yaml`` under ``scenario:``,
you can use the collection rule ``solve_elec_networks``.
@ -332,6 +363,6 @@ Jupyter Notebooks).
import pypsa
n = pypsa.Network("results/networks/elec_s_6_ec_lcopt_.nc")
n = pypsa.Network("results/networks/base_s_6_elec_lcopt_.nc")
For inspiration, read the `examples section in the PyPSA documentation <https://pypsa.readthedocs.io/en/latest/examples-basic.html>`__.

File diff suppressed because it is too large.

View File

@ -38,15 +38,6 @@ series and potentials using the rule :mod:`build_renewable_profiles`.
It can take the values ``onwind``, ``offwind-ac``, ``offwind-dc``, ``offwind-float``, and ``solar`` but **not** ``hydro``
(since hydroelectric plant profiles are created by a different rule).
.. _simpl:
The ``{simpl}`` wildcard
========================
The ``{simpl}`` wildcard specifies the number of buses a detailed
network model should be pre-clustered to in the rule
:mod:`simplify_network` (before :mod:`cluster_network`).
.. _clusters:
The ``{clusters}`` wildcard
@ -57,11 +48,6 @@ network model should be reduced to in the rule :mod:`cluster_network`.
The number of clusters must be lower than the total number of nodes
and higher than the number of countries. However, a country counts twice if
it has two asynchronous subnetworks (e.g. Denmark or Italy).
If an ``m`` is placed behind the number of clusters (e.g. ``100m``),
generators are only moved to the clustered buses but not aggregated
by carrier; i.e. the clustered bus may have, for example, more than one wind generator.
.. _ll:
The ``{ll}`` wildcard

View File

@ -35,12 +35,14 @@ rule build_powerplants:
everywhere_powerplants=config_provider("electricity", "everywhere_powerplants"),
countries=config_provider("countries"),
input:
base_network=resources("networks/base.nc"),
network=resources("networks/base_s_{clusters}.nc"),
custom_powerplants="data/custom_powerplants.csv",
output:
resources("powerplants.csv"),
resources("powerplants_s_{clusters}.csv"),
log:
logs("build_powerplants.log"),
logs("build_powerplants_s_{clusters}.log"),
benchmark:
benchmarks("build_powerplants_s_{clusters}")
threads: 1
resources:
mem_mb=7000,
@ -169,6 +171,8 @@ rule build_ship_raster:
rule determine_availability_matrix_MD_UA:
params:
renewable=config_provider("renewable"),
input:
copernicus="data/Copernicus_LC100_global_v3.0.1_2019-nrt_Discrete-Classification-map_EPSG-4326.tif",
wdpa="data/WDPA.gpkg",
@ -186,18 +190,20 @@ rule determine_availability_matrix_MD_UA:
country_shapes=resources("country_shapes.geojson"),
offshore_shapes=resources("offshore_shapes.geojson"),
regions=lambda w: (
resources("regions_onshore.geojson")
resources("regions_onshore_base_s_{clusters}.geojson")
if w.technology in ("onwind", "solar", "solar-hsat")
else resources("regions_offshore.geojson")
else resources("regions_offshore_base_s_{clusters}.geojson")
),
cutout=lambda w: "cutouts/"
+ CDIR
+ config_provider("renewable", w.technology, "cutout")(w)
+ ".nc",
output:
availability_matrix=resources("availability_matrix_MD-UA_{technology}.nc"),
availability_matrix=resources(
"availability_matrix_MD-UA_{clusters}_{technology}.nc"
),
log:
logs("determine_availability_matrix_MD_UA_{technology}.log"),
logs("determine_availability_matrix_MD_UA_{clusters}_{technology}.log"),
threads: config["atlite"].get("nprocesses", 4)
resources:
mem_mb=config["atlite"].get("nprocesses", 4) * 5000,
@ -213,20 +219,17 @@ def input_ua_md_availability_matrix(w):
if {"UA", "MD"}.intersection(countries):
return {
"availability_matrix_MD_UA": resources(
"availability_matrix_MD-UA_{technology}.nc"
"availability_matrix_MD-UA_{clusters}_{technology}.nc"
)
}
return {}
rule build_renewable_profiles:
rule determine_availability_matrix:
params:
snapshots=config_provider("snapshots"),
drop_leap_day=config_provider("enable", "drop_leap_day"),
renewable=config_provider("renewable"),
input:
unpack(input_ua_md_availability_matrix),
base_network=resources("networks/base.nc"),
corine=ancient("data/bundle/corine/g250_clc06_V18_5.tif"),
natura=lambda w: (
"data/bundle/natura/natura.tiff"
@ -256,20 +259,48 @@ rule build_renewable_profiles:
country_shapes=resources("country_shapes.geojson"),
offshore_shapes=resources("offshore_shapes.geojson"),
regions=lambda w: (
resources("regions_onshore.geojson")
resources("regions_onshore_base_s_{clusters}.geojson")
if w.technology in ("onwind", "solar", "solar-hsat")
else resources("regions_offshore.geojson")
else resources("regions_offshore_base_s_{clusters}.geojson")
),
cutout=lambda w: "cutouts/"
+ CDIR
+ config_provider("renewable", w.technology, "cutout")(w)
+ ".nc",
output:
profile=resources("profile_{technology}.nc"),
resources("availability_matrix_{clusters}_{technology}.nc"),
log:
logs("build_renewable_profile_{technology}.log"),
logs("determine_availability_matrix_{clusters}_{technology}.log"),
benchmark:
benchmarks("build_renewable_profiles_{technology}")
benchmarks("determine_availability_matrix_{clusters}_{technology}")
threads: config["atlite"].get("nprocesses", 4)
resources:
mem_mb=config["atlite"].get("nprocesses", 4) * 5000,
conda:
"../envs/environment.yaml"
script:
"../scripts/determine_availability_matrix.py"
rule build_renewable_profiles:
params:
snapshots=config_provider("snapshots"),
drop_leap_day=config_provider("enable", "drop_leap_day"),
renewable=config_provider("renewable"),
input:
availability_matrix=resources("availability_matrix_{clusters}_{technology}.nc"),
offshore_shapes=resources("offshore_shapes.geojson"),
regions=resources("regions_onshore_base_s_{clusters}.geojson"),
cutout=lambda w: "cutouts/"
+ CDIR
+ config_provider("renewable", w.technology, "cutout")(w)
+ ".nc",
output:
profile=resources("profile_{clusters}_{technology}.nc"),
log:
logs("build_renewable_profile_{clusters}_{technology}.log"),
benchmark:
benchmarks("build_renewable_profiles_{clusters}_{technology}")
threads: config["atlite"].get("nprocesses", 4)
resources:
mem_mb=config["atlite"].get("nprocesses", 4) * 5000,
@ -337,7 +368,7 @@ rule build_line_rating:
+ config_provider("lines", "dynamic_line_rating", "cutout")(w)
+ ".nc",
output:
output=resources("networks/line_rating.nc"),
output=resources("dlr.nc"),
log:
logs("build_line_rating.log"),
benchmark:
@ -385,6 +416,44 @@ rule build_transmission_projects:
"../scripts/build_transmission_projects.py"
rule add_transmission_projects_and_dlr:
params:
transmission_projects=config_provider("transmission_projects"),
dlr=config_provider("lines", "dynamic_line_rating"),
s_max_pu=config_provider("lines", "s_max_pu"),
input:
network=resources("networks/base.nc"),
dlr=lambda w: (
resources("dlr.nc")
if config_provider("lines", "dynamic_line_rating", "activate")(w)
else []
),
transmission_projects=lambda w: (
[
resources("transmission_projects/new_buses.csv"),
resources("transmission_projects/new_lines.csv"),
resources("transmission_projects/new_links.csv"),
resources("transmission_projects/adjust_lines.csv"),
resources("transmission_projects/adjust_links.csv"),
]
if config_provider("transmission_projects", "enable")(w)
else []
),
output:
network=resources("networks/base_extended.nc"),
log:
logs("add_transmission_projects_and_dlr.log"),
benchmark:
benchmarks("add_transmission_projects_and_dlr")
threads: 1
resources:
mem_mb=4000,
conda:
"../envs/environment.yaml"
script:
"../scripts/add_transmission_projects_and_dlr.py"
def input_profile_tech(w):
return {
f"profile_{tech}": resources(f"profile_{tech}.nc")
@ -414,8 +483,8 @@ rule build_gdp_pop_non_nuts3:
params:
countries=config_provider("countries"),
input:
base_network=resources("networks/base.nc"),
regions=resources("regions_onshore.geojson"),
base_network=resources("networks/base_s.nc"),
regions=resources("regions_onshore_base_s.geojson"),
gdp_non_nuts3="data/bundle/GDP_per_capita_PPP_1990_2015_v2.nc",
pop_non_nuts3="data/bundle/ppp_2013_1km_Aggregated.tif",
output:
@ -433,97 +502,76 @@ rule build_gdp_pop_non_nuts3:
"../scripts/build_gdp_pop_non_nuts3.py"
rule add_electricity:
rule build_electricity_demand_base:
params:
length_factor=config_provider("lines", "length_factor"),
scaling_factor=config_provider("load", "scaling_factor"),
countries=config_provider("countries"),
snapshots=config_provider("snapshots"),
renewable=config_provider("renewable"),
electricity=config_provider("electricity"),
conventional=config_provider("conventional"),
costs=config_provider("costs"),
foresight=config_provider("foresight"),
drop_leap_day=config_provider("enable", "drop_leap_day"),
transmission_projects=config_provider("transmission_projects"),
distribution_key=config_provider("load", "distribution_key"),
input:
unpack(input_profile_tech),
unpack(input_conventional),
unpack(input_gdp_pop_non_nuts3),
base_network=resources("networks/base.nc"),
line_rating=lambda w: (
resources("networks/line_rating.nc")
if config_provider("lines", "dynamic_line_rating", "activate")(w)
else resources("networks/base.nc")
),
transmission_projects=lambda w: (
[
resources("transmission_projects/new_buses.csv"),
resources("transmission_projects/new_lines.csv"),
resources("transmission_projects/new_links.csv"),
resources("transmission_projects/adjust_lines.csv"),
resources("transmission_projects/adjust_links.csv"),
]
if config_provider("transmission_projects", "enable")(w)
else []
),
tech_costs=lambda w: resources(
f"costs_{config_provider('costs', 'year')(w)}.csv"
),
regions=resources("regions_onshore.geojson"),
powerplants=resources("powerplants.csv"),
hydro_capacities=ancient("data/hydro_capacities.csv"),
unit_commitment="data/unit_commitment.csv",
fuel_price=lambda w: (
resources("monthly_fuel_price.csv")
if config_provider("conventional", "dynamic_fuel_price")(w)
else []
),
base_network=resources("networks/base_s.nc"),
regions=resources("regions_onshore_base_s.geojson"),
nuts3=resources("nuts3_shapes.geojson"),
load=resources("electricity_demand.csv"),
nuts3_shapes=resources("nuts3_shapes.geojson"),
output:
resources("networks/elec.nc"),
resources("electricity_demand_base_s.nc"),
log:
logs("add_electricity.log"),
logs("build_electricity_demand_base_s.log"),
benchmark:
benchmarks("add_electricity")
threads: 1
benchmarks("build_electricity_demand_base_s")
resources:
mem_mb=5000,
conda:
"../envs/environment.yaml"
script:
"../scripts/build_electricity_demand_base.py"
rule build_hac_features:
params:
snapshots=config_provider("snapshots"),
drop_leap_day=config_provider("enable", "drop_leap_day"),
features=config_provider("clustering", "cluster_network", "hac_features"),
input:
cutout=lambda w: "cutouts/"
+ CDIR
+ config_provider("atlite", "default_cutout")(w)
+ ".nc",
regions=resources("regions_onshore_base_s.geojson"),
output:
resources("hac_features.nc"),
log:
logs("build_hac_features.log"),
benchmark:
benchmarks("build_hac_features")
threads: config["atlite"].get("nprocesses", 4)
resources:
mem_mb=10000,
conda:
"../envs/environment.yaml"
script:
"../scripts/add_electricity.py"
"../scripts/build_hac_features.py"
rule simplify_network:
params:
simplify_network=config_provider("clustering", "simplify_network"),
cluster_network=config_provider("clustering", "cluster_network"),
aggregation_strategies=config_provider(
"clustering", "aggregation_strategies", default={}
),
focus_weights=config_provider("clustering", "focus_weights", default=None),
renewable_carriers=config_provider("electricity", "renewable_carriers"),
max_hours=config_provider("electricity", "max_hours"),
length_factor=config_provider("lines", "length_factor"),
p_max_pu=config_provider("links", "p_max_pu", default=1.0),
costs=config_provider("costs"),
input:
network=resources("networks/elec.nc"),
tech_costs=lambda w: resources(
f"costs_{config_provider('costs', 'year')(w)}.csv"
),
network=resources("networks/base_extended.nc"),
regions_onshore=resources("regions_onshore.geojson"),
regions_offshore=resources("regions_offshore.geojson"),
output:
network=resources("networks/elec_s{simpl}.nc"),
regions_onshore=resources("regions_onshore_elec_s{simpl}.geojson"),
regions_offshore=resources("regions_offshore_elec_s{simpl}.geojson"),
busmap=resources("busmap_elec_s{simpl}.csv"),
network=resources("networks/base_s.nc"),
regions_onshore=resources("regions_onshore_base_s.geojson"),
regions_offshore=resources("regions_offshore_base_s.geojson"),
busmap=resources("busmap_base_s.csv"),
log:
logs("simplify_network/elec_s{simpl}.log"),
logs("simplify_network.log"),
benchmark:
benchmarks("simplify_network/elec_s{simpl}")
benchmarks("simplify_network_b")
threads: 1
resources:
mem_mb=12000,
@ -537,7 +585,7 @@ rule simplify_network:
def input_cluster_network(w):
if config_provider("enable", "custom_busmap", default=False)(w):
base_network = config_provider("electricity", "base_network")(w)
custom_busmap = f"data/busmaps/elec_s{w.simpl}_{w.clusters}_{base_network}.csv"
custom_busmap = f"data/busmaps/base_s_{w.clusters}_{base_network}.csv"
return {"custom_busmap": custom_busmap}
return {"custom_busmap": []}
@ -556,26 +604,29 @@ rule cluster_network:
),
max_hours=config_provider("electricity", "max_hours"),
length_factor=config_provider("lines", "length_factor"),
costs=config_provider("costs"),
input:
unpack(input_cluster_network),
network=resources("networks/elec_s{simpl}.nc"),
regions_onshore=resources("regions_onshore_elec_s{simpl}.geojson"),
regions_offshore=resources("regions_offshore_elec_s{simpl}.geojson"),
busmap=ancient(resources("busmap_elec_s{simpl}.csv")),
tech_costs=lambda w: resources(
f"costs_{config_provider('costs', 'year')(w)}.csv"
network=resources("networks/base_s.nc"),
regions_onshore=resources("regions_onshore_base_s.geojson"),
regions_offshore=resources("regions_offshore_base_s.geojson"),
busmap=ancient(resources("busmap_base_s.csv")),
hac_features=lambda w: (
resources("hac_features.nc")
if config_provider("clustering", "cluster_network", "algorithm")(w)
== "hac"
else []
),
load=resources("electricity_demand_base_s.nc"),
output:
network=resources("networks/elec_s{simpl}_{clusters}.nc"),
regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"),
regions_offshore=resources("regions_offshore_elec_s{simpl}_{clusters}.geojson"),
busmap=resources("busmap_elec_s{simpl}_{clusters}.csv"),
linemap=resources("linemap_elec_s{simpl}_{clusters}.csv"),
network=resources("networks/base_s_{clusters}.nc"),
regions_onshore=resources("regions_onshore_base_s_{clusters}.geojson"),
regions_offshore=resources("regions_offshore_base_s_{clusters}.geojson"),
busmap=resources("busmap_base_s_{clusters}.csv"),
linemap=resources("linemap_base_s_{clusters}.csv"),
log:
logs("cluster_network/elec_s{simpl}_{clusters}.log"),
logs("cluster_network_base_s_{clusters}.log"),
benchmark:
benchmarks("cluster_network/elec_s{simpl}_{clusters}")
benchmarks("cluster_network_base_s_{clusters}")
threads: 1
resources:
mem_mb=10000,
@ -585,29 +636,76 @@ rule cluster_network:
"../scripts/cluster_network.py"
rule add_extra_components:
def input_profile_tech(w):
return {
f"profile_{tech}": resources(
"profile_{clusters}_" + tech + ".nc"
if tech != "hydro"
else f"profile_{tech}.nc"
)
for tech in config_provider("electricity", "renewable_carriers")(w)
}
def input_conventional(w):
return {
f"conventional_{carrier}_{attr}": fn
for carrier, d in config_provider("conventional", default={None: {}})(w).items()
if carrier in config_provider("electricity", "conventional_carriers")(w)
for attr, fn in d.items()
if str(fn).startswith("data/")
}
rule add_electricity:
params:
extendable_carriers=config_provider("electricity", "extendable_carriers"),
max_hours=config_provider("electricity", "max_hours"),
line_length_factor=config_provider("lines", "length_factor"),
link_length_factor=config_provider("links", "length_factor"),
scaling_factor=config_provider("load", "scaling_factor"),
countries=config_provider("countries"),
snapshots=config_provider("snapshots"),
renewable=config_provider("renewable"),
electricity=config_provider("electricity"),
conventional=config_provider("conventional"),
costs=config_provider("costs"),
foresight=config_provider("foresight"),
drop_leap_day=config_provider("enable", "drop_leap_day"),
consider_efficiency_classes=config_provider(
"clustering", "consider_efficiency_classes"
),
aggregation_strategies=config_provider("clustering", "aggregation_strategies"),
exclude_carriers=config_provider("clustering", "exclude_carriers"),
input:
network=resources("networks/elec_s{simpl}_{clusters}.nc"),
unpack(input_profile_tech),
unpack(input_conventional),
base_network=resources("networks/base_s_{clusters}.nc"),
tech_costs=lambda w: resources(
f"costs_{config_provider('costs', 'year')(w)}.csv"
),
regions=resources("regions_onshore_base_s_{clusters}.geojson"),
powerplants=resources("powerplants_s_{clusters}.csv"),
hydro_capacities=ancient("data/hydro_capacities.csv"),
unit_commitment="data/unit_commitment.csv",
fuel_price=lambda w: (
resources("monthly_fuel_price.csv")
if config_provider("conventional", "dynamic_fuel_price")(w)
else []
),
load=resources("electricity_demand_base_s.nc"),
busmap=resources("busmap_base_s_{clusters}.csv"),
output:
resources("networks/elec_s{simpl}_{clusters}_ec.nc"),
resources("networks/base_s_{clusters}_elec.nc"),
log:
logs("add_extra_components/elec_s{simpl}_{clusters}.log"),
logs("add_electricity_{clusters}.log"),
benchmark:
benchmarks("add_extra_components/elec_s{simpl}_{clusters}_ec")
benchmarks("add_electricity_{clusters}")
threads: 1
resources:
mem_mb=4000,
mem_mb=10000,
conda:
"../envs/environment.yaml"
script:
"../scripts/add_extra_components.py"
"../scripts/add_electricity.py"
rule prepare_network:
@ -626,17 +724,17 @@ rule prepare_network:
autarky=config_provider("electricity", "autarky", default={}),
drop_leap_day=config_provider("enable", "drop_leap_day"),
input:
resources("networks/elec_s{simpl}_{clusters}_ec.nc"),
resources("networks/base_s_{clusters}_elec.nc"),
tech_costs=lambda w: resources(
f"costs_{config_provider('costs', 'year')(w)}.csv"
),
co2_price=lambda w: resources("co2_price.csv") if "Ept" in w.opts else [],
output:
resources("networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"),
resources("networks/base_s_{clusters}_elec_l{ll}_{opts}.nc"),
log:
logs("prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.log"),
logs("prepare_network_base_s_{clusters}_elec_l{ll}_{opts}.log"),
benchmark:
(benchmarks("prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}"))
benchmarks("prepare_network_base_s_{clusters}_elec_l{ll}_{opts}")
threads: 1
resources:
mem_mb=4000,

View File

@ -33,19 +33,19 @@ rule build_clustered_population_layouts:
pop_layout_total=resources("pop_layout_total.nc"),
pop_layout_urban=resources("pop_layout_urban.nc"),
pop_layout_rural=resources("pop_layout_rural.nc"),
regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"),
regions_onshore=resources("regions_onshore_base_s_{clusters}.geojson"),
cutout=lambda w: "cutouts/"
+ CDIR
+ config_provider("atlite", "default_cutout")(w)
+ ".nc",
output:
clustered_pop_layout=resources("pop_layout_elec_s{simpl}_{clusters}.csv"),
clustered_pop_layout=resources("pop_layout_base_s_{clusters}.csv"),
log:
logs("build_clustered_population_layouts_{simpl}_{clusters}.log"),
logs("build_clustered_population_layouts_s_{clusters}.log"),
resources:
mem_mb=10000,
benchmark:
benchmarks("build_clustered_population_layouts/s{simpl}_{clusters}")
benchmarks("build_clustered_population_layouts/s_{clusters}")
conda:
"../envs/environment.yaml"
script:
@ -57,19 +57,19 @@ rule build_simplified_population_layouts:
pop_layout_total=resources("pop_layout_total.nc"),
pop_layout_urban=resources("pop_layout_urban.nc"),
pop_layout_rural=resources("pop_layout_rural.nc"),
regions_onshore=resources("regions_onshore_elec_s{simpl}.geojson"),
regions_onshore=resources("regions_onshore_base_s.geojson"),
cutout=lambda w: "cutouts/"
+ CDIR
+ config_provider("atlite", "default_cutout")(w)
+ ".nc",
output:
clustered_pop_layout=resources("pop_layout_elec_s{simpl}.csv"),
clustered_pop_layout=resources("pop_layout_base_s.csv"),
resources:
mem_mb=10000,
log:
logs("build_simplified_population_layouts_{simpl}"),
logs("build_simplified_population_layouts_s"),
benchmark:
benchmarks("build_simplified_population_layouts/s{simpl}")
benchmarks("build_simplified_population_layouts/s")
conda:
"../envs/environment.yaml"
script:
@ -96,17 +96,17 @@ rule build_gas_input_locations:
gem="data/gem/Europe-Gas-Tracker-2024-05.xlsx",
entry="data/gas_network/scigrid-gas/data/IGGIELGN_BorderPoints.geojson",
storage="data/gas_network/scigrid-gas/data/IGGIELGN_Storages.geojson",
regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"),
regions_offshore=resources("regions_offshore_elec_s{simpl}_{clusters}.geojson"),
regions_onshore=resources("regions_onshore_base_s_{clusters}.geojson"),
regions_offshore=resources("regions_offshore_base_s_{clusters}.geojson"),
output:
gas_input_nodes=resources("gas_input_locations_s{simpl}_{clusters}.geojson"),
gas_input_nodes=resources("gas_input_locations_s_{clusters}.geojson"),
gas_input_nodes_simplified=resources(
"gas_input_locations_s{simpl}_{clusters}_simplified.csv"
"gas_input_locations_s_{clusters}_simplified.csv"
),
resources:
mem_mb=2000,
log:
logs("build_gas_input_locations_s{simpl}_{clusters}.log"),
logs("build_gas_input_locations_s_{clusters}.log"),
conda:
"../envs/environment.yaml"
script:
@ -116,14 +116,14 @@ rule build_gas_input_locations:
rule cluster_gas_network:
input:
cleaned_gas_network=resources("gas_network.csv"),
regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"),
regions_offshore=resources("regions_offshore_elec_s{simpl}_{clusters}.geojson"),
regions_onshore=resources("regions_onshore_base_s_{clusters}.geojson"),
regions_offshore=resources("regions_offshore_base_s_{clusters}.geojson"),
output:
clustered_gas_network=resources("gas_network_elec_s{simpl}_{clusters}.csv"),
clustered_gas_network=resources("gas_network_base_s_{clusters}.csv"),
resources:
mem_mb=4000,
log:
logs("cluster_gas_network_s{simpl}_{clusters}.log"),
logs("cluster_gas_network_{clusters}.log"),
conda:
"../envs/environment.yaml"
script:
@ -149,17 +149,17 @@ rule build_daily_heat_demand:
drop_leap_day=config_provider("enable", "drop_leap_day"),
input:
pop_layout=resources("pop_layout_total.nc"),
regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"),
regions_onshore=resources("regions_onshore_base_s_{clusters}.geojson"),
cutout=heat_demand_cutout,
output:
heat_demand=resources("daily_heat_demand_total_elec_s{simpl}_{clusters}.nc"),
heat_demand=resources("daily_heat_demand_total_base_s_{clusters}.nc"),
resources:
mem_mb=20000,
threads: 8
log:
logs("build_daily_heat_demand_total_{simpl}_{clusters}.loc"),
logs("build_daily_heat_demand_total_s_{clusters}.loc"),
benchmark:
benchmarks("build_daily_heat_demand/total_s{simpl}_{clusters}")
benchmarks("build_daily_heat_demand/total_s_{clusters}")
conda:
"../envs/environment.yaml"
script:
@ -172,16 +172,16 @@ rule build_hourly_heat_demand:
drop_leap_day=config_provider("enable", "drop_leap_day"),
input:
heat_profile="data/heat_load_profile_BDEW.csv",
heat_demand=resources("daily_heat_demand_total_elec_s{simpl}_{clusters}.nc"),
heat_demand=resources("daily_heat_demand_total_base_s_{clusters}.nc"),
output:
heat_demand=resources("hourly_heat_demand_total_elec_s{simpl}_{clusters}.nc"),
heat_demand=resources("hourly_heat_demand_total_base_s_{clusters}.nc"),
resources:
mem_mb=2000,
threads: 8
log:
logs("build_hourly_heat_demand_total_{simpl}_{clusters}.loc"),
logs("build_hourly_heat_demand_total_s_{clusters}.loc"),
benchmark:
benchmarks("build_hourly_heat_demand/total_s{simpl}_{clusters}")
benchmarks("build_hourly_heat_demand/total_s_{clusters}")
conda:
"../envs/environment.yaml"
script:
@ -194,18 +194,18 @@ rule build_temperature_profiles:
drop_leap_day=config_provider("enable", "drop_leap_day"),
input:
pop_layout=resources("pop_layout_total.nc"),
regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"),
regions_onshore=resources("regions_onshore_base_s_{clusters}.geojson"),
cutout=heat_demand_cutout,
output:
temp_soil=resources("temp_soil_total_elec_s{simpl}_{clusters}.nc"),
temp_air=resources("temp_air_total_elec_s{simpl}_{clusters}.nc"),
temp_soil=resources("temp_soil_total_base_s_{clusters}.nc"),
temp_air=resources("temp_air_total_base_s_{clusters}.nc"),
resources:
mem_mb=20000,
threads: 8
log:
logs("build_temperature_profiles_total_{simpl}_{clusters}.log"),
logs("build_temperature_profiles_total_s_{clusters}.log"),
benchmark:
benchmarks("build_temperature_profiles/total_s{simpl}_{clusters}")
benchmarks("build_temperature_profiles/total_{clusters}")
conda:
"../envs/environment.yaml"
script:
@ -252,21 +252,21 @@ rule build_central_heating_temperature_profiles:
"rolling_window_ambient_temperature",
),
input:
temp_air_total=resources("temp_air_total_elec_s{simpl}_{clusters}.nc"),
regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"),
temp_air_total=resources("temp_air_total_base_s_{clusters}.nc"),
regions_onshore=resources("regions_onshore_base_s_{clusters}.geojson"),
output:
central_heating_forward_temperature_profiles=resources(
"central_heating_forward_temperature_profiles_elec_s{simpl}_{clusters}.nc"
"central_heating_forward_temperature_profiles_base_s_{clusters}.nc"
),
central_heating_return_temperature_profiles=resources(
"central_heating_return_temperature_profiles_elec_s{simpl}_{clusters}.nc"
"central_heating_return_temperature_profiles_base_s_{clusters}.nc"
),
resources:
mem_mb=20000,
log:
logs("build_central_heating_temperature_profiles_s{simpl}_{clusters}.log"),
logs("build_central_heating_temperature_profiles_s_{clusters}.log"),
benchmark:
benchmarks("build_central_heating_temperature_profiles/s{simpl}_{clusters}")
benchmarks("build_central_heating_temperature_profiles/s_{clusters}")
conda:
"../envs/environment.yaml"
script:
@ -288,22 +288,22 @@ rule build_cop_profiles:
snapshots=config_provider("snapshots"),
input:
central_heating_forward_temperature_profiles=resources(
"central_heating_forward_temperature_profiles_elec_s{simpl}_{clusters}.nc"
"central_heating_forward_temperature_profiles_base_s_{clusters}.nc"
),
central_heating_return_temperature_profiles=resources(
"central_heating_return_temperature_profiles_elec_s{simpl}_{clusters}.nc"
"central_heating_return_temperature_profiles_base_s_{clusters}.nc"
),
temp_soil_total=resources("temp_soil_total_elec_s{simpl}_{clusters}.nc"),
temp_air_total=resources("temp_air_total_elec_s{simpl}_{clusters}.nc"),
regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"),
temp_soil_total=resources("temp_soil_total_base_s_{clusters}.nc"),
temp_air_total=resources("temp_air_total_base_s_{clusters}.nc"),
regions_onshore=resources("regions_onshore_base_s_{clusters}.geojson"),
output:
cop_profiles=resources("cop_profiles_elec_s{simpl}_{clusters}.nc"),
cop_profiles=resources("cop_profiles_base_s_{clusters}.nc"),
resources:
mem_mb=20000,
log:
logs("build_cop_profiles_s{simpl}_{clusters}.log"),
logs("build_cop_profiles_s_{clusters}.log"),
benchmark:
benchmarks("build_cop_profiles/s{simpl}_{clusters}")
benchmarks("build_cop_profiles/s_{clusters}")
conda:
"../envs/environment.yaml"
script:
@ -330,17 +330,17 @@ rule build_solar_thermal_profiles:
solar_thermal=config_provider("solar_thermal"),
input:
pop_layout=resources("pop_layout_total.nc"),
regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"),
regions_onshore=resources("regions_onshore_base_s_{clusters}.geojson"),
cutout=solar_thermal_cutout,
output:
solar_thermal=resources("solar_thermal_total_elec_s{simpl}_{clusters}.nc"),
solar_thermal=resources("solar_thermal_total_base_s_{clusters}.nc"),
resources:
mem_mb=20000,
threads: 16
log:
logs("build_solar_thermal_profiles_total_s{simpl}_{clusters}.log"),
logs("build_solar_thermal_profiles_total_s_{clusters}.log"),
benchmark:
benchmarks("build_solar_thermal_profiles/total_s{simpl}_{clusters}")
benchmarks("build_solar_thermal_profiles/total_{clusters}")
conda:
"../envs/environment.yaml"
script:
@ -406,25 +406,25 @@ rule build_biomass_potentials:
enspreso_biomass="data/ENSPRESO_BIOMASS.xlsx",
eurostat="data/eurostat/Balances-April2023",
nuts2="data/nuts/NUTS_RG_03M_2013_4326_LEVL_2.geojson",
regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"),
regions_onshore=resources("regions_onshore_base_s_{clusters}.geojson"),
nuts3_population=ancient("data/bundle/nama_10r_3popgdp.tsv.gz"),
swiss_cantons=ancient("data/ch_cantons.csv"),
swiss_population=ancient("data/bundle/je-e-21.03.02.xls"),
country_shapes=resources("country_shapes.geojson"),
output:
biomass_potentials_all=resources(
"biomass_potentials_all_s{simpl}_{clusters}_{planning_horizons}.csv"
"biomass_potentials_all_{clusters}_{planning_horizons}.csv"
),
biomass_potentials=resources(
"biomass_potentials_s{simpl}_{clusters}_{planning_horizons}.csv"
"biomass_potentials_s_{clusters}_{planning_horizons}.csv"
),
threads: 8
resources:
mem_mb=1000,
log:
logs("build_biomass_potentials_s{simpl}_{clusters}_{planning_horizons}.log"),
logs("build_biomass_potentials_s_{clusters}_{planning_horizons}.log"),
benchmark:
benchmarks("build_biomass_potentials_s{simpl}_{clusters}_{planning_horizons}")
benchmarks("build_biomass_potentials_s_{clusters}_{planning_horizons}")
conda:
"../envs/environment.yaml"
script:
@ -457,19 +457,19 @@ rule build_sequestration_potentials:
),
input:
sequestration_potential="data/complete_map_2020_unit_Mt.geojson",
regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"),
regions_offshore=resources("regions_offshore_elec_s{simpl}_{clusters}.geojson"),
regions_onshore=resources("regions_onshore_base_s_{clusters}.geojson"),
regions_offshore=resources("regions_offshore_base_s_{clusters}.geojson"),
output:
sequestration_potential=resources(
"co2_sequestration_potential_elec_s{simpl}_{clusters}.csv"
"co2_sequestration_potential_base_s_{clusters}.csv"
),
threads: 1
resources:
mem_mb=4000,
log:
logs("build_sequestration_potentials_s{simpl}_{clusters}.log"),
logs("build_sequestration_potentials_{clusters}.log"),
benchmark:
benchmarks("build_sequestration_potentials_s{simpl}_{clusters}")
benchmarks("build_sequestration_potentials_{clusters}")
conda:
"../envs/environment.yaml"
script:
@ -479,17 +479,17 @@ rule build_sequestration_potentials:
rule build_salt_cavern_potentials:
input:
salt_caverns="data/bundle/h2_salt_caverns_GWh_per_sqkm.geojson",
regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"),
regions_offshore=resources("regions_offshore_elec_s{simpl}_{clusters}.geojson"),
regions_onshore=resources("regions_onshore_base_s_{clusters}.geojson"),
regions_offshore=resources("regions_offshore_base_s_{clusters}.geojson"),
output:
h2_cavern_potential=resources("salt_cavern_potentials_s{simpl}_{clusters}.csv"),
h2_cavern_potential=resources("salt_cavern_potentials_s_{clusters}.csv"),
threads: 1
resources:
mem_mb=2000,
log:
logs("build_salt_cavern_potentials_s{simpl}_{clusters}.log"),
logs("build_salt_cavern_potentials_s_{clusters}.log"),
benchmark:
benchmarks("build_salt_cavern_potentials_s{simpl}_{clusters}")
benchmarks("build_salt_cavern_potentials_s_{clusters}")
conda:
"../envs/environment.yaml"
script:
@ -625,8 +625,8 @@ rule build_industrial_distribution_key:
),
countries=config_provider("countries"),
input:
regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"),
clustered_pop_layout=resources("pop_layout_elec_s{simpl}_{clusters}.csv"),
regions_onshore=resources("regions_onshore_base_s_{clusters}.geojson"),
clustered_pop_layout=resources("pop_layout_base_s_{clusters}.csv"),
hotmaps="data/Industrial_Database.csv",
gem_gspt="data/gem/Global-Steel-Plant-Tracker-April-2024-Standard-Copy-V1.xlsx",
ammonia="data/ammonia_plants.csv",
@ -634,15 +634,15 @@ rule build_industrial_distribution_key:
refineries_supplement="data/refineries-noneu.csv",
output:
industrial_distribution_key=resources(
"industrial_distribution_key_elec_s{simpl}_{clusters}.csv"
"industrial_distribution_key_base_s_{clusters}.csv"
),
threads: 1
resources:
mem_mb=1000,
log:
logs("build_industrial_distribution_key_s{simpl}_{clusters}.log"),
logs("build_industrial_distribution_key_{clusters}.log"),
benchmark:
benchmarks("build_industrial_distribution_key/s{simpl}_{clusters}")
benchmarks("build_industrial_distribution_key/s_{clusters}")
conda:
"../envs/environment.yaml"
script:
@ -652,26 +652,24 @@ rule build_industrial_distribution_key:
rule build_industrial_production_per_node:
input:
industrial_distribution_key=resources(
"industrial_distribution_key_elec_s{simpl}_{clusters}.csv"
"industrial_distribution_key_base_s_{clusters}.csv"
),
industrial_production_per_country_tomorrow=resources(
"industrial_production_per_country_tomorrow_{planning_horizons}.csv"
),
output:
industrial_production_per_node=resources(
"industrial_production_elec_s{simpl}_{clusters}_{planning_horizons}.csv"
"industrial_production_base_s_{clusters}_{planning_horizons}.csv"
),
threads: 1
resources:
mem_mb=1000,
log:
logs(
"build_industrial_production_per_node_s{simpl}_{clusters}_{planning_horizons}.log"
),
logs("build_industrial_production_per_node_{clusters}_{planning_horizons}.log"),
benchmark:
(
benchmarks(
"build_industrial_production_per_node/s{simpl}_{clusters}_{planning_horizons}"
"build_industrial_production_per_node/s_{clusters}_{planning_horizons}"
)
)
conda:
@ -686,26 +684,26 @@ rule build_industrial_energy_demand_per_node:
"industry_sector_ratios_{planning_horizons}.csv"
),
industrial_production_per_node=resources(
"industrial_production_elec_s{simpl}_{clusters}_{planning_horizons}.csv"
"industrial_production_base_s_{clusters}_{planning_horizons}.csv"
),
industrial_energy_demand_per_node_today=resources(
"industrial_energy_demand_today_elec_s{simpl}_{clusters}.csv"
"industrial_energy_demand_today_base_s_{clusters}.csv"
),
output:
industrial_energy_demand_per_node=resources(
"industrial_energy_demand_elec_s{simpl}_{clusters}_{planning_horizons}.csv"
"industrial_energy_demand_base_s_{clusters}_{planning_horizons}.csv"
),
threads: 1
resources:
mem_mb=1000,
log:
logs(
"build_industrial_energy_demand_per_node_s{simpl}_{clusters}_{planning_horizons}.log"
"build_industrial_energy_demand_per_node_{clusters}_{planning_horizons}.log"
),
benchmark:
(
benchmarks(
"build_industrial_energy_demand_per_node/s{simpl}_{clusters}_{planning_horizons}"
"build_industrial_energy_demand_per_node/s_{clusters}_{planning_horizons}"
)
)
conda:
@ -744,22 +742,22 @@ rule build_industrial_energy_demand_per_country_today:
rule build_industrial_energy_demand_per_node_today:
input:
industrial_distribution_key=resources(
"industrial_distribution_key_elec_s{simpl}_{clusters}.csv"
"industrial_distribution_key_base_s_{clusters}.csv"
),
industrial_energy_demand_per_country_today=resources(
"industrial_energy_demand_per_country_today.csv"
),
output:
industrial_energy_demand_per_node_today=resources(
"industrial_energy_demand_today_elec_s{simpl}_{clusters}.csv"
"industrial_energy_demand_today_base_s_{clusters}.csv"
),
threads: 1
resources:
mem_mb=1000,
log:
logs("build_industrial_energy_demand_per_node_today_s{simpl}_{clusters}.log"),
logs("build_industrial_energy_demand_per_node_today_{clusters}.log"),
benchmark:
benchmarks("build_industrial_energy_demand_per_node_today/s{simpl}_{clusters}")
benchmarks("build_industrial_energy_demand_per_node_today/s_{clusters}")
conda:
"../envs/environment.yaml"
script:
@ -773,23 +771,23 @@ rule build_retro_cost:
input:
building_stock="data/retro/data_building_stock.csv",
data_tabula="data/bundle/retro/tabula-calculator-calcsetbuilding.csv",
air_temperature=resources("temp_air_total_elec_s{simpl}_{clusters}.nc"),
air_temperature=resources("temp_air_total_base_s_{clusters}.nc"),
u_values_PL="data/retro/u_values_poland.csv",
tax_w="data/retro/electricity_taxes_eu.csv",
construction_index="data/retro/comparative_level_investment.csv",
floor_area_missing="data/retro/floor_area_missing.csv",
clustered_pop_layout=resources("pop_layout_elec_s{simpl}_{clusters}.csv"),
clustered_pop_layout=resources("pop_layout_base_s_{clusters}.csv"),
cost_germany="data/retro/retro_cost_germany.csv",
window_assumptions="data/retro/window_assumptions.csv",
output:
retro_cost=resources("retro_cost_elec_s{simpl}_{clusters}.csv"),
floor_area=resources("floor_area_elec_s{simpl}_{clusters}.csv"),
retro_cost=resources("retro_cost_base_s_{clusters}.csv"),
floor_area=resources("floor_area_base_s_{clusters}.csv"),
resources:
mem_mb=1000,
log:
logs("build_retro_cost_s{simpl}_{clusters}.log"),
logs("build_retro_cost_{clusters}.log"),
benchmark:
benchmarks("build_retro_cost/s{simpl}_{clusters}")
benchmarks("build_retro_cost/s_{clusters}")
conda:
"../envs/environment.yaml"
script:
@ -801,14 +799,14 @@ rule build_population_weighted_energy_totals:
snapshots=config_provider("snapshots"),
input:
energy_totals=resources("{kind}_totals.csv"),
clustered_pop_layout=resources("pop_layout_elec_s{simpl}_{clusters}.csv"),
clustered_pop_layout=resources("pop_layout_base_s_{clusters}.csv"),
output:
resources("pop_weighted_{kind}_totals_s{simpl}_{clusters}.csv"),
resources("pop_weighted_{kind}_totals_s_{clusters}.csv"),
threads: 1
resources:
mem_mb=2000,
log:
logs("build_population_weighted_{kind}_totals_s{simpl}_{clusters}.log"),
logs("build_population_weighted_{kind}_totals_{clusters}.log"),
conda:
"../envs/environment.yaml"
script:
@ -819,17 +817,17 @@ rule build_shipping_demand:
input:
ports="data/attributed_ports.json",
scope=resources("europe_shape.geojson"),
regions=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"),
regions=resources("regions_onshore_base_s_{clusters}.geojson"),
demand=resources("energy_totals.csv"),
params:
energy_totals_year=config_provider("energy", "energy_totals_year"),
output:
resources("shipping_demand_s{simpl}_{clusters}.csv"),
resources("shipping_demand_s_{clusters}.csv"),
threads: 1
resources:
mem_mb=2000,
log:
logs("build_shipping_demand_s{simpl}_{clusters}.log"),
logs("build_shipping_demand_s_{clusters}.log"),
conda:
"../envs/environment.yaml"
script:
@ -843,24 +841,24 @@ rule build_transport_demand:
sector=config_provider("sector"),
energy_totals_year=config_provider("energy", "energy_totals_year"),
input:
clustered_pop_layout=resources("pop_layout_elec_s{simpl}_{clusters}.csv"),
clustered_pop_layout=resources("pop_layout_base_s_{clusters}.csv"),
pop_weighted_energy_totals=resources(
"pop_weighted_energy_totals_s{simpl}_{clusters}.csv"
"pop_weighted_energy_totals_s_{clusters}.csv"
),
transport_data=resources("transport_data.csv"),
traffic_data_KFZ="data/bundle/emobility/KFZ__count",
traffic_data_Pkw="data/bundle/emobility/Pkw__count",
temp_air_total=resources("temp_air_total_elec_s{simpl}_{clusters}.nc"),
temp_air_total=resources("temp_air_total_base_s_{clusters}.nc"),
output:
transport_demand=resources("transport_demand_s{simpl}_{clusters}.csv"),
transport_data=resources("transport_data_s{simpl}_{clusters}.csv"),
avail_profile=resources("avail_profile_s{simpl}_{clusters}.csv"),
dsm_profile=resources("dsm_profile_s{simpl}_{clusters}.csv"),
transport_demand=resources("transport_demand_s_{clusters}.csv"),
transport_data=resources("transport_data_s_{clusters}.csv"),
avail_profile=resources("avail_profile_s_{clusters}.csv"),
dsm_profile=resources("dsm_profile_s_{clusters}.csv"),
threads: 1
resources:
mem_mb=2000,
log:
logs("build_transport_demand_s{simpl}_{clusters}.log"),
logs("build_transport_demand_s_{clusters}.log"),
conda:
"../envs/environment.yaml"
script:
@ -873,16 +871,16 @@ rule build_district_heat_share:
energy_totals_year=config_provider("energy", "energy_totals_year"),
input:
district_heat_share=resources("district_heat_share.csv"),
clustered_pop_layout=resources("pop_layout_elec_s{simpl}_{clusters}.csv"),
clustered_pop_layout=resources("pop_layout_base_s_{clusters}.csv"),
output:
district_heat_share=resources(
"district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv"
"district_heat_share_base_s_{clusters}_{planning_horizons}.csv"
),
threads: 1
resources:
mem_mb=1000,
log:
logs("build_district_heat_share_s{simpl}_{clusters}_{planning_horizons}.log"),
logs("build_district_heat_share_{clusters}_{planning_horizons}.log"),
conda:
"../envs/environment.yaml"
script:
@ -896,27 +894,27 @@ rule build_existing_heating_distribution:
existing_capacities=config_provider("existing_capacities"),
input:
existing_heating="data/existing_infrastructure/existing_heating_raw.csv",
clustered_pop_layout=resources("pop_layout_elec_s{simpl}_{clusters}.csv"),
clustered_pop_layout=resources("pop_layout_base_s_{clusters}.csv"),
clustered_pop_energy_layout=resources(
"pop_weighted_energy_totals_s{simpl}_{clusters}.csv"
"pop_weighted_energy_totals_s_{clusters}.csv"
),
district_heat_share=resources(
"district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv"
"district_heat_share_base_s_{clusters}_{planning_horizons}.csv"
),
output:
existing_heating_distribution=resources(
"existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv"
"existing_heating_distribution_base_s_{clusters}_{planning_horizons}.csv"
),
threads: 1
resources:
mem_mb=2000,
log:
logs(
"build_existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.log"
"build_existing_heating_distribution_base_s_{clusters}_{planning_horizons}.log"
),
benchmark:
benchmarks(
"build_existing_heating_distribution/elec_s{simpl}_{clusters}_{planning_horizons}"
"build_existing_heating_distribution/base_s_{clusters}_{planning_horizons}"
)
conda:
"../envs/environment.yaml"
@ -930,28 +928,28 @@ rule time_aggregation:
drop_leap_day=config_provider("enable", "drop_leap_day"),
solver_name=config_provider("solving", "solver", "name"),
input:
network=resources("networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"),
network=resources("networks/base_s_{clusters}_elec_l{ll}_{opts}.nc"),
hourly_heat_demand_total=lambda w: (
resources("hourly_heat_demand_total_elec_s{simpl}_{clusters}.nc")
resources("hourly_heat_demand_total_base_s_{clusters}.nc")
if config_provider("sector", "heating")(w)
else []
),
solar_thermal_total=lambda w: (
resources("solar_thermal_total_elec_s{simpl}_{clusters}.nc")
resources("solar_thermal_total_base_s_{clusters}.nc")
if config_provider("sector", "solar_thermal")(w)
else []
),
output:
snapshot_weightings=resources(
"snapshot_weightings_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.csv"
"snapshot_weightings_base_s_{clusters}_elec_l{ll}_{opts}.csv"
),
threads: 1
resources:
mem_mb=5000,
log:
logs("time_aggregation_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.log"),
logs("time_aggregation_base_s_{clusters}_elec_l{ll}_{opts}.log"),
benchmark:
benchmarks("time_aggregation_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}")
benchmarks("time_aggregation_base_s_{clusters}_elec_l{ll}_{opts}")
conda:
"../envs/environment.yaml"
script:
@ -960,7 +958,7 @@ rule time_aggregation:
def input_profile_offwind(w):
return {
f"profile_{tech}": resources(f"profile_{tech}.nc")
f"profile_{tech}": resources("profile_{clusters}_" + tech + ".nc")
for tech in ["offwind-ac", "offwind-dc", "offwind-float"]
if (tech in config_provider("electricity", "renewable_carriers")(w))
}
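# Illustrative sketch: with renewable_carriers containing, say, "offwind-ac"
# and "offwind-dc" but not "offwind-float" (assumed values), the function
# above resolves to:
#
#   {
#       "profile_offwind-ac": resources("profile_{clusters}_offwind-ac.nc"),
#       "profile_offwind-dc": resources("profile_{clusters}_offwind-dc.nc"),
#   }
#
# i.e. the offshore profiles are now clustered-resolution files, in line with
# the cluster-first workflow introduced by this commit.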
@ -973,21 +971,21 @@ rule build_egs_potentials:
costs=config_provider("costs"),
input:
egs_cost="data/egs_costs.json",
regions=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"),
regions=resources("regions_onshore_base_s_{clusters}.geojson"),
air_temperature=(
resources("temp_air_total_elec_s{simpl}_{clusters}.nc")
resources("temp_air_total_base_s_{clusters}.nc")
if config_provider("sector", "enhanced_geothermal", "var_cf")
else []
),
output:
egs_potentials=resources("egs_potentials_s{simpl}_{clusters}.csv"),
egs_overlap=resources("egs_overlap_s{simpl}_{clusters}.csv"),
egs_capacity_factors=resources("egs_capacity_factors_s{simpl}_{clusters}.csv"),
egs_potentials=resources("egs_potentials_{clusters}.csv"),
egs_overlap=resources("egs_overlap_{clusters}.csv"),
egs_capacity_factors=resources("egs_capacity_factors_{clusters}.csv"),
threads: 1
resources:
mem_mb=2000,
log:
logs("build_egs_potentials_s{simpl}_{clusters}.log"),
logs("build_egs_potentials_{clusters}.log"),
conda:
"../envs/environment.yaml"
script:
@ -1005,6 +1003,7 @@ rule prepare_sector_network:
costs=config_provider("costs"),
sector=config_provider("sector"),
industry=config_provider("industry"),
renewable=config_provider("renewable"),
lines=config_provider("lines"),
pypsa_eur=config_provider("pypsa_eur"),
length_factor=config_provider("lines", "length_factor"),
@ -1022,15 +1021,15 @@ rule prepare_sector_network:
**rules.cluster_gas_network.output,
**rules.build_gas_input_locations.output,
snapshot_weightings=resources(
"snapshot_weightings_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.csv"
"snapshot_weightings_base_s_{clusters}_elec_l{ll}_{opts}.csv"
),
retro_cost=lambda w: (
resources("retro_cost_elec_s{simpl}_{clusters}.csv")
resources("retro_cost_base_s_{clusters}.csv")
if config_provider("sector", "retrofitting", "retro_endogen")(w)
else []
),
floor_area=lambda w: (
resources("floor_area_elec_s{simpl}_{clusters}.csv")
resources("floor_area_base_s_{clusters}.csv")
if config_provider("sector", "retrofitting", "retro_endogen")(w)
else []
),
@ -1041,96 +1040,91 @@ rule prepare_sector_network:
else []
),
sequestration_potential=lambda w: (
resources("co2_sequestration_potential_elec_s{simpl}_{clusters}.csv")
resources("co2_sequestration_potential_base_s_{clusters}.csv")
if config_provider(
"sector", "regional_co2_sequestration_potential", "enable"
)(w)
else []
),
network=resources("networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"),
network=resources("networks/base_s_{clusters}_elec_l{ll}_{opts}.nc"),
eurostat="data/eurostat/Balances-April2023",
pop_weighted_energy_totals=resources(
"pop_weighted_energy_totals_s{simpl}_{clusters}.csv"
"pop_weighted_energy_totals_s_{clusters}.csv"
),
pop_weighted_heat_totals=resources(
"pop_weighted_heat_totals_s{simpl}_{clusters}.csv"
),
shipping_demand=resources("shipping_demand_s{simpl}_{clusters}.csv"),
transport_demand=resources("transport_demand_s{simpl}_{clusters}.csv"),
transport_data=resources("transport_data_s{simpl}_{clusters}.csv"),
avail_profile=resources("avail_profile_s{simpl}_{clusters}.csv"),
dsm_profile=resources("dsm_profile_s{simpl}_{clusters}.csv"),
pop_weighted_heat_totals=resources("pop_weighted_heat_totals_s_{clusters}.csv"),
shipping_demand=resources("shipping_demand_s_{clusters}.csv"),
transport_demand=resources("transport_demand_s_{clusters}.csv"),
transport_data=resources("transport_data_s_{clusters}.csv"),
avail_profile=resources("avail_profile_s_{clusters}.csv"),
dsm_profile=resources("dsm_profile_s_{clusters}.csv"),
co2_totals_name=resources("co2_totals.csv"),
co2="data/bundle/eea/UNFCCC_v23.csv",
biomass_potentials=lambda w: (
resources(
"biomass_potentials_s{simpl}_{clusters}_"
"biomass_potentials_s_{clusters}_"
+ "{}.csv".format(config_provider("biomass", "year")(w))
)
if config_provider("foresight")(w) == "overnight"
else resources(
"biomass_potentials_s{simpl}_{clusters}_{planning_horizons}.csv"
)
else resources("biomass_potentials_s_{clusters}_{planning_horizons}.csv")
),
costs=lambda w: (
resources("costs_{}.csv".format(config_provider("costs", "year")(w)))
if config_provider("foresight")(w) == "overnight"
else resources("costs_{planning_horizons}.csv")
),
h2_cavern=resources("salt_cavern_potentials_s{simpl}_{clusters}.csv"),
busmap_s=resources("busmap_elec_s{simpl}.csv"),
busmap=resources("busmap_elec_s{simpl}_{clusters}.csv"),
clustered_pop_layout=resources("pop_layout_elec_s{simpl}_{clusters}.csv"),
simplified_pop_layout=resources("pop_layout_elec_s{simpl}.csv"),
h2_cavern=resources("salt_cavern_potentials_s_{clusters}.csv"),
busmap_s=resources("busmap_base_s.csv"),
busmap=resources("busmap_base_s_{clusters}.csv"),
clustered_pop_layout=resources("pop_layout_base_s_{clusters}.csv"),
industrial_demand=resources(
"industrial_energy_demand_elec_s{simpl}_{clusters}_{planning_horizons}.csv"
"industrial_energy_demand_base_s_{clusters}_{planning_horizons}.csv"
),
hourly_heat_demand_total=resources(
"hourly_heat_demand_total_elec_s{simpl}_{clusters}.nc"
"hourly_heat_demand_total_base_s_{clusters}.nc"
),
industrial_production=resources(
"industrial_production_elec_s{simpl}_{clusters}_{planning_horizons}.csv"
"industrial_production_base_s_{clusters}_{planning_horizons}.csv"
),
district_heat_share=resources(
"district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv"
"district_heat_share_base_s_{clusters}_{planning_horizons}.csv"
),
heating_efficiencies=resources("heating_efficiencies.csv"),
temp_soil_total=resources("temp_soil_total_elec_s{simpl}_{clusters}.nc"),
temp_air_total=resources("temp_air_total_elec_s{simpl}_{clusters}.nc"),
cop_profiles=resources("cop_profiles_elec_s{simpl}_{clusters}.nc"),
temp_soil_total=resources("temp_soil_total_base_s_{clusters}.nc"),
temp_air_total=resources("temp_air_total_base_s_{clusters}.nc"),
cop_profiles=resources("cop_profiles_base_s_{clusters}.nc"),
solar_thermal_total=lambda w: (
resources("solar_thermal_total_elec_s{simpl}_{clusters}.nc")
resources("solar_thermal_total_base_s_{clusters}.nc")
if config_provider("sector", "solar_thermal")(w)
else []
),
egs_potentials=lambda w: (
resources("egs_potentials_s{simpl}_{clusters}.csv")
resources("egs_potentials_{clusters}.csv")
if config_provider("sector", "enhanced_geothermal", "enable")(w)
else []
),
egs_overlap=lambda w: (
resources("egs_overlap_s{simpl}_{clusters}.csv")
resources("egs_overlap_{clusters}.csv")
if config_provider("sector", "enhanced_geothermal", "enable")(w)
else []
),
egs_capacity_factors=lambda w: (
resources("egs_capacity_factors_s{simpl}_{clusters}.csv")
resources("egs_capacity_factors_{clusters}.csv")
if config_provider("sector", "enhanced_geothermal", "enable")(w)
else []
),
output:
RESULTS
+ "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
+ "prenetworks/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
threads: 1
resources:
mem_mb=2000,
log:
RESULTS
+ "logs/prepare_sector_network_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
+ "logs/prepare_sector_network_base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
benchmark:
(
RESULTS
+ "benchmarks/prepare_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
+ "benchmarks/prepare_sector_network/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
)
conda:
"../envs/environment.yaml"

View File

@ -6,7 +6,6 @@
localrules:
all,
cluster_networks,
extra_components_networks,
prepare_elec_networks,
prepare_sector_networks,
solve_elec_networks,
@ -16,16 +15,7 @@ localrules:
rule cluster_networks:
input:
expand(
resources("networks/elec_s{simpl}_{clusters}.nc"),
**config["scenario"],
run=config["run"]["name"],
),
rule extra_components_networks:
input:
expand(
resources("networks/elec_s{simpl}_{clusters}_ec.nc"),
resources("networks/base_s_{clusters}.nc"),
**config["scenario"],
run=config["run"]["name"],
),
@ -34,7 +24,7 @@ rule extra_components_networks:
rule prepare_elec_networks:
input:
expand(
resources("networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"),
resources("networks/base_s_{clusters}_elec_l{ll}_{opts}.nc"),
**config["scenario"],
run=config["run"]["name"],
),
@ -44,7 +34,7 @@ rule prepare_sector_networks:
input:
expand(
RESULTS
+ "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
+ "prenetworks/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
**config["scenario"],
run=config["run"]["name"],
),
@ -53,7 +43,7 @@ rule prepare_sector_networks:
rule solve_elec_networks:
input:
expand(
RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
RESULTS + "networks/base_s_{clusters}_elec_l{ll}_{opts}.nc",
**config["scenario"],
run=config["run"]["name"],
),
@ -63,7 +53,7 @@ rule solve_sector_networks:
input:
expand(
RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
+ "postnetworks/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
**config["scenario"],
run=config["run"]["name"],
),
@ -73,7 +63,7 @@ rule solve_sector_networks_perfect:
input:
expand(
RESULTS
+ "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf",
+ "maps/base_s_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf",
**config["scenario"],
run=config["run"]["name"],
),
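# Illustrative sketch of how expand() materialises such targets, assuming a
# scenario block with clusters: [37], ll: ["vopt"], opts: [""], sector_opts:
# ["730H"], planning_horizons: [2030, 2040], a run named "my-run", and RESULTS
# expanding to "results/my-run/" (all assumed example values):
#
#   expand(
#       RESULTS + "maps/base_s_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf",
#       clusters=[37], ll=["vopt"], opts=[""], sector_opts=["730H"],
#       planning_horizons=[2030, 2040], run=["my-run"],
#   )
#   == ["results/my-run/maps/base_s_37_lvopt__730H-costs-all_2030.pdf",
#       "results/my-run/maps/base_s_37_lvopt__730H-costs-all_2040.pdf"]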
@ -82,14 +72,13 @@ rule solve_sector_networks_perfect:
rule validate_elec_networks:
input:
expand(
RESULTS
+ "figures/.statistics_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}",
RESULTS + "figures/.statistics_plots_base_s_{clusters}_elec_l{ll}_{opts}",
**config["scenario"],
run=config["run"]["name"],
),
expand(
RESULTS
+ "figures/.validation_{kind}_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}",
+ "figures/.validation_{kind}_plots_base_s_{clusters}_elec_l{ll}_{opts}",
**config["scenario"],
run=config["run"]["name"],
kind=["production", "prices", "cross_border"],

View File

@ -98,9 +98,7 @@ def memory(w):
if m is not None:
factor *= int(m.group(1)) / 8760
break
if w.clusters.endswith("m") or w.clusters.endswith("c"):
return int(factor * (55000 + 600 * int(w.clusters[:-1])))
elif w.clusters == "all":
if w.clusters == "all":
return int(factor * (18000 + 180 * 4000))
else:
return int(factor * (10000 + 195 * int(w.clusters)))
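# Worked example of the heuristic above, assuming factor == 1 (no time
# resolution option matched earlier in the function):
#
#   clusters == "128" -> int(10000 + 195 * 128)  ==  34960  # MB, ~35 GB
#   clusters == "all" -> int(18000 + 180 * 4000) == 738000  # MB
#
# The dropped first branch handled cluster values with an "m" or "c" suffix,
# e.g. "37m".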
@ -144,7 +142,7 @@ def solved_previous_horizon(w):
return (
RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_"
+ "postnetworks/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_"
+ planning_horizon_p
+ ".nc"
)

View File

@ -9,17 +9,15 @@ if config["foresight"] != "perfect":
params:
plotting=config_provider("plotting"),
input:
network=resources("networks/elec_s{simpl}_{clusters}.nc"),
regions_onshore=resources(
"regions_onshore_elec_s{simpl}_{clusters}.geojson"
),
network=resources("networks/base_s_{clusters}.nc"),
regions_onshore=resources("regions_onshore_base_s_{clusters}.geojson"),
output:
map=resources("maps/power-network-s{simpl}-{clusters}.pdf"),
map=resources("maps/power-network-s-{clusters}.pdf"),
threads: 1
resources:
mem_mb=4000,
benchmark:
benchmarks("plot_power_network_clustered/elec_s{simpl}_{clusters}")
benchmarks("plot_power_network_clustered/base_s_{clusters}")
conda:
"../envs/environment.yaml"
script:
@ -30,21 +28,21 @@ if config["foresight"] != "perfect":
plotting=config_provider("plotting"),
input:
network=RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
regions=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"),
+ "postnetworks/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
regions=resources("regions_onshore_base_s_{clusters}.geojson"),
output:
map=RESULTS
+ "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf",
+ "maps/base_s_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf",
threads: 2
resources:
mem_mb=10000,
log:
RESULTS
+ "logs/plot_power_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
+ "logs/plot_power_network/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
benchmark:
(
RESULTS
+ "benchmarks/plot_power_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
+ "benchmarks/plot_power_network/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
)
conda:
"../envs/environment.yaml"
@ -57,21 +55,21 @@ if config["foresight"] != "perfect":
foresight=config_provider("foresight"),
input:
network=RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
regions=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"),
+ "postnetworks/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
regions=resources("regions_onshore_base_s_{clusters}.geojson"),
output:
map=RESULTS
+ "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-h2_network_{planning_horizons}.pdf",
+ "maps/base_s_{clusters}_l{ll}_{opts}_{sector_opts}-h2_network_{planning_horizons}.pdf",
threads: 2
resources:
mem_mb=10000,
log:
RESULTS
+ "logs/plot_hydrogen_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
+ "logs/plot_hydrogen_network/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
benchmark:
(
RESULTS
+ "benchmarks/plot_hydrogen_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
+ "benchmarks/plot_hydrogen_network/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
)
conda:
"../envs/environment.yaml"
@ -83,21 +81,21 @@ if config["foresight"] != "perfect":
plotting=config_provider("plotting"),
input:
network=RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
regions=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"),
+ "postnetworks/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
regions=resources("regions_onshore_base_s_{clusters}.geojson"),
output:
map=RESULTS
+ "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-ch4_network_{planning_horizons}.pdf",
+ "maps/base_s_{clusters}_l{ll}_{opts}_{sector_opts}-ch4_network_{planning_horizons}.pdf",
threads: 2
resources:
mem_mb=10000,
log:
RESULTS
+ "logs/plot_gas_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
+ "logs/plot_gas_network/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
benchmark:
(
RESULTS
+ "benchmarks/plot_gas_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
+ "benchmarks/plot_gas_network/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
)
conda:
"../envs/environment.yaml"
@ -110,7 +108,7 @@ if config["foresight"] == "perfect":
def output_map_year(w):
return {
f"map_{year}": RESULTS
+ "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_"
+ "maps/base_s_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_"
+ f"{year}.pdf"
for year in config_provider("scenario", "planning_horizons")(w)
}
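# Illustrative sketch: with planning_horizons = [2030, 2050] (assumed values),
# the function above resolves to:
#
#   {
#       "map_2030": RESULTS + "maps/base_s_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_2030.pdf",
#       "map_2050": RESULTS + "maps/base_s_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_2050.pdf",
#   }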
@ -120,8 +118,8 @@ if config["foresight"] == "perfect":
plotting=config_provider("plotting"),
input:
network=RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
regions=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"),
+ "postnetworks/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
regions=resources("regions_onshore_base_s_{clusters}.geojson"),
output:
unpack(output_map_year),
threads: 2
@ -144,7 +142,7 @@ rule make_summary:
input:
networks=expand(
RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
+ "postnetworks/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
**config["scenario"],
allow_missing=True,
),
@ -158,20 +156,20 @@ rule make_summary:
)
),
ac_plot=expand(
resources("maps/power-network-s{simpl}-{clusters}.pdf"),
resources("maps/power-network-s-{clusters}.pdf"),
**config["scenario"],
allow_missing=True,
),
costs_plot=expand(
RESULTS
+ "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf",
+ "maps/base_s_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf",
**config["scenario"],
allow_missing=True,
),
h2_plot=lambda w: expand(
(
RESULTS
+ "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-h2_network_{planning_horizons}.pdf"
+ "maps/base_s_{clusters}_l{ll}_{opts}_{sector_opts}-h2_network_{planning_horizons}.pdf"
if config_provider("sector", "H2_network")(w)
else []
),
@ -181,7 +179,7 @@ rule make_summary:
ch4_plot=lambda w: expand(
(
RESULTS
+ "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-ch4_network_{planning_horizons}.pdf"
+ "maps/base_s_{clusters}_l{ll}_{opts}_{sector_opts}-ch4_network_{planning_horizons}.pdf"
if config_provider("sector", "gas_network")(w)
else []
),
@ -260,19 +258,19 @@ STATISTICS_BARPLOTS = [
]
rule plot_elec_statistics:
rule plot_base_statistics:
params:
plotting=config_provider("plotting"),
barplots=STATISTICS_BARPLOTS,
input:
network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
network=RESULTS + "networks/base_s_{clusters}_elec_l{ll}_{opts}.nc",
output:
**{
f"{plot}_bar": RESULTS
+ f"figures/statistics_{plot}_bar_elec_s{{simpl}}_{{clusters}}_ec_l{{ll}}_{{opts}}.pdf"
+ f"figures/statistics_{plot}_bar_base_s_{{clusters}}_elec_l{{ll}}_{{opts}}.pdf"
for plot in STATISTICS_BARPLOTS
},
barplots_touch=RESULTS
+ "figures/.statistics_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}",
+ "figures/.statistics_plots_base_s_{clusters}_elec_l{ll}_{opts}",
script:
"../scripts/plot_statistics.py"

View File

@ -13,19 +13,19 @@ rule solve_network:
),
custom_extra_functionality=input_custom_extra_functionality,
input:
network=resources("networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"),
network=resources("networks/base_s_{clusters}_elec_l{ll}_{opts}.nc"),
output:
network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
config=RESULTS + "configs/config.elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.yaml",
network=RESULTS + "networks/base_s_{clusters}_elec_l{ll}_{opts}.nc",
config=RESULTS + "configs/config.base_s_{clusters}_elec_l{ll}_{opts}.yaml",
log:
solver=normpath(
RESULTS
+ "logs/solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_solver.log"
+ "logs/solve_network/base_s_{clusters}_elec_l{ll}_{opts}_solver.log"
),
python=RESULTS
+ "logs/solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_python.log",
+ "logs/solve_network/base_s_{clusters}_elec_l{ll}_{opts}_python.log",
benchmark:
(RESULTS + "benchmarks/solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}")
(RESULTS + "benchmarks/solve_network/base_s_{clusters}_elec_l{ll}_{opts}")
threads: solver_threads
resources:
mem_mb=memory,
@ -49,20 +49,20 @@ rule solve_operations_network:
),
custom_extra_functionality=input_custom_extra_functionality,
input:
network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
network=RESULTS + "networks/base_s_{clusters}_elec_l{ll}_{opts}.nc",
output:
network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op.nc",
network=RESULTS + "networks/base_s_{clusters}_elec_l{ll}_{opts}_op.nc",
log:
solver=normpath(
RESULTS
+ "logs/solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_solver.log"
+ "logs/solve_operations_network/base_s_{clusters}_elec_l{ll}_{opts}_op_solver.log"
),
python=RESULTS
+ "logs/solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_python.log",
+ "logs/solve_operations_network/base_s_{clusters}_elec_l{ll}_{opts}_op_python.log",
benchmark:
(
RESULTS
+ "benchmarks/solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}"
+ "benchmarks/solve_operations_network/base_s_{clusters}_elec_l{ll}_{opts}"
)
threads: 4
resources:

View File

@ -13,24 +13,24 @@ rule add_existing_baseyear:
energy_totals_year=config_provider("energy", "energy_totals_year"),
input:
network=RESULTS
+ "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
powerplants=resources("powerplants.csv"),
busmap_s=resources("busmap_elec_s{simpl}.csv"),
busmap=resources("busmap_elec_s{simpl}_{clusters}.csv"),
clustered_pop_layout=resources("pop_layout_elec_s{simpl}_{clusters}.csv"),
+ "prenetworks/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
powerplants=resources("powerplants_s_{clusters}.csv"),
busmap_s=resources("busmap_base_s.csv"),
busmap=resources("busmap_base_s_{clusters}.csv"),
clustered_pop_layout=resources("pop_layout_base_s_{clusters}.csv"),
costs=lambda w: resources(
"costs_{}.csv".format(
config_provider("scenario", "planning_horizons", 0)(w)
)
),
cop_profiles=resources("cop_profiles_elec_s{simpl}_{clusters}.nc"),
cop_profiles=resources("cop_profiles_base_s_{clusters}.nc"),
existing_heating_distribution=resources(
"existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv"
"existing_heating_distribution_base_s_{clusters}_{planning_horizons}.csv"
),
heating_efficiencies=resources("heating_efficiencies.csv"),
output:
RESULTS
+ "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
+ "prenetworks-brownfield/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
wildcard_constraints:
# TODO: The first planning_horizon needs to be aligned across scenarios
# snakemake does not support passing functions to wildcard_constraints
@ -41,11 +41,11 @@ rule add_existing_baseyear:
mem_mb=2000,
log:
RESULTS
+ "logs/add_existing_baseyear_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
+ "logs/add_existing_baseyear_base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
benchmark:
(
RESULTS
+ "benchmarks/add_existing_baseyear/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
+ "benchmarks/add_existing_baseyear/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
)
conda:
"../envs/environment.yaml"
@ -55,7 +55,7 @@ rule add_existing_baseyear:
def input_profile_tech_brownfield(w):
return {
f"profile_{tech}": resources(f"profile_{tech}.nc")
f"profile_{tech}": resources("profile_{clusters}_" + tech + ".nc")
for tech in config_provider("electricity", "renewable_carriers")(w)
if tech != "hydro"
}
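# Illustrative sketch: unlike the input_profile_tech helper earlier in this
# diff, this variant skips hydro entirely. With renewable_carriers = ["onwind",
# "hydro"] (assumed values) it resolves to:
#
#   {"profile_onwind": resources("profile_{clusters}_onwind.nc")}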
@ -74,26 +74,26 @@ rule add_brownfield:
heat_pump_sources=config_provider("sector", "heat_pump_sources"),
input:
unpack(input_profile_tech_brownfield),
simplify_busmap=resources("busmap_elec_s{simpl}.csv"),
cluster_busmap=resources("busmap_elec_s{simpl}_{clusters}.csv"),
simplify_busmap=resources("busmap_base_s.csv"),
cluster_busmap=resources("busmap_base_s_{clusters}.csv"),
network=RESULTS
+ "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
+ "prenetworks/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
network_p=solved_previous_horizon,  # solved network at previous time step
costs=resources("costs_{planning_horizons}.csv"),
cop_profiles=resources("cop_profiles_elec_s{simpl}_{clusters}.nc"),
cop_profiles=resources("cop_profiles_base_s_{clusters}.nc"),
output:
RESULTS
+ "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
+ "prenetworks-brownfield/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
threads: 4
resources:
mem_mb=10000,
log:
RESULTS
+ "logs/add_brownfield_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
+ "logs/add_brownfield_base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
benchmark:
(
RESULTS
+ "benchmarks/add_brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
+ "benchmarks/add_brownfield/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
)
conda:
"../envs/environment.yaml"
@ -115,22 +115,22 @@ rule solve_sector_network_myopic:
custom_extra_functionality=input_custom_extra_functionality,
input:
network=RESULTS
+ "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
+ "prenetworks-brownfield/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
costs=resources("costs_{planning_horizons}.csv"),
output:
network=RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
+ "postnetworks/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
config=RESULTS
+ "configs/config.elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.yaml",
+ "configs/config.base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.yaml",
shadow:
"shallow"
log:
solver=RESULTS
+ "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_solver.log",
+ "logs/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_solver.log",
memory=RESULTS
+ "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_memory.log",
+ "logs/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_memory.log",
python=RESULTS
+ "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log",
+ "logs/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log",
threads: solver_threads
resources:
mem_mb=config_provider("solving", "mem_mb"),
@ -138,7 +138,7 @@ rule solve_sector_network_myopic:
benchmark:
(
RESULTS
+ "benchmarks/solve_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
+ "benchmarks/solve_sector_network/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
)
conda:
"../envs/environment.yaml"

View File

@ -14,21 +14,21 @@ rule solve_sector_network:
custom_extra_functionality=input_custom_extra_functionality,
input:
network=RESULTS
+ "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
+ "prenetworks/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
output:
network=RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
+ "postnetworks/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
config=RESULTS
+ "configs/config.elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.yaml",
+ "configs/config.base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.yaml",
shadow:
"shallow"
log:
solver=RESULTS
+ "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_solver.log",
+ "logs/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_solver.log",
memory=RESULTS
+ "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_memory.log",
+ "logs/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_memory.log",
python=RESULTS
+ "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log",
+ "logs/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log",
threads: solver_threads
resources:
mem_mb=config_provider("solving", "mem_mb"),
@ -36,7 +36,7 @@ rule solve_sector_network:
benchmark:
(
RESULTS
+ "benchmarks/solve_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
+ "benchmarks/solve_sector_network/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
)
conda:
"../envs/environment.yaml"

View File

@ -11,25 +11,25 @@ rule add_existing_baseyear:
energy_totals_year=config_provider("energy", "energy_totals_year"),
input:
network=RESULTS
+ "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
powerplants=resources("powerplants.csv"),
busmap_s=resources("busmap_elec_s{simpl}.csv"),
busmap=resources("busmap_elec_s{simpl}_{clusters}.csv"),
clustered_pop_layout=resources("pop_layout_elec_s{simpl}_{clusters}.csv"),
+ "prenetworks/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
powerplants=resources("powerplants_s_{clusters}.csv"),
busmap_s=resources("busmap_base_s.csv"),
busmap=resources("busmap_base_s_{clusters}.csv"),
clustered_pop_layout=resources("pop_layout_base_s_{clusters}.csv"),
costs=lambda w: resources(
"costs_{}.csv".format(
config_provider("scenario", "planning_horizons", 0)(w)
)
),
cop_profiles=resources("cop_profiles_elec_s{simpl}_{clusters}.nc"),
cop_profiles=resources("cop_profiles_base_s_{clusters}.nc"),
existing_heating_distribution=resources(
"existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv"
"existing_heating_distribution_base_s_{clusters}_{planning_horizons}.csv"
),
existing_heating="data/existing_infrastructure/existing_heating_raw.csv",
heating_efficiencies=resources("heating_efficiencies.csv"),
output:
RESULTS
+ "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
+ "prenetworks-brownfield/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
wildcard_constraints:
planning_horizons=config["scenario"]["planning_horizons"][0], #only applies to baseyear
threads: 1
@ -38,11 +38,11 @@ rule add_existing_baseyear:
runtime=config_provider("solving", "runtime", default="24h"),
log:
logs(
"add_existing_baseyear_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log"
"add_existing_baseyear_base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log"
),
benchmark:
benchmarks(
"add_existing_baseyear/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
"add_existing_baseyear/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
)
conda:
"../envs/environment.yaml"
@ -53,7 +53,7 @@ rule add_existing_baseyear:
def input_network_year(w):
return {
f"network_{year}": RESULTS
+ "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}"
+ "prenetworks/base_s_{clusters}_l{ll}_{opts}_{sector_opts}"
+ f"_{year}.nc"
for year in config_provider("scenario", "planning_horizons")(w)[1:]
}
@ -68,25 +68,21 @@ rule prepare_perfect_foresight:
brownfield_network=lambda w: (
RESULTS
+ "prenetworks-brownfield/"
+ "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_"
+ "base_s_{clusters}_l{ll}_{opts}_{sector_opts}_"
+ "{}.nc".format(
str(config_provider("scenario", "planning_horizons", 0)(w))
)
),
output:
RESULTS
+ "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
+ "prenetworks-brownfield/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
threads: 2
resources:
mem_mb=10000,
log:
logs(
"prepare_perfect_foresight{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}.log"
),
logs("prepare_perfect_foresight_{clusters}_l{ll}_{opts}_{sector_opts}.log"),
benchmark:
benchmarks(
"prepare_perfect_foresight{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}"
)
benchmarks("prepare_perfect_foresight_{clusters}_l{ll}_{opts}_{sector_opts}")
conda:
"../envs/environment.yaml"
script:
@ -105,13 +101,13 @@ rule solve_sector_network_perfect:
custom_extra_functionality=input_custom_extra_functionality,
input:
network=RESULTS
+ "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
+ "prenetworks-brownfield/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
costs=resources("costs_2030.csv"),
output:
network=RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
+ "postnetworks/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
config=RESULTS
+ "configs/config.elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.yaml",
+ "configs/config.base_s_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.yaml",
threads: solver_threads
resources:
mem_mb=config_provider("solving", "mem"),
@ -119,15 +115,15 @@ rule solve_sector_network_perfect:
"shallow"
log:
solver=RESULTS
+ "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_solver.log",
+ "logs/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_solver.log",
python=RESULTS
+ "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_python.log",
+ "logs/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_python.log",
memory=RESULTS
+ "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_memory.log",
+ "logs/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_memory.log",
benchmark:
(
RESULTS
+ "benchmarks/solve_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years}"
+ "benchmarks/solve_sector_network/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years}"
)
conda:
"../envs/environment.yaml"
@ -137,9 +133,8 @@ rule solve_sector_network_perfect:
def input_networks_make_summary_perfect(w):
return {
f"networks_{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}": RESULTS
+ f"postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc"
for simpl in config_provider("scenario", "simpl")(w)
f"networks_s_{clusters}_l{ll}_{opts}_{sector_opts}": RESULTS
+ f"postnetworks/base_s_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc"
for clusters in config_provider("scenario", "clusters")(w)
for opts in config_provider("scenario", "opts")(w)
for sector_opts in config_provider("scenario", "sector_opts")(w)

View File

@ -69,16 +69,16 @@ rule build_electricity_prices:
rule plot_validation_electricity_production:
input:
network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
network=RESULTS + "networks/base_s_{clusters}_elec_l{ll}_{opts}.nc",
electricity_production=resources("historical_electricity_production.csv"),
output:
**{
plot: RESULTS
+ f"figures/validation_{plot}_elec_s{{simpl}}_{{clusters}}_ec_l{{ll}}_{{opts}}.pdf"
+ f"figures/validation_{plot}_base_s_{{clusters}}_elec_l{{ll}}_{{opts}}.pdf"
for plot in PRODUCTION_PLOTS
},
plots_touch=RESULTS
+ "figures/.validation_production_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}",
+ "figures/.validation_production_plots_base_s_{clusters}_elec_l{ll}_{opts}",
script:
"../scripts/plot_validation_electricity_production.py"
@ -87,31 +87,31 @@ rule plot_validation_cross_border_flows:
params:
countries=config_provider("countries"),
input:
network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
network=RESULTS + "networks/base_s_{clusters}_elec_l{ll}_{opts}.nc",
cross_border_flows=resources("historical_cross_border_flows.csv"),
output:
**{
plot: RESULTS
+ f"figures/validation_{plot}_elec_s{{simpl}}_{{clusters}}_ec_l{{ll}}_{{opts}}.pdf"
+ f"figures/validation_{plot}_base_s_{{clusters}}_elec_l{{ll}}_{{opts}}.pdf"
for plot in CROSS_BORDER_PLOTS
},
plots_touch=RESULTS
+ "figures/.validation_cross_border_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}",
+ "figures/.validation_cross_border_plots_base_s_{clusters}_elec_l{ll}_{opts}",
script:
"../scripts/plot_validation_cross_border_flows.py"
rule plot_validation_electricity_prices:
input:
network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
network=RESULTS + "networks/base_s_{clusters}_elec_l{ll}_{opts}.nc",
electricity_prices=resources("historical_electricity_prices.csv"),
output:
**{
plot: RESULTS
+ f"figures/validation_{plot}_elec_s{{simpl}}_{{clusters}}_ec_l{{ll}}_{{opts}}.pdf"
+ f"figures/validation_{plot}_base_s_{{clusters}}_elec_l{{ll}}_{{opts}}.pdf"
for plot in PRICES_PLOTS
},
plots_touch=RESULTS
+ "figures/.validation_prices_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}",
+ "figures/.validation_prices_plots_base_s_{clusters}_elec_l{ll}_{opts}",
script:
"../scripts/plot_validation_electricity_prices.py"

View File

@ -170,14 +170,6 @@ def adjust_renewable_profiles(n, input_profiles, params, year):
using the latest year below or equal to the selected year.
"""
# spatial clustering
cluster_busmap = pd.read_csv(snakemake.input.cluster_busmap, index_col=0).squeeze()
simplify_busmap = pd.read_csv(
snakemake.input.simplify_busmap, index_col=0
).squeeze()
clustermaps = simplify_busmap.map(cluster_busmap)
clustermaps.index = clustermaps.index.astype(str)
# temporal clustering
dr = get_snapshots(params["snapshots"], params["drop_leap_day"])
snapshotmaps = (
@ -202,11 +194,6 @@ def adjust_renewable_profiles(n, input_profiles, params, year):
.transpose("time", "bus")
.to_pandas()
)
# spatial clustering
weight = ds["weight"].sel(year=closest_year).to_pandas()
weight = weight.groupby(clustermaps).transform(normed_or_uniform)
p_max_pu = (p_max_pu * weight).T.groupby(clustermaps).sum().T
p_max_pu.columns = p_max_pu.columns + f" {carrier}"
# temporal_clustering
@ -222,7 +209,6 @@ if __name__ == "__main__":
snakemake = mock_snakemake(
"add_brownfield",
simpl="",
clusters="37",
opts="",
ll="v1.0",

View File

@ -3,7 +3,8 @@
#
# SPDX-License-Identifier: MIT
"""
Adds electrical generators and existing hydro storage units to a base network.
Adds existing electrical generators and hydro-electric plants as well as
greenfield battery and hydrogen storage to the clustered network.
Relevant Settings
-----------------
@ -11,19 +12,11 @@ Relevant Settings
.. code:: yaml
costs:
year:
version:
dicountrate:
emission_prices:
year: version: discountrate: emission_prices:
electricity:
max_hours:
marginal_cost:
capital_cost:
conventional_carriers:
co2limit:
extendable_carriers:
estimate_renewable_capacities:
max_hours: marginal_cost: capital_cost: conventional_carriers: co2limit:
extendable_carriers: estimate_renewable_capacities:
load:
@ -31,13 +24,14 @@ Relevant Settings
renewable:
hydro:
carriers:
hydro_max_hours:
hydro_capital_cost:
carriers: hydro_max_hours: hydro_capital_cost:
lines:
length_factor:
links:
length_factor:
.. seealso::
Documentation of the configuration file ``config/config.yaml`` at :ref:`costs_cf`,
:ref:`electricity_cf`, :ref:`load_cf`, :ref:`renewable_cf`, :ref:`lines_cf`
@ -45,23 +39,31 @@ Relevant Settings
Inputs
------
- ``resources/costs.csv``: The database of cost assumptions for all included technologies for specific years from various sources; e.g. discount rate, lifetime, investment (CAPEX), fixed operation and maintenance (FOM), variable operation and maintenance (VOM), fuel costs, efficiency, carbon-dioxide intensity.
- ``data/hydro_capacities.csv``: Hydropower plant store/discharge power capacities, energy storage capacity, and average hourly inflow by country.
- ``resources/costs.csv``: The database of cost assumptions for all included
technologies for specific years from various sources; e.g. discount rate,
lifetime, investment (CAPEX), fixed operation and maintenance (FOM), variable
operation and maintenance (VOM), fuel costs, efficiency, carbon-dioxide
intensity.
- ``data/hydro_capacities.csv``: Hydropower plant store/discharge power
capacities, energy storage capacity, and average hourly inflow by country.
.. image:: img/hydrocapacities.png
:scale: 34 %
- ``resources/electricity_demand.csv`` Hourly per-country electricity demand profiles.
- ``resources/regions_onshore.geojson``: confer :ref:`busregions`
- ``resources/electricity_demand_base_s.nc`` Hourly nodal electricity demand
profiles.
- ``resources/regions_onshore_base_s_{clusters}.geojson``: confer
:ref:`busregions`
- ``resources/nuts3_shapes.geojson``: confer :ref:`shapes`
- ``resources/powerplants.csv``: confer :ref:`powerplants`
- ``resources/profile_{}.nc``: all technologies in ``config["renewables"].keys()``, confer :ref:`renewableprofiles`.
- ``networks/base.nc``: confer :ref:`base`
- ``resources/powerplants_s_{clusters}.csv``: confer :ref:`powerplants`
- ``resources/profile_{clusters}_{}.nc``: all technologies in
``config["renewables"].keys()``, confer :ref:`renewableprofiles`.
- ``networks/base_s_{clusters}.nc``
Outputs
-------
- ``networks/elec.nc``:
- ``networks/base_s_{clusters}_elec.nc``:
.. image:: img/elec.png
:scale: 33 %
@ -69,29 +71,53 @@ Outputs
Description
-----------
The rule :mod:`add_electricity` ties all the different data inputs from the preceding rules together into a detailed PyPSA network that is stored in ``networks/elec.nc``. It includes:
The rule :mod:`add_electricity` ties all the different data inputs from the
preceding rules together into a detailed PyPSA network that is stored in
``networks/base_s_{clusters}_elec.nc``. It includes:
- today's transmission topology and transfer capacities (optionally including lines which are under construction according to the config settings ``lines: under_construction`` and ``links: under_construction``),
- today's thermal and hydro power generation capacities (for the technologies listed in the config setting ``electricity: conventional_carriers``), and
- today's load time-series (upsampled in a top-down approach according to population and gross domestic product)
- today's transmission topology and transfer capacities (optionally including
lines which are under construction according to the config settings ``lines:
under_construction`` and ``links: under_construction``),
- today's thermal and hydro power generation capacities (for the technologies
listed in the config setting ``electricity: conventional_carriers``), and
- today's load time-series (upsampled in a top-down approach according to
population and gross domestic product)
It further adds extendable ``generators`` with **zero** capacity for
- photovoltaic, onshore and AC- as well as DC-connected offshore wind installations with today's locational, hourly wind and solar capacity factors (but **no** current capacities),
- additional open- and combined-cycle gas turbines (if ``OCGT`` and/or ``CCGT`` is listed in the config setting ``electricity: extendable_carriers``)
- photovoltaic, onshore and AC- as well as DC-connected offshore wind
installations with today's locational, hourly wind and solar capacity factors
(but **no** current capacities),
- additional open- and combined-cycle gas turbines (if ``OCGT`` and/or ``CCGT``
is listed in the config setting ``electricity: extendable_carriers``)
Furthermore, it attaches additional extendable components to the clustered
network with **zero** initial capacity:
- ``StorageUnits`` of carrier 'H2' and/or 'battery'. If this option is chosen,
every bus is given an extendable ``StorageUnit`` of the corresponding carrier.
The energy and power capacities are linked through a parameter that specifies
the energy capacity as maximum hours at full dispatch power and is configured
in ``electricity: max_hours:``. This linkage leads to one investment variable
per storage unit. The default ``max_hours`` lead to long-term hydrogen and
short-term battery storage units.
- ``Stores`` of carrier 'H2' and/or 'battery' in combination with ``Links``. If
this option is chosen, the script adds extra buses with corresponding carrier
where energy ``Stores`` are attached and which are connected to the
corresponding power buses via two links, one each for charging and
discharging. This leads to three investment variables for the energy capacity,
charging and discharging capacity of the storage unit.
"""
import logging
from itertools import product
from pathlib import Path
from typing import Dict, List
import geopandas as gpd
import numpy as np
import pandas as pd
import powerplantmatching as pm
import pypsa
import scipy.sparse as sparse
import xarray as xr
from _helpers import (
configure_logging,
@ -100,7 +126,7 @@ from _helpers import (
update_p_nom_max,
)
from powerplantmatching.export import map_country_bus
from shapely.prepared import prep
from pypsa.clustering.spatial import DEFAULT_ONE_PORT_STRATEGIES, normed_or_uniform
idx = pd.IndexSlice
@ -263,7 +289,20 @@ def load_costs(tech_costs, config, max_hours, Nyears=1.0):
return costs
def load_powerplants(ppl_fn):
def load_and_aggregate_powerplants(
ppl_fn: str,
costs: pd.DataFrame,
consider_efficiency_classes: bool = False,
aggregation_strategies: dict = None,
exclude_carriers: list = None,
) -> pd.DataFrame:
if not aggregation_strategies:
aggregation_strategies = {}
if not exclude_carriers:
exclude_carriers = []
carrier_dict = {
"ocgt": "OCGT",
"ccgt": "CCGT",
@ -271,94 +310,120 @@ def load_powerplants(ppl_fn):
"ccgt, thermal": "CCGT",
"hard coal": "coal",
}
return (
tech_dict = {
"Run-Of-River": "ror",
"Reservoir": "hydro",
"Pumped Storage": "PHS",
}
ppl = (
pd.read_csv(ppl_fn, index_col=0, dtype={"bus": "str"})
.powerplant.to_pypsa_names()
.rename(columns=str.lower)
.replace({"carrier": carrier_dict})
.replace({"carrier": carrier_dict, "technology": tech_dict})
)
# Replace carriers "natural gas" and "hydro" with the respective technology;
# OCGT or CCGT and hydro, PHS, or ror)
ppl["carrier"] = ppl.carrier.where(
~ppl.carrier.isin(["hydro", "natural gas"]), ppl.technology
)
def shapes_to_shapes(orig, dest):
"""
Adopted from vresutils.transfer.Shapes2Shapes()
"""
orig_prepped = list(map(prep, orig))
transfer = sparse.lil_matrix((len(dest), len(orig)), dtype=float)
cost_columns = [
"VOM",
"FOM",
"efficiency",
"capital_cost",
"marginal_cost",
"fuel",
"lifetime",
]
ppl = ppl.join(costs[cost_columns], on="carrier", rsuffix="_r")
for i, j in product(range(len(dest)), range(len(orig))):
if orig_prepped[j].intersects(dest.iloc[i]):
area = orig.iloc[j].intersection(dest.iloc[i]).area
transfer[i, j] = area / dest.iloc[i].area
ppl["efficiency"] = ppl.efficiency.combine_first(ppl.efficiency_r)
ppl["lifetime"] = (ppl.dateout - ppl.datein).fillna(np.inf)
ppl["build_year"] = ppl.datein.fillna(0).astype(int)
ppl["marginal_cost"] = (
ppl.carrier.map(costs.VOM) + ppl.carrier.map(costs.fuel) / ppl.efficiency
)
return transfer
strategies = {
**DEFAULT_ONE_PORT_STRATEGIES,
**{"country": "first"},
**aggregation_strategies.get("generators", {}),
}
strategies = {k: v for k, v in strategies.items() if k in ppl.columns}
to_aggregate = ~ppl.carrier.isin(exclude_carriers)
df = ppl[to_aggregate].copy()
if consider_efficiency_classes:
for c in df.carrier.unique():
df_c = df.query("carrier == @c")
low = df_c.efficiency.quantile(0.10)
high = df_c.efficiency.quantile(0.90)
if low < high:
labels = ["low", "medium", "high"]
suffix = pd.cut(
df_c.efficiency, bins=[0, low, high, 1], labels=labels
).astype(str)
df.update({"carrier": df_c.carrier + " " + suffix + " efficiency"})
grouper = ["bus", "carrier"]
weights = df.groupby(grouper).p_nom.transform(normed_or_uniform)
for k, v in strategies.items():
if v == "capacity_weighted_average":
df[k] = df[k] * weights
strategies[k] = pd.Series.sum
aggregated = df.groupby(grouper, as_index=False).agg(strategies)
aggregated.index = aggregated.bus + " " + aggregated.carrier
aggregated.build_year = aggregated.build_year.astype(int)
disaggregated = ppl[~to_aggregate][aggregated.columns].copy()
disaggregated.index = (
disaggregated.bus
+ " "
+ disaggregated.carrier
+ " "
+ disaggregated.index.astype(str)
)
return pd.concat([aggregated, disaggregated])
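# Editor's illustration (hypothetical plants): the "capacity_weighted_average"
# strategy above turns a plain mean into a p_nom-weighted mean. For two CCGT
# units at one bus with p_nom 100/300 MW and efficiencies 0.35/0.55:
#
#     weights = pd.Series([0.25, 0.75])                # p_nom normed to 1
#     eff = (pd.Series([0.35, 0.55]) * weights).sum()  # -> 0.50
#
# which is exactly what the transform/multiply/sum combination computes per
# (bus, carrier) group.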
def attach_load(
n, regions, load, nuts3_shapes, gdp_pop_non_nuts3, countries, scaling=1.0
):
substation_lv_i = n.buses.index[n.buses["substation_lv"]]
gdf_regions = gpd.read_file(regions).set_index("name").reindex(substation_lv_i)
opsd_load = pd.read_csv(load, index_col=0, parse_dates=True).filter(items=countries)
n: pypsa.Network,
load_fn: str,
busmap_fn: str,
scaling: float = 1.0,
) -> None:
logger.info(f"Load data scaled by factor {scaling}.")
opsd_load *= scaling
nuts3 = gpd.read_file(nuts3_shapes).set_index("index")
def upsample(cntry, group, gdp_pop_non_nuts3):
load = opsd_load[cntry]
if len(group) == 1:
return pd.DataFrame({group.index[0]: load})
nuts3_cntry = nuts3.loc[nuts3.country == cntry]
transfer = shapes_to_shapes(group, nuts3_cntry.geometry).T.tocsr()
gdp_n = pd.Series(
transfer.dot(nuts3_cntry["gdp"].fillna(1.0).values), index=group.index
)
pop_n = pd.Series(
transfer.dot(nuts3_cntry["pop"].fillna(1.0).values), index=group.index
)
# relative factors 0.6 and 0.4 have been determined from a linear
# regression on the country to continent load data
factors = normed(0.6 * normed(gdp_n) + 0.4 * normed(pop_n))
if cntry in ["UA", "MD"]:
# overwrite factor because nuts3 provides no data for UA+MD
gdp_pop_non_nuts3 = gpd.read_file(gdp_pop_non_nuts3).set_index("Bus")
gdp_pop_non_nuts3 = gdp_pop_non_nuts3.loc[
(gdp_pop_non_nuts3.country == cntry)
& (gdp_pop_non_nuts3.index.isin(substation_lv_i))
]
factors = normed(
0.6 * normed(gdp_pop_non_nuts3["gdp"])
+ 0.4 * normed(gdp_pop_non_nuts3["pop"])
)
return pd.DataFrame(
factors.values * load.values[:, np.newaxis],
index=load.index,
columns=factors.index,
)
load = pd.concat(
[
upsample(cntry, group, gdp_pop_non_nuts3)
for cntry, group in gdf_regions.geometry.groupby(gdf_regions.country)
],
axis=1,
load = (
xr.open_dataarray(load_fn).to_dataframe().squeeze(axis=1).unstack(level="time")
)
n.madd(
"Load", substation_lv_i, bus=substation_lv_i, p_set=load
) # carrier="electricity"
# apply clustering busmap
busmap = pd.read_csv(busmap_fn, dtype=str).set_index("Bus").squeeze()
load = load.groupby(busmap).sum().T
logger.info(f"Load data scaled by factor {scaling}.")
load *= scaling
n.madd("Load", load.columns, bus=load.columns, p_set=load) # carrier="electricity"
def update_transmission_costs(n, costs, length_factor=1.0):
# TODO: line length factor of lines is applied to lines and links.
# Separate the function to distinguish.
def set_transmission_costs(
n: pypsa.Network,
costs: pd.DataFrame,
line_length_factor: float = 1.0,
link_length_factor: float = 1.0,
) -> None:
n.lines["capital_cost"] = (
n.lines["length"] * length_factor * costs.at["HVAC overhead", "capital_cost"]
n.lines["length"]
* line_length_factor
* costs.at["HVAC overhead", "capital_cost"]
)
if n.links.empty:
@ -373,7 +438,7 @@ def update_transmission_costs(n, costs, length_factor=1.0):
costs = (
n.links.loc[dc_b, "length"]
* length_factor
* link_length_factor
* (
(1.0 - n.links.loc[dc_b, "underwater_fraction"])
* costs.at["HVDC overhead", "capital_cost"]
@ -386,13 +451,25 @@ def update_transmission_costs(n, costs, length_factor=1.0):
def attach_wind_and_solar(
n, costs, input_profiles, carriers, extendable_carriers, line_length_factor=1
):
n: pypsa.Network,
costs: pd.DataFrame,
input_profiles: str,
carriers: list | set,
extendable_carriers: list | set,
line_length_factor: float = 1.0,
landfall_lengths: dict = None,
) -> None:
add_missing_carriers(n, carriers)
if landfall_lengths is None:
landfall_lengths = {}
for car in carriers:
if car == "hydro":
continue
landfall_length = landfall_lengths.get(car, 0.0)
with xr.open_dataset(getattr(input_profiles, "profile_" + car)) as ds:
if ds.indexes["bus"].empty:
continue
@ -403,17 +480,15 @@ def attach_wind_and_solar(
supcar = car.split("-", 2)[0]
if supcar == "offwind":
underwater_fraction = ds["underwater_fraction"].to_pandas()
connection_cost = (
line_length_factor
* ds["average_distance"].to_pandas()
* (
underwater_fraction
* costs.at[car + "-connection-submarine", "capital_cost"]
+ (1.0 - underwater_fraction)
* costs.at[car + "-connection-underground", "capital_cost"]
)
distance = ds["average_distance"].to_pandas()
submarine_cost = costs.at[car + "-connection-submarine", "capital_cost"]
underground_cost = costs.at[
car + "-connection-underground", "capital_cost"
]
connection_cost = line_length_factor * (
distance * submarine_cost + landfall_length * underground_cost
)
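# Editor's note (illustrative numbers, not from the commit): with an
# average_distance of 40 km, a landfall_length of 10 km and per-unit costs
# c_sub and c_und, the connection cost per MW becomes
#
#     line_length_factor * (40 * c_sub + 10 * c_und)
#
# i.e. a fixed landfall length now replaces the previous split of a single
# distance by underwater_fraction.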
capital_cost = (
costs.at["offwind", "capital_cost"]
+ costs.at[car + "-station", "capital_cost"]
@ -435,7 +510,6 @@ def attach_wind_and_solar(
carrier=car,
p_nom_extendable=car in extendable_carriers["Generator"],
p_nom_max=ds["p_nom_max"].to_pandas(),
weight=ds["weight"].to_pandas(),
marginal_cost=costs.at[supcar, "marginal_cost"],
capital_cost=capital_cost,
efficiency=costs.at[supcar, "efficiency"],
@ -457,19 +531,7 @@ def attach_conventional_generators(
):
carriers = list(set(conventional_carriers) | set(extendable_carriers["Generator"]))
# Replace carrier "natural gas" with the respective technology (OCGT or
# CCGT) to align with PyPSA names of "carriers" and avoid filtering "natural
# gas" powerplants in ppl.query("carrier in @carriers")
ppl.loc[ppl["carrier"] == "natural gas", "carrier"] = ppl.loc[
ppl["carrier"] == "natural gas", "technology"
]
ppl = (
ppl.query("carrier in @carriers")
.join(costs, on="carrier", rsuffix="_r")
.rename(index=lambda s: f"C{str(s)}")
)
ppl["efficiency"] = ppl.efficiency.fillna(ppl.efficiency_r)
ppl = ppl.query("carrier in @carriers")
# reduce carriers to those in power plant dataset
carriers = list(set(carriers) & set(ppl.carrier.unique()))
@ -496,13 +558,11 @@ def attach_conventional_generators(
fuel_price.columns = ppl.index
marginal_cost = fuel_price.div(ppl.efficiency).add(ppl.carrier.map(costs.VOM))
else:
marginal_cost = (
ppl.carrier.map(costs.VOM) + ppl.carrier.map(costs.fuel) / ppl.efficiency
)
marginal_cost = ppl.marginal_cost
# Define generators using modified ppl DataFrame
caps = ppl.groupby("carrier").p_nom.sum().div(1e3).round(2)
logger.info(f"Adding {len(ppl)} generators with capacities [GW] \n{caps}")
logger.info(f"Adding {len(ppl)} generators with capacities [GW]pp \n{caps}")
n.madd(
"Generator",
@ -515,8 +575,8 @@ def attach_conventional_generators(
efficiency=ppl.efficiency,
marginal_cost=marginal_cost,
capital_cost=ppl.capital_cost,
build_year=ppl.datein.fillna(0).astype(int),
lifetime=(ppl.dateout - ppl.datein).fillna(np.inf),
build_year=ppl.build_year,
lifetime=ppl.lifetime,
**committable_attrs,
)
@ -546,14 +606,9 @@ def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **par
add_missing_carriers(n, carriers)
add_co2_emissions(n, costs, carriers)
ppl = (
ppl.query('carrier == "hydro"')
.reset_index(drop=True)
.rename(index=lambda s: f"{str(s)} hydro")
)
ror = ppl.query('technology == "Run-Of-River"')
phs = ppl.query('technology == "Pumped Storage"')
hydro = ppl.query('technology == "Reservoir"')
ror = ppl.query('carrier == "ror"')
phs = ppl.query('carrier == "PHS"')
hydro = ppl.query('carrier == "hydro"')
country = ppl["bus"].map(n.buses.country).rename("country")
@ -618,7 +673,7 @@ def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **par
if "hydro" in carriers and not hydro.empty:
hydro_max_hours = params.get("hydro_max_hours")
assert hydro_max_hours is not None, "No path for hydro capacities given."
assert hydro_capacities is not None, "No path for hydro capacities given."
hydro_stats = pd.read_csv(
hydro_capacities, comment="#", na_values="-", index_col=0
@ -626,7 +681,13 @@ def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **par
e_target = hydro_stats["E_store[TWh]"].clip(lower=0.2) * 1e6
e_installed = hydro.eval("p_nom * max_hours").groupby(hydro.country).sum()
e_missing = e_target - e_installed
missing_mh_i = hydro.query("max_hours.isnull()").index
missing_mh_i = hydro.query("max_hours.isnull() or max_hours == 0").index
# some countries may have missing storage capacity but only one plant
# which needs to be scaled to the target storage capacity
missing_mh_single_i = hydro.index[
~hydro.country.duplicated() & hydro.country.isin(e_missing.dropna().index)
]
missing_mh_i = missing_mh_i.union(missing_mh_single_i)
if hydro_max_hours == "energy_capacity_totals_by_country":
# watch out some p_nom values like IE's are totally underrepresented
@ -649,7 +710,8 @@ def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **par
f'Assuming max_hours=6 for hydro reservoirs in the countries: {", ".join(missing_countries)}'
)
hydro_max_hours = hydro.max_hours.where(
hydro.max_hours > 0, hydro.country.map(max_hours_country)
(hydro.max_hours > 0) & ~hydro.index.isin(missing_mh_single_i),
hydro.country.map(max_hours_country),
).fillna(6)
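# Editor's sketch (a hedged reading of the surrounding code, with invented
# numbers): a country reporting e_target = 500 GWh against e_installed =
# 200 GWh leaves e_missing = 300 GWh, which translates into a country-level
# max_hours of roughly e_missing / sum(p_nom) that is then mapped onto the
# plants collected in missing_mh_i via hydro.country.map(max_hours_country).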
if params.get("flatten_dispatch", False):
@ -775,64 +837,144 @@ def estimate_renewable_capacities(
)
def attach_line_rating(
n, rating, s_max_pu, correction_factor, max_voltage_difference, max_line_rating
):
# TODO: Only considers overhead lines
n.lines_t.s_max_pu = (rating / n.lines.s_nom[rating.columns]) * correction_factor
if max_voltage_difference:
x_pu = (
n.lines.type.map(n.line_types["x_per_length"])
* n.lines.length
/ (n.lines.v_nom**2)
def attach_storageunits(n, costs, extendable_carriers, max_hours):
carriers = extendable_carriers["StorageUnit"]
n.madd("Carrier", carriers)
buses_i = n.buses.index
lookup_store = {"H2": "electrolysis", "battery": "battery inverter"}
lookup_dispatch = {"H2": "fuel cell", "battery": "battery inverter"}
for carrier in carriers:
roundtrip_correction = 0.5 if carrier == "battery" else 1
n.madd(
"StorageUnit",
buses_i,
" " + carrier,
bus=buses_i,
carrier=carrier,
p_nom_extendable=True,
capital_cost=costs.at[carrier, "capital_cost"],
marginal_cost=costs.at[carrier, "marginal_cost"],
efficiency_store=costs.at[lookup_store[carrier], "efficiency"]
** roundtrip_correction,
efficiency_dispatch=costs.at[lookup_dispatch[carrier], "efficiency"]
** roundtrip_correction,
max_hours=max_hours[carrier],
cyclic_state_of_charge=True,
)
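# Editor's sketch (assumption that the inverter's cost-data efficiency is a
# round-trip value, as the comment in attach_stores below suggests): the 0.5
# exponent splits it evenly between charging and discharging, e.g.
#
#     eta_roundtrip = 0.9
#     eta_store = eta_dispatch = eta_roundtrip ** 0.5  # ~0.9487
#     eta_store * eta_dispatch                         # -> 0.9 again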
# need to clip here as cap values might be below 1
# -> would mean the line cannot be operated at actual given pessimistic ampacity
s_max_pu_cap = (
np.deg2rad(max_voltage_difference) / (x_pu * n.lines.s_nom)
).clip(lower=1)
n.lines_t.s_max_pu = n.lines_t.s_max_pu.clip(
lower=1, upper=s_max_pu_cap, axis=1
)
if max_line_rating:
n.lines_t.s_max_pu = n.lines_t.s_max_pu.clip(upper=max_line_rating)
n.lines_t.s_max_pu *= s_max_pu
def add_transmission_projects(n, transmission_projects):
logger.info(f"Adding transmission projects to network.")
for path in transmission_projects:
path = Path(path)
df = pd.read_csv(path, index_col=0, dtype={"bus0": str, "bus1": str})
if df.empty:
continue
if "new_buses" in path.name:
n.madd("Bus", df.index, **df)
elif "new_lines" in path.name:
n.madd("Line", df.index, **df)
elif "new_links" in path.name:
n.madd("Link", df.index, **df)
elif "adjust_lines":
n.lines.update(df)
elif "adjust_links":
n.links.update(df)
def attach_stores(n, costs, extendable_carriers):
carriers = extendable_carriers["Store"]
n.madd("Carrier", carriers)
buses_i = n.buses.index
if "H2" in carriers:
h2_buses_i = n.madd("Bus", buses_i + " H2", carrier="H2", location=buses_i)
n.madd(
"Store",
h2_buses_i,
bus=h2_buses_i,
carrier="H2",
e_nom_extendable=True,
e_cyclic=True,
capital_cost=costs.at["hydrogen storage underground", "capital_cost"],
)
n.madd(
"Link",
h2_buses_i + " Electrolysis",
bus0=buses_i,
bus1=h2_buses_i,
carrier="H2 electrolysis",
p_nom_extendable=True,
efficiency=costs.at["electrolysis", "efficiency"],
capital_cost=costs.at["electrolysis", "capital_cost"],
marginal_cost=costs.at["electrolysis", "marginal_cost"],
)
n.madd(
"Link",
h2_buses_i + " Fuel Cell",
bus0=h2_buses_i,
bus1=buses_i,
carrier="H2 fuel cell",
p_nom_extendable=True,
efficiency=costs.at["fuel cell", "efficiency"],
# NB: fixed cost is per MWel
capital_cost=costs.at["fuel cell", "capital_cost"]
* costs.at["fuel cell", "efficiency"],
marginal_cost=costs.at["fuel cell", "marginal_cost"],
)
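# Editor's note (assumption): fuel cell costs are quoted per MW of electric
# output, while the link's p_nom is defined on the hydrogen input side (bus0),
# so multiplying by the efficiency converts the cost basis, e.g. with
# hypothetical values 1000 EUR/MW_el * 0.5 = 500 EUR per MW of H2 input.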
if "battery" in carriers:
b_buses_i = n.madd(
"Bus", buses_i + " battery", carrier="battery", location=buses_i
)
n.madd(
"Store",
b_buses_i,
bus=b_buses_i,
carrier="battery",
e_cyclic=True,
e_nom_extendable=True,
capital_cost=costs.at["battery storage", "capital_cost"],
marginal_cost=costs.at["battery", "marginal_cost"],
)
n.madd("Carrier", ["battery charger", "battery discharger"])
n.madd(
"Link",
b_buses_i + " charger",
bus0=buses_i,
bus1=b_buses_i,
carrier="battery charger",
# the efficiencies are "round trip efficiencies"
efficiency=costs.at["battery inverter", "efficiency"] ** 0.5,
capital_cost=costs.at["battery inverter", "capital_cost"],
p_nom_extendable=True,
marginal_cost=costs.at["battery inverter", "marginal_cost"],
)
n.madd(
"Link",
b_buses_i + " discharger",
bus0=b_buses_i,
bus1=buses_i,
carrier="battery discharger",
efficiency=costs.at["battery inverter", "efficiency"] ** 0.5,
p_nom_extendable=True,
marginal_cost=costs.at["battery inverter", "marginal_cost"],
)
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake("add_electricity")
snakemake = mock_snakemake("add_electricity", clusters=100)
configure_logging(snakemake)
set_scenario_config(snakemake)
params = snakemake.params
max_hours = params.electricity["max_hours"]
landfall_lengths = {
tech: settings["landfall_length"]
for tech, settings in params.renewable.items()
if "landfall_length" in settings.keys()
}
n = pypsa.Network(snakemake.input.base_network)
if params["transmission_projects"]["enable"]:
add_transmission_projects(n, snakemake.input.transmission_projects)
time = get_snapshots(snakemake.params.snapshots, snakemake.params.drop_leap_day)
n.set_snapshots(time)
@ -841,22 +983,31 @@ if __name__ == "__main__":
costs = load_costs(
snakemake.input.tech_costs,
params.costs,
params.electricity["max_hours"],
max_hours,
Nyears,
)
ppl = load_powerplants(snakemake.input.powerplants)
ppl = load_and_aggregate_powerplants(
snakemake.input.powerplants,
costs,
params.consider_efficiency_classes,
params.aggregation_strategies,
params.exclude_carriers,
)
attach_load(
n,
snakemake.input.regions,
snakemake.input.load,
snakemake.input.nuts3_shapes,
snakemake.input.get("gdp_pop_non_nuts3"),
params.countries,
snakemake.input.busmap,
params.scaling_factor,
)
update_transmission_costs(n, costs, params.length_factor)
set_transmission_costs(
n,
costs,
params.line_length_factor,
params.link_length_factor,
)
renewable_carriers = set(params.electricity["renewable_carriers"])
extendable_carriers = params.electricity["extendable_carriers"]
@ -896,7 +1047,8 @@ if __name__ == "__main__":
snakemake.input,
renewable_carriers,
extendable_carriers,
params.length_factor,
params.line_length_factor,
landfall_lengths,
)
if "hydro" in renewable_carriers:
@ -933,24 +1085,12 @@ if __name__ == "__main__":
update_p_nom_max(n)
line_rating_config = snakemake.config["lines"]["dynamic_line_rating"]
if line_rating_config["activate"]:
rating = xr.open_dataarray(snakemake.input.line_rating).to_pandas().transpose()
s_max_pu = snakemake.config["lines"]["s_max_pu"]
correction_factor = line_rating_config["correction_factor"]
max_voltage_difference = line_rating_config["max_voltage_difference"]
max_line_rating = line_rating_config["max_line_rating"]
attach_line_rating(
n,
rating,
s_max_pu,
correction_factor,
max_voltage_difference,
max_line_rating,
)
attach_storageunits(n, costs, extendable_carriers, max_hours)
attach_stores(n, costs, extendable_carriers)
sanitize_carriers(n, snakemake.config)
if "location" in n.buses:
sanitize_locations(n)
n.meta = snakemake.config
n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
n.export_to_netcdf(snakemake.output[0])

View File

@ -120,7 +120,7 @@ def add_existing_renewables(df_agg, costs):
df_agg.at[name, "DateOut"] = (
year + costs.at[cost_key, "lifetime"] - 1
)
df_agg.at[name, "cluster_bus"] = node
df_agg.at[name, "bus"] = node
def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, baseyear):
@ -135,7 +135,8 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas
baseyear : int
"""
logger.debug(
f"Adding power capacities installed before {baseyear} from powerplants.csv"
f"Adding power capacities installed before {baseyear} from"
" powerplants_s_{clusters}.csv"
)
df_agg = pd.read_csv(snakemake.input.powerplants, index_col=0)
@ -184,19 +185,6 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas
)
df_agg.loc[biomass_i, "DateOut"] = df_agg.loc[biomass_i, "DateOut"].fillna(dateout)
# assign clustered bus
busmap_s = pd.read_csv(snakemake.input.busmap_s, index_col=0).squeeze()
busmap = pd.read_csv(snakemake.input.busmap, index_col=0).squeeze()
inv_busmap = {}
for k, v in busmap.items():
inv_busmap[v] = inv_busmap.get(v, []) + [k]
clustermaps = busmap_s.map(busmap)
clustermaps.index = clustermaps.index.astype(int)
df_agg["cluster_bus"] = df_agg.bus.map(clustermaps)
# include renewables in df_agg
add_existing_renewables(df_agg, costs)
@ -225,14 +213,14 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas
df = df_agg.pivot_table(
index=["grouping_year", "Fueltype"],
columns="cluster_bus",
columns="bus",
values="Capacity",
aggfunc="sum",
)
lifetime = df_agg.pivot_table(
index=["grouping_year", "Fueltype"],
columns="cluster_bus",
columns="bus",
values="lifetime",
aggfunc="mean", # currently taken mean for clustering lifetimes
)
@ -280,54 +268,23 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas
] = capacity.loc[already_build.str.replace(name_suffix, "")].values
new_capacity = capacity.loc[new_build.str.replace(name_suffix, "")]
if "m" in snakemake.wildcards.clusters:
for ind in new_capacity.index:
# existing capacities are split evenly among regions in every country
inv_ind = list(inv_busmap[ind])
p_max_pu = n.generators_t.p_max_pu[capacity.index + name_suffix_by]
# for offshore the splitting only includes coastal regions
inv_ind = [
i for i in inv_ind if (i + name_suffix_by) in n.generators.index
]
p_max_pu = n.generators_t.p_max_pu[
[i + name_suffix_by for i in inv_ind]
]
p_max_pu.columns = [i + name_suffix for i in inv_ind]
n.madd(
"Generator",
[i + name_suffix for i in inv_ind],
bus=ind,
carrier=generator,
p_nom=new_capacity[ind]
/ len(inv_ind), # split among regions in a country
marginal_cost=marginal_cost,
capital_cost=capital_cost,
efficiency=costs.at[cost_key, "efficiency"],
p_max_pu=p_max_pu,
build_year=grouping_year,
lifetime=costs.at[cost_key, "lifetime"],
)
else:
p_max_pu = n.generators_t.p_max_pu[capacity.index + name_suffix_by]
if not new_build.empty:
n.madd(
"Generator",
new_capacity.index,
suffix=name_suffix,
bus=new_capacity.index,
carrier=generator,
p_nom=new_capacity,
marginal_cost=marginal_cost,
capital_cost=capital_cost,
efficiency=costs.at[cost_key, "efficiency"],
p_max_pu=p_max_pu.rename(columns=n.generators.bus),
build_year=grouping_year,
lifetime=costs.at[cost_key, "lifetime"],
)
if not new_build.empty:
n.madd(
"Generator",
new_capacity.index,
suffix=name_suffix,
bus=new_capacity.index,
carrier=generator,
p_nom=new_capacity,
marginal_cost=marginal_cost,
capital_cost=capital_cost,
efficiency=costs.at[cost_key, "efficiency"],
p_max_pu=p_max_pu.rename(columns=n.generators.bus),
build_year=grouping_year,
lifetime=costs.at[cost_key, "lifetime"],
)
else:
bus0 = vars(spatial)[carrier[generator]].nodes
@ -690,7 +647,6 @@ if __name__ == "__main__":
snakemake = mock_snakemake(
"add_existing_baseyear",
configfiles="config/test/config.myopic.yaml",
simpl="",
clusters="5",
ll="v1.5",
opts="",

View File

@ -1,253 +0,0 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
# coding: utf-8
"""
Adds extra extendable components to the clustered and simplified network.
Relevant Settings
-----------------
.. code:: yaml
costs:
year:
version:
dicountrate:
emission_prices:
electricity:
max_hours:
marginal_cost:
capital_cost:
extendable_carriers:
StorageUnit:
Store:
.. seealso::
Documentation of the configuration file ``config/config.yaml`` at :ref:`costs_cf`,
:ref:`electricity_cf`
Inputs
------
- ``resources/costs.csv``: The database of cost assumptions for all included technologies for specific years from various sources; e.g. discount rate, lifetime, investment (CAPEX), fixed operation and maintenance (FOM), variable operation and maintenance (VOM), fuel costs, efficiency, carbon-dioxide intensity.
Outputs
-------
- ``networks/elec_s{simpl}_{clusters}_ec.nc``:
Description
-----------
The rule :mod:`add_extra_components` attaches additional extendable components to the clustered and simplified network. These can be configured in the ``config/config.yaml`` at ``electricity: extendable_carriers:``. It processes ``networks/elec_s{simpl}_{clusters}.nc`` to build ``networks/elec_s{simpl}_{clusters}_ec.nc``, which in contrast to the former (depending on the configuration) contain with **zero** initial capacity
- ``StorageUnits`` of carrier 'H2' and/or 'battery'. If this option is chosen, every bus is given an extendable ``StorageUnit`` of the corresponding carrier. The energy and power capacities are linked through a parameter that specifies the energy capacity as maximum hours at full dispatch power and is configured in ``electricity: max_hours:``. This linkage leads to one investment variable per storage unit. The default ``max_hours`` lead to long-term hydrogen and short-term battery storage units.
- ``Stores`` of carrier 'H2' and/or 'battery' in combination with ``Links``. If this option is chosen, the script adds extra buses with corresponding carrier where energy ``Stores`` are attached and which are connected to the corresponding power buses via two links, one each for charging and discharging. This leads to three investment variables for the energy capacity, charging and discharging capacity of the storage unit.
"""
import logging
import numpy as np
import pandas as pd
import pypsa
from _helpers import configure_logging, set_scenario_config
from add_electricity import load_costs, sanitize_carriers, sanitize_locations
idx = pd.IndexSlice
logger = logging.getLogger(__name__)
def attach_storageunits(n, costs, extendable_carriers, max_hours):
carriers = extendable_carriers["StorageUnit"]
n.madd("Carrier", carriers)
buses_i = n.buses.index
lookup_store = {"H2": "electrolysis", "battery": "battery inverter"}
lookup_dispatch = {"H2": "fuel cell", "battery": "battery inverter"}
for carrier in carriers:
roundtrip_correction = 0.5 if carrier == "battery" else 1
n.madd(
"StorageUnit",
buses_i,
" " + carrier,
bus=buses_i,
carrier=carrier,
p_nom_extendable=True,
capital_cost=costs.at[carrier, "capital_cost"],
marginal_cost=costs.at[carrier, "marginal_cost"],
efficiency_store=costs.at[lookup_store[carrier], "efficiency"]
** roundtrip_correction,
efficiency_dispatch=costs.at[lookup_dispatch[carrier], "efficiency"]
** roundtrip_correction,
max_hours=max_hours[carrier],
cyclic_state_of_charge=True,
)
def attach_stores(n, costs, extendable_carriers):
carriers = extendable_carriers["Store"]
n.madd("Carrier", carriers)
buses_i = n.buses.index
if "H2" in carriers:
h2_buses_i = n.madd("Bus", buses_i + " H2", carrier="H2", location=buses_i)
n.madd(
"Store",
h2_buses_i,
bus=h2_buses_i,
carrier="H2",
e_nom_extendable=True,
e_cyclic=True,
capital_cost=costs.at["hydrogen storage underground", "capital_cost"],
)
n.madd(
"Link",
h2_buses_i + " Electrolysis",
bus0=buses_i,
bus1=h2_buses_i,
carrier="H2 electrolysis",
p_nom_extendable=True,
efficiency=costs.at["electrolysis", "efficiency"],
capital_cost=costs.at["electrolysis", "capital_cost"],
marginal_cost=costs.at["electrolysis", "marginal_cost"],
)
n.madd(
"Link",
h2_buses_i + " Fuel Cell",
bus0=h2_buses_i,
bus1=buses_i,
carrier="H2 fuel cell",
p_nom_extendable=True,
efficiency=costs.at["fuel cell", "efficiency"],
# NB: fixed cost is per MWel
capital_cost=costs.at["fuel cell", "capital_cost"]
* costs.at["fuel cell", "efficiency"],
marginal_cost=costs.at["fuel cell", "marginal_cost"],
)
if "battery" in carriers:
b_buses_i = n.madd(
"Bus", buses_i + " battery", carrier="battery", location=buses_i
)
n.madd(
"Store",
b_buses_i,
bus=b_buses_i,
carrier="battery",
e_cyclic=True,
e_nom_extendable=True,
capital_cost=costs.at["battery storage", "capital_cost"],
marginal_cost=costs.at["battery", "marginal_cost"],
)
n.madd("Carrier", ["battery charger", "battery discharger"])
n.madd(
"Link",
b_buses_i + " charger",
bus0=buses_i,
bus1=b_buses_i,
carrier="battery charger",
# the efficiencies are "round trip efficiencies"
efficiency=costs.at["battery inverter", "efficiency"] ** 0.5,
capital_cost=costs.at["battery inverter", "capital_cost"],
p_nom_extendable=True,
marginal_cost=costs.at["battery inverter", "marginal_cost"],
)
n.madd(
"Link",
b_buses_i + " discharger",
bus0=b_buses_i,
bus1=buses_i,
carrier="battery discharger",
efficiency=costs.at["battery inverter", "efficiency"] ** 0.5,
p_nom_extendable=True,
marginal_cost=costs.at["battery inverter", "marginal_cost"],
)
def attach_hydrogen_pipelines(n, costs, extendable_carriers):
as_stores = extendable_carriers.get("Store", [])
if "H2 pipeline" not in extendable_carriers.get("Link", []):
return
assert "H2" in as_stores, (
"Attaching hydrogen pipelines requires hydrogen "
"storage to be modelled as Store-Link-Bus combination. See "
"`config.yaml` at `electricity: extendable_carriers: Store:`."
)
# determine bus pairs
attrs = ["bus0", "bus1", "length"]
candidates = pd.concat(
[n.lines[attrs], n.links.query('carrier=="DC"')[attrs]]
).reset_index(drop=True)
# remove bus pair duplicates regardless of order of bus0 and bus1
h2_links = candidates[
~pd.DataFrame(np.sort(candidates[["bus0", "bus1"]])).duplicated()
]
h2_links.index = h2_links.apply(lambda c: f"H2 pipeline {c.bus0}-{c.bus1}", axis=1)
# add pipelines
n.add("Carrier", "H2 pipeline")
n.madd(
"Link",
h2_links.index,
bus0=h2_links.bus0.values + " H2",
bus1=h2_links.bus1.values + " H2",
p_min_pu=-1,
p_nom_extendable=True,
length=h2_links.length.values,
capital_cost=costs.at["H2 pipeline", "capital_cost"] * h2_links.length,
efficiency=costs.at["H2 pipeline", "efficiency"],
carrier="H2 pipeline",
)
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake("add_extra_components", simpl="", clusters=5)
configure_logging(snakemake)
set_scenario_config(snakemake)
n = pypsa.Network(snakemake.input.network)
extendable_carriers = snakemake.params.extendable_carriers
max_hours = snakemake.params.max_hours
Nyears = n.snapshot_weightings.objective.sum() / 8760.0
costs = load_costs(
snakemake.input.tech_costs, snakemake.params.costs, max_hours, Nyears
)
attach_storageunits(n, costs, extendable_carriers, max_hours)
attach_stores(n, costs, extendable_carriers)
attach_hydrogen_pipelines(n, costs, extendable_carriers)
sanitize_carriers(n, snakemake.config)
if "location" in n.buses:
sanitize_locations(n)
n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
n.export_to_netcdf(snakemake.output[0])

View File

@ -0,0 +1,106 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Add transmission projects and DLR to the network.
"""
import logging
from pathlib import Path
import numpy as np
import pandas as pd
import pypsa
import xarray as xr
from _helpers import configure_logging, set_scenario_config
logger = logging.getLogger(__name__)
def attach_transmission_projects(
n: pypsa.Network, transmission_projects: list[str]
) -> None:
logger.info("Adding transmission projects to network.")
for path in transmission_projects:
path = Path(path)
df = pd.read_csv(path, index_col=0, dtype={"bus0": str, "bus1": str})
if df.empty:
continue
if "new_buses" in path.name:
n.madd("Bus", df.index, **df)
elif "new_lines" in path.name:
n.madd("Line", df.index, **df)
elif "new_links" in path.name:
n.madd("Link", df.index, **df)
elif "adjust_lines" in path.name:
n.lines.update(df)
elif "adjust_links" in path.name:
n.links.update(df)
def attach_line_rating(
n: pypsa.Network,
rating: pd.DataFrame,
s_max_pu: float,
correction_factor: float,
max_voltage_difference: float | bool,
max_line_rating: float | bool,
) -> None:
logger.info("Attaching dynamic line rating to network.")
# TODO: Only considers overhead lines
n.lines_t.s_max_pu = (rating / n.lines.s_nom[rating.columns]) * correction_factor
if max_voltage_difference:
x_pu = (
n.lines.type.map(n.line_types["x_per_length"])
* n.lines.length
/ (n.lines.v_nom**2)
)
# need to clip here as cap values might be below 1
# -> would mean the line cannot be operated at actual given pessimistic ampacity
s_max_pu_cap = (
np.deg2rad(max_voltage_difference) / (x_pu * n.lines.s_nom)
).clip(lower=1)
n.lines_t.s_max_pu = n.lines_t.s_max_pu.clip(
lower=1, upper=s_max_pu_cap, axis=1
)
if max_line_rating:
n.lines_t.s_max_pu = n.lines_t.s_max_pu.clip(upper=max_line_rating)
n.lines_t.s_max_pu *= s_max_pu
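# Editor's illustration (hypothetical line parameters, not from the commit):
# for max_voltage_difference = 30 deg, x_pu = 0.001 and s_nom = 1000 MVA,
#
#     np.deg2rad(30) / (0.001 * 1000)  # ~0.52, i.e. below 1
#
# so the clip(lower=1) above lifts the cap back to the static rating; the
# angle-stability limit therefore only trims dynamic ratings above 1 and
# never pushes a line below its static capacity.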
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake("add_transmission_projects_and_dlr")
configure_logging(snakemake)
set_scenario_config(snakemake)
params = snakemake.params
n = pypsa.Network(snakemake.input.network)
if params["transmission_projects"]["enable"]:
attach_transmission_projects(n, snakemake.input.transmission_projects)
if params["dlr"]["activate"]:
rating = xr.open_dataarray(snakemake.input.dlr).to_pandas().transpose()
s_max_pu = params["s_max_pu"]
correction_factor = params["dlr"]["correction_factor"]
max_voltage_difference = params["dlr"]["max_voltage_difference"]
max_line_rating = params["dlr"]["max_line_rating"]
attach_line_rating(
n,
rating,
s_max_pu,
correction_factor,
max_voltage_difference,
max_line_rating,
)
n.export_to_netcdf(snakemake.output[0])

View File

@ -344,7 +344,6 @@ if __name__ == "__main__":
snakemake = mock_snakemake(
"build_biomass_potentials",
simpl="",
clusters="39",
planning_horizons=2050,
)

View File

@ -136,7 +136,6 @@ if __name__ == "__main__":
snakemake = mock_snakemake(
"build_cop_profiles",
simpl="",
clusters=48,
)

View File

@ -17,11 +17,7 @@ if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake(
"build_clustered_population_layouts",
simpl="",
clusters=48,
)
snakemake = mock_snakemake("build_clustered_population_layouts", clusters=48)
set_scenario_config(snakemake)

View File

@ -104,7 +104,6 @@ if __name__ == "__main__":
snakemake = mock_snakemake(
"build_cop_profiles",
simpl="",
clusters=48,
)

View File

@ -27,13 +27,13 @@ Inputs
------
- ``resources/<run_name>/pop_layout_<scope>.nc``: Population layout (spatial population distribution).
- ``resources/<run_name>/regions_onshore_elec_s<simpl>_<clusters>.geojson``: Onshore region shapes.
- ``resources/<run_name>/regions_onshore_base_s_<clusters>.geojson``: Onshore region shapes.
- ``cutout``: Weather data cutout, as specified in config
Outputs
-------
- ``resources/daily_heat_demand_<scope>_elec_s<simpl>_<clusters>.nc``:
- ``resources/daily_heat_demand_<scope>_base_s_<clusters>.nc``:
Relevant settings
-----------------
@ -58,7 +58,6 @@ if __name__ == "__main__":
snakemake = mock_snakemake(
"build_daily_heat_demands",
scope="total",
simpl="",
clusters=48,
)
set_scenario_config(snakemake)

View File

@ -44,7 +44,6 @@ if __name__ == "__main__":
snakemake = mock_snakemake(
"build_district_heat_share",
simpl="",
clusters=60,
planning_horizons="2050",
)

View File

@ -201,7 +201,6 @@ if __name__ == "__main__":
snakemake = mock_snakemake(
"build_egs_potentials",
simpl="",
clusters=37,
)

View File

@ -0,0 +1,127 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Builds the electricity demand for base regions based on population and GDP.
"""
import logging
from itertools import product
import geopandas as gpd
import numpy as np
import pandas as pd
import pypsa
import scipy.sparse as sparse
import xarray as xr
from _helpers import configure_logging, set_scenario_config
from shapely.prepared import prep
logger = logging.getLogger(__name__)
def normed(s: pd.Series) -> pd.Series:
return s / s.sum()
def shapes_to_shapes(orig: gpd.GeoSeries, dest: gpd.GeoSeries) -> sparse.lil_matrix:
"""
Adopted from vresutils.transfer.Shapes2Shapes()
"""
orig_prepped = list(map(prep, orig))
transfer = sparse.lil_matrix((len(dest), len(orig)), dtype=float)
for i, j in product(range(len(dest)), range(len(orig))):
if orig_prepped[j].intersects(dest.iloc[i]):
area = orig.iloc[j].intersection(dest.iloc[i]).area
transfer[i, j] = area / dest.iloc[i].area
return transfer
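# Editor's illustration (toy geometries; _shapes_to_shapes_example is an
# invented name, not part of the commit): two unit squares each cover half of
# a shifted destination square, so the transfer row area-weights origin data
# onto the destination:
def _shapes_to_shapes_example() -> None:
    from shapely.geometry import box

    orig = gpd.GeoSeries([box(0, 0, 1, 1), box(1, 0, 2, 1)])
    dest = gpd.GeoSeries([box(0.5, 0, 1.5, 1)])
    assert (shapes_to_shapes(orig, dest).toarray() == [[0.5, 0.5]]).all()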
def upsample_load(
n: pypsa.Network,
regions_fn: str,
load_fn: str,
nuts3_fn: str,
gdp_pop_non_nuts3_fn: str,
distribution_key: dict[str, float],
) -> pd.DataFrame:
substation_lv_i = n.buses.index[n.buses["substation_lv"]]
gdf_regions = gpd.read_file(regions_fn).set_index("name").reindex(substation_lv_i)
load = pd.read_csv(load_fn, index_col=0, parse_dates=True)
nuts3 = gpd.read_file(nuts3_fn).set_index("index")
gdp_weight = distribution_key.get("gdp", 0.6)
pop_weight = distribution_key.get("pop", 0.4)
data_arrays = []
for cntry, group in gdf_regions.geometry.groupby(gdf_regions.country):
load_ct = load[cntry]
if cntry in ["UA", "MD"]:
# separate handling because nuts3 provides no data for UA+MD
gdp_pop_non_nuts3 = gpd.read_file(gdp_pop_non_nuts3_fn).set_index("Bus")
gdp_pop_non_nuts3 = gdp_pop_non_nuts3.loc[
(gdp_pop_non_nuts3.country == cntry)
& (gdp_pop_non_nuts3.index.isin(substation_lv_i))
]
factors = normed(
gdp_weight * normed(gdp_pop_non_nuts3["gdp"])
+ pop_weight * normed(gdp_pop_non_nuts3["pop"])
)
elif len(group) == 1:
factors = pd.Series(1.0, index=group.index)
else:
nuts3_cntry = nuts3.loc[nuts3.country == cntry]
transfer = shapes_to_shapes(group, nuts3_cntry.geometry).T.tocsr()
gdp_n = pd.Series(
transfer.dot(nuts3_cntry["gdp"].fillna(1.0).values), index=group.index
)
pop_n = pd.Series(
transfer.dot(nuts3_cntry["pop"].fillna(1.0).values), index=group.index
)
factors = normed(gdp_weight * normed(gdp_n) + pop_weight * normed(pop_n))
data_arrays.append(
xr.DataArray(
factors.values * load_ct.values[:, np.newaxis],
dims=["time", "bus"],
coords={"time": load_ct.index.values, "bus": factors.index.values},
)
)
return xr.concat(data_arrays, dim="bus")
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake("build_electricity_demand_base")
configure_logging(snakemake)
set_scenario_config(snakemake)
params = snakemake.params
n = pypsa.Network(snakemake.input.base_network)
load = upsample_load(
n,
regions_fn=snakemake.input.regions,
load_fn=snakemake.input.load,
nuts3_fn=snakemake.input.nuts3,
gdp_pop_non_nuts3_fn=snakemake.input.get("gdp_pop_non_nuts3"),
distribution_key=params.distribution_key,
)
load.name = "electricity demand (MW)"
comp = dict(zlib=True, complevel=9, least_significant_digit=5)
load.to_netcdf(snakemake.output[0], encoding={load.name: comp})

View File

@ -14,11 +14,11 @@ Inputs:
- Existing heating generators: `data/existing_heating_raw.csv` per country
- Population layout: `resources/{run_name}/pop_layout_s<simpl>_<clusters>.csv`. Output of `scripts/build_clustered_population_layout.py`
- Population layout with energy demands: `resources/<run_name>/pop_weighted_energy_totals_s<simpl>_<clusters>.csv`
- District heating share: `resources/<run_name>/district_heat_share_elec_s<simpl>_<clusters>_<planning_horizons>.csv`
- District heating share: `resources/<run_name>/district_heat_share_base_s_<clusters>_<planning_horizons>.csv`
Outputs:
--------
- Existing heat generation capacities distributed to nodes: `resources/{run_name}/existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv`
- Existing heat generation capacities distributed to nodes: `resources/{run_name}/existing_heating_distribution_base_s_{clusters}_{planning_horizons}.csv`
Relevant settings:
------------------
@ -154,7 +154,6 @@ if __name__ == "__main__":
snakemake = mock_snakemake(
"build_existing_heating_distribution",
simpl="",
clusters=48,
planning_horizons=2050,
)

View File

@ -141,7 +141,6 @@ if __name__ == "__main__":
snakemake = mock_snakemake(
"build_gas_input_locations",
simpl="",
clusters="128",
)

View File

@ -42,11 +42,7 @@ def calc_gdp_pop(country, regions, gdp_non_nuts3, pop_non_nuts3):
- gdp: A GeoDataFrame with the mean GDP p.c. values mapped to each bus.
- pop: A GeoDataFrame with the summed POP values mapped to each bus.
"""
regions = (
regions.rename(columns={"name": "Bus"})
.drop(columns=["x", "y"])
.set_index("Bus")
)
regions = regions.rename(columns={"name": "Bus"}).set_index("Bus")
regions = regions[regions.country == country]
# Create a bounding box for UA, MD from region shape, including a buffer of 10000 metres
bounding_box = (

View File

@ -0,0 +1,47 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2024 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Aggregate all rastered cutout data to the Voronoi cells of the base regions.
"""
import logging
import atlite
import geopandas as gpd
from _helpers import get_snapshots, set_scenario_config
from atlite.aggregate import aggregate_matrix
from dask.distributed import Client
logger = logging.getLogger(__name__)
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake("build_hac_features")
set_scenario_config(snakemake)
params = snakemake.params
nprocesses = int(snakemake.threads)
if nprocesses > 1:
client = Client(n_workers=nprocesses, threads_per_worker=1)
else:
client = None
time = get_snapshots(params.snapshots, params.drop_leap_day)
cutout = atlite.Cutout(snakemake.input.cutout).sel(time=time)
regions = gpd.read_file(snakemake.input.regions).set_index("name")
I = cutout.indicatormatrix(regions) # noqa: E741
ds = cutout.data[params.features].map(
aggregate_matrix, matrix=I, index=regions.index
)
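# Editor's note (assumption about atlite's behaviour): the indicator matrix I
# holds one row per region and one column per cutout grid cell with the
# overlap weights, so aggregate_matrix maps each rastered feature (e.g.
# wnd100m or influx_direct) onto region-level time series for the HAC
# clustering step.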
ds = ds.load(scheduler=client)
ds.to_netcdf(snakemake.output[0])

View File

@ -22,12 +22,12 @@ Inputs
------
- ``data/heat_load_profile_BDEW.csv``: Intraday heat profile for water and space heating demand for the residential and services sectors for weekends and weekdays.
- ``resources/daily_heat_demand_total_elec_s<simpl>_<clusters>.nc``: Daily heat demand per cluster.
- ``resources/daily_heat_demand_total_base_s_<clusters>.nc``: Daily heat demand per cluster.
Outputs
-------
- ``resources/hourly_heat_demand_total_elec_s<simpl>_<clusters>.nc``:
- ``resources/hourly_heat_demand_total_base_s_<clusters>.nc``:
"""
from itertools import product
@ -43,7 +43,6 @@ if __name__ == "__main__":
snakemake = mock_snakemake(
"build_hourly_heat_demand",
scope="total",
simpl="",
clusters=5,
)
set_scenario_config(snakemake)

View File

@ -8,13 +8,13 @@ Build spatial distribution of industries from Hotmaps database.
Inputs
-------
- ``resources/regions_onshore_elec_s{simpl}_{clusters}.geojson``
- ``resources/pop_layout_elec_s{simpl}_{clusters}.csv``
- ``resources/regions_onshore_base_s_{clusters}.geojson``
- ``resources/pop_layout_base_s_{clusters}.csv``
Outputs
-------
- ``resources/industrial_distribution_key_elec_s{simpl}_{clusters}.csv``
- ``resources/industrial_distribution_key_base_s_{clusters}.csv``
Description
-------
@ -388,7 +388,6 @@ if __name__ == "__main__":
snakemake = mock_snakemake(
"build_industrial_distribution_key",
simpl="",
clusters=128,
)
configure_logging(snakemake)

View File

@ -8,14 +8,14 @@ Build industrial energy demand per model region.
Inputs
------
- ``resources/industrial_energy_demand_today_elec_s{simpl}_{clusters}.csv``
- ``resources/industrial_energy_demand_today_base_s_{clusters}.csv``
- ``resources/industry_sector_ratios_{planning_horizons}.csv``
- ``resources/industrial_production_elec_s{simpl}_{clusters}_{planning_horizons}.csv``
- ``resources/industrial_production_base_s_{clusters}_{planning_horizons}.csv``
Outputs
-------
- ``resources/industrial_energy_demand_elec_s{simpl}_{clusters}_{planning_horizons}.csv``
- ``resources/industrial_energy_demand_base_s_{clusters}_{planning_horizons}.csv``
Description
-------
@ -45,7 +45,6 @@ if __name__ == "__main__":
snakemake = mock_snakemake(
"build_industrial_energy_demand_per_node",
simpl="",
clusters=48,
planning_horizons=2030,
)

View File

@ -8,19 +8,19 @@ Build industrial energy demand per model region.
Inputs
-------
- ``resources/industrial_distribution_key_elec_s{simpl}_{clusters}.csv``
- ``resources/industrial_distribution_key_base_s_{clusters}.csv``
- ``resources/industrial_energy_demand_per_country_today.csv``
Outputs
-------
- ``resources/industrial_energy_demand_per_node_today_elec_s{simpl}_{clusters}.csv``
- ``resources/industrial_energy_demand_per_node_today_base_s_{clusters}.csv``
Description
-------
This rule maps the industrial energy demand per country `industrial_energy_demand_per_country_today.csv` to each bus region.
The energy demand per country is multiplied by the mapping value from the file ``industrial_distribution_key_elec_s{simpl}_{clusters}.csv`` between 0 and 1 to get the industrial energy demand per bus.
The energy demand per country is multiplied by the mapping value from the file ``industrial_distribution_key_base_s_{clusters}.csv`` between 0 and 1 to get the industrial energy demand per bus.
The unit of the energy demand is TWh/a.
"""
@ -92,7 +92,6 @@ if __name__ == "__main__":
snakemake = mock_snakemake(
"build_industrial_energy_demand_per_node_today",
simpl="",
clusters=48,
)
set_scenario_config(snakemake)

View File

@ -8,13 +8,13 @@ Build industrial production per model region.
Inputs
-------
- ``resources/industrial_distribution_key_elec_s{simpl}_{clusters}.csv``
- ``resources/industrial_distribution_key_base_s_{clusters}.csv``
- ``resources/industrial_production_per_country_tomorrow_{planning_horizons}.csv``
Outputs
-------
- ``resources/industrial_production_per_node_elec_s{simpl}_{clusters}_{planning_horizons}.csv``
- ``resources/industrial_production_per_node_base_s_{clusters}_{planning_horizons}.csv``
Description
-------
@ -87,11 +87,7 @@ if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake(
"build_industrial_production_per_node",
simpl="",
clusters=48,
)
snakemake = mock_snakemake("build_industrial_production_per_node", clusters=48)
set_scenario_config(snakemake)
build_nodal_industrial_production()

View File

@ -5,7 +5,7 @@
# coding: utf-8
"""
Adds dynamic line rating timeseries to the base network.
Calculates dynamic line rating time series from the base network.
Relevant Settings
-----------------
@ -14,11 +14,12 @@ Relevant Settings
lines:
cutout:
line_rating:
dynamic_line_rating:
.. seealso::
Documentation of the configuration file ``config.yaml``
Inputs
------
@ -28,7 +29,7 @@ Inputs
Outputs
-------
- ``resources/line_rating.nc``
- ``resources/dlr.nc``
Description
@ -50,6 +51,7 @@ With a heat balance considering the maximum temperature threshold of the transmission line,
the maximal possible capacity factor "s_max_pu" for each transmission line at each time step is calculated.
"""
import logging
import re
import atlite
@ -58,11 +60,14 @@ import numpy as np
import pypsa
import xarray as xr
from _helpers import configure_logging, get_snapshots, set_scenario_config
from dask.distributed import Client
from shapely.geometry import LineString as Line
from shapely.geometry import Point
logger = logging.getLogger(__name__)
def calculate_resistance(T, R_ref, T_ref=293, alpha=0.00403):
def calculate_resistance(T, R_ref, T_ref: float | int = 293, alpha: float = 0.00403):
"""
Calculates the resistance at other temperatures than the reference
temperature.
@ -84,7 +89,12 @@ def calculate_resistance(T, R_ref, T_ref=293, alpha=0.00403):
return R_ref * (1 + alpha * (T - T_ref))
def calculate_line_rating(n, cutout):
def calculate_line_rating(
n: pypsa.Network,
cutout: atlite.Cutout,
show_progress: bool = True,
dask_kwargs: dict | None = None,
) -> xr.DataArray:
"""
Calculates the maximal allowed power flow in each line for each time step
considering the maximal temperature.
@ -97,6 +107,10 @@ def calculate_line_rating(n, cutout):
-------
xarray DataArray object with maximal power.
"""
if dask_kwargs is None:
dask_kwargs = {}
logger.info("Calculating dynamic line rating.")
relevant_lines = n.lines[~n.lines["underground"]].copy()
buses = relevant_lines[["bus0", "bus1"]].values
x = n.buses.x
@ -120,7 +134,16 @@ def calculate_line_rating(n, cutout):
relevant_lines["n_bundle"] = relevant_lines["n_bundle"].fillna(1)
R *= relevant_lines["n_bundle"]
R = calculate_resistance(T=353, R_ref=R)
Imax = cutout.line_rating(shapes, R, D=0.0218, Ts=353, epsilon=0.8, alpha=0.8)
Imax = cutout.line_rating(
shapes,
R,
D=0.0218,
Ts=353,
epsilon=0.8,
alpha=0.8,
show_progress=show_progress,
dask_kwargs=dask_kwargs,
)
line_factor = relevant_lines.eval("v_nom * n_bundle * num_parallel") / 1e3 # in MW
return xr.DataArray(
data=np.sqrt(3) * Imax * line_factor.values.reshape(-1, 1),
@ -134,21 +157,23 @@ if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake(
"build_line_rating",
network="elec",
simpl="",
clusters="5",
ll="v1.0",
opts="Co2L-4H",
)
snakemake = mock_snakemake("build_line_rating")
configure_logging(snakemake)
set_scenario_config(snakemake)
nprocesses = int(snakemake.threads)
show_progress = not snakemake.config["run"].get("disable_progressbar", True)
show_progress = show_progress and snakemake.config["atlite"]["show_progress"]
if nprocesses > 1:
client = Client(n_workers=nprocesses, threads_per_worker=1)
else:
client = None
dask_kwargs = {"scheduler": client}
n = pypsa.Network(snakemake.input.base_network)
time = get_snapshots(snakemake.params.snapshots, snakemake.params.drop_leap_day)
cutout = atlite.Cutout(snakemake.input.cutout).sel(time=time)
da = calculate_line_rating(n, cutout)
da = calculate_line_rating(n, cutout, show_progress, dask_kwargs)
da.to_netcdf(snakemake.output[0])
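For intuition, the rating arithmetic above can be checked standalone; all numbers below are illustrative assumptions, not values from the workflow:

import numpy as np

# temperature-corrected resistance: R = R_ref * (1 + alpha * (T - T_ref))
R_ref = 0.06  # Ohm/km at 293 K (assumed)
R_353 = R_ref * (1 + 0.00403 * (353 - 293))  # ~0.0745 Ohm/km

# three-phase thermal rating: sqrt(3) * I * V, with kA * kV = MW
i_max = 2.0  # kA, allowed current from the heat balance (assumed)
v_nom = 380  # kV
n_bundle, num_parallel = 3, 2
s_rating = np.sqrt(3) * i_max * v_nom * n_bundle * num_parallel
print(f"{s_rating:.0f} MW")  # ~7898 MW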

View File

@ -16,7 +16,6 @@ if __name__ == "__main__":
snakemake = mock_snakemake(
"build_population_weighted_energy_totals",
kind="heat",
simpl="",
clusters=60,
)
set_scenario_config(snakemake)

View File

@ -35,7 +35,7 @@ Inputs
Outputs
-------
- ``resource/powerplants.csv``: A list of conventional power plants (i.e. neither wind nor solar) with fields for name, fuel type, technology, country, capacity in MW, duration, commissioning year, retrofit year, latitude, longitude, and dam information as documented in the `powerplantmatching README <https://github.com/PyPSA/powerplantmatching/blob/master/README.md>`_; additionally it includes information on the closest substation/bus in ``networks/base.nc``.
- ``resources/powerplants_s_{clusters}.csv``: A list of conventional power plants (i.e. neither wind nor solar) with fields for name, fuel type, technology, country, capacity in MW, duration, commissioning year, retrofit year, latitude, longitude, and dam information as documented in the `powerplantmatching README <https://github.com/PyPSA/powerplantmatching/blob/master/README.md>`_; additionally it includes information on the closest substation/bus in ``networks/base_s_{clusters}.nc``.
.. image:: img/powerplantmatching.png
:scale: 30 %
@ -171,7 +171,7 @@ if __name__ == "__main__":
configure_logging(snakemake)
set_scenario_config(snakemake)
n = pypsa.Network(snakemake.input.base_network)
n = pypsa.Network(snakemake.input.network)
countries = snakemake.params.countries
ppl = (

View File

@ -5,12 +5,11 @@
#
# SPDX-License-Identifier: MIT
"""
Calculates for each network node the (i) installable capacity (based on land-
use), (ii) the available generation time series (based on weather data), and
(iii) the average distance from the node for onshore wind, AC-connected
offshore wind, DC-connected offshore wind and solar PV generators. In addition
for offshore wind it calculates the fraction of the grid connection which is
under water.
Calculates for each clustered region the (i) installable capacity (based on
land-use from :mod:`determine_availability_matrix`), (ii) the available
generation time series (based on weather data), and (iii) the average distance
from the node for onshore wind, AC-connected offshore wind, DC-connected
offshore wind and solar PV generators.
.. note:: Hydroelectric profiles are built in script :mod:`build_hydro_profiles`.
@ -26,9 +25,8 @@ Relevant settings
renewable:
{technology}:
cutout: corine: luisa: grid_codes: distance: natura: max_depth: min_depth:
max_shore_distance: min_shore_distance: capacity_per_sqkm:
correction_factor: min_p_max_pu: clip_p_max_pu: resource:
cutout: capacity_per_sqkm: correction_factor: min_p_max_pu:
clip_p_max_pu: resource:
.. seealso::
Documentation of the configuration file ``config/config.yaml`` at
@ -37,40 +35,14 @@ Relevant settings
Inputs
------
- ``data/bundle/corine/g250_clc06_V18_5.tif``: `CORINE Land Cover (CLC)
<https://land.copernicus.eu/pan-european/corine-land-cover>`_ inventory on `44
classes <https://wiki.openstreetmap.org/wiki/Corine_Land_Cover#Tagging>`_ of
land use (e.g. forests, arable land, industrial, urban areas) at 100m
resolution.
.. image:: img/corine.png
:scale: 33 %
- ``data/LUISA_basemap_020321_50m.tif``: `LUISA Base Map
<https://publications.jrc.ec.europa.eu/repository/handle/JRC124621>`_ land
coverage dataset at 50m resolution similar to CORINE. For codes in relation to
CORINE land cover, see `Annex 1 of the technical documentation
<https://publications.jrc.ec.europa.eu/repository/bitstream/JRC124621/technical_report_luisa_basemap_2018_v7_final.pdf>`_.
- ``data/bundle/gebco/GEBCO_2014_2D.nc``: A `bathymetric
<https://en.wikipedia.org/wiki/Bathymetry>`_ data set with a global terrain
model for ocean and land at 15 arc-second intervals by the `General
Bathymetric Chart of the Oceans (GEBCO)
<https://www.gebco.net/data_and_products/gridded_bathymetry_data/>`_.
.. image:: img/gebco_2019_grid_image.jpg
:scale: 50 %
**Source:** `GEBCO
<https://www.gebco.net/data_and_products/images/gebco_2019_grid_image.jpg>`_
- ``resources/natura.tiff``: confer :ref:`natura`
- ``resources/availability_matrix_{clusters}_{technology}.nc``: see :mod:`determine_availability_matrix`
- ``resources/offshore_shapes.geojson``: confer :ref:`shapes`
- ``resources/regions_onshore.geojson``: (if not offshore wind), confer
- ``resources/regions_onshore_base_s_{clusters}.geojson``: (if not offshore
wind), confer :ref:`busregions`
- ``resources/regions_offshore_base_s_{clusters}.geojson``: (if offshore wind),
:ref:`busregions`
- ``resources/regions_offshore.geojson``: (if offshore wind), :ref:`busregions`
- ``"cutouts/" + params["renewable"][{technology}]['cutout']``: :ref:`cutout`
- ``networks/base.nc``: :ref:`base`
- ``networks/base_s_{clusters}.nc``: :ref:`base`
Outputs
-------
@ -80,21 +52,13 @@ Outputs
=================== ========== =========================================================
Field Dimensions Description
=================== ========== =========================================================
profile bus, time the per unit hourly availability factors for each node
profile bus, time the per unit hourly availability factors for each bus
------------------- ---------- ---------------------------------------------------------
weight bus sum of the layout weighting for each node
p_nom_max bus maximal installable capacity at the bus (in MW)
------------------- ---------- ---------------------------------------------------------
p_nom_max bus maximal installable capacity at the node (in MW)
------------------- ---------- ---------------------------------------------------------
potential y, x layout of generator units at cutout grid cells inside the
Voronoi cell (maximal installable capacity at each grid
cell multiplied by capacity factor)
------------------- ---------- ---------------------------------------------------------
average_distance bus average distance of units in the Voronoi cell to the
grid node (in km)
------------------- ---------- ---------------------------------------------------------
underwater_fraction bus fraction of the average connection distance which is
under water (only for offshore)
average_distance bus average distance of units in the region to the
grid bus for onshore technologies and to the shoreline
for offshore technologies (in km)
=================== ========== =========================================================
- **profile**
@ -109,50 +73,28 @@ Outputs
:scale: 33 %
:align: center
- **potential**
.. image:: img/potential_heatmap.png
:scale: 33 %
:align: center
- **average_distance**
.. image:: img/distance_hist.png
:scale: 33 %
:align: center
- **underwater_fraction**
.. image:: img/underwater_hist.png
:scale: 33 %
:align: center
Description
-----------
This script functions at two main spatial resolutions: the resolution of the
network nodes and their `Voronoi cells
<https://en.wikipedia.org/wiki/Voronoi_diagram>`_, and the resolution of the
cutout grid cells for the weather data. Typically the weather data grid is finer
than the network nodes, so we have to work out the distribution of generators
across the grid cells within each Voronoi cell. This is done by taking account
of a combination of the available land at each grid cell and the capacity factor
there.
clustered network regions, and the resolution of the cutout grid cells for the
weather data. Typically the weather data grid is finer than the network regions,
so we have to work out the distribution of generators across the grid cells
within each region. This is done by taking account of a combination of the
available land at each grid cell (computed in
:mod:`determine_availability_matrix`) and the capacity factor there.
First the script computes how much of the technology can be installed at each
cutout grid cell and each node using the `atlite
<https://github.com/pypsa/atlite>`_ library. This uses the CORINE land use data,
LUISA land use data, Natura2000 nature reserves, GEBCO bathymetry data, and
shipping lanes.
.. image:: img/eligibility.png
:scale: 50 %
:align: center
To compute the layout of generators in each node's Voronoi cell, the installable
potential in each grid cell is multiplied with the capacity factor at each grid
cell. This is done since we assume more generators are installed at cells with a
higher capacity factor.
Based on the availability matrix, the script first computes how much of the
technology can be installed at each cutout grid cell. To compute the layout of
generators in each clustered region, the installable potential in each grid cell
is multiplied with the capacity factor at each grid cell. This is done since we
assume more generators are installed at cells with a higher capacity factor.
.. image:: img/offwinddc-gridcell.png
:scale: 50 %
@ -174,23 +116,17 @@ This layout is then used to compute the generation availability time series from
the weather data cutout from ``atlite``.
The maximal installable potential for the node (`p_nom_max`) is computed by
adding up the installable potentials of the individual grid cells. If the model
comes close to this limit, then the time series may slightly overestimate
production since it is assumed the geographical distribution is proportional to
capacity factor.
adding up the installable potentials of the individual grid cells.
"""
import functools
import logging
import time
import atlite
import geopandas as gpd
import numpy as np
import xarray as xr
from _helpers import configure_logging, get_snapshots, set_scenario_config
from build_shapes import _simplify_polys
from dask.distributed import Client
from pypsa.geo import haversine
from shapely.geometry import LineString
logger = logging.getLogger(__name__)
@ -199,15 +135,19 @@ if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake("build_renewable_profiles", technology="offwind-dc")
snakemake = mock_snakemake(
"build_renewable_profiles", clusters=38, technology="offwind-ac"
)
configure_logging(snakemake)
set_scenario_config(snakemake)
nprocesses = int(snakemake.threads)
noprogress = snakemake.config["run"].get("disable_progressbar", True)
noprogress = noprogress or not snakemake.config["atlite"]["show_progress"]
params = snakemake.params.renewable[snakemake.wildcards.technology]
technology = snakemake.wildcards.technology
params = snakemake.params.renewable[technology]
resource = params["resource"] # pv panel params / wind turbine params
resource["show_progress"] = not noprogress
tech = next(t for t in ["panel", "turbine"] if t in resource)
models = resource[tech]
@ -229,6 +169,9 @@ if __name__ == "__main__":
sns = get_snapshots(snakemake.params.snapshots, snakemake.params.drop_leap_day)
cutout = atlite.Cutout(snakemake.input.cutout).sel(time=sns)
availability = xr.open_dataarray(snakemake.input.availability_matrix)
regions = gpd.read_file(snakemake.input.regions)
assert not regions.empty, (
f"List of regions in {snakemake.input.regions} is empty, please "
@ -236,186 +179,96 @@ if __name__ == "__main__":
)
# do not pull up, set_index does not work if geo dataframe is empty
regions = regions.set_index("name").rename_axis("bus")
if snakemake.wildcards.technology.startswith("offwind"):
# for offshore regions, the shortest distance to the shoreline is used
offshore_regions = availability.coords["bus"].values
regions = regions.loc[offshore_regions]
regions = regions.map(lambda g: _simplify_polys(g, minarea=1)).set_crs(
regions.crs
)
else:
# for onshore regions, the representative point of the region is used
regions = regions.representative_point()
regions = regions.geometry.to_crs(3035)
buses = regions.index
res = params.get("excluder_resolution", 100)
excluder = atlite.ExclusionContainer(crs=3035, res=res)
if params["natura"]:
excluder.add_raster(snakemake.input.natura, nodata=0, allow_no_overlap=True)
for dataset in ["corine", "luisa"]:
kwargs = {"nodata": 0} if dataset == "luisa" else {}
settings = params.get(dataset, {})
if not settings:
continue
if dataset == "luisa" and res > 50:
logger.info(
"LUISA data is available at 50m resolution, "
f"but coarser {res}m resolution is used."
)
if isinstance(settings, list):
settings = {"grid_codes": settings}
if "grid_codes" in settings:
codes = settings["grid_codes"]
excluder.add_raster(
snakemake.input[dataset], codes=codes, invert=True, crs=3035, **kwargs
)
if settings.get("distance", 0.0) > 0.0:
codes = settings["distance_grid_codes"]
buffer = settings["distance"]
excluder.add_raster(
snakemake.input[dataset], codes=codes, buffer=buffer, crs=3035, **kwargs
)
if params.get("ship_threshold"):
shipping_threshold = (
params["ship_threshold"] * 8760 * 6
) # approximation because of 6 years of hourly collected data
func = functools.partial(np.less, shipping_threshold)
excluder.add_raster(
snakemake.input.ship_density, codes=func, crs=4326, allow_no_overlap=True
)
if params.get("max_depth"):
# lambda not supported for atlite + multiprocessing
# use named function np.greater with partially frozen argument instead
# and exclude areas where: -max_depth > grid cell depth
func = functools.partial(np.greater, -params["max_depth"])
excluder.add_raster(snakemake.input.gebco, codes=func, crs=4326, nodata=-1000)
if params.get("min_depth"):
func = functools.partial(np.greater, -params["min_depth"])
excluder.add_raster(
snakemake.input.gebco, codes=func, crs=4326, nodata=-1000, invert=True
)
if "min_shore_distance" in params:
buffer = params["min_shore_distance"]
excluder.add_geometry(snakemake.input.country_shapes, buffer=buffer)
if "max_shore_distance" in params:
buffer = params["max_shore_distance"]
excluder.add_geometry(
snakemake.input.country_shapes, buffer=buffer, invert=True
)
logger.info("Calculate landuse availability...")
start = time.time()
kwargs = dict(nprocesses=nprocesses, disable_progressbar=noprogress)
availability = cutout.availabilitymatrix(regions, excluder, **kwargs)
duration = time.time() - start
logger.info(f"Completed landuse availability calculation ({duration:2.2f}s)")
# For Moldova and Ukraine: Overwrite parts not covered by Corine with
# externally determined available areas
if "availability_matrix_MD_UA" in snakemake.input.keys():
availability_MDUA = xr.open_dataarray(
snakemake.input["availability_matrix_MD_UA"]
)
availability.loc[availability_MDUA.coords] = availability_MDUA
area = cutout.grid.to_crs(3035).area / 1e6
area = xr.DataArray(
area.values.reshape(cutout.shape), [cutout.coords["y"], cutout.coords["x"]]
)
potential = capacity_per_sqkm * availability.sum("bus") * area
func = getattr(cutout, resource.pop("method"))
if client is not None:
resource["dask_kwargs"] = {"scheduler": client}
logger.info("Calculate average capacity factor...")
logger.info(f"Calculate average capacity factor for technology {technology}...")
start = time.time()
capacity_factor = correction_factor * func(capacity_factor=True, **resource)
layout = capacity_factor * area * capacity_per_sqkm
duration = time.time() - start
logger.info(f"Completed average capacity factor calculation ({duration:2.2f}s)")
logger.info(
f"Completed average capacity factor calculation for technology {technology} ({duration:2.2f}s)"
)
profiles = []
capacities = []
for year, model in models.items():
logger.info(
f"Calculate weighted capacity factor time series for model {model}..."
f"Calculate weighted capacity factor time series for model {model} for technology {technology}..."
)
start = time.time()
resource[tech] = model
profile, capacity = func(
profile = func(
matrix=availability.stack(spatial=["y", "x"]),
layout=layout,
index=buses,
per_unit=True,
return_capacity=True,
return_capacity=False,
**resource,
)
dim = {"year": [year]}
profile = profile.expand_dims(dim)
capacity = capacity.expand_dims(dim)
profiles.append(profile.rename("profile"))
capacities.append(capacity.rename("weight"))
duration = time.time() - start
logger.info(
f"Completed weighted capacity factor time series calculation for model {model} ({duration:2.2f}s)"
f"Completed weighted capacity factor time series calculation for model {model} for technology {technology} ({duration:2.2f}s)"
)
profiles = xr.merge(profiles)
capacities = xr.merge(capacities)
logger.info("Calculating maximal capacity per bus")
logger.info(f"Calculating maximal capacity per bus for technology {technology}")
p_nom_max = capacity_per_sqkm * availability @ area
logger.info("Calculate average distances.")
logger.info(f"Calculate average distances for technology {technology}.")
layoutmatrix = (layout * availability).stack(spatial=["y", "x"])
coords = cutout.grid[["x", "y"]]
bus_coords = regions[["x", "y"]]
coords = cutout.grid.representative_point().to_crs(3035)
average_distance = []
centre_of_mass = []
for bus in buses:
row = layoutmatrix.sel(bus=bus).data
nz_b = row != 0
row = row[nz_b]
co = coords[nz_b]
distances = haversine(bus_coords.loc[bus], co)
distances = co.distance(regions[bus]).div(1e3) # km
average_distance.append((distances * (row / row.sum())).sum())
centre_of_mass.append(co.values.T @ (row / row.sum()))
average_distance = xr.DataArray(average_distance, [buses])
centre_of_mass = xr.DataArray(centre_of_mass, [buses, ("spatial", ["x", "y"])])
ds = xr.merge(
[
correction_factor * profiles,
capacities,
p_nom_max.rename("p_nom_max"),
potential.rename("potential"),
average_distance.rename("average_distance"),
]
)
if snakemake.wildcards.technology.startswith("offwind"):
logger.info("Calculate underwater fraction of connections.")
offshore_shape = gpd.read_file(snakemake.input["offshore_shapes"]).union_all()
underwater_fraction = []
for bus in buses:
p = centre_of_mass.sel(bus=bus).data
line = LineString([p, regions.loc[bus, ["x", "y"]]])
frac = line.intersection(offshore_shape).length / line.length
underwater_fraction.append(frac)
ds["underwater_fraction"] = xr.DataArray(underwater_fraction, [buses])
# select only buses with some capacity and minimal capacity factor
mean_profile = ds["profile"].mean("time")
if "year" in ds.indexes:

View File

@ -1050,7 +1050,6 @@ if __name__ == "__main__":
snakemake = mock_snakemake(
"build_retro_cost",
simpl="",
clusters=48,
ll="v1.0",
sector_opts="Co2L0-168H-T-H-B-I-solar3-dist1",

View File

@ -74,9 +74,7 @@ if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake(
"build_salt_cavern_potentials", simpl="", clusters="37"
)
snakemake = mock_snakemake("build_salt_cavern_potentials", clusters="37")
set_scenario_config(snakemake)

View File

@ -38,9 +38,7 @@ if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake(
"build_sequestration_potentials", simpl="", clusters="128"
)
snakemake = mock_snakemake("build_sequestration_potentials", clusters="128")
set_scenario_config(snakemake)

View File

@ -17,11 +17,7 @@ if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake(
"build_shipping_demand",
simpl="",
clusters=48,
)
snakemake = mock_snakemake("build_shipping_demand", clusters=48)
set_scenario_config(snakemake)
scope = gpd.read_file(snakemake.input.scope).geometry[0]

View File

@ -26,13 +26,13 @@ Inputs
------
- ``resources/<run_name>/pop_layout_<scope>.nc``:
- ``resources/<run_name/regions_onshore_elec_s<simpl>_<clusters>.geojson``:
- ``resources/<run_name>/regions_onshore_base_s_<clusters>.geojson``:
- ``cutout``: Weather data cutout, as specified in config
Outputs
-------
- ``resources/solar_thermal_<scope>_elec_s<simpl>_<clusters>.nc``:
- ``resources/solar_thermal_<scope>_base_s_<clusters>.nc``:
"""
import atlite
@ -46,11 +46,7 @@ if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake(
"build_solar_thermal_profiles",
simpl="",
clusters=48,
)
snakemake = mock_snakemake("build_solar_thermal_profiles", clusters=48)
set_scenario_config(snakemake)
nprocesses = int(snakemake.threads)

View File

@ -26,14 +26,14 @@ Inputs
------
- ``resources/<run_name>/pop_layout_total.nc``:
- ``resources/<run_name>/regions_onshore_elec_s<simpl>_<clusters>.geojson``:
- ``resources/<run_name>/regions_onshore_base_s_<clusters>.geojson``:
- ``cutout``: Weather data cutout, as specified in config
Outputs
-------
- ``resources/temp_soil_total_elec_s<simpl>_<clusters>.nc``:
- ``resources/temp_air_total_elec_s<simpl>_<clusters>.nc`
- ``resources/temp_soil_total_base_s_<clusters>.nc``:
- ``resources/temp_air_total_base_s_<clusters>.nc``:
"""
import atlite
@ -49,7 +49,6 @@ if __name__ == "__main__":
snakemake = mock_snakemake(
"build_temperature_profiles",
simpl="",
clusters=48,
)
set_scenario_config(snakemake)

View File

@ -167,11 +167,7 @@ if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake(
"build_transport_demand",
simpl="",
clusters=128,
)
snakemake = mock_snakemake("build_transport_demand", clusters=128)
configure_logging(snakemake)
set_scenario_config(snakemake)

View File

@ -108,7 +108,7 @@ if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake("cluster_gas_network", simpl="", clusters="37")
snakemake = mock_snakemake("cluster_gas_network", clusters="37")
configure_logging(snakemake)
set_scenario_config(snakemake)

View File

@ -6,7 +6,7 @@
# coding: utf-8
"""
Creates networks clustered to ``{cluster}`` number of zones with aggregated
buses, generators and transmission corridors.
buses and transmission corridors.
Relevant Settings
-----------------
@ -32,30 +32,30 @@ Relevant Settings
Inputs
------
- ``resources/regions_onshore_elec_s{simpl}.geojson``: confer :ref:`simplify`
- ``resources/regions_offshore_elec_s{simpl}.geojson``: confer :ref:`simplify`
- ``resources/busmap_elec_s{simpl}.csv``: confer :ref:`simplify`
- ``networks/elec_s{simpl}.nc``: confer :ref:`simplify`
- ``data/custom_busmap_elec_s{simpl}_{clusters}_{base_network}.csv``: optional input
- ``resources/regions_onshore_base.geojson``: confer :ref:`simplify`
- ``resources/regions_offshore_base.geojson``: confer :ref:`simplify`
- ``resources/busmap_base_s.csv``: confer :ref:`simplify`
- ``networks/base.nc``: confer :ref:`simplify`
- ``data/custom_busmap_base_s_{clusters}_{base_network}.csv``: optional input
Outputs
-------
- ``resources/regions_onshore_elec_s{simpl}_{clusters}.geojson``:
- ``resources/regions_onshore_base_s_{clusters}.geojson``:
.. image:: img/regions_onshore_elec_s_X.png
.. image:: img/regions_onshore_base_s_X.png
:scale: 33 %
- ``resources/regions_offshore_elec_s{simpl}_{clusters}.geojson``:
- ``resources/regions_offshore_base_s_{clusters}.geojson``:
.. image:: img/regions_offshore_elec_s_X.png
.. image:: img/regions_offshore_base_s_X.png
:scale: 33 %
- ``resources/busmap_elec_s{simpl}_{clusters}.csv``: Mapping of buses from ``networks/elec_s{simpl}.nc`` to ``networks/elec_s{simpl}_{clusters}.nc``;
- ``resources/linemap_elec_s{simpl}_{clusters}.csv``: Mapping of lines from ``networks/elec_s{simpl}.nc`` to ``networks/elec_s{simpl}_{clusters}.nc``;
- ``networks/elec_s{simpl}_{clusters}.nc``:
- ``resources/busmap_base_s_{clusters}.csv``: Mapping of buses from ``networks/base.nc`` to ``networks/base_s_{clusters}.nc``;
- ``resources/linemap_base_s_{clusters}.csv``: Mapping of lines from ``networks/base.nc`` to ``networks/base_s_{clusters}.nc``;
- ``networks/base_s_{clusters}.nc``:
.. image:: img/elec_s_X.png
.. image:: img/base_s_X.png
:scale: 40 %
Description
@ -63,60 +63,33 @@ Description
.. note::
**Why is clustering used both in** ``simplify_network`` **and** ``cluster_network`` **?**
Consider for example a network ``networks/elec_s100_50.nc`` in which
``simplify_network`` clusters the network to 100 buses and in a second
step ``cluster_network`` reduces it down to 50 buses.
In preliminary tests, it turned out that the principal effect of
changing spatial resolution is actually only partially due to the
transmission network. It is more important to differentiate between
wind generators with higher capacity factors from those with lower
capacity factors, i.e. to have a higher spatial resolution in the
renewable generation than in the number of buses.
The two-step clustering allows to study this effect by looking at
networks like ``networks/elec_s100_50m.nc``. Note the additional
``m`` in the ``{cluster}`` wildcard. So in the example network
there are still up to 100 different wind generators.
In combination these two features allow you to study the spatial
resolution of the transmission network separately from the
spatial resolution of renewable generators.
**Is it possible to run the model without the** ``simplify_network`` **rule?**
No, the network clustering methods in the PyPSA module
`pypsa.clustering.spatial <https://github.com/PyPSA/PyPSA/blob/master/pypsa/clustering/spatial.py>`_
do not work reliably with multiple voltage levels and transformers.
.. tip::
The rule :mod:`cluster_networks` runs
for all ``scenario`` s in the configuration file
the rule :mod:`cluster_network`.
Exemplary unsolved network clustered to 512 nodes:
.. image:: img/elec_s_512.png
.. image:: img/base_s_512.png
:scale: 40 %
:align: center
Exemplary unsolved network clustered to 256 nodes:
.. image:: img/elec_s_256.png
.. image:: img/base_s_256.png
:scale: 40 %
:align: center
Exemplary unsolved network clustered to 128 nodes:
.. image:: img/elec_s_128.png
.. image:: img/base_s_128.png
:scale: 40 %
:align: center
Exemplary unsolved network clustered to 37 nodes:
.. image:: img/elec_s_37.png
.. image:: img/base_s_37.png
:scale: 40 %
:align: center
"""
@ -127,13 +100,11 @@ from functools import reduce
import geopandas as gpd
import linopy
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pypsa
import seaborn as sns
from _helpers import configure_logging, set_scenario_config, update_p_nom_max
from add_electricity import load_costs
import xarray as xr
from _helpers import configure_logging, set_scenario_config
from base_network import append_bus_shapes
from packaging.version import Version, parse
from pypsa.clustering.spatial import (
@ -142,6 +113,7 @@ from pypsa.clustering.spatial import (
busmap_by_kmeans,
get_clustering_from_busmap,
)
from scipy.sparse.csgraph import connected_components
PD_GE_2_2 = parse(pd.__version__) >= Version("2.2")
@ -154,79 +126,61 @@ def normed(x):
return (x / x.sum()).fillna(0.0)
def weighting_for_country(n, x):
conv_carriers = {"OCGT", "CCGT", "PHS", "hydro"}
gen = n.generators.loc[n.generators.carrier.isin(conv_carriers)].groupby(
"bus"
).p_nom.sum().reindex(n.buses.index, fill_value=0.0) + n.storage_units.loc[
n.storage_units.carrier.isin(conv_carriers)
].groupby(
"bus"
).p_nom.sum().reindex(
n.buses.index, fill_value=0.0
def weighting_for_country(df: pd.DataFrame, weights: pd.Series) -> pd.Series:
w = normed(weights.reindex(df.index, fill_value=0))
return (w * (100 / w.max())).clip(lower=1).astype(int)
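A quick illustration of the new weighting with hypothetical loads (``normed`` is replicated inline so the snippet is self-contained):

import pandas as pd

def normed(x):
    return (x / x.sum()).fillna(0.0)

df = pd.DataFrame(index=["b1", "b2", "b3"])   # buses of one country
weights = pd.Series({"b1": 10.0, "b2": 5.0})  # e.g. mean load; b3 absent -> 0

w = normed(weights.reindex(df.index, fill_value=0))
print((w * (100 / w.max())).clip(lower=1).astype(int))
# b1 100, b2 50, b3 1 -- integer weights on a 1..100 scale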
def get_feature_data_for_hac(fn: str) -> pd.DataFrame:
ds = xr.open_dataset(fn)
feature_data = (
pd.concat([ds[var].to_pandas() for var in ds.data_vars], axis=0).fillna(0.0).T
)
load = n.loads_t.p_set.mean().groupby(n.loads.bus).sum()
b_i = x.index
g = normed(gen.reindex(b_i, fill_value=0))
l = normed(load.reindex(b_i, fill_value=0))
w = g + l
return (w * (100.0 / w.max())).clip(lower=1.0).astype(int)
def get_feature_for_hac(n, buses_i=None, feature=None):
if buses_i is None:
buses_i = n.buses.index
if feature is None:
feature = "solar+onwind-time"
carriers = feature.split("-")[0].split("+")
if "offwind" in carriers:
carriers.remove("offwind")
carriers = np.append(
carriers, n.generators.carrier.filter(like="offwind").unique()
)
if feature.split("-")[1] == "cap":
feature_data = pd.DataFrame(index=buses_i, columns=carriers)
for carrier in carriers:
gen_i = n.generators.query("carrier == @carrier").index
attach = (
n.generators_t.p_max_pu[gen_i]
.mean()
.rename(index=n.generators.loc[gen_i].bus)
)
feature_data[carrier] = attach
if feature.split("-")[1] == "time":
feature_data = pd.DataFrame(columns=buses_i)
for carrier in carriers:
gen_i = n.generators.query("carrier == @carrier").index
attach = n.generators_t.p_max_pu[gen_i].rename(
columns=n.generators.loc[gen_i].bus
)
feature_data = pd.concat([feature_data, attach], axis=0)[buses_i]
feature_data = feature_data.T
# timestamp raises error in sklearn >= v1.2:
feature_data.columns = feature_data.columns.astype(str)
feature_data = feature_data.fillna(0)
feature_data.columns = feature_data.columns.astype(str)
return feature_data
def distribute_clusters(n, n_clusters, focus_weights=None, solver_name="scip"):
def fix_country_assignment_for_hac(n: pypsa.Network) -> None:
# overwrite country of nodes that are disconnected from their country-topology
for country in n.buses.country.unique():
m = n[n.buses.country == country].copy()
_, labels = connected_components(m.adjacency_matrix(), directed=False)
component = pd.Series(labels, index=m.buses.index)
component_sizes = component.value_counts()
if len(component_sizes) > 1:
disconnected_bus = component[component == component_sizes.index[-1]].index[
0
]
neighbor_bus = n.lines.query(
"bus0 == @disconnected_bus or bus1 == @disconnected_bus"
).iloc[0][["bus0", "bus1"]]
new_country = list(set(n.buses.loc[neighbor_bus].country) - {country})[0]
logger.info(
f"overwriting country `{country}` of bus `{disconnected_bus}` "
f"to new country `{new_country}`, because it is disconnected "
"from its initial inter-country transmission grid."
)
n.buses.at[disconnected_bus, "country"] = new_country
def distribute_n_clusters_to_countries(
n: pypsa.Network,
n_clusters: int,
cluster_weights: pd.Series,
focus_weights: dict | None = None,
solver_name: str = "scip",
) -> pd.Series:
"""
Determine the number of clusters per country.
"""
L = (
n.loads_t.p_set.mean()
.groupby(n.loads.bus)
.sum()
.groupby([n.buses.country, n.buses.sub_network])
cluster_weights.groupby([n.buses.country, n.buses.sub_network])
.sum()
.pipe(normed)
)
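The remainder of the function (outside this hunk) turns these normed weights into per-country cluster counts by solving an integer program with linopy; a naive proportional sketch of the idea, with assumed weights:

import pandas as pd

L = pd.Series({("DE", "0"): 0.6, ("FR", "0"): 0.4})  # normed weights (assumed)
n_clusters = 10

# proportional allocation with rounding; the real rule enforces the exact
# total and per-country bus limits via the integer program instead
alloc = (L * n_clusters).round().astype(int).clip(lower=1)
print(alloc)  # DE: 6, FR: 4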
@ -277,92 +231,50 @@ def distribute_clusters(n, n_clusters, focus_weights=None, solver_name="scip"):
def busmap_for_n_clusters(
n,
n_clusters,
solver_name,
focus_weights=None,
algorithm="kmeans",
feature=None,
n: pypsa.Network,
n_clusters_c: pd.Series,
cluster_weights: pd.Series,
algorithm: str = "kmeans",
features: pd.DataFrame | None = None,
**algorithm_kwds,
):
) -> pd.Series:
if algorithm == "hac" and features is None:
raise ValueError("For HAC clustering, features must be provided.")
if algorithm == "kmeans":
algorithm_kwds.setdefault("n_init", 1000)
algorithm_kwds.setdefault("max_iter", 30000)
algorithm_kwds.setdefault("tol", 1e-6)
algorithm_kwds.setdefault("random_state", 0)
def fix_country_assignment_for_hac(n):
from scipy.sparse import csgraph
# overwrite country of nodes that are disconnected from their country-topology
for country in n.buses.country.unique():
m = n[n.buses.country == country].copy()
_, labels = csgraph.connected_components(
m.adjacency_matrix(), directed=False
)
component = pd.Series(labels, index=m.buses.index)
component_sizes = component.value_counts()
if len(component_sizes) > 1:
disconnected_bus = component[
component == component_sizes.index[-1]
].index[0]
neighbor_bus = n.lines.query(
"bus0 == @disconnected_bus or bus1 == @disconnected_bus"
).iloc[0][["bus0", "bus1"]]
new_country = list(set(n.buses.loc[neighbor_bus].country) - {country})[
0
]
logger.info(
f"overwriting country `{country}` of bus `{disconnected_bus}` "
f"to new country `{new_country}`, because it is disconnected "
"from its initial inter-country transmission grid."
)
n.buses.at[disconnected_bus, "country"] = new_country
return n
if algorithm == "hac":
feature = get_feature_for_hac(n, buses_i=n.buses.index, feature=feature)
n = fix_country_assignment_for_hac(n)
if (algorithm != "hac") and (feature is not None):
logger.warning(
f"Keyword argument feature is only valid for algorithm `hac`. "
f"Given feature `{feature}` will be ignored."
)
n.determine_network_topology()
n_clusters = distribute_clusters(
n, n_clusters, focus_weights=focus_weights, solver_name=solver_name
)
def busmap_for_country(x):
prefix = x.name[0] + x.name[1] + " "
logger.debug(f"Determining busmap for country {prefix[:-1]}")
logger.debug(
f"Determining busmap for country {prefix[:-1]} "
f"from {len(x)} buses to {n_clusters_c[x.name]}."
)
if len(x) == 1:
return pd.Series(prefix + "0", index=x.index)
weight = weighting_for_country(n, x)
weight = weighting_for_country(x, cluster_weights)
if algorithm == "kmeans":
return prefix + busmap_by_kmeans(
n, weight, n_clusters[x.name], buses_i=x.index, **algorithm_kwds
n, weight, n_clusters_c[x.name], buses_i=x.index, **algorithm_kwds
)
elif algorithm == "hac":
return prefix + busmap_by_hac(
n, n_clusters[x.name], buses_i=x.index, feature=feature.loc[x.index]
n,
n_clusters_c[x.name],
buses_i=x.index,
feature=features.reindex(x.index, fill_value=0.0),
)
elif algorithm == "modularity":
return prefix + busmap_by_greedy_modularity(
n, n_clusters[x.name], buses_i=x.index
n, n_clusters_c[x.name], buses_i=x.index
)
else:
raise ValueError(
f"`algorithm` must be one of 'kmeans' or 'hac'. Is {algorithm}."
f"`algorithm` must be one of 'kmeans' or 'hac' or 'modularity'. Is {algorithm}."
)
compat_kws = dict(include_groups=False) if PD_GE_2_2 else {}
@ -376,93 +288,61 @@ def busmap_for_n_clusters(
def clustering_for_n_clusters(
n,
n_clusters,
custom_busmap=False,
aggregate_carriers=None,
line_length_factor=1.25,
aggregation_strategies=dict(),
solver_name="scip",
algorithm="hac",
feature=None,
extended_link_costs=0,
focus_weights=None,
):
if not isinstance(custom_busmap, pd.Series):
busmap = busmap_for_n_clusters(
n, n_clusters, solver_name, focus_weights, algorithm, feature
)
else:
busmap = custom_busmap
n: pypsa.Network,
busmap: pd.Series,
line_length_factor: float = 1.25,
aggregation_strategies: dict | None = None,
) -> pypsa.clustering.spatial.Clustering:
if aggregation_strategies is None:
aggregation_strategies = dict()
line_strategies = aggregation_strategies.get("lines", dict())
generator_strategies = aggregation_strategies.get("generators", dict())
one_port_strategies = aggregation_strategies.get("one_ports", dict())
bus_strategies = aggregation_strategies.get("buses", dict())
bus_strategies.setdefault("substation_lv", lambda x: bool(x.sum()))
bus_strategies.setdefault("substation_off", lambda x: bool(x.sum()))
clustering = get_clustering_from_busmap(
n,
busmap,
aggregate_generators_weighted=True,
aggregate_generators_carriers=aggregate_carriers,
aggregate_one_ports=["Load", "StorageUnit"],
line_length_factor=line_length_factor,
bus_strategies=bus_strategies,
line_strategies=line_strategies,
generator_strategies=generator_strategies,
one_port_strategies=one_port_strategies,
scale_link_capital_costs=False,
custom_line_groupers=["build_year"],
)
if not n.links.empty:
nc = clustering.network
nc.links["underwater_fraction"] = (
n.links.eval("underwater_fraction * length").div(nc.links.length).dropna()
)
nc.links["capital_cost"] = nc.links["capital_cost"].add(
(nc.links.length - n.links.length)
.clip(lower=0)
.mul(extended_link_costs)
.dropna(),
fill_value=0,
)
return clustering
def cluster_regions(busmaps, regions):
def cluster_regions(
busmaps: tuple | list, regions: gpd.GeoDataFrame, with_country: bool = False
) -> gpd.GeoDataFrame:
"""
Cluster regions based on busmaps and return the clustered regions.
Parameters:
- busmaps (list): A list of busmaps used for clustering.
- which (str): The type of regions to cluster.
- regions (gpd.GeoDataFrame): The regions to cluster.
- with_country (bool): Whether to keep country column.
Returns:
gpd.GeoDataFrame: The clustered regions.
"""
busmap = reduce(lambda x, y: x.map(y), busmaps[1:], busmaps[0])
regions = regions.reindex(columns=["name", "geometry"]).set_index("name")
columns = ["name", "country", "geometry"] if with_country else ["name", "geometry"]
regions = regions.reindex(columns=columns).set_index("name")
regions_c = regions.dissolve(busmap)
regions_c.index.name = "name"
return regions_c.reset_index()
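For intuition, dissolving two regions mapped to the same cluster (toy geometries, hypothetical names):

import geopandas as gpd
import pandas as pd
from shapely.geometry import box

regions = gpd.GeoDataFrame(
    {"name": ["b1", "b2"], "geometry": [box(0, 0, 1, 1), box(1, 0, 2, 1)]}
).set_index("name")
busmap = pd.Series({"b1": "c1", "b2": "c1"})

# dissolve merges all geometries sharing a busmap value into one region
print(regions.dissolve(busmap))  # one row "c1" covering both boxes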
def plot_busmap_for_n_clusters(n, n_clusters, solver_name="scip", fn=None):
busmap = busmap_for_n_clusters(n, n_clusters, solver_name)
cs = busmap.unique()
cr = sns.color_palette("hls", len(cs))
n.plot(bus_colors=busmap.map(dict(zip(cs, cr))))
if fn is not None:
plt.savefig(fn, bbox_inches="tight")
del cs, cr
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake("cluster_network", simpl="", clusters="40")
snakemake = mock_snakemake("cluster_network", clusters=60)
configure_logging(snakemake)
set_scenario_config(snakemake)
@ -470,43 +350,20 @@ if __name__ == "__main__":
solver_name = snakemake.config["solving"]["solver"]["name"]
n = pypsa.Network(snakemake.input.network)
buses_prev, lines_prev, links_prev = len(n.buses), len(n.lines), len(n.links)
# remove integer outputs for compatibility with PyPSA v0.26.0
n.generators.drop("n_mod", axis=1, inplace=True, errors="ignore")
load = (
xr.open_dataarray(snakemake.input.load)
.mean(dim="time")
.to_pandas()
.reindex(n.buses.index, fill_value=0.0)
)
exclude_carriers = params.cluster_network["exclude_carriers"]
aggregate_carriers = set(n.generators.carrier) - set(exclude_carriers)
conventional_carriers = set(params.conventional_carriers)
if snakemake.wildcards.clusters.endswith("m"):
n_clusters = int(snakemake.wildcards.clusters[:-1])
aggregate_carriers = conventional_carriers & aggregate_carriers
elif snakemake.wildcards.clusters.endswith("c"):
n_clusters = int(snakemake.wildcards.clusters[:-1])
aggregate_carriers = aggregate_carriers - conventional_carriers
elif snakemake.wildcards.clusters == "all":
if snakemake.wildcards.clusters == "all":
n_clusters = len(n.buses)
else:
n_clusters = int(snakemake.wildcards.clusters)
if params.cluster_network.get("consider_efficiency_classes", False):
carriers = []
for c in aggregate_carriers:
gens = n.generators.query("carrier == @c")
low = gens.efficiency.quantile(0.10)
high = gens.efficiency.quantile(0.90)
if low >= high:
carriers += [c]
else:
labels = ["low", "medium", "high"]
suffix = pd.cut(
gens.efficiency, bins=[0, low, high, 1], labels=labels
).astype(str)
carriers += [f"{c} {label} efficiency" for label in labels]
n.generators.update(
{"carrier": gens.carrier + " " + suffix + " efficiency"}
)
aggregate_carriers = carriers
if n_clusters == len(n.buses):
# Fast-path if no clustering is necessary
busmap = n.buses.index.to_series()
@ -515,13 +372,6 @@ if __name__ == "__main__":
else:
Nyears = n.snapshot_weightings.objective.sum() / 8760
hvac_overhead_cost = load_costs(
snakemake.input.tech_costs,
params.costs,
params.max_hours,
Nyears,
).at["HVAC overhead", "capital_cost"]
custom_busmap = params.custom_busmap
if custom_busmap:
custom_busmap = pd.read_csv(
@ -529,32 +379,42 @@ if __name__ == "__main__":
).squeeze()
custom_busmap.index = custom_busmap.index.astype(str)
logger.info(f"Imported custom busmap from {snakemake.input.custom_busmap}")
busmap = custom_busmap
else:
algorithm = params.cluster_network["algorithm"]
features = None
if algorithm == "hac":
features = get_feature_data_for_hac(snakemake.input.hac_features)
fix_country_assignment_for_hac(n)
n.determine_network_topology()
n_clusters_c = distribute_n_clusters_to_countries(
n,
n_clusters,
load,
focus_weights=params.focus_weights,
solver_name=solver_name,
)
busmap = busmap_for_n_clusters(
n,
n_clusters_c,
cluster_weights=load,
algorithm=algorithm,
features=features,
)
clustering = clustering_for_n_clusters(
n,
n_clusters,
custom_busmap,
aggregate_carriers,
params.length_factor,
params.aggregation_strategies,
solver_name,
params.cluster_network["algorithm"],
params.cluster_network["feature"],
hvac_overhead_cost,
params.focus_weights,
busmap,
line_length_factor=params.length_factor,
aggregation_strategies=params.aggregation_strategies,
)
nc = clustering.network
update_p_nom_max(nc)
if params.cluster_network.get("consider_efficiency_classes"):
labels = [f" {label} efficiency" for label in ["low", "medium", "high"]]
nc.generators["carrier"] = nc.generators.carrier.replace(labels, "", regex=True)
for attr in (
"busmap",
"linemap",
): # also available: linemap_positive, linemap_negative
for attr in ["busmap", "linemap"]:
getattr(clustering, attr).to_csv(snakemake.output[attr])
# nc.shapes = n.shapes.copy()
@ -566,3 +426,10 @@ if __name__ == "__main__":
nc.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
nc.export_to_netcdf(snakemake.output.network)
logger.info(
f"Clustered network:\n"
f"Buses: {buses_prev} to {len(nc.buses)}\n"
f"Lines: {lines_prev} to {len(nc.lines)}\n"
f"Links: {links_prev} to {len(nc.links)}"
)

View File

@ -0,0 +1,194 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
The script performs a land eligibility analysis of what share of land is
available for developing the selected technology at each cutout grid cell.
The script uses the `atlite <https://github.com/pypsa/atlite>`_ library and
several GIS datasets like the CORINE land use data, LUISA land use data,
Natura2000 nature reserves, GEBCO bathymetry data, and shipping lanes.
Relevant settings
-----------------
.. code:: yaml
atlite:
nprocesses:
renewable:
{technology}:
cutout: corine: luisa: grid_codes: distance: natura: max_depth:
min_depth: max_shore_distance: min_shore_distance: resource:
.. seealso::
Documentation of the configuration file ``config/config.yaml`` at
:ref:`atlite_cf`, :ref:`renewable_cf`
Inputs
------
- ``data/bundle/corine/g250_clc06_V18_5.tif``: `CORINE Land Cover (CLC)
<https://land.copernicus.eu/pan-european/corine-land-cover>`_ inventory on `44
classes <https://wiki.openstreetmap.org/wiki/Corine_Land_Cover#Tagging>`_ of
land use (e.g. forests, arable land, industrial, urban areas) at 100m
resolution.
.. image:: img/corine.png
:scale: 33 %
- ``data/LUISA_basemap_020321_50m.tif``: `LUISA Base Map
<https://publications.jrc.ec.europa.eu/repository/handle/JRC124621>`_ land
coverage dataset at 50m resolution similar to CORINE. For codes in relation to
CORINE land cover, see `Annex 1 of the technical documentation
<https://publications.jrc.ec.europa.eu/repository/bitstream/JRC124621/technical_report_luisa_basemap_2018_v7_final.pdf>`_.
- ``data/bundle/gebco/GEBCO_2014_2D.nc``: A `bathymetric
<https://en.wikipedia.org/wiki/Bathymetry>`_ data set with a global terrain
model for ocean and land at 15 arc-second intervals by the `General
Bathymetric Chart of the Oceans (GEBCO)
<https://www.gebco.net/data_and_products/gridded_bathymetry_data/>`_.
.. image:: img/gebco_2019_grid_image.jpg
:scale: 50 %
**Source:** `GEBCO
<https://www.gebco.net/data_and_products/images/gebco_2019_grid_image.jpg>`_
- ``resources/natura.tiff``: confer :ref:`natura`
- ``resources/offshore_shapes.geojson``: confer :ref:`shapes`
- ``resources/regions_onshore_base_s_{clusters}.geojson``: (if not offshore
wind), confer :ref:`busregions`
- ``resources/regions_offshore_base_s_{clusters}.geojson``: (if offshore wind),
:ref:`busregions`
- ``"cutouts/" + params["renewable"][{technology}]['cutout']``: :ref:`cutout`
- ``networks/base_s_{clusters}.nc``: :ref:`base`
Outputs
-------
- ``resources/availability_matrix_{clusters}_{technology}.nc``
"""
import functools
import logging
import time
import atlite
import geopandas as gpd
import numpy as np
import xarray as xr
from _helpers import configure_logging, set_scenario_config
logger = logging.getLogger(__name__)
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake(
"build_renewable_profiles", clusters=100, technology="onwind"
)
configure_logging(snakemake)
set_scenario_config(snakemake)
nprocesses = int(snakemake.threads)
noprogress = snakemake.config["run"].get("disable_progressbar", True)
noprogress = noprogress or not snakemake.config["atlite"]["show_progress"]
technology = snakemake.wildcards.technology
params = snakemake.params.renewable[technology]
cutout = atlite.Cutout(snakemake.input.cutout)
regions = gpd.read_file(snakemake.input.regions)
assert not regions.empty, (
f"List of regions in {snakemake.input.regions} is empty, please "
"disable the corresponding renewable technology"
)
# do not pull up, set_index does not work if geo dataframe is empty
regions = regions.set_index("name").rename_axis("bus")
res = params.get("excluder_resolution", 100)
excluder = atlite.ExclusionContainer(crs=3035, res=res)
if params["natura"]:
excluder.add_raster(snakemake.input.natura, nodata=0, allow_no_overlap=True)
for dataset in ["corine", "luisa"]:
kwargs = {"nodata": 0} if dataset == "luisa" else {}
settings = params.get(dataset, {})
if not settings:
continue
if dataset == "luisa" and res > 50:
logger.info(
"LUISA data is available at 50m resolution, "
f"but coarser {res}m resolution is used."
)
if isinstance(settings, list):
settings = {"grid_codes": settings}
if "grid_codes" in settings:
codes = settings["grid_codes"]
excluder.add_raster(
snakemake.input[dataset], codes=codes, invert=True, crs=3035, **kwargs
)
if settings.get("distance", 0.0) > 0.0:
codes = settings["distance_grid_codes"]
buffer = settings["distance"]
excluder.add_raster(
snakemake.input[dataset], codes=codes, buffer=buffer, crs=3035, **kwargs
)
if params.get("ship_threshold"):
shipping_threshold = (
params["ship_threshold"] * 8760 * 6
) # approximation because of 6 years of hourly collected data
func = functools.partial(np.less, shipping_threshold)
excluder.add_raster(
snakemake.input.ship_density, codes=func, crs=4326, allow_no_overlap=True
)
if params.get("max_depth"):
# lambda not supported for atlite + multiprocessing
# use named function np.greater with partially frozen argument instead
# and exclude areas where: -max_depth > grid cell depth
func = functools.partial(np.greater, -params["max_depth"])
excluder.add_raster(snakemake.input.gebco, codes=func, crs=4326, nodata=-1000)
if params.get("min_depth"):
func = functools.partial(np.greater, -params["min_depth"])
excluder.add_raster(
snakemake.input.gebco, codes=func, crs=4326, nodata=-1000, invert=True
)
if "min_shore_distance" in params:
buffer = params["min_shore_distance"]
excluder.add_geometry(snakemake.input.country_shapes, buffer=buffer)
if "max_shore_distance" in params:
buffer = params["max_shore_distance"]
excluder.add_geometry(
snakemake.input.country_shapes, buffer=buffer, invert=True
)
logger.info(f"Calculate landuse availability for {technology}...")
start = time.time()
kwargs = dict(nprocesses=nprocesses, disable_progressbar=noprogress)
availability = cutout.availabilitymatrix(regions, excluder, **kwargs)
duration = time.time() - start
logger.info(
f"Completed landuse availability calculation for {technology} ({duration:2.2f}s)"
)
# For Moldova and Ukraine: Overwrite parts not covered by Corine with
# externally determined available areas
if "availability_matrix_MD_UA" in snakemake.input.keys():
availability_MDUA = xr.open_dataarray(
snakemake.input["availability_matrix_MD_UA"]
)
availability.loc[availability_MDUA.coords] = availability_MDUA
availability.to_netcdf(snakemake.output[0])
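The stored array is indexed (bus, y, x) with values between 0 and 1 giving the usable share of each weather grid cell per region; a sketch of inspecting it (file path assumed from the output naming above):

import xarray as xr

availability = xr.open_dataarray("resources/availability_matrix_100_onwind.nc")
print(availability.dims)          # ('bus', 'y', 'x')
print(float(availability.max()))  # <= 1.0, eligible share per cell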

View File

@ -34,21 +34,21 @@ if __name__ == "__main__":
from _helpers import mock_snakemake
snakemake = mock_snakemake(
"determine_availability_matrix_MD_UA", technology="solar"
"determine_availability_matrix_MD_UA", clusters=100, technology="solar"
)
configure_logging(snakemake)
set_scenario_config(snakemake)
nprocesses = int(snakemake.threads)
noprogress = not snakemake.config["atlite"].get("show_progress", True)
config = snakemake.config["renewable"][snakemake.wildcards.technology]
config = snakemake.params["renewable"][snakemake.wildcards.technology]
cutout = atlite.Cutout(snakemake.input.cutout)
regions = (
gpd.read_file(snakemake.input.regions).set_index("name").rename_axis("bus")
)
# Limit to "UA" and "MD" regions
buses = regions.loc[regions["country"].isin(["UA", "MD"])].index.values
buses = regions.filter(regex="(UA|MD)", axis=0).index.values
regions = regions.loc[buses]
excluder = atlite.ExclusionContainer(crs=3035, res=100)
@ -125,24 +125,24 @@ if __name__ == "__main__":
time.sleep(1)
excluder.add_geometry(pts_tmp_fn)
if "max_depth" in config:
if config.get("max_depth"):
# lambda not supported for atlite + multiprocessing
# use named function np.greater with partially frozen argument instead
# and exclude areas where: -max_depth > grid cell depth
func = functools.partial(np.greater, -config["max_depth"])
excluder.add_raster(snakemake.input.gebco, codes=func, crs=4326, nodata=-1000)
if "min_shore_distance" in config:
if config.get("min_shore_distance"):
buffer = config["min_shore_distance"]
excluder.add_geometry(snakemake.input.country_shapes, buffer=buffer)
if "max_shore_distance" in config:
if config.get("max_shore_distance"):
buffer = config["max_shore_distance"]
excluder.add_geometry(
snakemake.input.country_shapes, buffer=buffer, invert=True
)
if "ship_threshold" in config:
if config.get("ship_threshold"):
shipping_threshold = config["ship_threshold"] * 8760 * 6
func = functools.partial(np.less, shipping_threshold)
excluder.add_raster(

View File

@ -761,8 +761,7 @@ if __name__ == "__main__":
networks_dict = {
(cluster, ll, opt + sector_opt, planning_horizon): "results/"
+ snakemake.params.RDIR
+ f"/postnetworks/elec_s{simpl}_{cluster}_l{ll}_{opt}_{sector_opt}_{planning_horizon}.nc"
for simpl in snakemake.params.scenario["simpl"]
+ f"/postnetworks/base_s_{cluster}_l{ll}_{opt}_{sector_opt}_{planning_horizon}.nc"
for cluster in snakemake.params.scenario["clusters"]
for opt in snakemake.params.scenario["opts"]
for sector_opt in snakemake.params.scenario["sector_opts"]

View File

@ -732,8 +732,7 @@ if __name__ == "__main__":
networks_dict = {
(clusters, lv, opts + sector_opts): "results/"
+ run
+ f"postnetworks/elec_s{simpl}_{clusters}_l{lv}_{opts}_{sector_opts}_brownfield_all_years.nc"
for simpl in snakemake.config["scenario"]["simpl"]
+ f"postnetworks/base_s_{clusters}_l{lv}_{opts}_{sector_opts}_brownfield_all_years.nc"
for clusters in snakemake.config["scenario"]["clusters"]
for opts in snakemake.config["scenario"]["opts"]
for sector_opts in snakemake.config["scenario"]["sector_opts"]

View File

@ -229,7 +229,6 @@ if __name__ == "__main__":
snakemake = mock_snakemake(
"plot_gas_network",
simpl="",
opts="",
clusters="37",
ll="v1.0",

View File

@ -256,7 +256,6 @@ if __name__ == "__main__":
snakemake = mock_snakemake(
"plot_hydrogen_network",
simpl="",
opts="",
clusters="37",
ll="v1.0",

View File

@ -249,7 +249,6 @@ if __name__ == "__main__":
snakemake = mock_snakemake(
"plot_power_network",
simpl="",
opts="",
clusters="37",
ll="v1.0",

View File

@ -176,7 +176,6 @@ if __name__ == "__main__":
snakemake = mock_snakemake(
"plot_power_network_perfect",
simpl="",
opts="",
clusters="37",
ll="v1.0",

View File

@ -18,7 +18,6 @@ if __name__ == "__main__":
snakemake = mock_snakemake(
"plot_elec_statistics",
simpl="",
opts="Ept-12h",
clusters="37",
ll="v1.0",

View File

@ -181,7 +181,6 @@ if __name__ == "__main__":
snakemake = mock_snakemake(
"plot_electricity_prices",
simpl="",
opts="Ept-12h",
clusters="37",
ll="v1.0",

View File

@ -18,7 +18,6 @@ if __name__ == "__main__":
snakemake = mock_snakemake(
"plot_electricity_prices",
simpl="",
opts="Ept-12h",
clusters="37",
ll="v1.0",

View File

@ -29,7 +29,6 @@ if __name__ == "__main__":
snakemake = mock_snakemake(
"plot_validation_electricity_production",
simpl="",
opts="Ept",
clusters="37c",
ll="v1.0",

View File

@ -41,12 +41,12 @@ Inputs
------
- ``resources/costs.csv``: The database of cost assumptions for all included technologies for specific years from various sources; e.g. discount rate, lifetime, investment (CAPEX), fixed operation and maintenance (FOM), variable operation and maintenance (VOM), fuel costs, efficiency, carbon-dioxide intensity.
- ``networks/elec_s{simpl}_{clusters}.nc``: confer :ref:`cluster`
- ``networks/base_s_{clusters}.nc``: confer :ref:`cluster`
Outputs
-------
- ``networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc``: Complete PyPSA network that will be handed to the ``solve_network`` rule.
- ``networks/base_s_{clusters}_elec_l{ll}_{opts}.nc``: Complete PyPSA network that will be handed to the ``solve_network`` rule.
Description
-----------
@ -68,7 +68,7 @@ from _helpers import (
set_scenario_config,
update_config_from_wildcards,
)
from add_electricity import load_costs, update_transmission_costs
from add_electricity import load_costs, set_transmission_costs
from pypsa.descriptors import expand_series
idx = pd.IndexSlice
@ -191,7 +191,7 @@ def set_transmission_limit(n, ll_type, factor, costs, Nyears=1):
+ n.links.loc[links_dc_b, "p_nom"] @ n.links.loc[links_dc_b, col]
)
update_transmission_costs(n, costs)
set_transmission_costs(n, costs)
if factor == "opt" or float(factor) > 1.0:
n.lines["s_nom_min"] = lines_s_nom
@ -325,7 +325,6 @@ if __name__ == "__main__":
snakemake = mock_snakemake(
"prepare_network",
simpl="",
clusters="37",
ll="v1.0",
opts="Co2L-4H",

View File

@ -493,7 +493,6 @@ if __name__ == "__main__":
snakemake = mock_snakemake(
"prepare_perfect_foresight",
simpl="",
opts="",
clusters="37",
ll="v1.5",

View File

@ -407,12 +407,20 @@ def create_network_topology(
return topo
# TODO merge issue with PyPSA-Eur
def update_wind_solar_costs(n, costs):
def update_wind_solar_costs(
n: pypsa.Network,
costs: pd.DataFrame,
line_length_factor: int | float = 1,
landfall_lengths: dict = None,
) -> None:
"""
Update costs for wind and solar generators added with pypsa-eur to those
costs in the planning year.
"""
if landfall_lengths is None:
landfall_lengths = {}
# NB: solar costs are also manipulated for rooftop
# when distribution grid is inserted
n.generators.loc[n.generators.carrier == "solar", "capital_cost"] = costs.at[
@ -424,22 +432,9 @@ def update_wind_solar_costs(n, costs):
]
# for offshore wind, need to calculate connection costs
# assign clustered bus
# map initial network -> simplified network
busmap_s = pd.read_csv(snakemake.input.busmap_s, index_col=0).squeeze()
busmap_s.index = busmap_s.index.astype(str)
busmap_s = busmap_s.astype(str)
# map simplified network -> clustered network
busmap = pd.read_csv(snakemake.input.busmap, index_col=0).squeeze()
busmap.index = busmap.index.astype(str)
busmap = busmap.astype(str)
# map initial network -> clustered network
clustermaps = busmap_s.map(busmap)
# code adapted from pypsa-eur/scripts/add_electricity.py
for connection in ["dc", "ac", "float"]:
tech = "offwind-" + connection
landfall_length = landfall_lengths.get(tech, 0.0)
if tech not in n.generators.carrier.values:
continue
profile = snakemake.input["profile_offwind-" + connection]
@ -449,31 +444,13 @@ def update_wind_solar_costs(n, costs):
if "year" in ds.indexes:
ds = ds.sel(year=ds.year.min(), drop=True)
underwater_fraction = ds["underwater_fraction"].to_pandas()
connection_cost = (
snakemake.params.length_factor
* ds["average_distance"].to_pandas()
* (
underwater_fraction
* costs.at[tech + "-connection-submarine", "fixed"]
+ (1.0 - underwater_fraction)
* costs.at[tech + "-connection-underground", "fixed"]
)
distance = ds["average_distance"].to_pandas()
submarine_cost = costs.at[tech + "-connection-submarine", "fixed"]
underground_cost = costs.at[tech + "-connection-underground", "fixed"]
connection_cost = line_length_factor * (
distance * submarine_cost + landfall_length * underground_cost
)
# convert to aggregated clusters with weighting
weight = ds["weight"].to_pandas()
# e.g. clusters == 37m means that VRE generators are left
# at clustering of simplified network, but that they are
# connected to 37-node network
genmap = (
busmap_s if snakemake.wildcards.clusters[-1:] == "m" else clustermaps
)
connection_cost = (connection_cost * weight).groupby(
genmap
).sum() / weight.groupby(genmap).sum()
capital_cost = (
costs.at["offwind", "fixed"]
+ costs.at[tech + "-station", "fixed"]
@ -613,10 +590,10 @@ def remove_non_electric_buses(n):
n.buses = n.buses[n.buses.carrier.isin(["AC", "DC"])]
def patch_electricity_network(n):
def patch_electricity_network(n, costs, landfall_lengths):
remove_elec_base_techs(n)
remove_non_electric_buses(n)
update_wind_solar_costs(n, costs)
update_wind_solar_costs(n, costs, landfall_lengths=landfall_lengths)
n.loads["carrier"] = "electricity"
n.buses["location"] = n.buses.index
n.buses["unit"] = "MWh_el"
@ -1340,13 +1317,7 @@ def insert_electricity_distribution_grid(n, costs):
# set existing solar to cost of utility cost rather the 50-50 rooftop-utility
solar = n.generators.index[n.generators.carrier == "solar"]
n.generators.loc[solar, "capital_cost"] = costs.at["solar-utility", "fixed"]
if snakemake.wildcards.clusters[-1:] == "m":
simplified_pop_layout = pd.read_csv(
snakemake.input.simplified_pop_layout, index_col=0
)
pop_solar = simplified_pop_layout.total.rename(index=lambda x: x + " solar")
else:
pop_solar = pop_layout.total.rename(index=lambda x: x + " solar")
pop_solar = pop_layout.total.rename(index=lambda x: x + " solar")
# add max solar rooftop potential assuming 0.1 kW/m2 and 20 m2/person,
# i.e. 2 kW/person (population data is in thousands of people) so we get MW
@ -4621,7 +4592,6 @@ if __name__ == "__main__":
snakemake = mock_snakemake(
"prepare_sector_network",
simpl="",
opts="",
clusters="38",
ll="vopt",
@ -4658,12 +4628,17 @@ if __name__ == "__main__":
)
pop_weighted_energy_totals.update(pop_weighted_heat_totals)
landfall_lengths = {
tech: settings["landfall_length"]
for tech, settings in snakemake.params.renewable.items()
if "landfall_length" in settings.keys()
}
patch_electricity_network(n, costs, landfall_lengths)
fn = snakemake.input.heating_efficiencies
year = int(snakemake.params["energy_totals_year"])
heating_efficiencies = pd.read_csv(fn, index_col=[1, 0]).loc[year]
patch_electricity_network(n)
spatial = define_spatial(pop_layout.index, options)
if snakemake.params.foresight in ["myopic", "perfect"]:
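For clarity, a sketch of what the new landfall_lengths mapping evaluates to, assuming the offshore carriers configure landfall_length as in the default config (the renewable dict below is a hypothetical excerpt of snakemake.params.renewable):

# Only carriers that define "landfall_length" enter the dict;
# onwind is skipped because it has no such key.
renewable = {
    "onwind": {"cutout": "europe-2013-sarah3-era5"},
    "offwind-ac": {"cutout": "europe-2013-sarah3-era5", "landfall_length": 10},
    "offwind-dc": {"cutout": "europe-2013-sarah3-era5", "landfall_length": 10},
    "offwind-float": {"cutout": "europe-2013-sarah3-era5", "landfall_length": 10},
}

landfall_lengths = {
    tech: settings["landfall_length"]
    for tech, settings in renewable.items()
    if "landfall_length" in settings
}
print(landfall_lengths)  # {'offwind-ac': 10, 'offwind-dc': 10, 'offwind-float': 10}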

View File

@ -19,96 +19,74 @@ Relevant Settings
cluster_network:
aggregation_strategies:
costs:
year:
version:
fill_values:
marginal_cost:
capital_cost:
electricity:
max_hours:
lines:
length_factor:
links:
p_max_pu:
solving:
solver:
name:
.. seealso::
Documentation of the configuration file ``config/config.yaml`` at
:ref:`costs_cf`, :ref:`electricity_cf`, :ref:`renewable_cf`,
:ref:`lines_cf`, :ref:`links_cf`, :ref:`solving_cf`
:ref:`electricity_cf`, :ref:`renewable_cf`,
:ref:`lines_cf`, :ref:`links_cf`
Inputs
------
- ``resources/costs.csv``: The database of cost assumptions for all included technologies for specific years from various sources; e.g. discount rate, lifetime, investment (CAPEX), fixed operation and maintenance (FOM), variable operation and maintenance (VOM), fuel costs, efficiency, carbon-dioxide intensity.
- ``resources/regions_onshore.geojson``: confer :ref:`busregions`
- ``resources/regions_offshore.geojson``: confer :ref:`busregions`
- ``networks/elec.nc``: confer :ref:`electricity`
- ``networks/base.nc``
Outputs
-------
- ``resources/regions_onshore_elec_s{simpl}.geojson``:
- ``resources/regions_onshore_base.geojson``:
.. image:: img/regions_onshore_elec_s.png
.. image:: img/regions_onshore_base_s.png
:scale: 33 %
- ``resources/regions_offshore_elec_s{simpl}.geojson``:
- ``resources/regions_offshore_base.geojson``:
.. image:: img/regions_offshore_elec_s.png
.. image:: img/regions_offshore_base_s.png
:scale: 33 %
- ``resources/busmap_elec_s{simpl}.csv``: Mapping of buses from ``networks/elec.nc`` to ``networks/elec_s{simpl}.nc``;
- ``networks/elec_s{simpl}.nc``:
- ``resources/busmap_base_s.csv``: Mapping of buses from ``networks/base.nc`` to ``networks/base_s.nc``;
- ``networks/base_s.nc``:
.. image:: img/elec_s.png
.. image:: img/base_s.png
:scale: 33 %
Description
-----------
The rule :mod:`simplify_network` does up to four things:
The rule :mod:`simplify_network` does up to three things:
1. Create an equivalent transmission network in which all voltage levels are mapped to the 380 kV level by the function ``simplify_network(...)``.
2. DC only sub-networks that are connected at only two buses to the AC network are reduced to a single representative link in the function ``simplify_links(...)``. The components attached to buses in between are moved to the nearest endpoint. The grid connection cost of offshore wind generators are added to the capital costs of the generator.
2. DC only sub-networks that are connected at only two buses to the AC network are reduced to a single representative link in the function ``simplify_links(...)``.
3. Stub lines and links, i.e. dead-ends of the network, are sequentially removed from the network in the function ``remove_stubs(...)``. Components are moved along.
4. Optionally, if an integer were provided for the wildcard ``{simpl}`` (e.g. ``networks/elec_s500.nc``), the network is clustered to this number of clusters with the routines from the ``cluster_network`` rule with the function ``cluster_network.cluster(...)``. This step is usually skipped!
3. Stub lines and links, i.e. dead-ends of the network, are sequentially removed from the network in the function ``remove_stubs(...)``.
"""
import logging
from functools import reduce
from typing import Tuple
import geopandas as gpd
import numpy as np
import pandas as pd
import pypsa
import scipy as sp
from _helpers import configure_logging, set_scenario_config, update_p_nom_max
from add_electricity import load_costs
from _helpers import configure_logging, set_scenario_config
from base_network import append_bus_shapes
from cluster_network import cluster_regions, clustering_for_n_clusters
from pypsa.clustering.spatial import (
aggregateoneport,
busmap_by_stubs,
get_clustering_from_busmap,
)
from pypsa.io import import_components_from_dataframe, import_series_from_dataframe
from cluster_network import cluster_regions
from pypsa.clustering.spatial import busmap_by_stubs, get_clustering_from_busmap
from scipy.sparse.csgraph import connected_components, dijkstra
logger = logging.getLogger(__name__)
def simplify_network_to_380(n, linetype_380):
def simplify_network_to_380(
n: pypsa.Network, linetype_380: str
) -> Tuple[pypsa.Network, pd.Series]:
"""
Fix all lines to a voltage level of 380 kV and remove all transformers.
@ -149,123 +127,7 @@ def simplify_network_to_380(n, linetype_380):
return n, trafo_map
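A hedged, minimal sketch of the voltage harmonisation — not the full project function, which also rescales num_parallel/s_nom from the 380 kV line type and remaps all bus columns before dropping the transformers:

import pandas as pd
import pypsa

def to_380_sketch(n: pypsa.Network, linetype_380: str) -> tuple[pypsa.Network, pd.Series]:
    # relabel every bus to 380 kV and give all lines the standard 380 kV type
    n.buses["v_nom"] = 380.0
    n.lines["type"] = linetype_380

    # busmap folding each transformer's bus1 onto its bus0
    trafo_map = pd.Series(n.transformers.bus0.values, index=n.transformers.bus1.values)
    trafo_map = trafo_map[~trafo_map.index.duplicated(keep="first")]
    chained = trafo_map.isin(trafo_map.index)               # resolve one level
    trafo_map[chained] = trafo_map[chained].map(trafo_map)  # of chained trafos
    missing = n.buses.index.difference(trafo_map.index)
    trafo_map = pd.concat([trafo_map, pd.Series(missing, index=missing)])

    n.mremove("Transformer", n.transformers.index)
    return n, trafo_map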
def _prepare_connection_costs_per_link(n, costs, renewable_carriers, length_factor):
if n.links.empty:
return {}
return {
tech: (
n.links.length
* length_factor
* (
n.links.underwater_fraction
* costs.at[tech + "-connection-submarine", "capital_cost"]
+ (1.0 - n.links.underwater_fraction)
* costs.at[tech + "-connection-underground", "capital_cost"]
)
)
for tech in renewable_carriers
if tech.startswith("offwind")
}
def _compute_connection_costs_to_bus(
n,
busmap,
costs,
renewable_carriers,
length_factor,
connection_costs_per_link=None,
buses=None,
):
if connection_costs_per_link is None:
connection_costs_per_link = _prepare_connection_costs_per_link(
n, costs, renewable_carriers, length_factor
)
if buses is None:
buses = busmap.index[busmap.index != busmap.values]
connection_costs_to_bus = pd.DataFrame(index=buses)
for tech in connection_costs_per_link:
adj = n.adjacency_matrix(
weights=pd.concat(
dict(
Link=connection_costs_per_link[tech].reindex(n.links.index),
Line=pd.Series(0.0, n.lines.index),
)
)
)
costs_between_buses = dijkstra(
adj, directed=False, indices=n.buses.index.get_indexer(buses)
)
connection_costs_to_bus[tech] = costs_between_buses[
np.arange(len(buses)), n.buses.index.get_indexer(busmap.loc[buses])
]
return connection_costs_to_bus
def _adjust_capital_costs_using_connection_costs(n, connection_costs_to_bus):
connection_costs = {}
for tech in connection_costs_to_bus:
tech_b = n.generators.carrier == tech
costs = (
n.generators.loc[tech_b, "bus"]
.map(connection_costs_to_bus[tech])
.loc[lambda s: s > 0]
)
if not costs.empty:
n.generators.loc[costs.index, "capital_cost"] += costs
logger.info(
"Displacing {} generator(s) and adding connection costs to capital_costs: {} ".format(
tech,
", ".join(
"{:.0f} Eur/MW/a for `{}`".format(d, b)
for b, d in costs.items()
),
)
)
connection_costs[tech] = costs
def _aggregate_and_move_components(
n,
busmap,
connection_costs_to_bus,
aggregate_one_ports={"Load", "StorageUnit"},
aggregation_strategies=dict(),
exclude_carriers=None,
):
def replace_components(n, c, df, pnl):
n.mremove(c, n.df(c).index)
import_components_from_dataframe(n, df, c)
for attr, df in pnl.items():
if not df.empty:
import_series_from_dataframe(n, df, c, attr)
_adjust_capital_costs_using_connection_costs(n, connection_costs_to_bus)
generator_strategies = aggregation_strategies["generators"]
carriers = set(n.generators.carrier) - set(exclude_carriers)
generators, generators_pnl = aggregateoneport(
n,
busmap,
"Generator",
carriers=carriers,
custom_strategies=generator_strategies,
)
replace_components(n, "Generator", generators, generators_pnl)
for one_port in aggregate_one_ports:
df, pnl = aggregateoneport(n, busmap, component=one_port)
replace_components(n, one_port, df, pnl)
def _remove_clustered_buses_and_branches(n: pypsa.Network, busmap: pd.Series) -> None:
buses_to_del = n.buses.index.difference(busmap)
n.mremove("Bus", buses_to_del)
for c in n.branch_components:
@ -274,14 +136,8 @@ def _aggregate_and_move_components(
def simplify_links(
n,
costs,
renewables,
length_factor,
p_max_pu,
exclude_carriers,
aggregation_strategies=dict(),
):
n: pypsa.Network, p_max_pu: int | float
) -> Tuple[pypsa.Network, pd.Series]:
## Complex multi-node links are folded into end-points
logger.info("Simplifying connected link components")
@ -343,13 +199,6 @@ def simplify_links(
busmap = n.buses.index.to_series()
connection_costs_per_link = _prepare_connection_costs_per_link(
n, costs, renewables, length_factor
)
connection_costs_to_bus = pd.DataFrame(
0.0, index=n.buses.index, columns=list(connection_costs_per_link)
)
node_corsica = find_closest_bus(
n,
x=9.44802,
@ -375,15 +224,6 @@ def simplify_links(
n.buses.loc[b, ["x", "y"]], n.buses.loc[buses[1:-1], ["x", "y"]]
)
busmap.loc[buses] = b[np.r_[0, m.argmin(axis=0), 1]]
connection_costs_to_bus.loc[buses] += _compute_connection_costs_to_bus(
n,
busmap,
costs,
renewables,
length_factor,
connection_costs_per_link,
buses,
)
all_links = [i for _, i in sum(links, [])]
@ -421,61 +261,41 @@ def simplify_links(
params.setdefault(attr, default)
n.links.loc[name] = pd.Series(params)
# n.add("Link", **params)
# n.add("Link", name, **params)
logger.debug("Collecting all components using the busmap")
_remove_clustered_buses_and_branches(n, busmap)
# Change carrier type of all added super_nodes to "AC"
n.buses.loc[added_supernodes, "carrier"] = "AC"
_aggregate_and_move_components(
n,
busmap,
connection_costs_to_bus,
aggregation_strategies=aggregation_strategies,
exclude_carriers=exclude_carriers,
)
return n, busmap
def remove_stubs(
n,
costs,
renewable_carriers,
length_factor,
simplify_network,
aggregation_strategies=dict(),
):
n: pypsa.Network, simplify_network: dict
) -> Tuple[pypsa.Network, pd.Series]:
logger.info("Removing stubs")
across_borders = simplify_network["remove_stubs_across_borders"]
matching_attrs = [] if across_borders else ["country"]
busmap = busmap_by_stubs(n, matching_attrs)
connection_costs_to_bus = _compute_connection_costs_to_bus(
n, busmap, costs, renewable_carriers, length_factor
)
_aggregate_and_move_components(
n,
busmap,
connection_costs_to_bus,
aggregation_strategies=aggregation_strategies,
exclude_carriers=simplify_network["exclude_carriers"],
)
_remove_clustered_buses_and_branches(n, busmap)
return n, busmap
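The country-aware behaviour behind remove_stubs_across_borders: false can be demonstrated on a toy network; with matching_attrs=["country"], a dead-end bus is only folded into its neighbour when both carry the same country attribute (network data below are made up):

import pypsa
from pypsa.clustering.spatial import busmap_by_stubs

n = pypsa.Network()
n.madd("Bus", ["hub", "stub", "foreign_stub"])
n.buses["country"] = ["DE", "DE", "FR"]
n.madd("Line", ["l1", "l2"], bus0="hub", bus1=["stub", "foreign_stub"],
       x=0.1, r=0.01, s_nom=100)

# "stub" folds into "hub"; "foreign_stub" keeps its own bus
# because the countries differ
print(busmap_by_stubs(n, ["country"]))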
def aggregate_to_substations(n, aggregation_strategies=dict(), buses_i=None):
def aggregate_to_substations(
n: pypsa.Network,
buses_i: pd.Index | list,
aggregation_strategies: dict | None = None,
) -> Tuple[pypsa.Network, pd.Series]:
# can be used to aggregate a selection of buses to electrically closest neighbors
# if no buses are given, nodes that are no substations or without offshore connection are aggregated
if buses_i is None:
logger.info(
"Aggregating buses that are no substations or have no valid offshore connection"
)
buses_i = list(set(n.buses.index) - set(n.generators.bus) - set(n.loads.bus))
logger.info("Aggregating buses to substations")
if aggregation_strategies is None:
aggregation_strategies = dict()
weight = pd.concat(
{
@ -503,49 +323,21 @@ def aggregate_to_substations(n, aggregation_strategies=dict(), buses_i=None):
busmap.loc[buses_i] = dist.idxmin(1)
line_strategies = aggregation_strategies.get("lines", dict())
generator_strategies = aggregation_strategies.get("generators", dict())
one_port_strategies = aggregation_strategies.get("one_ports", dict())
bus_strategies = aggregation_strategies.get("buses", dict())
bus_strategies.setdefault("substation_lv", lambda x: bool(x.sum()))
bus_strategies.setdefault("substation_off", lambda x: bool(x.sum()))
clustering = get_clustering_from_busmap(
n,
busmap,
aggregate_generators_weighted=True,
aggregate_generators_carriers=None,
aggregate_one_ports=["Load", "StorageUnit"],
line_length_factor=1.0,
bus_strategies=bus_strategies,
line_strategies=line_strategies,
generator_strategies=generator_strategies,
one_port_strategies=one_port_strategies,
scale_link_capital_costs=False,
)
return clustering.network, busmap
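The nearest-neighbour assignment at the heart of aggregate_to_substations is a row-wise idxmin over a distance table; a toy example with invented distances:

import pandas as pd

# rows: buses to be aggregated; columns: candidate substations
dist = pd.DataFrame(
    {"sub1": [0.2, 5.0], "sub2": [3.0, 0.7]},
    index=["stub_a", "stub_b"],
)
busmap = dist.idxmin(axis=1)
print(busmap)  # stub_a -> sub1, stub_b -> sub2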
def cluster(
n,
n_clusters,
focus_weights,
solver_name,
algorithm="hac",
feature=None,
aggregation_strategies=dict(),
):
logger.info(f"Clustering to {n_clusters} buses")
clustering = clustering_for_n_clusters(
n,
n_clusters,
custom_busmap=False,
aggregation_strategies=aggregation_strategies,
solver_name=solver_name,
algorithm=algorithm,
feature=feature,
focus_weights=focus_weights,
)
return clustering.network, clustering.busmap
def find_closest_bus(n, x, y, tol=2000):
"""
Find the index of the closest bus to the given coordinates within a specified tolerance.
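Not the project implementation, but a minimal stand-in showing the distance test such a lookup performs, assuming bus coordinates are lon/lat in degrees and the tolerance is in metres:

import numpy as np
import pandas as pd

def find_closest_bus_sketch(buses: pd.DataFrame, x: float, y: float, tol: float = 2000.0):
    """buses needs 'x' (lon) and 'y' (lat) columns; tol in metres."""
    r = 6_371_000.0  # mean Earth radius in metres
    lon, lat = np.radians(buses["x"]), np.radians(buses["y"])
    lon0, lat0 = np.radians(x), np.radians(y)
    # haversine great-circle distance to every bus
    a = (np.sin((lat - lat0) / 2) ** 2
         + np.cos(lat0) * np.cos(lat) * np.sin((lon - lon0) / 2) ** 2)
    dist = 2 * r * np.arcsin(np.sqrt(a))
    return dist.idxmin() if dist.min() <= tol else None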
@ -586,71 +378,28 @@ if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake("simplify_network", simpl="", run="all")
snakemake = mock_snakemake("simplify_network")
configure_logging(snakemake)
set_scenario_config(snakemake)
params = snakemake.params
solver_name = snakemake.config["solving"]["solver"]["name"]
n = pypsa.Network(snakemake.input.network)
Nyears = n.snapshot_weightings.objective.sum() / 8760
# remove integer outputs for compatibility with PyPSA v0.26.0
n.generators.drop("n_mod", axis=1, inplace=True, errors="ignore")
buses_prev, lines_prev, links_prev = len(n.buses), len(n.lines), len(n.links)
linetype_380 = snakemake.config["lines"]["types"][380]
n, trafo_map = simplify_network_to_380(n, linetype_380)
technology_costs = load_costs(
snakemake.input.tech_costs,
params.costs,
params.max_hours,
Nyears,
)
n, simplify_links_map = simplify_links(
n,
technology_costs,
params.renewable_carriers,
params.length_factor,
params.p_max_pu,
params.simplify_network["exclude_carriers"],
params.aggregation_strategies,
)
n, simplify_links_map = simplify_links(n, params.p_max_pu)
busmaps = [trafo_map, simplify_links_map]
if params.simplify_network["remove_stubs"]:
n, stub_map = remove_stubs(
n,
technology_costs,
params.renewable_carriers,
params.length_factor,
params.simplify_network,
aggregation_strategies=params.aggregation_strategies,
)
n, stub_map = remove_stubs(n, params.simplify_network)
busmaps.append(stub_map)
if params.simplify_network["to_substations"]:
n, substation_map = aggregate_to_substations(n, params.aggregation_strategies)
busmaps.append(substation_map)
# treatment of outliers (nodes without a profile for considered carrier):
# all nodes that have no profile of the given carrier are being aggregated to closest neighbor
if params.simplify_network["algorithm"] == "hac":
carriers = params.simplify_network["feature"].split("-")[0].split("+")
for carrier in carriers:
buses_i = list(
set(n.buses.index) - set(n.generators.query("carrier == @carrier").bus)
)
logger.info(
f"clustering preparation (hac): aggregating {len(buses_i)} buses of type {carrier}."
)
n, busmap_hac = aggregate_to_substations(
n, params.aggregation_strategies, buses_i
)
busmaps.append(busmap_hac)
substations_i = n.buses.query("substation_lv or substation_off").index
# some entries in n.buses are not updated in previous functions, therefore can be wrong. As they are not needed
# and are lost when clustering (for example with the simpl wildcard), we remove them for consistency:
@ -659,8 +408,6 @@ if __name__ == "__main__":
"tags",
"under_construction",
"onshore_bus",
"substation_lv",
"substation_off",
"geometry",
"underground",
"project_status",
@ -668,30 +415,39 @@ if __name__ == "__main__":
n.buses.drop(remove, axis=1, inplace=True, errors="ignore")
n.lines.drop(remove, axis=1, errors="ignore", inplace=True)
if snakemake.wildcards.simpl:
# shapes = n.shapes
n, cluster_map = cluster(
n,
int(snakemake.wildcards.simpl),
params.focus_weights,
solver_name,
params.simplify_network["algorithm"],
params.simplify_network["feature"],
params.aggregation_strategies,
if params.simplify_network["to_substations"]:
n, substation_map = aggregate_to_substations(
n, substations_i, params.aggregation_strategies
)
# n.shapes = shapes
busmaps.append(cluster_map)
busmaps.append(substation_map)
update_p_nom_max(n)
# all buses without shapes need to be clustered to their closest neighbor for HAC
if params.cluster_network["algorithm"] == "hac":
buses_i = list(n.buses.index.difference(n.shapes.idx))
logger.info(
"Preparing for HAC-Clustering. "
f"Aggregating {len(buses_i)} buses without Voronoi shapes to closest neighbor."
)
n, busmap_hac = aggregate_to_substations(
n, buses_i, params.aggregation_strategies
)
busmaps.append(busmap_hac)
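A thin sketch of the pre-HAC outlier selection above, assuming n.shapes.idx holds the names of buses that received a Voronoi shape:

import pandas as pd

bus_index = pd.Index(["b1", "b2", "b3", "b4"])
shape_idx = pd.Index(["b1", "b2", "b3"])  # b4 ended up without a shape
buses_i = list(bus_index.difference(shape_idx))
print(buses_i)  # ['b4'] is aggregated to its closest neighbour before HAC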
busmap_s = reduce(lambda x, y: x.map(y), busmaps[1:], busmaps[0])
busmap_s.to_csv(snakemake.output.busmap)
for which in ["regions_onshore", "regions_offshore"]:
regions = gpd.read_file(snakemake.input[which])
clustered_regions = cluster_regions(busmaps, regions)
clustered_regions = cluster_regions(busmaps, regions, with_country=True)
clustered_regions.to_file(snakemake.output[which])
# append_bus_shapes(n, clustered_regions, type=which.split("_")[1])
n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
n.export_to_netcdf(snakemake.output.network)
logger.info(
f"Simplified network:\n"
f"Buses: {buses_prev} to {len(n.buses)}\n"
f"Lines: {lines_prev} to {len(n.lines)}\n"
f"Links: {links_prev} to {len(n.links)}"
)

View File

@ -51,13 +51,6 @@ logger = logging.getLogger(__name__)
pypsa.pf.logger.setLevel(logging.WARNING)
def add_land_use_constraint(n, planning_horizons, config):
if "m" in snakemake.wildcards.clusters:
_add_land_use_constraint_m(n, planning_horizons, config)
else:
_add_land_use_constraint(n)
def add_land_use_constraint_perfect(n):
"""
Add global constraints for tech capacity limit.
@ -121,7 +114,7 @@ def add_land_use_constraint_perfect(n):
return n
def _add_land_use_constraint(n):
def add_land_use_constraint(n):
# warning: this will miss existing offwind which is not classed AC-DC and has carrier 'offwind'
for carrier in [
@ -159,58 +152,6 @@ def _add_land_use_constraint(n):
n.generators["p_nom_max"] = n.generators["p_nom_max"].clip(lower=0)
def _add_land_use_constraint_m(n, planning_horizons, config):
# if generators clustering is lower than network clustering, land_use accounting is at generators clusters
grouping_years = config["existing_capacities"]["grouping_years_power"]
current_horizon = snakemake.wildcards.planning_horizons
for carrier in [
"solar",
"solar rooftop",
"solar-hsat",
"onwind",
"offwind-ac",
"offwind-dc",
]:
existing = n.generators.loc[n.generators.carrier == carrier, "p_nom"]
ind = list(
{i.split(sep=" ")[0] + " " + i.split(sep=" ")[1] for i in existing.index}
)
previous_years = [
str(y)
for y in set(planning_horizons + grouping_years)
if y < int(snakemake.wildcards.planning_horizons)
]
for p_year in previous_years:
ind2 = [
i for i in ind if i + " " + carrier + "-" + p_year in existing.index
]
sel_current = [i + " " + carrier + "-" + current_horizon for i in ind2]
sel_p_year = [i + " " + carrier + "-" + p_year for i in ind2]
n.generators.loc[sel_current, "p_nom_max"] -= existing.loc[
sel_p_year
].rename(lambda x: x[:-4] + current_horizon)
# check if existing capacities are larger than technical potential
existing_large = n.generators[
n.generators["p_nom_min"] > n.generators["p_nom_max"]
].index
if len(existing_large):
logger.warning(
f"Existing capacities larger than technical potential for {existing_large},\
adjust technical potential to existing capacities"
)
n.generators.loc[existing_large, "p_nom_max"] = n.generators.loc[
existing_large, "p_nom_min"
]
n.generators["p_nom_max"] = n.generators["p_nom_max"].clip(lower=0)
def add_solar_potential_constraints(n, config):
"""
Add constraint to make sure the summed capacity of all solar technologies (fixed, tracking, etc.) is below the region potential.
@ -246,37 +187,17 @@ def add_solar_potential_constraints(n, config):
lambda x: (x * factor) if carrier in x.name else x, axis=1
)
if "m" in snakemake.wildcards.clusters:
location = pd.Series(
[" ".join(i.split(" ")[:2]) for i in n.generators.index],
index=n.generators.index,
)
ggrouper = pd.Series(
n.generators.loc[solar].index.rename("bus").map(location),
index=n.generators.loc[solar].index,
).to_xarray()
rhs = (
n.generators.loc[solar_today, "p_nom_max"]
.groupby(n.generators.loc[solar_today].index.rename("bus").map(location))
.sum()
- n.generators.loc[solar_hsat, "p_nom_opt"]
.groupby(n.generators.loc[solar_hsat].index.rename("bus").map(location))
.sum()
* land_use_factors["solar-hsat"]
).clip(lower=0)
else:
location = pd.Series(n.buses.index, index=n.buses.index)
ggrouper = n.generators.loc[solar].bus
rhs = (
n.generators.loc[solar_today, "p_nom_max"]
.groupby(n.generators.loc[solar_today].bus.map(location))
.sum()
- n.generators.loc[solar_hsat, "p_nom_opt"]
.groupby(n.generators.loc[solar_hsat].bus.map(location))
.sum()
* land_use_factors["solar-hsat"]
).clip(lower=0)
location = pd.Series(n.buses.index, index=n.buses.index)
ggrouper = n.generators.loc[solar].bus
rhs = (
n.generators.loc[solar_today, "p_nom_max"]
.groupby(n.generators.loc[solar_today].bus.map(location))
.sum()
- n.generators.loc[solar_hsat, "p_nom_opt"]
.groupby(n.generators.loc[solar_hsat].bus.map(location))
.sum()
* land_use_factors["solar-hsat"]
).clip(lower=0)
lhs = (
(n.model["Generator-p_nom"].rename(rename).loc[solar] * land_use.squeeze())
@ -515,7 +436,7 @@ def prepare_network(
n.snapshot_weightings[:] = 8760.0 / nhours
if foresight == "myopic":
add_land_use_constraint(n, planning_horizons, config)
add_land_use_constraint(n)
if foresight == "perfect":
n = add_land_use_constraint_perfect(n)
@ -1134,7 +1055,6 @@ if __name__ == "__main__":
snakemake = mock_snakemake(
"solve_sector_network_perfect",
configfiles="../config/test/config.perfect.yaml",
simpl="",
opts="",
clusters="5",
ll="v1.0",

View File

@ -29,7 +29,6 @@ if __name__ == "__main__":
snakemake = mock_snakemake(
"solve_operations_network",
configfiles="test/config.electricity.yaml",
simpl="",
opts="",
clusters="5",
ll="v1.5",

View File

@ -20,17 +20,17 @@ Relevant Settings
Inputs
------
- ``networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc``: the network whose
- ``networks/base_s_{clusters}_elec_l{ll}_{opts}.nc``: the network whose
snapshots are to be aggregated
- ``resources/hourly_heat_demand_total_elec_s{simpl}_{clusters}.nc``: the total
- ``resources/hourly_heat_demand_total_base_s_{clusters}.nc``: the total
hourly heat demand
- ``resources/solar_thermal_total_elec_s{simpl}_{clusters}.nc``: the total
- ``resources/solar_thermal_total_base_s_{clusters}.nc``: the total
hourly solar thermal generation
Outputs
-------
- ``snapshot_weightings_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.csv``
- ``snapshot_weightings_base_s_{clusters}_elec_l{ll}_{opts}.csv``
Description
-----------
@ -63,7 +63,6 @@ if __name__ == "__main__":
snakemake = mock_snakemake(
"time_aggregation",
configfiles="test/config.overnight.yaml",
simpl="",
opts="",
clusters="37",
ll="v1.0",

View File

@ -7,7 +7,7 @@ set -x && \
snakemake -call solve_elec_networks --configfile config/test/config.electricity.yaml --rerun-triggers=mtime && \
snakemake -call all --configfile config/test/config.overnight.yaml --rerun-triggers=mtime && \
snakemake -call all --configfile config/test/config.myopic.yaml --rerun-triggers=mtime && \
snakemake -call all --configfile config/test/config.perfect.yaml --rerun-triggers=mtime && \
snakemake -call make_summary_perfect --configfile config/test/config.perfect.yaml --rerun-triggers=mtime && \
snakemake -call all --configfile config/test/config.scenarios.yaml --rerun-triggers=mtime -n && \
set +x