Merge remote-tracking branch 'upstream/master' into no-offwind-fix

Koen van Greevenbroek 2024-02-20 14:41:24 +01:00
commit 87088904ee
111 changed files with 2549 additions and 1777 deletions

.gitignore vendored
View File

@ -20,9 +20,16 @@ gurobi.log
/notebooks
/data
/cutouts
/tmp
doc/_build
/scripts/old
/scripts/create_scenarios.py
/config/create_scenarios.py
config/config.yaml
config/scenarios.yaml
config.yaml
config/config.yaml
@ -54,25 +61,15 @@ d1gam3xoknrgr2.cloudfront.net/
*.nc
*~
/scripts/old
*.pyc
/cutouts
/tmp
/pypsa
*.xlsx
config.yaml
doc/_build
*.xls
*.geojson
*.ipynb
data/costs_*
merger-todos.md

View File

@ -51,7 +51,7 @@ repos:
# Formatting with "black" coding style
- repo: https://github.com/psf/black-pre-commit-mirror
rev: 24.1.1
rev: 24.2.0
hooks:
# Format Python files
- id: black

View File

@ -4,40 +4,50 @@
from os.path import normpath, exists
from shutil import copyfile, move, rmtree
from pathlib import Path
import yaml
from snakemake.remote.HTTP import RemoteProvider as HTTPRemoteProvider
HTTP = HTTPRemoteProvider()
from snakemake.utils import min_version
min_version("7.7")
from scripts._helpers import path_provider
conf_file = os.path.join(workflow.current_basedir, "config/config.yaml")
conf_default_file = os.path.join(workflow.current_basedir, "config/config.default.yaml")
if not exists(conf_file) and exists(conf_default_file):
copyfile(conf_default_file, conf_file)
min_version("7.7")
HTTP = HTTPRemoteProvider()
default_files = {
"config/config.default.yaml": "config/config.yaml",
"config/scenarios.template.yaml": "config/scenarios.yaml",
}
for template, target in default_files.items():
target = os.path.join(workflow.current_basedir, target)
template = os.path.join(workflow.current_basedir, template)
if not exists(target) and exists(template):
copyfile(template, target)
configfile: "config/config.default.yaml"
configfile: "config/config.yaml"
COSTS = f"data/costs_{config['costs']['year']}.csv"
ATLITE_NPROCESSES = config["atlite"].get("nprocesses", 4)
run = config.get("run", {})
RDIR = run["name"] + "/" if run.get("name") else ""
CDIR = RDIR if not run.get("shared_cutouts") else ""
LOGS = "logs/" + RDIR
BENCHMARKS = "benchmarks/" + RDIR
if not (shared_resources := run.get("shared_resources")):
RESOURCES = "resources/" + RDIR
elif isinstance(shared_resources, str):
RESOURCES = "resources/" + shared_resources + "/"
run = config["run"]
scenarios = run.get("scenarios", {})
if run["name"] and scenarios.get("enable"):
fn = Path(scenarios["file"])
scenarios = yaml.safe_load(fn.read_text())
RDIR = "{run}/"
if run["name"] == "all":
config["run"]["name"] = list(scenarios.keys())
elif run["name"]:
RDIR = run["name"] + "/"
else:
RESOURCES = "resources/"
RDIR = ""
logs = path_provider("logs/", RDIR, run["shared_resources"])
benchmarks = path_provider("benchmarks/", RDIR, run["shared_resources"])
resources = path_provider("resources/", RDIR, run["shared_resources"])
CDIR = "" if run["shared_cutouts"] else RDIR
RESULTS = "results/" + RDIR
@ -80,10 +90,19 @@ if config["foresight"] == "perfect":
rule all:
input:
RESULTS + "graphs/costs.pdf",
expand(RESULTS + "graphs/costs.pdf", run=config["run"]["name"]),
default_target: True
rule create_scenarios:
output:
config["run"]["scenarios"]["file"],
conda:
"envs/retrieve.yaml"
script:
"config/create_scenarios.py"
rule purge:
run:
import builtins
@ -104,9 +123,9 @@ rule dag:
message:
"Creating DAG of workflow."
output:
dot=RESOURCES + "dag.dot",
pdf=RESOURCES + "dag.pdf",
png=RESOURCES + "dag.png",
dot=resources("dag.dot"),
pdf=resources("dag.pdf"),
png=resources("dag.png"),
conda:
"envs/environment.yaml"
shell:

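The ``path_provider`` helper imported from ``scripts/_helpers.py`` above is what turns the ``logs(...)``, ``benchmarks(...)`` and ``resources(...)`` calls into run-aware paths. A minimal sketch of the idea, assuming the behaviour described in the release notes; the real helper additionally restricts ``shared_resources: base`` to wildcard-free resources:

from functools import partial

def _path(fn, prefix, rdir, shared_resources):
    # Decide which run subdirectory the file lives in.
    if shared_resources is True:
        run_dir = ""                      # share everything across runs
    elif isinstance(shared_resources, str):
        run_dir = shared_resources + "/"  # e.g. "base/" as a shared subdirectory
    else:
        run_dir = rdir                    # per-run directory, e.g. "{run}/"
    return prefix + run_dir + fn

def path_provider(prefix, rdir, shared_resources):
    return partial(_path, prefix=prefix, rdir=rdir, shared_resources=shared_resources)

resources = path_provider("resources/", "{run}/", False)
print(resources("networks/base.nc"))  # resources/{run}/networks/base.nc
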
View File

@ -21,6 +21,9 @@ remote:
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#run
run:
name: ""
scenarios:
enable: false
file: config/scenarios.yaml
disable_progressbar: false
shared_resources: false
shared_cutouts: true
@ -59,9 +62,6 @@ snapshots:
start: "2013-01-01"
end: "2014-01-01"
inclusive: 'left'
resolution: false
segmentation: false
#representative: false
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#enable
enable:
@ -366,6 +366,11 @@ existing_capacities:
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#sector
sector:
transport: true
heating: true
biomass: true
industry: true
agriculture: true
district_heating:
potential: 0.6
progress:
@ -531,6 +536,7 @@ sector:
use_methanation_waste_heat: true
use_fuel_cell_waste_heat: true
use_electrolysis_waste_heat: true
electricity_transmission_grid: true
electricity_distribution_grid: true
electricity_distribution_grid_cost_factor: 1.0
electricity_grid_connection: true
@ -712,6 +718,14 @@ clustering:
committable: any
ramp_limit_up: max
ramp_limit_down: max
temporal:
resolution_elec: false
resolution_sector: false
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#adjustments
adjustments:
electricity: false
sector: false
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#solving
solving:

View File

@ -0,0 +1,37 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2023-2024 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
# This script helps to generate a scenarios.yaml file for PyPSA-Eur.
# You can modify the template to your needs and define all possible combinations of config values that should be considered.
if "snakemake" in globals():
filename = snakemake.output[0]
else:
filename = "../config/scenarios.yaml"
import itertools
# Insert your config values that should be altered in the template.
# Change `config_section` and `config_section2` to the actual config sections.
template = """
scenario{scenario_number}:
config_section:
config_key: {config_value}
config_section2:
config_key2: {config_value2}
"""
# Define all possible combinations of config values.
# This must define all config values that are used in the template.
config_values = dict(config_value=["true", "false"], config_value2=[1, 2, 3, 4])
combinations = [
dict(zip(config_values.keys(), values))
for values in itertools.product(*config_values.values())
]
with open(filename, "w") as f:
for i, config in enumerate(combinations):
f.write(template.format(scenario_number=i, **config))
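
Running this template standalone writes ``../config/scenarios.yaml``; within the workflow it is invoked as ``snakemake -j1 create_scenarios``. As the ``Snakefile`` diff above shows, the workflow reads the file back with ``yaml.safe_load``, and ``run: name:`` must then be a subset of its top-level keys (or ``all``). A quick, hypothetical sanity check of a generated file:

import yaml
from pathlib import Path

scenarios = yaml.safe_load(Path("config/scenarios.yaml").read_text())
print(list(scenarios))  # e.g. ['scenario0', 'scenario1', ...] -- valid run: name: entries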

View File

@ -0,0 +1,28 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
# This file is used to define the scenarios that are run by snakemake. Each entry on the first level is a scenario. Each scenario can contain configuration overrides with respect to the config/config.yaml settings.
#
# Example
#
# custom-scenario: # name of the scenario
# electricity:
# renewable_carriers: [wind, solar] # override the list of renewable carriers
normal:
electricity:
renewable_carriers:
- solar
- onwind
- offwind-ac
- offwind-dc
- hydro
no-offwind:
electricity:
renewable_carriers:
- solar
- onwind
- hydro
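
Each top-level entry overrides the base configuration for the run of the same name. The override is recursive in spirit; a hypothetical illustration of the merge (the workflow's actual merge logic lives elsewhere and may differ in detail):

def deep_update(base, override):
    # Recursively apply scenario overrides onto the base config.
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(base.get(key), dict):
            deep_update(base[key], value)
        else:
            base[key] = value
    return base

base = {"electricity": {"renewable_carriers": ["solar", "onwind", "offwind-ac", "offwind-dc", "hydro"]}}
no_offwind = {"electricity": {"renewable_carriers": ["solar", "onwind", "hydro"]}}
print(deep_update(base, no_offwind)["electricity"]["renewable_carriers"])
# ['solar', 'onwind', 'hydro']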

View File

@ -0,0 +1,60 @@
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: CC0-1.0
tutorial: true
run:
name:
- test-elec-no-offshore-wind
- test-elec-no-onshore-wind
scenarios:
enable: true
file: "config/test/scenarios.yaml"
disable_progressbar: true
shared_resources: base
shared_cutouts: true
scenario:
clusters:
- 5
opts:
- Co2L-24H
countries: ['BE']
snapshots:
start: "2013-03-01"
end: "2013-03-08"
electricity:
extendable_carriers:
Generator: [OCGT]
StorageUnit: [battery, H2]
Store: []
atlite:
default_cutout: be-03-2013-era5
cutouts:
be-03-2013-era5:
module: era5
x: [4., 15.]
y: [46., 56.]
time: ["2013-03-01", "2013-03-08"]
renewable:
onwind:
cutout: be-03-2013-era5
offwind-ac:
cutout: be-03-2013-era5
max_depth: false
offwind-dc:
cutout: be-03-2013-era5
max_depth: false
solar:
cutout: be-03-2013-era5
solving:
solver:
name: glpk
options: "glpk-default"

View File

@ -0,0 +1,11 @@
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: CC0-1.0
test-elec-no-offshore-wind:
electricity:
renewable_carriers: [solar, onwind]
test-elec-no-onshore-wind:
electricity:
renewable_carriers: [solar, offwind-ac, offwind-dc]

View File

@ -0,0 +1,8 @@
,Unit,Values,Description
adjustments,,,
-- electricity,bool or dict,,"Parameter adjustments for capital cost, marginal cost, and maximum capacities of carriers. Applied in :mod:`prepare_network.`"
-- -- {attr},,,"Attribute can be ``e_nom_opt``, ``p_nom_opt``, ``marginal_cost`` or ``capital_cost``"
-- -- -- {carrier},float,per-unit,"Any carrier of the network to which parameter adjustment factor should be applied."
-- sector,bool or dict,,"Parameter adjustments for capital cost, marginal cost, and maximum capacities of carriers. Applied in :mod:`prepare_sector_network.`"
-- -- {attr},,,"Attribute can be ``e_nom_opt``, ``p_nom_opt``, ``marginal_cost`` or ``capital_cost``"
-- -- -- {carrier},float,per-unit,"Any carrier of the network to which parameter adjustment factor should be applied."
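
A short illustration of the nesting this table describes, with hypothetical values, parsed here as YAML:

import yaml

example = yaml.safe_load("""
adjustments:
  electricity:
    capital_cost:
      onwind: 0.9   # scale onshore wind capital cost to 90 per cent
  sector: false
""")
print(example["adjustments"]["electricity"]["capital_cost"]["onwind"])  # 0.9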

View File

@ -17,3 +17,6 @@ aggregation_strategies,,,
-- -- {key},str,"{key} can be any of the component of the generator (str). Its value can be any that can be converted to pandas.Series using getattr(). For example one of {min, max, sum}.","Aggregates the component according to the given strategy. For example, if sum, then all values within each cluster are summed to represent the new generator."
-- buses,,,
-- -- {key},str,"{key} can be any of the component of the bus (str). Its value can be any that can be converted to pandas.Series using getattr(). For example one of {min, max, sum}.","Aggregates the component according to the given strategy. For example, if sum, then all values within each cluster are summed to represent the new bus."
temporal,,,Options for temporal resolution
-- resolution_elec,--,"{false,``nH``; i.e. ``2H``-``6H``}","Resample the time-resolution by averaging over every ``n`` snapshots in :mod:`prepare_network`. **Warning:** This option should currently only be used with electricity-only networks, not for sector-coupled networks."
-- resolution_sector,--,"{false,``nH``; i.e. ``2H``-``6H``}","Resample the time-resolution by averaging over every ``n`` snapshots in :mod:`prepare_sector_network`."
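
To illustrate what ``resolution_elec: 3H`` means in practice, a sketch of the averaging that such resampling implies, assuming plain averaging of hourly snapshots (illustrative only):

import numpy as np
import pandas as pd

snapshots = pd.date_range("2013-01-01", periods=6, freq="H")
load = pd.Series(np.arange(6.0), index=snapshots)
print(load.resample("3H").mean())  # two snapshots, each the mean over 3 hours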


View File

@ -1,5 +1,8 @@
,Unit,Values,Description
name,--,"any string","Specify a name for your run. Results will be stored under this name."
disable_progrssbar,bool,"{true, false}","Switch to select whether progressbar should be disabled."
shared_resources,bool,"{true, false}","Switch to select whether resources should be shared across runs."
name,--,str/list,"Specify a name for your run. Results will be stored under this name. If ``scenarios: enable:`` is set to ``true``, the name must be a subset of the scenario names defined in ``scenarios: file:``. If the name is 'all', all defined scenarios will be run."
scenarios,,,
-- enable,bool,"{true, false}","Switch to select whether the workflow should generate scenarios based on ``file``."
-- file,str,,"Path to the scenario yaml file. The scenario file contains config overrides for each scenario. In order to be taken into account, ``run: scenarios: enable`` has to be set to ``true`` and ``run: name`` has to be a subset of the top-level keys given in the scenario file. In order to automatically create a ``scenarios.yaml`` file based on a combination of settings, alter and use the ``config/create_scenarios.py`` script."
disable_progressbar,bool,"{true, false}","Switch to select whether progressbar should be disabled."
shared_resources,bool/str,,"Switch to select whether resources should be shared across runs. If a string is passed, this is used as a subdirectory name for shared resources. If set to 'base', only resources before creating the elec.nc file are shared."
shared_cutouts,bool,"{true, false}","Switch to select whether cutouts should be shared across runs."


View File

@ -7,5 +7,5 @@ Trigger, Description, Definition, Status
``B``,Add biomass,,In active use
``I``,Add industry sector,,In active use
``A``,Add agriculture sector,,In active use
``dist``+``n``,Add distribution grid with investment costs of ``n`` times costs in ``data/costs_{cost_year}.csv``,,In active use
``dist``+``n``,Add distribution grid with investment costs of ``n`` times costs in ``resources/costs_{cost_year}.csv``,,In active use
``seq``+``n``,Sets the CO2 sequestration potential to ``n`` Mt CO2 per year,,In active use


View File

@ -1,4 +1,9 @@
,Unit,Values,Description
transport,--,"{true, false}",Flag to include transport sector.
heating,--,"{true, false}",Flag to include heating sector.
biomass,--,"{true, false}",Flag to include biomass sector.
industry,--,"{true, false}",Flag to include industry sector.
agriculture,--,"{true, false}",Flag to include agriculture sector.
district_heating,--,,`prepare_sector_network.py <https://github.com/PyPSA/pypsa-eur-sec/blob/master/scripts/prepare_sector_network.py>`_
-- potential,--,float,maximum fraction of urban demand which can be supplied by district heating
-- progress,--,Dictionary with planning horizons as keys., Increase of today's district heating demand to potential maximum district heating share. Progress = 0 means today's district heating share. Progress = 1 means maximum fraction of urban demand is supplied by district heating
@ -109,6 +114,7 @@ min_part_load _methanolisation,per unit of p_nom ,float,The minimum unit dispatc
use_fischer_tropsch _waste_heat,--,"{true, false}",Add option for using waste heat of Fischer Tropsch in district heating networks
use_fuel_cell_waste_heat,--,"{true, false}",Add option for using waste heat of fuel cells in district heating networks
use_electrolysis_waste _heat,--,"{true, false}",Add option for using waste heat of electrolysis in district heating networks
electricity_transmission _grid,--,"{true, false}",Switch for enabling/disabling the electricity transmission grid.
electricity_distribution _grid,--,"{true, false}",Add a simplified representation of the exchange capacity between transmission and distribution grid level through a link.
electricity_distribution _grid_cost_factor,,,Multiplies the investment cost of the electricity distribution grid
,,,
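
The five new switches at the top of this table compose freely; a hypothetical selection, parsed as YAML:

import yaml

sector = yaml.safe_load("""
sector:
  transport: true
  heating: true
  biomass: false      # exclude the biomass sector from this run
  industry: true
  agriculture: false
""")["sector"]
print([name for name, on in sector.items() if on])  # ['transport', 'heating', 'industry']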


View File

@ -2,5 +2,3 @@
start,--,str or datetime-like; e.g. YYYY-MM-DD,Left bound of date range
end,--,str or datetime-like; e.g. YYYY-MM-DD,Right bound of date range
inclusive,--,"One of {'neither', 'both', left, right}","Make the time interval closed to the ``left``, ``right``, or both sides ``both`` or neither side ``None``."
resolution ,--,"{false,``nH``; i.e. ``2H``-``6H``}","Resample the time-resolution by averaging over every ``n`` snapshots in :mod:`prepare_network`. **Warning:** This option should currently only be used with electricity-only networks, not for sector-coupled networks."
segmentation,--,"{false,``n``; e.g. ``4380``}","Apply time series segmentation with `tsam <https://tsam.readthedocs.io/en/latest/index.html>`_ package to ``n`` adjacent snapshots of varying lengths based on capacity factors of varying renewables, hydro inflow and load in :mod:`prepare_network`. **Warning:** This option should currently only be used with electricity-only networks, not for sector-coupled networks."
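
The ``inclusive`` option follows ``pandas.date_range`` semantics; for example, ``inclusive: 'left'`` keeps the start bound and drops the end bound:

import pandas as pd

idx = pd.date_range("2013-01-01", "2014-01-01", freq="H", inclusive="left")
print(idx[0], idx[-1])  # 2013-01-01 00:00:00  2013-12-31 23:00:00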


View File

@ -561,6 +561,21 @@ The list of available biomass is given by the category in `ENSPRESO_BIOMASS <htt
use ``min`` in ``p_nom_max:`` for more
conservative assumptions.
.. _adjustments_cf:
``adjustments``
===============
.. literalinclude:: ../config/config.default.yaml
:language: yaml
:start-at: adjustments:
:end-before: # docs
.. csv-table::
:header-rows: 1
:widths: 22,7,22,33
:file: configtables/adjustments.csv
.. _solving_cf:
``solving``

View File

@ -9,7 +9,7 @@ Techno-Economic Assumptions
The database of cost assumptions is retrieved from the repository
`PyPSA/technology-data <https://github.com/pypsa/technology-data>`_ and then
saved to a file ``data/costs_{year}.csv``. The ``config/config.yaml`` provides options
saved to a file ``resources/costs_{year}.csv``. The ``config/config.yaml`` provides options
to choose a reference year and use a specific version of the repository.
.. literalinclude:: ../config/config.default.yaml
@ -50,7 +50,7 @@ Modifying Assumptions
Some cost assumptions (e.g. marginal cost and capital cost) can be directly
set in the ``config/config.yaml`` (cf. Section :ref:`costs_cf` in
:ref:`config`). To change cost assumptions in more detail, make a copy of
``data/costs_{year}.csv`` and reference the new cost file in the ``Snakefile``:
``resources/costs_{year}.csv`` and reference the new cost file in the ``Snakefile``:
.. literalinclude:: ../Snakefile
:start-at: COSTS

View File

@ -31,7 +31,7 @@ Install Python Dependencies
PyPSA-Eur relies on a set of other Python packages to function.
We recommend using the package manager `mamba <https://mamba.readthedocs.io/en/latest/>`_ to install them and manage your environments.
For instructions for your operating system follow the ``mamba`` `installation guide <https://mamba.readthedocs.io/en/latest/installation.html>`_.
For instructions for your operating system follow the ``mamba`` `installation guide <https://mamba.readthedocs.io/en/latest/installation/mamba-installation.html>`_.
You can also use ``conda`` equivalently.
The package requirements are curated in the `envs/environment.yaml <https://github.com/PyPSA/pypsa-eur/blob/master/envs/environment.yaml>`_ file.

View File

@ -7,9 +7,77 @@
Release Notes
##########################################
.. Upcoming Release
.. ================
..
Upcoming Release
================
* Linearly interpolate missing investment periods in year-dependent
configuration options.
* Added new scenario management that supports the simultaneous execution of
multiple scenarios with a single ``snakemake`` call. For this purpose, a
``scenarios.yaml`` file is introduced which contains customizable scenario
names with configuration overrides. To enable it, set ``run: scenarios:
enable: true`` and define the list of scenario names to run under ``run:
name:`` in the configuration file. The latter must be a subset of the
top-level keys in the scenario file.
- To get started, a scenarios template file ``config/scenarios.template.yaml``
is included in the repository, which is copied to ``config/scenarios.yaml``
on first use.
- The scenario file can be changed via ``run: scenarios: file:``.
- If scenario management is activated with ``run: scenarios: enable: true``, a
new wildcard ``{run}`` is introduced, which means that configuration settings
may depend on its value. Therefore, a new ``config_provider()`` function is
used in the ``Snakefile`` and ``.smk`` files, which takes wildcard values
into account (a minimal sketch follows this list). Direct calls to the
``config`` object have been reduced in ``.smk`` files since there is no
awareness of wildcard values outside rule definitions.
- The scenario files can also be programmatically created using the template
script ``config/create_scenarios.py``. This script can be run with
``snakemake -j1 create_scenarios`` and creates the scenarios file referenced
under ``run: scenarios: file:``.
- The setting ``run: name: all`` will run all scenarios in
``config/scenarios.yaml``. Otherwise, only the scenarios listed under ``run:
name:`` are run, provided that ``run: scenarios: enable: true`` is set.
- The setting ``run: shared_resources:`` indicates via a boolean whether the
resources should be encapsulated by the ``run: name:``. The special setting
``run: shared_resources: base`` shares those resources up to the rule
``add_electricity`` that contain no wildcards other than ``{"technology",
"year", "scope"}``.
- Added new configuration options for all ``{opts}`` and ``{sector_opts}``
wildcard values so that a unique configuration file (``config.yaml``) can be
created per PyPSA network file. This is done with the help of a new function
``update_config_from_wildcards()``, which parses configuration settings from
the wildcards and updates the ``snakemake.config`` object. The scripts then
use these updated configuration settings rather than values parsed directly
from ``snakemake.wildcards``.
- The cost data was moved from ``data/costs_{year}.csv`` to
``resources/costs_{year}.csv`` since it depends on configuration settings.
The ``retrieve_cost_data`` rule was changed to call a Python script.
- Moved time clustering settings to ``clustering: temporal:`` from
``snapshots:`` so that the latter is only used to define the
``pandas.DatetimeIndex``, which simplifies the scenario management.
- Collection rules get a new wildcard ``run=config["run"]["name"]`` so they
can collect outputs across different scenarios.
- **Warning:** One caveat remains for the scenario management with myopic or
perfect foresight pathway optimisation. The first investment period must be
shared across all scenarios. The reason is that the ``wildcard_constraints``
defined for the rule ``add_existing_baseyear`` do not accept wildcard-aware
input functions (cf.
https://github.com/snakemake/snakemake/issues/2703).
* The outputs of the rule ``retrieve_gas_infrastructure_data`` are no longer
marked as ``protected()``, as the download size is small.
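
A minimal sketch of the ``config_provider()`` idea referenced above, with illustrative names and a plain dict standing in for Snakemake's wildcards object; the real helper in ``scripts/_helpers.py`` also handles defaults and scenario merging.

base_config = {"electricity": {"renewable_carriers": ["solar", "onwind", "offwind-ac"]}}
scenario_configs = {
    "no-offwind": {"electricity": {"renewable_carriers": ["solar", "onwind", "hydro"]}},
}

def config_provider(*keys):
    # Return a function of the wildcards, so rule params can depend on {run}.
    def provider(wildcards):
        cfg = scenario_configs.get(wildcards.get("run"), base_config)
        for key in keys:
            cfg = cfg[key]
        return cfg
    return provider

get_carriers = config_provider("electricity", "renewable_carriers")
print(get_carriers({"run": "no-offwind"}))  # ['solar', 'onwind', 'hydro']
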
PyPSA-Eur 0.10.0 (19th February 2024)
=====================================

View File

@ -91,7 +91,7 @@ None.
**Outputs**
- ``resources/electricity_demand.csv``
- ``data/electricity_demand_raw.csv``
Rule ``retrieve_cost_data``

View File

@ -4,443 +4,443 @@
name: pypsa-eur
channels:
- bioconda
- http://conda.anaconda.org/gurobi
- conda-forge
- defaults
dependencies:
- _libgcc_mutex=0.1
- _openmp_mutex=4.5
- affine=2.4.0
- alsa-lib=1.2.10
- ampl-mp=3.1.0
- amply=0.1.6
- appdirs=1.4.4
- asttokens=2.4.1
- atk-1.0=2.38.0
- atlite=0.2.12
- attr=2.5.1
- attrs=23.2.0
- aws-c-auth=0.7.15
- aws-c-cal=0.6.9
- aws-c-common=0.9.12
- aws-c-compression=0.2.17
- aws-c-event-stream=0.4.1
- aws-c-http=0.8.0
- aws-c-io=0.14.3
- aws-c-mqtt=0.10.1
- aws-c-s3=0.5.0
- aws-c-sdkutils=0.1.14
- aws-checksums=0.1.17
- aws-crt-cpp=0.26.1
- aws-sdk-cpp=1.11.242
- azure-core-cpp=1.10.3
- azure-storage-blobs-cpp=12.10.0
- azure-storage-common-cpp=12.5.0
- beautifulsoup4=4.12.3
- blosc=1.21.5
- bokeh=3.3.4
- bottleneck=1.3.7
- branca=0.7.1
- brotli=1.1.0
- brotli-bin=1.1.0
- brotli-python=1.1.0
- bzip2=1.0.8
- c-ares=1.26.0
- c-blosc2=2.13.2
- ca-certificates=2024.2.2
- cairo=1.18.0
- cartopy=0.22.0
- cdsapi=0.6.1
- certifi=2024.2.2
- cffi=1.16.0
- cfgv=3.3.1
- cfitsio=4.3.1
- cftime=1.6.3
- charset-normalizer=3.3.2
- click=8.1.7
- click-plugins=1.1.1
- cligj=0.7.2
- cloudpickle=3.0.0
- coin-or-cbc=2.10.10
- coin-or-cgl=0.60.7
- coin-or-clp=1.17.8
- coin-or-osi=0.108.8
- coin-or-utils=2.11.9
- coincbc=2.10.10
- colorama=0.4.6
- configargparse=1.7
- connection_pool=0.0.3
- contourpy=1.2.0
- country_converter=1.2
- cppad=20240000.2
- cycler=0.12.1
- cytoolz=0.12.3
- dask=2024.2.0
- dask-core=2024.2.0
- datrie=0.8.2
- dbus=1.13.6
- decorator=5.1.1
- deprecation=2.1.0
- descartes=1.1.0
- distlib=0.3.8
- distributed=2024.2.0
- distro=1.9.0
- docutils=0.20.1
- dpath=2.1.6
- entsoe-py=0.6.6
- et_xmlfile=1.1.0
- exceptiongroup=1.2.0
- executing=2.0.1
- expat=2.5.0
- filelock=3.13.1
- fiona=1.9.5
- folium=0.15.1
- font-ttf-dejavu-sans-mono=2.37
- font-ttf-inconsolata=3.000
- font-ttf-source-code-pro=2.038
- font-ttf-ubuntu=0.83
- fontconfig=2.14.2
- fonts-conda-ecosystem=1
- fonts-conda-forge=1
- fonttools=4.49.0
- freetype=2.12.1
- freexl=2.0.0
- fribidi=1.0.10
- fsspec=2024.2.0
- gdal=3.8.4
- gdk-pixbuf=2.42.10
- geographiclib=1.52
- geojson-rewind=1.1.0
- geopandas=0.14.3
- geopandas-base=0.14.3
- geopy=2.4.1
- geos=3.12.1
- geotiff=1.7.1
- gettext=0.21.1
- gflags=2.2.2
- giflib=5.2.1
- gitdb=4.0.11
- gitpython=3.1.42
- glib=2.78.4
- glib-tools=2.78.4
- glog=0.6.0
- glpk=5.0
- gmp=6.3.0
- graphite2=1.3.13
- graphviz=9.0.0
- gst-plugins-base=1.22.9
- gstreamer=1.22.9
- gtk2=2.24.33
- gts=0.7.6
- harfbuzz=8.3.0
- hdf4=4.2.15
- hdf5=1.14.3
- humanfriendly=10.0
- icu=73.2
- identify=2.5.35
- idna=3.6
- importlib-metadata=7.0.1
- importlib_metadata=7.0.1
- importlib_resources=6.1.1
- iniconfig=2.0.0
- ipopt=3.14.14
- ipython=8.21.0
- jedi=0.19.1
- jinja2=3.1.3
- joblib=1.3.2
- json-c=0.17
- jsonschema=4.21.1
- jsonschema-specifications=2023.12.1
- jupyter_core=5.7.1
- kealib=1.5.3
- keyutils=1.6.1
- kiwisolver=1.4.5
- krb5=1.21.2
- lame=3.100
- lcms2=2.16
- ld_impl_linux-64=2.40
- lerc=4.0.0
- libabseil=20230802.1
- libaec=1.1.2
- libarchive=3.7.2
- libarrow=15.0.0
- libarrow-acero=15.0.0
- libarrow-dataset=15.0.0
- libarrow-flight=15.0.0
- libarrow-flight-sql=15.0.0
- libarrow-gandiva=15.0.0
- libarrow-substrait=15.0.0
- libblas=3.9.0
- libboost-headers=1.84.0
- libbrotlicommon=1.1.0
- libbrotlidec=1.1.0
- libbrotlienc=1.1.0
- libcap=2.69
- libcblas=3.9.0
- libclang=15.0.7
- libclang13=15.0.7
- libcrc32c=1.1.2
- libcups=2.3.3
- libcurl=8.5.0
- libdeflate=1.19
- libedit=3.1.20191231
- libev=4.33
- libevent=2.1.12
- libexpat=2.5.0
- libffi=3.4.2
- libflac=1.4.3
- libgcc-ng=13.2.0
- libgcrypt=1.10.3
- libgd=2.3.3
- libgdal=3.8.4
- libgfortran-ng=13.2.0
- libgfortran5=13.2.0
- libglib=2.78.4
- libgomp=13.2.0
- libgoogle-cloud=2.12.0
- libgpg-error=1.47
- libgrpc=1.60.1
- libhwloc=2.9.3
- libiconv=1.17
- libjpeg-turbo=3.0.0
- libkml=1.3.0
- liblapack=3.9.0
- liblapacke=3.9.0
- libllvm15=15.0.7
- libnetcdf=4.9.2
- libnghttp2=1.58.0
- libnl=3.9.0
- libnsl=2.0.1
- libnuma=2.0.16
- libogg=1.3.4
- libopenblas=0.3.26
- libopus=1.3.1
- libparquet=15.0.0
- libpng=1.6.42
- libpq=16.2
- libprotobuf=4.25.1
- libre2-11=2023.06.02
- librsvg=2.56.3
- librttopo=1.1.0
- libscotch=7.0.4
- libsndfile=1.2.2
- libspatialindex=1.9.3
- libspatialite=5.1.0
- libspral=2023.09.07
- libsqlite=3.45.1
- libssh2=1.11.0
- libstdcxx-ng=13.2.0
- libsystemd0=255
- libthrift=0.19.0
- libtiff=4.6.0
- libutf8proc=2.8.0
- libuuid=2.38.1
- libvorbis=1.3.7
- libwebp=1.3.2
- libwebp-base=1.3.2
- libxcb=1.15
- libxcrypt=4.4.36
- libxkbcommon=1.6.0
- libxml2=2.12.5
- libxslt=1.1.39
- libzip=1.10.1
- libzlib=1.2.13
- linopy=0.3.4
- locket=1.0.0
- lxml=5.1.0
- lz4=4.3.3
- lz4-c=1.9.4
- lzo=2.10
- mapclassify=2.6.1
- markupsafe=2.1.5
- matplotlib=3.8.3
- matplotlib-base=3.8.3
- matplotlib-inline=0.1.6
- memory_profiler=0.61.0
- metis=5.1.0
- minizip=4.0.4
- mpg123=1.32.4
- msgpack-python=1.0.7
- mumps-include=5.6.2
- mumps-seq=5.6.2
- munkres=1.1.4
- mysql-common=8.0.33
- mysql-libs=8.0.33
- nbformat=5.9.2
- ncurses=6.4
- netcdf4=1.6.5
- networkx=3.2.1
- nodeenv=1.8.0
- nomkl=1.0
- nspr=4.35
- nss=3.98
- numexpr=2.9.0
- numpy=1.26.4
- openjdk=21.0.2
- openjpeg=2.5.0
- openpyxl=3.1.2
- openssl=3.2.1
- orc=1.9.2
- packaging=23.2
- pandas=2.2.0
- pango=1.50.14
- parso=0.8.3
- partd=1.4.1
- patsy=0.5.6
- pcre2=10.42
- pexpect=4.9.0
- pickleshare=0.7.5
- pillow=10.2.0
- pip=24.0
- pixman=0.43.2
- pkgutil-resolve-name=1.3.10
- plac=1.4.2
- platformdirs=4.2.0
- pluggy=1.4.0
- ply=3.11
- poppler=24.02.0
- poppler-data=0.4.12
- postgresql=16.2
- powerplantmatching=0.5.11
- pre-commit=3.6.2
- progressbar2=4.3.2
- proj=9.3.1
- prompt-toolkit=3.0.42
- psutil=5.9.8
- pthread-stubs=0.4
- ptyprocess=0.7.0
- pulp=2.7.0
- pulseaudio-client=16.1
- pure_eval=0.2.2
- py-cpuinfo=9.0.0
- pyarrow=15.0.0
- pyarrow-hotfix=0.6
- pycountry=22.3.5
- pycparser=2.21
- pygments=2.17.2
- pyomo=6.6.1
- pyparsing=3.1.1
- pyproj=3.6.1
- pypsa=0.27.0
- pyqt=5.15.9
- pyqt5-sip=12.12.2
- pyscipopt=4.4.0
- pyshp=2.3.1
- pysocks=1.7.1
- pytables=3.9.2
- pytest=8.0.0
- python=3.11.8
- python-dateutil=2.8.2
- python-fastjsonschema=2.19.1
- python-tzdata=2024.1
- python-utils=3.8.2
- python_abi=3.11
- pytz=2024.1
- pyxlsb=1.0.10
- pyyaml=6.0.1
- qt-main=5.15.8
- rasterio=1.3.9
- rdma-core=50.0
- re2=2023.06.02
- readline=8.2
- referencing=0.33.0
- requests=2.31.0
- reretry=0.11.8
- rioxarray=0.15.1
- rpds-py=0.18.0
- rtree=1.2.0
- s2n=1.4.3
- scikit-learn=1.4.1.post1
- scip=8.1.0
- scipy=1.12.0
- scotch=7.0.4
- seaborn=0.13.2
- seaborn-base=0.13.2
- setuptools=69.1.0
- setuptools-scm=8.0.4
- setuptools_scm=8.0.4
- shapely=2.0.2
- sip=6.7.12
- six=1.16.0
- smart_open=6.4.0
- smmap=5.0.0
- snakemake-minimal=7.32.4
- snappy=1.1.10
- snuggs=1.4.7
- sortedcontainers=2.4.0
- soupsieve=2.5
- sqlite=3.45.1
- stack_data=0.6.2
- statsmodels=0.14.1
- stopit=1.1.2
- tabula-py=2.7.0
- tabulate=0.9.0
- tbb=2021.11.0
- tblib=3.0.0
- threadpoolctl=3.3.0
- throttler=1.2.2
- tiledb=2.20.0
- tk=8.6.13
- toml=0.10.2
- tomli=2.0.1
- toolz=0.12.1
- toposort=1.10
- tornado=6.3.3
- tqdm=4.66.2
- traitlets=5.14.1
- typing-extensions=4.9.0
- typing_extensions=4.9.0
- tzcode=2024a
- tzdata=2024a
- ucx=1.15.0
- ukkonen=1.0.1
- unidecode=1.3.8
- unixodbc=2.3.12
- uriparser=0.9.7
- urllib3=2.2.1
- validators=0.22.0
- virtualenv=20.25.0
- wcwidth=0.2.13
- wheel=0.42.0
- wrapt=1.16.0
- xarray=2024.2.0
- xcb-util=0.4.0
- xcb-util-image=0.4.0
- xcb-util-keysyms=0.4.0
- xcb-util-renderutil=0.3.9
- xcb-util-wm=0.4.1
- xerces-c=3.2.5
- xkeyboard-config=2.41
- xlrd=2.0.1
- xorg-fixesproto=5.0
- xorg-inputproto=2.3.2
- xorg-kbproto=1.0.7
- xorg-libice=1.1.1
- xorg-libsm=1.2.4
- xorg-libx11=1.8.7
- xorg-libxau=1.0.11
- xorg-libxdmcp=1.1.3
- xorg-libxext=1.3.4
- xorg-libxfixes=5.0.3
- xorg-libxi=1.7.10
- xorg-libxrender=0.9.11
- xorg-libxt=1.3.0
- xorg-libxtst=1.2.3
- xorg-recordproto=1.14.2
- xorg-renderproto=0.11.1
- xorg-xextproto=7.3.0
- xorg-xf86vidmodeproto=2.3.1
- xorg-xproto=7.0.31
- xyzservices=2023.10.1
- xz=5.2.6
- yaml=0.2.5
- yte=1.5.4
- zict=3.0.0
- zipp=3.17.0
- zlib=1.2.13
- zlib-ng=2.0.7
- zstd=1.5.5
- pip:
- highspy==1.5.3
- tsam==2.3.1

View File

@ -8,7 +8,7 @@ if config["enable"].get("prepare_links_p_nom", False):
output:
"data/links_p_nom.csv",
log:
LOGS + "prepare_links_p_nom.log",
logs("prepare_links_p_nom.log"),
threads: 1
resources:
mem_mb=1500,
@ -20,15 +20,15 @@ if config["enable"].get("prepare_links_p_nom", False):
rule build_electricity_demand:
params:
snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]},
countries=config["countries"],
load=config["load"],
snapshots=config_provider("snapshots"),
countries=config_provider("countries"),
load=config_provider("load"),
input:
ancient("data/electricity_demand_raw.csv"),
output:
RESOURCES + "electricity_demand.csv",
resources("electricity_demand.csv"),
log:
LOGS + "build_electricity_demand.log",
logs("build_electricity_demand.log"),
resources:
mem_mb=5000,
conda:
@ -39,17 +39,17 @@ rule build_electricity_demand:
rule build_powerplants:
params:
powerplants_filter=config["electricity"]["powerplants_filter"],
custom_powerplants=config["electricity"]["custom_powerplants"],
everywhere_powerplants=config["electricity"]["everywhere_powerplants"],
countries=config["countries"],
powerplants_filter=config_provider("electricity", "powerplants_filter"),
custom_powerplants=config_provider("electricity", "custom_powerplants"),
everywhere_powerplants=config_provider("electricity", "everywhere_powerplants"),
countries=config_provider("countries"),
input:
base_network=RESOURCES + "networks/base.nc",
base_network=resources("networks/base.nc"),
custom_powerplants="data/custom_powerplants.csv",
output:
RESOURCES + "powerplants.csv",
resources("powerplants.csv"),
log:
LOGS + "build_powerplants.log",
logs("build_powerplants.log"),
threads: 1
resources:
mem_mb=5000,
@ -61,11 +61,11 @@ rule build_powerplants:
rule base_network:
params:
countries=config["countries"],
snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]},
lines=config["lines"],
links=config["links"],
transformers=config["transformers"],
countries=config_provider("countries"),
snapshots=config_provider("snapshots"),
lines=config_provider("lines"),
links=config_provider("links"),
transformers=config_provider("transformers"),
input:
eg_buses="data/entsoegridkit/buses.csv",
eg_lines="data/entsoegridkit/lines.csv",
@ -75,15 +75,15 @@ rule base_network:
parameter_corrections="data/parameter_corrections.yaml",
links_p_nom="data/links_p_nom.csv",
links_tyndp="data/links_tyndp.csv",
country_shapes=RESOURCES + "country_shapes.geojson",
offshore_shapes=RESOURCES + "offshore_shapes.geojson",
europe_shape=RESOURCES + "europe_shape.geojson",
country_shapes=resources("country_shapes.geojson"),
offshore_shapes=resources("offshore_shapes.geojson"),
europe_shape=resources("europe_shape.geojson"),
output:
RESOURCES + "networks/base.nc",
resources("networks/base.nc"),
log:
LOGS + "base_network.log",
logs("base_network.log"),
benchmark:
BENCHMARKS + "base_network"
benchmarks("base_network")
threads: 1
resources:
mem_mb=1500,
@ -95,7 +95,7 @@ rule base_network:
rule build_shapes:
params:
countries=config["countries"],
countries=config_provider("countries"),
input:
naturalearth=ancient("data/bundle/naturalearth/ne_10m_admin_0_countries.shp"),
eez=ancient("data/bundle/eez/World_EEZ_v8_2014.shp"),
@ -105,12 +105,12 @@ rule build_shapes:
ch_cantons=ancient("data/bundle/ch_cantons.csv"),
ch_popgdp=ancient("data/bundle/je-e-21.03.02.xls"),
output:
country_shapes=RESOURCES + "country_shapes.geojson",
offshore_shapes=RESOURCES + "offshore_shapes.geojson",
europe_shape=RESOURCES + "europe_shape.geojson",
nuts3_shapes=RESOURCES + "nuts3_shapes.geojson",
country_shapes=resources("country_shapes.geojson"),
offshore_shapes=resources("offshore_shapes.geojson"),
europe_shape=resources("europe_shape.geojson"),
nuts3_shapes=resources("nuts3_shapes.geojson"),
log:
LOGS + "build_shapes.log",
logs("build_shapes.log"),
threads: 1
resources:
mem_mb=1500,
@ -122,16 +122,16 @@ rule build_shapes:
rule build_bus_regions:
params:
countries=config["countries"],
countries=config_provider("countries"),
input:
country_shapes=RESOURCES + "country_shapes.geojson",
offshore_shapes=RESOURCES + "offshore_shapes.geojson",
base_network=RESOURCES + "networks/base.nc",
country_shapes=resources("country_shapes.geojson"),
offshore_shapes=resources("offshore_shapes.geojson"),
base_network=resources("networks/base.nc"),
output:
regions_onshore=RESOURCES + "regions_onshore.geojson",
regions_offshore=RESOURCES + "regions_offshore.geojson",
regions_onshore=resources("regions_onshore.geojson"),
regions_offshore=resources("regions_offshore.geojson"),
log:
LOGS + "build_bus_regions.log",
logs("build_bus_regions.log"),
threads: 1
resources:
mem_mb=1000,
@ -145,20 +145,20 @@ if config["enable"].get("build_cutout", False):
rule build_cutout:
params:
snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]},
cutouts=config["atlite"]["cutouts"],
snapshots=config_provider("snapshots"),
cutouts=config_provider("atlite", "cutouts"),
input:
regions_onshore=RESOURCES + "regions_onshore.geojson",
regions_offshore=RESOURCES + "regions_offshore.geojson",
regions_onshore=resources("regions_onshore.geojson"),
regions_offshore=resources("regions_offshore.geojson"),
output:
protected("cutouts/" + CDIR + "{cutout}.nc"),
log:
"logs/" + CDIR + "build_cutout/{cutout}.log",
logs(CDIR + "build_cutout/{cutout}.log"),
benchmark:
"benchmarks/" + CDIR + "build_cutout_{cutout}"
threads: ATLITE_NPROCESSES
threads: config["atlite"].get("nprocesses", 4)
resources:
mem_mb=ATLITE_NPROCESSES * 1000,
mem_mb=config["atlite"].get("nprocesses", 4) * 1000,
conda:
"../envs/environment.yaml"
script:
@ -170,13 +170,15 @@ if config["enable"].get("build_natura_raster", False):
rule build_natura_raster:
input:
natura=ancient("data/bundle/natura/Natura2000_end2015.shp"),
cutouts=expand("cutouts/" + CDIR + "{cutouts}.nc", **config["atlite"]),
cutouts=lambda w: expand(
"cutouts/" + CDIR + "{cutouts}.nc", **config_provider("atlite")(w)
),
output:
RESOURCES + "natura.tiff",
resources("natura.tiff"),
resources:
mem_mb=5000,
log:
LOGS + "build_natura_raster.log",
logs("build_natura_raster.log"),
conda:
"../envs/environment.yaml"
script:
@ -186,21 +188,21 @@ if config["enable"].get("build_natura_raster", False):
rule build_ship_raster:
input:
ship_density="data/shipdensity_global.zip",
cutouts=expand(
cutouts=lambda w: expand(
"cutouts/" + CDIR + "{cutout}.nc",
cutout=[
config["renewable"][k]["cutout"]
for k in config["electricity"]["renewable_carriers"]
config_provider("renewable", k, "cutout")(w)
for k in config_provider("electricity", "renewable_carriers")(w)
],
),
output:
RESOURCES + "shipdensity_raster.tif",
resources("shipdensity_raster.tif"),
log:
LOGS + "build_ship_raster.log",
logs("build_ship_raster.log"),
resources:
mem_mb=5000,
benchmark:
BENCHMARKS + "build_ship_raster"
benchmarks("build_ship_raster")
conda:
"../envs/environment.yaml"
script:
@ -214,33 +216,33 @@ rule determine_availability_matrix_MD_UA:
wdpa_marine="data/WDPA_WDOECM_marine.gpkg",
gebco=lambda w: (
"data/bundle/GEBCO_2014_2D.nc"
if "max_depth" in config["renewable"][w.technology].keys()
if config_provider("renewable", w.technology)(w).get("max_depth")
else []
),
ship_density=lambda w: (
RESOURCES + "shipdensity_raster.tif"
if "ship_threshold" in config["renewable"][w.technology].keys()
resources("shipdensity_raster.tif")
if "ship_threshold" in config_provider("renewable", w.technology)(w).keys()
else []
),
country_shapes=RESOURCES + "country_shapes.geojson",
offshore_shapes=RESOURCES + "offshore_shapes.geojson",
country_shapes=resources("country_shapes.geojson"),
offshore_shapes=resources("offshore_shapes.geojson"),
regions=lambda w: (
RESOURCES + "regions_onshore.geojson"
resources("regions_onshore.geojson")
if w.technology in ("onwind", "solar")
else RESOURCES + "regions_offshore.geojson"
else resources("regions_offshore.geojson")
),
cutout=lambda w: "cutouts/"
+ CDIR
+ config["renewable"][w.technology]["cutout"]
+ config_provider("renewable", w.technology, "cutout")(w)
+ ".nc",
output:
availability_matrix=RESOURCES + "availability_matrix_MD-UA_{technology}.nc",
availability_map=RESOURCES + "availability_matrix_MD-UA_{technology}.png",
availability_matrix=resources("availability_matrix_MD-UA_{technology}.nc"),
availability_map=resources("availability_matrix_MD-UA_{technology}.png"),
log:
LOGS + "determine_availability_matrix_MD_UA_{technology}.log",
threads: ATLITE_NPROCESSES
logs("determine_availability_matrix_MD_UA_{technology}.log"),
threads: config["atlite"].get("nprocesses", 4)
resources:
mem_mb=ATLITE_NPROCESSES * 5000,
mem_mb=config["atlite"].get("nprocesses", 4) * 5000,
conda:
"../envs/environment.yaml"
script:
@ -248,65 +250,67 @@ rule determine_availability_matrix_MD_UA:
# Optional input when having Ukraine (UA) or Moldova (MD) in the countries list
if {"UA", "MD"}.intersection(set(config["countries"])):
opt = {
"availability_matrix_MD_UA": RESOURCES
+ "availability_matrix_MD-UA_{technology}.nc"
}
else:
opt = {}
def input_ua_md_availability_matrix(w):
countries = set(config_provider("countries")(w))
if {"UA", "MD"}.intersection(countries):
return {
"availability_matrix_MD_UA": resources(
"availability_matrix_MD-UA_{technology}.nc"
)
}
return {}
rule build_renewable_profiles:
params:
snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]},
renewable=config["renewable"],
snapshots=config_provider("snapshots"),
renewable=config_provider("renewable"),
input:
**opt,
base_network=RESOURCES + "networks/base.nc",
unpack(input_ua_md_availability_matrix),
base_network=resources("networks/base.nc"),
corine=ancient("data/bundle/corine/g250_clc06_V18_5.tif"),
natura=lambda w: (
RESOURCES + "natura.tiff"
if config["renewable"][w.technology]["natura"]
resources("natura.tiff")
if config_provider("renewable", w.technology, "natura")(w)
else []
),
luisa=lambda w: (
"data/LUISA_basemap_020321_50m.tif"
if config["renewable"][w.technology].get("luisa")
if config_provider("renewable", w.technology, "luisa")(w)
else []
),
gebco=ancient(
lambda w: (
"data/bundle/GEBCO_2014_2D.nc"
if config["renewable"][w.technology].get("max_depth")
if config_provider("renewable", w.technology)(w).get("max_depth")
else []
)
),
ship_density=lambda w: (
RESOURCES + "shipdensity_raster.tif"
if config["renewable"][w.technology].get("ship_threshold", False)
resources("shipdensity_raster.tif")
if "ship_threshold" in config_provider("renewable", w.technology)(w).keys()
else []
),
country_shapes=RESOURCES + "country_shapes.geojson",
offshore_shapes=RESOURCES + "offshore_shapes.geojson",
country_shapes=resources("country_shapes.geojson"),
offshore_shapes=resources("offshore_shapes.geojson"),
regions=lambda w: (
RESOURCES + "regions_onshore.geojson"
resources("regions_onshore.geojson")
if w.technology in ("onwind", "solar")
else RESOURCES + "regions_offshore.geojson"
else resources("regions_offshore.geojson")
),
cutout=lambda w: "cutouts/"
+ CDIR
+ config["renewable"][w.technology]["cutout"]
+ config_provider("renewable", w.technology, "cutout")(w)
+ ".nc",
output:
profile=RESOURCES + "profile_{technology}.nc",
profile=resources("profile_{technology}.nc"),
log:
LOGS + "build_renewable_profile_{technology}.log",
logs("build_renewable_profile_{technology}.log"),
benchmark:
BENCHMARKS + "build_renewable_profiles_{technology}"
threads: ATLITE_NPROCESSES
benchmarks("build_renewable_profiles_{technology}")
threads: config["atlite"].get("nprocesses", 4)
resources:
mem_mb=ATLITE_NPROCESSES * 5000,
mem_mb=config["atlite"].get("nprocesses", 4) * 5000,
wildcard_constraints:
technology="(?!hydro).*", # Any technology other than hydro
conda:
@ -320,10 +324,10 @@ rule build_monthly_prices:
co2_price_raw="data/validation/emission-spot-primary-market-auction-report-2019-data.xls",
fuel_price_raw="data/validation/energy-price-trends-xlsx-5619002.xlsx",
output:
co2_price=RESOURCES + "co2_price.csv",
fuel_price=RESOURCES + "monthly_fuel_price.csv",
co2_price=resources("co2_price.csv"),
fuel_price=resources("monthly_fuel_price.csv"),
log:
LOGS + "build_monthly_prices.log",
logs("build_monthly_prices.log"),
threads: 1
resources:
mem_mb=5000,
@ -335,16 +339,19 @@ rule build_monthly_prices:
rule build_hydro_profile:
params:
hydro=config["renewable"]["hydro"],
countries=config["countries"],
hydro=config_provider("renewable", "hydro"),
countries=config_provider("countries"),
input:
country_shapes=RESOURCES + "country_shapes.geojson",
country_shapes=resources("country_shapes.geojson"),
eia_hydro_generation="data/eia_hydro_annual_generation.csv",
cutout=f"cutouts/" + CDIR + config["renewable"]["hydro"]["cutout"] + ".nc",
cutout=lambda w: f"cutouts/"
+ CDIR
+ config_provider("renewable", "hydro", "cutout")(w)
+ ".nc",
output:
RESOURCES + "profile_hydro.nc",
resources("profile_hydro.nc"),
log:
LOGS + "build_hydro_profile.log",
logs("build_hydro_profile.log"),
resources:
mem_mb=5000,
conda:
@ -353,79 +360,87 @@ rule build_hydro_profile:
"../scripts/build_hydro_profile.py"
if config["lines"]["dynamic_line_rating"]["activate"]:
rule build_line_rating:
params:
snapshots=config_provider("snapshots"),
input:
base_network=resources("networks/base.nc"),
cutout=lambda w: "cutouts/"
+ CDIR
+ config_provider("lines", "dynamic_line_rating", "cutout")(w)
+ ".nc",
output:
output=resources("networks/line_rating.nc"),
log:
logs("build_line_rating.log"),
benchmark:
benchmarks("build_line_rating")
threads: config["atlite"].get("nprocesses", 4)
resources:
mem_mb=config["atlite"].get("nprocesses", 4) * 1000,
conda:
"../envs/environment.yaml"
script:
"../scripts/build_line_rating.py"
rule build_line_rating:
params:
snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]},
input:
base_network=RESOURCES + "networks/base.nc",
cutout="cutouts/"
+ CDIR
+ config["lines"]["dynamic_line_rating"]["cutout"]
+ ".nc",
output:
output=RESOURCES + "networks/line_rating.nc",
log:
LOGS + "build_line_rating.log",
benchmark:
BENCHMARKS + "build_line_rating"
threads: ATLITE_NPROCESSES
resources:
mem_mb=ATLITE_NPROCESSES * 1000,
conda:
"../envs/environment.yaml"
script:
"../scripts/build_line_rating.py"
def input_profile_tech(w):
return {
f"profile_{tech}": resources(f"profile_{tech}.nc")
for tech in config_provider("electricity", "renewable_carriers")(w)
}
def input_conventional(w):
return {
f"conventional_{carrier}_{attr}": fn
for carrier, d in config_provider("conventional", default={None: {}})(w).items()
if carrier in config_provider("electricity", "conventional_carriers")(w)
for attr, fn in d.items()
if str(fn).startswith("data/")
}
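# Illustrative: given a (hypothetical) config entry
#   conventional: {nuclear: {p_max_pu: "data/nuclear_p_max_pu.csv"}}
# with "nuclear" listed under electricity/conventional_carriers, the helper
# above yields an input named "conventional_nuclear_p_max_pu" pointing at
# that file; entries whose value is not a "data/" path are skipped here.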
rule add_electricity:
params:
length_factor=config["lines"]["length_factor"],
scaling_factor=config["load"]["scaling_factor"],
countries=config["countries"],
renewable=config["renewable"],
electricity=config["electricity"],
conventional=config["conventional"],
costs=config["costs"],
length_factor=config_provider("lines", "length_factor"),
scaling_factor=config_provider("load", "scaling_factor"),
countries=config_provider("countries"),
renewable=config_provider("renewable"),
electricity=config_provider("electricity"),
conventional=config_provider("conventional"),
costs=config_provider("costs"),
input:
**{
f"profile_{tech}": RESOURCES + f"profile_{tech}.nc"
for tech in config["electricity"]["renewable_carriers"]
},
**{
f"conventional_{carrier}_{attr}": fn
for carrier, d in config.get("conventional", {None: {}}).items()
if carrier in config["electricity"]["conventional_carriers"]
for attr, fn in d.items()
if str(fn).startswith("data/")
},
base_network=RESOURCES + "networks/base.nc",
line_rating=(
RESOURCES + "networks/line_rating.nc"
if config["lines"]["dynamic_line_rating"]["activate"]
else RESOURCES + "networks/base.nc"
unpack(input_profile_tech),
unpack(input_conventional),
base_network=resources("networks/base.nc"),
line_rating=lambda w: (
resources("networks/line_rating.nc")
if config_provider("lines", "dynamic_line_rating", "activate")(w)
else resources("networks/base.nc")
),
tech_costs=COSTS,
regions=RESOURCES + "regions_onshore.geojson",
powerplants=RESOURCES + "powerplants.csv",
tech_costs=lambda w: resources(
f"costs_{config_provider('costs', 'year')(w)}.csv"
),
regions=resources("regions_onshore.geojson"),
powerplants=resources("powerplants.csv"),
hydro_capacities=ancient("data/bundle/hydro_capacities.csv"),
geth_hydro_capacities="data/geth2015_hydro_capacities.csv",
unit_commitment="data/unit_commitment.csv",
fuel_price=(
RESOURCES + "monthly_fuel_price.csv"
if config["conventional"]["dynamic_fuel_price"]
fuel_price=lambda w: (
resources("monthly_fuel_price.csv")
if config_provider("conventional", "dynamic_fuel_price")(w)
else []
),
load=RESOURCES + "electricity_demand.csv",
nuts3_shapes=RESOURCES + "nuts3_shapes.geojson",
load=resources("electricity_demand.csv"),
nuts3_shapes=resources("nuts3_shapes.geojson"),
ua_md_gdp="data/GDP_PPP_30arcsec_v3_mapped_default.csv",
output:
RESOURCES + "networks/elec.nc",
resources("networks/elec.nc"),
log:
LOGS + "add_electricity.log",
logs("add_electricity.log"),
benchmark:
BENCHMARKS + "add_electricity"
benchmarks("add_electricity")
threads: 1
resources:
mem_mb=10000,
@ -437,31 +452,33 @@ rule add_electricity:
rule simplify_network:
params:
simplify_network=config["clustering"]["simplify_network"],
aggregation_strategies=config["clustering"].get("aggregation_strategies", {}),
focus_weights=config["clustering"].get(
"focus_weights", config.get("focus_weights")
simplify_network=config_provider("clustering", "simplify_network"),
aggregation_strategies=config_provider(
"clustering", "aggregation_strategies", default={}
),
renewable_carriers=config["electricity"]["renewable_carriers"],
max_hours=config["electricity"]["max_hours"],
length_factor=config["lines"]["length_factor"],
p_max_pu=config["links"].get("p_max_pu", 1.0),
costs=config["costs"],
focus_weights=config_provider("clustering", "focus_weights", default=None),
renewable_carriers=config_provider("electricity", "renewable_carriers"),
max_hours=config_provider("electricity", "max_hours"),
length_factor=config_provider("lines", "length_factor"),
p_max_pu=config_provider("links", "p_max_pu", default=1.0),
costs=config_provider("costs"),
input:
network=RESOURCES + "networks/elec.nc",
tech_costs=COSTS,
regions_onshore=RESOURCES + "regions_onshore.geojson",
regions_offshore=RESOURCES + "regions_offshore.geojson",
network=resources("networks/elec.nc"),
tech_costs=lambda w: resources(
f"costs_{config_provider('costs', 'year')(w)}.csv"
),
regions_onshore=resources("regions_onshore.geojson"),
regions_offshore=resources("regions_offshore.geojson"),
output:
network=RESOURCES + "networks/elec_s{simpl}.nc",
regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}.geojson",
regions_offshore=RESOURCES + "regions_offshore_elec_s{simpl}.geojson",
busmap=RESOURCES + "busmap_elec_s{simpl}.csv",
connection_costs=RESOURCES + "connection_costs_s{simpl}.csv",
network=resources("networks/elec_s{simpl}.nc"),
regions_onshore=resources("regions_onshore_elec_s{simpl}.geojson"),
regions_offshore=resources("regions_offshore_elec_s{simpl}.geojson"),
busmap=resources("busmap_elec_s{simpl}.csv"),
connection_costs=resources("connection_costs_s{simpl}.csv"),
log:
LOGS + "simplify_network/elec_s{simpl}.log",
logs("simplify_network/elec_s{simpl}.log"),
benchmark:
BENCHMARKS + "simplify_network/elec_s{simpl}"
benchmarks("simplify_network/elec_s{simpl}")
threads: 1
resources:
mem_mb=12000,
@ -473,38 +490,42 @@ rule simplify_network:
rule cluster_network:
params:
cluster_network=config["clustering"]["cluster_network"],
aggregation_strategies=config["clustering"].get("aggregation_strategies", {}),
custom_busmap=config["enable"].get("custom_busmap", False),
focus_weights=config["clustering"].get(
"focus_weights", config.get("focus_weights")
cluster_network=config_provider("clustering", "cluster_network"),
aggregation_strategies=config_provider(
"clustering", "aggregation_strategies", default={}
),
renewable_carriers=config["electricity"]["renewable_carriers"],
conventional_carriers=config["electricity"].get("conventional_carriers", []),
max_hours=config["electricity"]["max_hours"],
length_factor=config["lines"]["length_factor"],
costs=config["costs"],
custom_busmap=config_provider("enable", "custom_busmap", default=False),
focus_weights=config_provider("clustering", "focus_weights", default=None),
renewable_carriers=config_provider("electricity", "renewable_carriers"),
conventional_carriers=config_provider(
"electricity", "conventional_carriers", default=[]
),
max_hours=config_provider("electricity", "max_hours"),
length_factor=config_provider("lines", "length_factor"),
costs=config_provider("costs"),
input:
network=RESOURCES + "networks/elec_s{simpl}.nc",
regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}.geojson",
regions_offshore=RESOURCES + "regions_offshore_elec_s{simpl}.geojson",
busmap=ancient(RESOURCES + "busmap_elec_s{simpl}.csv"),
custom_busmap=(
network=resources("networks/elec_s{simpl}.nc"),
regions_onshore=resources("regions_onshore_elec_s{simpl}.geojson"),
regions_offshore=resources("regions_offshore_elec_s{simpl}.geojson"),
busmap=ancient(resources("busmap_elec_s{simpl}.csv")),
custom_busmap=lambda w: (
"data/custom_busmap_elec_s{simpl}_{clusters}.csv"
if config["enable"].get("custom_busmap", False)
if config_provider("enable", "custom_busmap", default=False)(w)
else []
),
tech_costs=COSTS,
tech_costs=lambda w: resources(
f"costs_{config_provider('costs', 'year')(w)}.csv"
),
output:
network=RESOURCES + "networks/elec_s{simpl}_{clusters}.nc",
regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
regions_offshore=RESOURCES + "regions_offshore_elec_s{simpl}_{clusters}.geojson",
busmap=RESOURCES + "busmap_elec_s{simpl}_{clusters}.csv",
linemap=RESOURCES + "linemap_elec_s{simpl}_{clusters}.csv",
network=resources("networks/elec_s{simpl}_{clusters}.nc"),
regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"),
regions_offshore=resources("regions_offshore_elec_s{simpl}_{clusters}.geojson"),
busmap=resources("busmap_elec_s{simpl}_{clusters}.csv"),
linemap=resources("linemap_elec_s{simpl}_{clusters}.csv"),
log:
LOGS + "cluster_network/elec_s{simpl}_{clusters}.log",
logs("cluster_network/elec_s{simpl}_{clusters}.log"),
benchmark:
BENCHMARKS + "cluster_network/elec_s{simpl}_{clusters}"
benchmarks("cluster_network/elec_s{simpl}_{clusters}")
threads: 1
resources:
mem_mb=10000,
@ -516,18 +537,20 @@ rule cluster_network:
rule add_extra_components:
params:
extendable_carriers=config["electricity"]["extendable_carriers"],
max_hours=config["electricity"]["max_hours"],
costs=config["costs"],
extendable_carriers=config_provider("electricity", "extendable_carriers"),
max_hours=config_provider("electricity", "max_hours"),
costs=config_provider("costs"),
input:
network=RESOURCES + "networks/elec_s{simpl}_{clusters}.nc",
tech_costs=COSTS,
network=resources("networks/elec_s{simpl}_{clusters}.nc"),
tech_costs=lambda w: resources(
f"costs_{config_provider('costs', 'year')(w)}.csv"
),
output:
RESOURCES + "networks/elec_s{simpl}_{clusters}_ec.nc",
resources("networks/elec_s{simpl}_{clusters}_ec.nc"),
log:
LOGS + "add_extra_components/elec_s{simpl}_{clusters}.log",
logs("add_extra_components/elec_s{simpl}_{clusters}.log"),
benchmark:
BENCHMARKS + "add_extra_components/elec_s{simpl}_{clusters}_ec"
benchmarks("add_extra_components/elec_s{simpl}_{clusters}_ec")
threads: 1
resources:
mem_mb=4000,
@ -539,30 +562,30 @@ rule add_extra_components:
rule prepare_network:
params:
snapshots={
"resolution": config["snapshots"].get("resolution", False),
"segmentation": config["snapshots"].get("segmentation", False),
},
links=config["links"],
lines=config["lines"],
co2base=config["electricity"]["co2base"],
co2limit_enable=config["electricity"].get("co2limit_enable", False),
co2limit=config["electricity"]["co2limit"],
gaslimit_enable=config["electricity"].get("gaslimit_enable", False),
gaslimit=config["electricity"].get("gaslimit"),
max_hours=config["electricity"]["max_hours"],
costs=config["costs"],
autarky=config["electricity"].get("autarky", {}),
time_resolution=config_provider("clustering", "temporal", "resolution_elec"),
links=config_provider("links"),
lines=config_provider("lines"),
co2base=config_provider("electricity", "co2base"),
co2limit_enable=config_provider("electricity", "co2limit_enable", default=False),
co2limit=config_provider("electricity", "co2limit"),
gaslimit_enable=config_provider("electricity", "gaslimit_enable", default=False),
gaslimit=config_provider("electricity", "gaslimit"),
max_hours=config_provider("electricity", "max_hours"),
costs=config_provider("costs"),
adjustments=config_provider("adjustments", "electricity"),
autarky=config_provider("electricity", "autarky", default={}),
input:
RESOURCES + "networks/elec_s{simpl}_{clusters}_ec.nc",
tech_costs=COSTS,
co2_price=lambda w: RESOURCES + "co2_price.csv" if "Ept" in w.opts else [],
resources("networks/elec_s{simpl}_{clusters}_ec.nc"),
tech_costs=lambda w: resources(
f"costs_{config_provider('costs', 'year')(w)}.csv"
),
co2_price=lambda w: resources("co2_price.csv") if "Ept" in w.opts else [],
output:
RESOURCES + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
resources("networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"),
log:
LOGS + "prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.log",
logs("prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.log"),
benchmark:
(BENCHMARKS + "prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}")
(benchmarks("prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}"))
threads: 1
resources:
mem_mb=4000,

File diff suppressed because it is too large


@ -15,21 +15,28 @@ localrules:
rule cluster_networks:
input:
expand(RESOURCES + "networks/elec_s{simpl}_{clusters}.nc", **config["scenario"]),
expand(
resources("networks/elec_s{simpl}_{clusters}.nc"),
**config["scenario"],
run=config["run"]["name"],
),
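# Illustrative: with run=["s1", "s2"], simpl=[""] and clusters=[37, 128]
# (hypothetical values), the expand above requests
# resources/s1/networks/elec_s_37.nc, resources/s1/networks/elec_s_128.nc
# and the same pair under resources/s2/, assuming resources are not shared.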
rule extra_components_networks:
input:
expand(
RESOURCES + "networks/elec_s{simpl}_{clusters}_ec.nc", **config["scenario"]
resources("networks/elec_s{simpl}_{clusters}_ec.nc"),
**config["scenario"],
run=config["run"]["name"],
),
rule prepare_elec_networks:
input:
expand(
RESOURCES + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
resources("networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"),
**config["scenario"],
run=config["run"]["name"],
),
@ -39,6 +46,7 @@ rule prepare_sector_networks:
RESULTS
+ "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
**config["scenario"],
run=config["run"]["name"],
),
@ -47,6 +55,7 @@ rule solve_elec_networks:
expand(
RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
**config["scenario"],
run=config["run"]["name"],
),
@ -56,6 +65,7 @@ rule solve_sector_networks:
RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
**config["scenario"],
run=config["run"]["name"],
),
@ -63,8 +73,9 @@ rule solve_sector_networks_perfect:
input:
expand(
RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
+ "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf",
**config["scenario"],
run=config["run"]["name"],
),
@ -74,10 +85,12 @@ rule validate_elec_networks:
RESULTS
+ "figures/.statistics_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}",
**config["scenario"],
run=config["run"]["name"],
),
expand(
RESULTS
+ "figures/.validation_{kind}_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}",
**config["scenario"],
run=config["run"]["name"],
kind=["production", "prices", "cross_border"],
),


@ -2,17 +2,85 @@
#
# SPDX-License-Identifier: MIT
import copy
from functools import partial, lru_cache
import os, sys, glob
path = workflow.source_path("../scripts/_helpers.py")
sys.path.insert(0, os.path.dirname(path))
from _helpers import validate_checksum
from _helpers import validate_checksum, update_config_from_wildcards
from snakemake.utils import update_config
def get_config(config, keys, default=None):
"""Retrieve a nested value from a dictionary using a tuple of keys."""
value = config
for key in keys:
if isinstance(value, list):
value = value[key]
else:
value = value.get(key, default)
if value == default:
return default
return value
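# Minimal illustration of the key-tuple lookup above (values hypothetical):
# get_config({"electricity": {"max_hours": {"battery": 6}}},
#            ("electricity", "max_hours", "battery"))   # -> 6
# Integer keys index into lists along the way:
# get_config({"scenario": {"planning_horizons": [2030, 2040]}},
#            ("scenario", "planning_horizons", 0))      # -> 2030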
def merge_configs(base_config, scenario_config):
"""Merge base config with a specific scenario without modifying the original."""
merged = copy.deepcopy(base_config)
update_config(merged, scenario_config)
return merged
@lru_cache
def scenario_config(scenario_name):
"""Retrieve a scenario config based on the overrides from the scenario file."""
return merge_configs(config, scenarios[scenario_name])
def static_getter(wildcards, keys, default):
"""Getter function for static config values."""
config_with_wildcards = update_config_from_wildcards(
config, wildcards, inplace=False
)
return get_config(config_with_wildcards, keys, default)
def dynamic_getter(wildcards, keys, default):
"""Getter function for dynamic config values based on scenario."""
if "run" not in wildcards.keys():
return get_config(config, keys, default)
scenario_name = wildcards.run
if scenario_name not in scenarios:
raise ValueError(
f"Scenario {scenario_name} not found in file {config['run']['scenario']['file']}."
)
config_with_scenario = scenario_config(scenario_name)
config_with_wildcards = update_config_from_wildcards(
config_with_scenario, wildcards, inplace=False
)
return get_config(config_with_wildcards, keys, default)
def config_provider(*keys, default=None):
"""Dynamically provide config values based on 'run' -> 'name'.
Usage in Snakemake rules would look something like:
params:
my_param=config_provider("key1", "key2", default="some_default_value")
"""
# Using functools.partial to freeze certain arguments in our getter functions.
if config["run"].get("scenarios", {}).get("enable", False):
return partial(dynamic_getter, keys=keys, default=default)
else:
return partial(static_getter, keys=keys, default=default)
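# Sketch of the two ways the provider is consumed in the rules above and
# below (wildcard values hypothetical): snakemake calls params entries with
# the rule's wildcards, while input functions call the provider explicitly:
# params:
#     countries=config_provider("countries"),
# input:
#     natura=lambda w: (
#         resources("natura.tiff")
#         if config_provider("renewable", w.technology, "natura")(w)
#         else []
#     ),
# With scenarios enabled, the run wildcard selects the merged scenario
# config; otherwise the static top-level config is used.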
def solver_threads(w):
solver_options = config["solving"]["solver_options"]
option_set = config["solving"]["solver"]["options"]
solver_options = config_provider("solving", "solver_options")(w)
option_set = config_provider("solving", "solver", "options")(w)
threads = solver_options[option_set].get("threads", 4)
return threads
@ -38,7 +106,9 @@ def memory(w):
def input_custom_extra_functionality(w):
path = config["solving"]["options"].get("custom_extra_functionality", False)
path = config_provider(
"solving", "options", "custom_extra_functionality", default=False
)(w)
if path:
return os.path.join(os.path.dirname(workflow.snakefile), path)
return []
@ -62,14 +132,15 @@ def has_internet_access(url="www.zenodo.org") -> bool:
def input_eurostat(w):
# 2016 includes BA, 2017 does not
report_year = config["energy"]["eurostat_report_year"]
report_year = config_provider("energy", "eurostat_report_year")(w)
return f"data/bundle-sector/eurostat-energy_balances-june_{report_year}_edition"
def solved_previous_horizon(wildcards):
planning_horizons = config["scenario"]["planning_horizons"]
i = planning_horizons.index(int(wildcards.planning_horizons))
def solved_previous_horizon(w):
planning_horizons = config_provider("scenario", "planning_horizons")(w)
i = planning_horizons.index(int(w.planning_horizons))
planning_horizon_p = str(planning_horizons[i - 1])
return (
RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_"


@ -11,18 +11,19 @@ if config["foresight"] != "perfect":
rule plot_power_network_clustered:
params:
plotting=config["plotting"],
plotting=config_provider("plotting"),
input:
network=RESOURCES + "networks/elec_s{simpl}_{clusters}.nc",
regions_onshore=RESOURCES
+ "regions_onshore_elec_s{simpl}_{clusters}.geojson",
network=resources("networks/elec_s{simpl}_{clusters}.nc"),
regions_onshore=resources(
"regions_onshore_elec_s{simpl}_{clusters}.geojson"
),
output:
map=RESULTS + "maps/power-network-s{simpl}-{clusters}.pdf",
map=resources("maps/power-network-s{simpl}-{clusters}.pdf"),
threads: 1
resources:
mem_mb=4000,
benchmark:
BENCHMARKS + "plot_power_network_clustered/elec_s{simpl}_{clusters}"
benchmarks("plot_power_network_clustered/elec_s{simpl}_{clusters}")
conda:
"../envs/environment.yaml"
script:
@ -30,11 +31,11 @@ if config["foresight"] != "perfect":
rule plot_power_network:
params:
plotting=config["plotting"],
plotting=config_provider("plotting"),
input:
network=RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
regions=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
regions=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"),
output:
map=RESULTS
+ "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf",
@ -42,14 +43,12 @@ if config["foresight"] != "perfect":
resources:
mem_mb=10000,
log:
(
LOGS
+ "plot_power_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log"
),
RESULTS
+ "logs/plot_power_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
benchmark:
(
BENCHMARKS
+ "plot_power_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
RESULTS
+ "benchmarksplot_power_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
)
conda:
"../envs/environment.yaml"
@ -58,12 +57,12 @@ if config["foresight"] != "perfect":
rule plot_hydrogen_network:
params:
plotting=config["plotting"],
foresight=config["foresight"],
plotting=config_provider("plotting"),
foresight=config_provider("foresight"),
input:
network=RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
regions=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
regions=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"),
output:
map=RESULTS
+ "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-h2_network_{planning_horizons}.pdf",
@ -71,14 +70,12 @@ if config["foresight"] != "perfect":
resources:
mem_mb=10000,
log:
(
LOGS
+ "plot_hydrogen_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log"
),
RESULTS
+ "logs/plot_hydrogen_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
benchmark:
(
BENCHMARKS
+ "plot_hydrogen_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
RESULTS
+ "benchmarks/plot_hydrogen_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
)
conda:
"../envs/environment.yaml"
@ -87,11 +84,11 @@ if config["foresight"] != "perfect":
rule plot_gas_network:
params:
plotting=config["plotting"],
plotting=config_provider("plotting"),
input:
network=RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
regions=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
regions=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"),
output:
map=RESULTS
+ "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-ch4_network_{planning_horizons}.pdf",
@ -99,14 +96,12 @@ if config["foresight"] != "perfect":
resources:
mem_mb=10000,
log:
(
LOGS
+ "plot_gas_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log"
),
RESULTS
+ "logs/plot_gas_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
benchmark:
(
BENCHMARKS
+ "plot_gas_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
RESULTS
+ "benchmarks/plot_gas_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
)
conda:
"../envs/environment.yaml"
@ -116,26 +111,26 @@ if config["foresight"] != "perfect":
if config["foresight"] == "perfect":
def output_map_year(w):
return {
f"map_{year}": RESULTS
+ "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_"
+ f"{year}.pdf"
for year in config_provider("scenario", "planning_horizons")(w)
}
rule plot_power_network_perfect:
params:
plotting=config["plotting"],
plotting=config_provider("plotting"),
input:
network=RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
regions=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
regions=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"),
output:
**{
f"map_{year}": RESULTS
+ "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_"
+ f"{year}.pdf"
for year in config["scenario"]["planning_horizons"]
},
unpack(output_map_year),
threads: 2
resources:
mem_mb=10000,
benchmark:
BENCHMARKS
+"postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_benchmark"
conda:
"../envs/environment.yaml"
script:
@ -150,8 +145,6 @@ rule copy_config:
threads: 1
resources:
mem_mb=1000,
benchmark:
BENCHMARKS + "copy_config"
conda:
"../envs/environment.yaml"
script:
@ -160,52 +153,57 @@ rule copy_config:
rule make_summary:
params:
foresight=config["foresight"],
costs=config["costs"],
snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]},
scenario=config["scenario"],
foresight=config_provider("foresight"),
costs=config_provider("costs"),
snapshots=config_provider("snapshots"),
scenario=config_provider("scenario"),
RDIR=RDIR,
input:
expand(
RESULTS + "maps/power-network-s{simpl}-{clusters}.pdf",
**config["scenario"],
),
networks=expand(
RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
**config["scenario"],
allow_missing=True,
),
costs=(
"data/costs_{}.csv".format(config["costs"]["year"])
if config["foresight"] == "overnight"
else "data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0])
costs=lambda w: (
resources("costs_{}.csv".format(config_provider("costs", "year")(w)))
if config_provider("foresight")(w) == "overnight"
else resources(
"costs_{}.csv".format(
config_provider("scenario", "planning_horizons", 0)(w)
)
)
),
ac_plot=expand(
RESULTS + "maps/power-network-s{simpl}-{clusters}.pdf",
resources("maps/power-network-s{simpl}-{clusters}.pdf"),
**config["scenario"],
allow_missing=True,
),
costs_plot=expand(
RESULTS
+ "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf",
**config["scenario"],
allow_missing=True,
),
h2_plot=expand(
h2_plot=lambda w: expand(
(
RESULTS
+ "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-h2_network_{planning_horizons}.pdf"
if config["sector"]["H2_network"]
if config_provider("sector", "H2_network")(w)
else []
),
**config["scenario"],
allow_missing=True,
),
ch4_plot=expand(
ch4_plot=lambda w: expand(
(
RESULTS
+ "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-ch4_network_{planning_horizons}.pdf"
if config["sector"]["gas_network"]
if config_provider("sector", "gas_network")(w)
else []
),
**config["scenario"],
allow_missing=True,
),
output:
nodal_costs=RESULTS + "csvs/nodal_costs.csv",
@ -227,9 +225,7 @@ rule make_summary:
resources:
mem_mb=10000,
log:
LOGS + "make_summary.log",
benchmark:
BENCHMARKS + "make_summary"
RESULTS + "logs/make_summary.log",
conda:
"../envs/environment.yaml"
script:
@ -238,12 +234,14 @@ rule make_summary:
rule plot_summary:
params:
countries=config["countries"],
planning_horizons=config["scenario"]["planning_horizons"],
sector_opts=config["scenario"]["sector_opts"],
emissions_scope=config["energy"]["emissions"],
eurostat_report_year=config["energy"]["eurostat_report_year"],
plotting=config["plotting"],
countries=config_provider("countries"),
planning_horizons=config_provider("scenario", "planning_horizons"),
emissions_scope=config_provider("energy", "emissions"),
eurostat_report_year=config_provider("energy", "eurostat_report_year"),
plotting=config_provider("plotting"),
foresight=config_provider("foresight"),
co2_budget=config_provider("co2_budget"),
sector=config_provider("sector"),
RDIR=RDIR,
input:
costs=RESULTS + "csvs/costs.csv",
@ -259,9 +257,7 @@ rule plot_summary:
resources:
mem_mb=10000,
log:
LOGS + "plot_summary.log",
benchmark:
BENCHMARKS + "plot_summary"
RESULTS + "logs/plot_summary.log",
conda:
"../envs/environment.yaml"
script:
@ -283,7 +279,7 @@ STATISTICS_BARPLOTS = [
rule plot_elec_statistics:
params:
plotting=config["plotting"],
plotting=config_provider("plotting"),
barplots=STATISTICS_BARPLOTS,
input:
network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",


@ -32,7 +32,7 @@ if config["enable"]["retrieve"] and config["enable"].get("retrieve_databundle",
output:
protected(expand("data/bundle/{file}", file=datafiles)),
log:
LOGS + "retrieve_databundle.log",
"logs/retrieve_databundle.log",
resources:
mem_mb=1000,
retries: 2
@ -50,7 +50,7 @@ if config["enable"].get("retrieve_irena"):
onwind="data/existing_infrastructure/onwind_capacity_IRENA.csv",
solar="data/existing_infrastructure/solar_capacity_IRENA.csv",
log:
LOGS + "retrieve_irena.log",
logs("retrieve_irena.log"),
resources:
mem_mb=1000,
retries: 2
@ -83,23 +83,19 @@ if config["enable"]["retrieve"] and config["enable"].get("retrieve_cutout", True
if config["enable"]["retrieve"] and config["enable"].get("retrieve_cost_data", True):
rule retrieve_cost_data:
input:
HTTP.remote(
"raw.githubusercontent.com/PyPSA/technology-data/{}/outputs/".format(
config["costs"]["version"]
)
+ "costs_{year}.csv",
keep_local=True,
),
params:
version=config_provider("costs", "version"),
output:
"data/costs_{year}.csv",
resources("costs_{year}.csv"),
log:
LOGS + "retrieve_cost_data_{year}.log",
logs("retrieve_cost_data_{year}.log"),
resources:
mem_mb=1000,
retries: 2
run:
move(input[0], output[0])
conda:
"../envs/retrieve.yaml"
script:
"../scripts/retrieve_cost_data.py"
if config["enable"]["retrieve"] and config["enable"].get(
@ -114,9 +110,9 @@ if config["enable"]["retrieve"] and config["enable"].get(
static=True,
),
output:
RESOURCES + "natura.tiff",
resources("natura.tiff"),
log:
LOGS + "retrieve_natura_raster.log",
logs("retrieve_natura_raster.log"),
resources:
mem_mb=5000,
retries: 2
@ -154,7 +150,7 @@ if config["enable"]["retrieve"] and config["enable"].get(
protected(expand("data/bundle-sector/{files}", files=datafiles)),
*datafolders,
log:
LOGS + "retrieve_sector_databundle.log",
"logs/retrieve_sector_databundle.log",
retries: 2
conda:
"../envs/retrieve.yaml"
@ -173,11 +169,9 @@ if config["enable"]["retrieve"]:
rule retrieve_gas_infrastructure_data:
output:
protected(
expand("data/gas_network/scigrid-gas/data/{files}", files=datafiles)
),
expand("data/gas_network/scigrid-gas/data/{files}", files=datafiles),
log:
LOGS + "retrieve_gas_infrastructure_data.log",
"logs/retrieve_gas_infrastructure_data.log",
retries: 2
conda:
"../envs/retrieve.yaml"
@ -193,10 +187,12 @@ if config["enable"]["retrieve"]:
output:
"data/electricity_demand_raw.csv",
log:
LOGS + "retrieve_electricity_demand.log",
"logs/retrieve_electricity_demand.log",
resources:
mem_mb=5000,
retries: 2
conda:
"../envs/retrieve.yaml"
script:
"../scripts/retrieve_electricity_demand.py"
@ -213,7 +209,7 @@ if config["enable"]["retrieve"]:
output:
protected("data/shipdensity_global.zip"),
log:
LOGS + "retrieve_ship_raster.log",
"logs/retrieve_ship_raster.log",
resources:
mem_mb=5000,
retries: 2
@ -349,7 +345,7 @@ if config["enable"]["retrieve"]:
output:
"data/validation/emission-spot-primary-market-auction-report-2019-data.xls",
log:
LOGS + "retrieve_monthly_co2_prices.log",
"logs/retrieve_monthly_co2_prices.log",
resources:
mem_mb=5000,
retries: 2
@ -363,7 +359,7 @@ if config["enable"]["retrieve"]:
output:
"data/validation/energy-price-trends-xlsx-5619002.xlsx",
log:
LOGS + "retrieve_monthly_fuel_prices.log",
"logs/retrieve_monthly_fuel_prices.log",
resources:
mem_mb=5000,
retries: 2


@ -5,30 +5,31 @@
rule solve_network:
params:
solving=config["solving"],
foresight=config["foresight"],
planning_horizons=config["scenario"]["planning_horizons"],
co2_sequestration_potential=config["sector"].get(
"co2_sequestration_potential", 200
solving=config_provider("solving"),
foresight=config_provider("foresight"),
planning_horizons=config_provider("scenario", "planning_horizons"),
co2_sequestration_potential=config_provider(
"sector", "co2_sequestration_potential", default=200
),
custom_extra_functionality=input_custom_extra_functionality,
input:
network=RESOURCES + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
network=resources("networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"),
config=RESULTS + "config.yaml",
output:
network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
log:
solver=normpath(
LOGS + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_solver.log"
RESULTS
+ "logs/solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_solver.log"
),
python=LOGS
+ "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_python.log",
python=RESULTS
+ "logs/solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_python.log",
benchmark:
BENCHMARKS + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}"
RESULTS + "benchmarks/solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}"
threads: solver_threads
resources:
mem_mb=memory,
walltime=config["solving"].get("walltime", "12:00:00"),
walltime=config_provider("solving", "walltime", default="12:00:00"),
shadow:
"minimal"
conda:
@ -39,27 +40,27 @@ rule solve_network:
rule solve_operations_network:
params:
options=config["solving"]["options"],
options=config_provider("solving", "options"),
input:
network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
output:
network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op.nc",
log:
solver=normpath(
LOGS
+ "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_solver.log"
RESULTS
+ "logs/solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_solver.log"
),
python=LOGS
+ "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_python.log",
python=RESULTS
+ "logs/solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_python.log",
benchmark:
(
BENCHMARKS
+ "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}"
RESULTS
+ "benchmarks/solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}"
)
threads: 4
resources:
mem_mb=(lambda w: 10000 + 372 * int(w.clusters)),
walltime=config["solving"].get("walltime", "12:00:00"),
walltime=config_provider("solving", "walltime", default="12:00:00"),
shadow:
"minimal"
conda:


@ -5,22 +5,27 @@
rule add_existing_baseyear:
params:
baseyear=config["scenario"]["planning_horizons"][0],
sector=config["sector"],
existing_capacities=config["existing_capacities"],
costs=config["costs"],
baseyear=config_provider("scenario", "planning_horizons", 0),
sector=config_provider("sector"),
existing_capacities=config_provider("existing_capacities"),
costs=config_provider("costs"),
input:
network=RESULTS
+ "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
powerplants=RESOURCES + "powerplants.csv",
busmap_s=RESOURCES + "busmap_elec_s{simpl}.csv",
busmap=RESOURCES + "busmap_elec_s{simpl}_{clusters}.csv",
clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv",
costs="data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0]),
cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc",
cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc",
existing_heating_distribution=RESOURCES
+ "existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv",
powerplants=resources("powerplants.csv"),
busmap_s=resources("busmap_elec_s{simpl}.csv"),
busmap=resources("busmap_elec_s{simpl}_{clusters}.csv"),
clustered_pop_layout=resources("pop_layout_elec_s{simpl}_{clusters}.csv"),
costs=lambda w: resources(
"costs_{}.csv".format(
config_provider("scenario", "planning_horizons", 0)(w)
)
),
cop_soil_total=resources("cop_soil_total_elec_s{simpl}_{clusters}.nc"),
cop_air_total=resources("cop_air_total_elec_s{simpl}_{clusters}.nc"),
existing_heating_distribution=resources(
"existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv"
),
existing_solar="data/existing_infrastructure/solar_capacity_IRENA.csv",
existing_onwind="data/existing_infrastructure/onwind_capacity_IRENA.csv",
existing_offwind="data/existing_infrastructure/offwind_capacity_IRENA.csv",
@ -28,17 +33,20 @@ rule add_existing_baseyear:
RESULTS
+ "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
wildcard_constraints:
# TODO: The first planning_horizon needs to be aligned across scenarios
# snakemake does not support passing functions to wildcard_constraints
# reference: https://github.com/snakemake/snakemake/issues/2703
planning_horizons=config["scenario"]["planning_horizons"][0], #only applies to baseyear
threads: 1
resources:
mem_mb=2000,
log:
LOGS
+ "add_existing_baseyear_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
RESULTS
+ "logs/add_existing_baseyear_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
benchmark:
(
BENCHMARKS
+ "add_existing_baseyear/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
RESULTS
+ "benchmarks/add_existing_baseyear/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
)
conda:
"../envs/environment.yaml"
@ -46,27 +54,33 @@ rule add_existing_baseyear:
"../scripts/add_existing_baseyear.py"
def input_profile_tech_brownfield(w):
return {
f"profile_{tech}": resources(f"profile_{tech}.nc")
for tech in config_provider("electricity", "renewable_carriers")(w)
if tech != "hydro"
}
rule add_brownfield:
params:
H2_retrofit=config["sector"]["H2_retrofit"],
H2_retrofit_capacity_per_CH4=config["sector"]["H2_retrofit_capacity_per_CH4"],
threshold_capacity=config["existing_capacities"]["threshold_capacity"],
snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]},
carriers=config["electricity"]["renewable_carriers"],
H2_retrofit=config_provider("sector", "H2_retrofit"),
H2_retrofit_capacity_per_CH4=config_provider(
"sector", "H2_retrofit_capacity_per_CH4"
),
threshold_capacity=config_provider("existing_capacities", "threshold_capacity"),
snapshots=config_provider("snapshots"),
carriers=config_provider("electricity", "renewable_carriers"),
input:
**{
f"profile_{tech}": RESOURCES + f"profile_{tech}.nc"
for tech in config["electricity"]["renewable_carriers"]
if tech != "hydro"
},
simplify_busmap=RESOURCES + "busmap_elec_s{simpl}.csv",
cluster_busmap=RESOURCES + "busmap_elec_s{simpl}_{clusters}.csv",
unpack(input_profile_tech_brownfield),
simplify_busmap=resources("busmap_elec_s{simpl}.csv"),
cluster_busmap=resources("busmap_elec_s{simpl}_{clusters}.csv"),
network=RESULTS
+ "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
network_p=solved_previous_horizon, #solved network at previous time step
costs="data/costs_{planning_horizons}.csv",
cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc",
cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc",
costs=resources("costs_{planning_horizons}.csv"),
cop_soil_total=resources("cop_soil_total_elec_s{simpl}_{clusters}.nc"),
cop_air_total=resources("cop_air_total_elec_s{simpl}_{clusters}.nc"),
output:
RESULTS
+ "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
@ -74,12 +88,12 @@ rule add_brownfield:
resources:
mem_mb=10000,
log:
LOGS
+ "add_brownfield_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
RESULTS
+ "logs/add_brownfield_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
benchmark:
(
BENCHMARKS
+ "add_brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
RESULTS
+ "benchmarks/add_brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
)
conda:
"../envs/environment.yaml"
@ -92,17 +106,17 @@ ruleorder: add_existing_baseyear > add_brownfield
rule solve_sector_network_myopic:
params:
solving=config["solving"],
foresight=config["foresight"],
planning_horizons=config["scenario"]["planning_horizons"],
co2_sequestration_potential=config["sector"].get(
"co2_sequestration_potential", 200
solving=config_provider("solving"),
foresight=config_provider("foresight"),
planning_horizons=config_provider("scenario", "planning_horizons"),
co2_sequestration_potential=config_provider(
"sector", "co2_sequestration_potential", default=200
),
custom_extra_functionality=input_custom_extra_functionality,
input:
network=RESULTS
+ "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
costs="data/costs_{planning_horizons}.csv",
costs=resources("costs_{planning_horizons}.csv"),
config=RESULTS + "config.yaml",
output:
RESULTS
@ -110,18 +124,18 @@ rule solve_sector_network_myopic:
shadow:
"shallow"
log:
solver=LOGS
+ "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_solver.log",
python=LOGS
+ "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log",
solver=RESULTS
+ "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_solver.log",
python=RESULTS
+ "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log",
threads: solver_threads
resources:
mem_mb=config["solving"]["mem"],
walltime=config["solving"].get("walltime", "12:00:00"),
mem_mb=config_provider("solving", "mem"),
walltime=config_provider("solving", "walltime", default="12:00:00"),
benchmark:
(
BENCHMARKS
+ "solve_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
RESULTS
+ "benchmarks/solve_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
)
conda:
"../envs/environment.yaml"


@ -5,11 +5,11 @@
rule solve_sector_network:
params:
solving=config["solving"],
foresight=config["foresight"],
planning_horizons=config["scenario"]["planning_horizons"],
co2_sequestration_potential=config["sector"].get(
"co2_sequestration_potential", 200
solving=config_provider("solving"),
foresight=config_provider("foresight"),
planning_horizons=config_provider("scenario", "planning_horizons"),
co2_sequestration_potential=config_provider(
"sector", "co2_sequestration_potential", default=200
),
custom_extra_functionality=input_custom_extra_functionality,
input:
@ -30,13 +30,12 @@ rule solve_sector_network:
+ "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log",
threads: solver_threads
resources:
mem_mb=config["solving"]["mem"],
walltime=config["solving"].get("walltime", "12:00:00"),
mem_mb=config_provider("solving", "mem"),
walltime=config_provider("solving", "walltime", default="12:00:00"),
benchmark:
(
RESULTS
+ BENCHMARKS
+ "solve_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
+ "benchmarks/solve_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
)
conda:
"../envs/environment.yaml"


@ -3,22 +3,27 @@
# SPDX-License-Identifier: MIT
rule add_existing_baseyear:
params:
baseyear=config["scenario"]["planning_horizons"][0],
sector=config["sector"],
existing_capacities=config["existing_capacities"],
costs=config["costs"],
baseyear=config_provider("scenario", "planning_horizons", 0),
sector=config_provider("sector"),
existing_capacities=config_provider("existing_capacities"),
costs=config_provider("costs"),
input:
network=RESULTS
+ "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
powerplants=RESOURCES + "powerplants.csv",
busmap_s=RESOURCES + "busmap_elec_s{simpl}.csv",
busmap=RESOURCES + "busmap_elec_s{simpl}_{clusters}.csv",
clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv",
costs="data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0]),
cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc",
cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc",
existing_heating_distribution=RESOURCES
+ "existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv",
powerplants=resources("powerplants.csv"),
busmap_s=resources("busmap_elec_s{simpl}.csv"),
busmap=resources("busmap_elec_s{simpl}_{clusters}.csv"),
clustered_pop_layout=resources("pop_layout_elec_s{simpl}_{clusters}.csv"),
costs=lambda w: resources(
"costs_{}.csv".format(
config_provider("scenario", "planning_horizons", 0)(w)
)
),
cop_soil_total=resources("cop_soil_total_elec_s{simpl}_{clusters}.nc"),
cop_air_total=resources("cop_air_total_elec_s{simpl}_{clusters}.nc"),
existing_heating_distribution=resources(
"existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv"
),
existing_heating="data/existing_infrastructure/existing_heating_raw.csv",
existing_solar="data/existing_infrastructure/solar_capacity_IRENA.csv",
existing_onwind="data/existing_infrastructure/onwind_capacity_IRENA.csv",
@ -32,12 +37,12 @@ rule add_existing_baseyear:
resources:
mem_mb=2000,
log:
LOGS
+ "add_existing_baseyear_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
logs(
"add_existing_baseyear_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log"
),
benchmark:
(
BENCHMARKS
+ "add_existing_baseyear/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
benchmarks(
"add_existing_baseyear/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
)
conda:
"../envs/environment.yaml"
@ -45,19 +50,28 @@ rule add_existing_baseyear:
"../scripts/add_existing_baseyear.py"
def input_network_year(w):
return {
f"network_{year}": RESULTS
+ "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}"
+ f"_{year}.nc"
for year in config_provider("scenario", "planning_horizons")(w)[1:]
}
rule prepare_perfect_foresight:
params:
costs=config_provider("costs"),
time_resolution=config_provider("clustering", "temporal", "sector"),
input:
**{
f"network_{year}": RESULTS
+ "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_"
+ f"{year}.nc"
for year in config["scenario"]["planning_horizons"][1:]
},
unpack(input_network_year),
brownfield_network=lambda w: (
RESULTS
+ "prenetworks-brownfield/"
+ "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_"
+ "{}.nc".format(str(config["scenario"]["planning_horizons"][0]))
+ "{}.nc".format(
str(config_provider("scenario", "planning_horizons", 0)(w))
)
),
output:
RESULTS
@ -66,12 +80,12 @@ rule prepare_perfect_foresight:
resources:
mem_mb=10000,
log:
LOGS
+ "prepare_perfect_foresight{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}.log",
logs(
"prepare_perfect_foresight{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}.log"
),
benchmark:
(
BENCHMARKS
+ "prepare_perfect_foresight{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}"
benchmarks(
"prepare_perfect_foresight{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}"
)
conda:
"../envs/environment.yaml"
@ -81,25 +95,25 @@ rule prepare_perfect_foresight:
rule solve_sector_network_perfect:
params:
solving=config["solving"],
foresight=config["foresight"],
sector=config["sector"],
planning_horizons=config["scenario"]["planning_horizons"],
co2_sequestration_potential=config["sector"].get(
"co2_sequestration_potential", 200
solving=config_provider("solving"),
foresight=config_provider("foresight"),
sector=config_provider("sector"),
planning_horizons=config_provider("scenario", "planning_horizons"),
co2_sequestration_potential=config_provider(
"sector", "co2_sequestration_potential", default=200
),
custom_extra_functionality=input_custom_extra_functionality,
input:
network=RESULTS
+ "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
costs="data/costs_2030.csv",
costs=resources("costs_2030.csv"),
config=RESULTS + "config.yaml",
output:
RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
threads: solver_threads
resources:
mem_mb=config["solving"]["mem"],
mem_mb=config_provider("solving", "mem"),
shadow:
"shallow"
log:
@ -111,8 +125,8 @@ rule solve_sector_network_perfect:
+ "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_memory.log",
benchmark:
(
BENCHMARKS
+ "solve_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years}"
RESULTS
+ "benchmarks/solve_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years}"
)
conda:
"../envs/environment.yaml"
@ -120,18 +134,22 @@ rule solve_sector_network_perfect:
"../scripts/solve_network.py"
def input_networks_make_summary_perfect(w):
return {
f"networks_{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}": RESULTS
+ f"postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc"
for simpl in config_provider("scenario", "simpl")(w)
for clusters in config_provider("scenario", "clusters")(w)
for opts in config_provider("scenario", "opts")(w)
for sector_opts in config_provider("scenario", "sector_opts")(w)
for ll in config_provider("scenario", "ll")(w)
}
rule make_summary_perfect:
input:
**{
f"networks_{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}": RESULTS
+ f"postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc"
for simpl in config["scenario"]["simpl"]
for clusters in config["scenario"]["clusters"]
for opts in config["scenario"]["opts"]
for sector_opts in config["scenario"]["sector_opts"]
for ll in config["scenario"]["ll"]
},
costs="data/costs_2020.csv",
unpack(input_networks_make_summary_perfect),
costs=resources("costs_2020.csv"),
output:
nodal_costs=RESULTS + "csvs/nodal_costs.csv",
nodal_capacities=RESULTS + "csvs/nodal_capacities.csv",
@ -153,9 +171,9 @@ rule make_summary_perfect:
resources:
mem_mb=10000,
log:
LOGS + "make_summary_perfect.log",
logs("make_summary_perfect.log"),
benchmark:
(BENCHMARKS + "make_summary_perfect")
benchmarks("make_summary_perfect")
conda:
"../envs/environment.yaml"
script:


@ -17,12 +17,12 @@ rule build_electricity_production:
The data is used for validation of the optimization results.
"""
params:
snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]},
countries=config["countries"],
snapshots=config_provider("snapshots"),
countries=config_provider("countries"),
output:
RESOURCES + "historical_electricity_production.csv",
resources("historical_electricity_production.csv"),
log:
LOGS + "build_electricity_production.log",
logs("build_electricity_production.log"),
resources:
mem_mb=5000,
script:
@ -35,14 +35,14 @@ rule build_cross_border_flows:
The data is used for validation of the optimization results.
"""
params:
snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]},
countries=config["countries"],
snapshots=config_provider("snapshots"),
countries=config_provider("countries"),
input:
network=RESOURCES + "networks/base.nc",
network=resources("networks/base.nc"),
output:
RESOURCES + "historical_cross_border_flows.csv",
resources("historical_cross_border_flows.csv"),
log:
LOGS + "build_cross_border_flows.log",
logs("build_cross_border_flows.log"),
resources:
mem_mb=5000,
script:
@ -55,12 +55,12 @@ rule build_electricity_prices:
The data is used for validation of the optimization results.
"""
params:
snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]},
countries=config["countries"],
snapshots=config_provider("snapshots"),
countries=config_provider("countries"),
output:
RESOURCES + "historical_electricity_prices.csv",
resources("historical_electricity_prices.csv"),
log:
LOGS + "build_electricity_prices.log",
logs("build_electricity_prices.log"),
resources:
mem_mb=5000,
script:
@ -70,7 +70,7 @@ rule build_electricity_prices:
rule plot_validation_electricity_production:
input:
network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
electricity_production=RESOURCES + "historical_electricity_production.csv",
electricity_production=resources("historical_electricity_production.csv"),
output:
**{
plot: RESULTS
@ -85,10 +85,10 @@ rule plot_validation_electricity_production:
rule plot_validation_cross_border_flows:
params:
countries=config["countries"],
countries=config_provider("countries"),
input:
network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
cross_border_flows=RESOURCES + "historical_cross_border_flows.csv",
cross_border_flows=resources("historical_cross_border_flows.csv"),
output:
**{
plot: RESULTS
@ -104,7 +104,7 @@ rule plot_validation_cross_border_flows:
rule plot_validation_electricity_prices:
input:
network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
electricity_prices=RESOURCES + "historical_electricity_prices.csv",
electricity_prices=resources("historical_electricity_prices.csv"),
output:
**{
plot: RESULTS

4
scripts/__init__.py Normal file

@ -0,0 +1,4 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT


@ -4,17 +4,20 @@
# SPDX-License-Identifier: MIT
import contextlib
import copy
import hashlib
import logging
import os
import re
import urllib
from functools import partial
from pathlib import Path
import pandas as pd
import pytz
import requests
import yaml
from snakemake.utils import update_config
from tqdm import tqdm
logger = logging.getLogger(__name__)
@ -22,6 +25,77 @@ logger = logging.getLogger(__name__)
REGION_COLS = ["geometry", "name", "x", "y", "country"]
def get_run_path(fn, dir, rdir, shared_resources):
"""
Dynamically provide paths based on shared resources and filename.
Use this function for snakemake rule inputs or outputs that should be
optionally shared across runs or created individually for each run.
Parameters
----------
fn : str
The filename for the path to be generated.
dir : str
The base directory.
rdir : str
Relative directory for non-shared resources.
shared_resources : str or bool
Specifies which resources should be shared.
- If string is "base", special handling for shared "base" resources (see notes).
- If random string other than "base", this folder is used instead of the `rdir` keyword.
- If boolean, directly specifies if the resource is shared.
Returns
-------
str
Full path where the resource should be stored.
Notes
-----
Special case for "base" allows no wildcards other than "technology", "year"
and "scope" and excludes filenames starting with "networks/elec" or
"add_electricity". All other resources are shared.
"""
if shared_resources == "base":
pattern = r"\{([^{}]+)\}"
existing_wildcards = set(re.findall(pattern, fn))
irrelevant_wildcards = {"technology", "year", "scope"}
no_relevant_wildcards = not existing_wildcards - irrelevant_wildcards
no_elec_rule = not fn.startswith("networks/elec") and not fn.startswith(
"add_electricity"
)
is_shared = no_relevant_wildcards and no_elec_rule
elif isinstance(shared_resources, str):
rdir = shared_resources + "/"
is_shared = True
elif isinstance(shared_resources, bool):
is_shared = shared_resources
else:
raise ValueError(
"shared_resources must be a boolean, str, or 'base' for special handling."
)
if is_shared:
return f"{dir}{fn}"
else:
return f"{dir}{rdir}{fn}"
def path_provider(dir, rdir, shared_resources):
"""
Returns a partial function that dynamically provides paths based on shared
resources and the filename.
Returns
-------
partial function
A partial function that takes a filename as input and
returns the path to the file based on the shared_resources parameter.
"""
return partial(get_run_path, dir=dir, rdir=rdir, shared_resources=shared_resources)
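# Minimal sketch of the resulting path logic (directory names hypothetical):
# resources = path_provider("resources/", "myrun/", shared_resources=False)
# resources("networks/base.nc")  # -> "resources/myrun/networks/base.nc"
# resources = path_provider("resources/", "myrun/", shared_resources=True)
# resources("networks/base.nc")  # -> "resources/networks/base.nc" (shared)
# resources = path_provider("resources/", "myrun/", shared_resources="base")
# resources("natura.tiff")       # -> "resources/natura.tiff" (no wildcards)
# resources("networks/elec.nc")  # -> "resources/myrun/networks/elec.nc"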
def get_opt(opts, expr, flags=None):
"""
Return the first option matching the regular expression.
@ -43,9 +117,9 @@ def find_opt(opts, expr):
"""
for o in opts:
if expr in o:
m = re.findall("[0-9]*\.?[0-9]+$", o)
m = re.findall(r"m?\d+(?:[\.p]\d+)?", o)
if len(m) > 0:
return True, float(m[0])
return True, float(m[-1].replace("p", ".").replace("m", "-"))
else:
return True, None
return False, None
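# Worked examples for the revised pattern above: "p" encodes a decimal
# point, a leading "m" a minus sign, and the last numeric match in the
# option string is taken (so the digit inside "Co2L" itself is skipped):
# find_opt(["Co2L0p25"], "Co2L")   # -> (True, 0.25)
# find_opt(["Co2Lm0p05"], "Co2L")  # -> (True, -0.05)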
@ -59,6 +133,21 @@ def mute_print():
yield
def set_scenario_config(snakemake):
scenario = snakemake.config["run"].get("scenarios", {})
if scenario.get("enable") and "run" in snakemake.wildcards.keys():
try:
with open(scenario["file"], "r") as f:
scenario_config = yaml.safe_load(f)
except FileNotFoundError:
# fallback for mock_snakemake
script_dir = Path(__file__).parent.resolve()
root_dir = script_dir.parent
with open(root_dir / scenario["file"], "r") as f:
scenario_config = yaml.safe_load(f)
update_config(snakemake.config, scenario_config[snakemake.wildcards.run])
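# Illustrative scenarios.yaml consumed above (scenario name and override
# hypothetical); each top-level key names one run and holds partial config
# overrides that update_config merges into the base config:
# lowco2:
#   electricity:
#     co2limit: 1.0e+7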
def configure_logging(snakemake, skip_handlers=False):
"""
Configure the basic behaviour for the logging module.
@ -235,7 +324,7 @@ def progress_retrieve(url, file, disable=False):
def mock_snakemake(
rulename,
root_dir=None,
configfiles=[],
configfiles=None,
submodule_dir="workflow/submodules/pypsa-eur",
**wildcards,
):
@ -289,7 +378,9 @@ def mock_snakemake(
if os.path.exists(p):
snakefile = p
break
if isinstance(configfiles, str):
if configfiles is None:
configfiles = []
elif isinstance(configfiles, str):
configfiles = [configfiles]
workflow = sm.Workflow(
@ -311,7 +402,7 @@ def mock_snakemake(
def make_accessable(*ios):
for io in ios:
for i in range(len(io)):
for i, _ in enumerate(io):
io[i] = os.path.abspath(io[i])
make_accessable(job.input, job.output, job.log)
@ -379,13 +470,181 @@ def parse(infix):
return {infix.pop(0): parse(infix)}
def update_config_with_sector_opts(config, sector_opts):
from snakemake.utils import update_config
def update_config_from_wildcards(config, w, inplace=True):
"""
Parses configuration settings from wildcards and updates the config.
"""
for o in sector_opts.split("-"):
if o.startswith("CF+"):
infix = o.split("+")[1:]
update_config(config, parse(infix))
if not inplace:
config = copy.deepcopy(config)
if w.get("opts"):
opts = w.opts.split("-")
if nhours := get_opt(opts, r"^\d+(h|seg)$"):
config["clustering"]["temporal"]["resolution_elec"] = nhours
co2l_enable, co2l_value = find_opt(opts, "Co2L")
if co2l_enable:
config["electricity"]["co2limit_enable"] = True
if co2l_value is not None:
config["electricity"]["co2limit"] = (
co2l_value * config["electricity"]["co2base"]
)
gasl_enable, gasl_value = find_opt(opts, "CH4L")
if gasl_enable:
config["electricity"]["gaslimit_enable"] = True
if gasl_value is not None:
config["electricity"]["gaslimit"] = gasl_value * 1e6
if "Ept" in opts:
config["costs"]["emission_prices"]["co2_monthly_prices"] = True
ep_enable, ep_value = find_opt(opts, "Ep")
if ep_enable:
config["costs"]["emission_prices"]["enable"] = True
if ep_value is not None:
config["costs"]["emission_prices"]["co2"] = ep_value
if "ATK" in opts:
config["autarky"]["enable"] = True
if "ATKc" in opts:
config["autarky"]["by_country"] = True
attr_lookup = {
"p": "p_nom_max",
"e": "e_nom_max",
"c": "capital_cost",
"m": "marginal_cost",
}
for o in opts:
flags = ["+e", "+p", "+m", "+c"]
if all(flag not in o for flag in flags):
continue
carrier, attr_factor = o.split("+")
attr = attr_lookup[attr_factor[0]]
factor = float(attr_factor[1:])
if not isinstance(config["adjustments"]["electricity"], dict):
config["adjustments"]["electricity"] = dict()
update_config(
config["adjustments"]["electricity"], {attr: {carrier: factor}}
)
if w.get("sector_opts"):
opts = w.sector_opts.split("-")
if "T" in opts:
config["sector"]["transport"] = True
if "H" in opts:
config["sector"]["heating"] = True
if "B" in opts:
config["sector"]["biomass"] = True
if "I" in opts:
config["sector"]["industry"] = True
if "A" in opts:
config["sector"]["agriculture"] = True
if "CCL" in opts:
config["solving"]["constraints"]["CCL"] = True
eq_value = get_opt(opts, r"^EQ+\d*\.?\d+(c|)")
for o in opts:
if eq_value is not None:
config["solving"]["constraints"]["EQ"] = eq_value
elif "EQ" in o:
config["solving"]["constraints"]["EQ"] = True
break
if "BAU" in opts:
config["solving"]["constraints"]["BAU"] = True
if "SAFE" in opts:
config["solving"]["constraints"]["SAFE"] = True
if nhours := get_opt(opts, r"^\d+(h|sn|seg)$"):
config["clustering"]["temporal"]["resolution_sector"] = nhours
if "decentral" in opts:
config["sector"]["electricity_transmission_grid"] = False
if "noH2network" in opts:
config["sector"]["H2_network"] = False
if "nowasteheat" in opts:
config["sector"]["use_fischer_tropsch_waste_heat"] = False
config["sector"]["use_methanolisation_waste_heat"] = False
config["sector"]["use_haber_bosch_waste_heat"] = False
config["sector"]["use_methanation_waste_heat"] = False
config["sector"]["use_fuel_cell_waste_heat"] = False
config["sector"]["use_electrolysis_waste_heat"] = False
if "nodistrict" in opts:
config["sector"]["district_heating"]["progress"] = 0.0
dg_enable, dg_factor = find_opt(opts, "dist")
if dg_enable:
config["sector"]["electricity_distribution_grid"] = True
if dg_factor is not None:
config["sector"][
"electricity_distribution_grid_cost_factor"
] = dg_factor
if "biomasstransport" in opts:
config["sector"]["biomass_transport"] = True
_, maxext = find_opt(opts, "linemaxext")
if maxext is not None:
config["lines"]["max_extension"] = maxext * 1e3
config["links"]["max_extension"] = maxext * 1e3
_, co2l_value = find_opt(opts, "Co2L")
if co2l_value is not None:
config["co2_budget"] = float(co2l_value)
if co2_distribution := get_opt(opts, r"^(cb)\d+(\.\d+)?(ex|be)$"):
config["co2_budget"] = co2_distribution
if co2_budget := get_opt(opts, r"^(cb)\d+(\.\d+)?$"):
config["co2_budget"] = float(co2_budget[2:])
attr_lookup = {
"p": "p_nom_max",
"e": "e_nom_max",
"c": "capital_cost",
"m": "marginal_cost",
}
for o in opts:
flags = ["+e", "+p", "+m", "+c"]
if all(flag not in o for flag in flags):
continue
carrier, attr_factor = o.split("+")
attr = attr_lookup[attr_factor[0]]
factor = float(attr_factor[1:])
if not isinstance(config["adjustments"]["sector"], dict):
config["adjustments"]["sector"] = dict()
update_config(config["adjustments"]["sector"], {attr: {carrier: factor}})
_, sdr_value = find_opt(opts, "sdr")
if sdr_value is not None:
config["costs"]["social_discountrate"] = sdr_value / 100
_, seq_limit = find_opt(opts, "seq")
if seq_limit is not None:
config["sector"]["co2_sequestration_potential"] = seq_limit
# any config option can be represented in wildcard
for o in opts:
if o.startswith("CF+"):
infix = o.split("+")[1:]
update_config(config, parse(infix))
if not inplace:
return config
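# Illustrative mapping from wildcard tokens to configuration entries (tokens
# hypothetical; w stands for the snakemake.wildcards object):
#   opts="Co2L0p05-4H"      -> electricity.co2limit_enable = True,
#                              electricity.co2limit = 0.05 * co2base,
#                              clustering.temporal.resolution_elec = "4H"
#   sector_opts="T-H-B"     -> sector.transport/heating/biomass = True
#   sector_opts="solar+p3"  -> adjustments.sector.p_nom_max.solar = 3.0
#   sector_opts="cb45.0ex"  -> co2_budget = "cb45.0ex" (distribution path)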
def get_checksum_from_zenodo(file_url):

View File

@ -12,7 +12,11 @@ import numpy as np
import pandas as pd
import pypsa
import xarray as xr
from _helpers import update_config_with_sector_opts
from _helpers import (
configure_logging,
set_scenario_config,
update_config_from_wildcards,
)
from add_existing_baseyear import add_build_year_to_new_assets
from pypsa.clustering.spatial import normed_or_uniform
@ -210,9 +214,10 @@ if __name__ == "__main__":
planning_horizons=2030,
)
logging.basicConfig(level=snakemake.config["logging"]["level"])
configure_logging(snakemake)
set_scenario_config(snakemake)
update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts)
update_config_from_wildcards(snakemake.config, snakemake.wildcards)
logger.info(f"Preparing brownfield from the file {snakemake.input.network_p}")

View File

@ -93,7 +93,7 @@ import powerplantmatching as pm
import pypsa
import scipy.sparse as sparse
import xarray as xr
from _helpers import configure_logging, update_p_nom_max
from _helpers import configure_logging, set_scenario_config, update_p_nom_max
from powerplantmatching.export import map_country_bus
from shapely.prepared import prep
@ -790,6 +790,7 @@ if __name__ == "__main__":
snakemake = mock_snakemake("add_electricity")
configure_logging(snakemake)
set_scenario_config(snakemake)
params = snakemake.params

View File

@ -15,7 +15,11 @@ import numpy as np
import pandas as pd
import pypsa
import xarray as xr
from _helpers import update_config_with_sector_opts
from _helpers import (
configure_logging,
set_scenario_config,
update_config_from_wildcards,
)
from add_electricity import sanitize_carriers
from prepare_sector_network import cluster_heat_buses, define_spatial, prepare_costs
@ -552,12 +556,12 @@ if __name__ == "__main__":
planning_horizons=2020,
)
logging.basicConfig(level=snakemake.config["logging"]["level"])
configure_logging(snakemake)
set_scenario_config(snakemake)
update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts)
update_config_from_wildcards(snakemake.config, snakemake.wildcards)
options = snakemake.params.sector
opts = snakemake.wildcards.sector_opts.split("-")
baseyear = snakemake.params.baseyear
@ -580,7 +584,7 @@ if __name__ == "__main__":
n, grouping_years_power, costs, baseyear
)
if "H" in opts:
if options["heating"]:
time_dep_hp_cop = options["time_dep_hp_cop"]
ashp_cop = (
xr.open_dataarray(snakemake.input.cop_air_total)

View File

@ -55,7 +55,7 @@ import logging
import numpy as np
import pandas as pd
import pypsa
from _helpers import configure_logging
from _helpers import configure_logging, set_scenario_config
from add_electricity import load_costs, sanitize_carriers, sanitize_locations
idx = pd.IndexSlice
@ -230,6 +230,7 @@ if __name__ == "__main__":
snakemake = mock_snakemake("add_extra_components", simpl="", clusters=5)
configure_logging(snakemake)
set_scenario_config(snakemake)
n = pypsa.Network(snakemake.input.network)
extendable_carriers = snakemake.params.extendable_carriers

View File

@ -77,7 +77,7 @@ import shapely
import shapely.prepared
import shapely.wkt
import yaml
from _helpers import configure_logging
from _helpers import configure_logging, set_scenario_config
from packaging.version import Version, parse
from scipy import spatial
from scipy.sparse import csgraph
@ -769,6 +769,7 @@ if __name__ == "__main__":
snakemake = mock_snakemake("base_network")
configure_logging(snakemake)
set_scenario_config(snakemake)
n = base_network(
snakemake.input.eg_buses,

View File

@ -8,6 +8,7 @@ Build historical annual ammonia production per country in ktonNH3/a.
import country_converter as coco
import pandas as pd
from _helpers import set_scenario_config
cc = coco.CountryConverter()
@ -18,6 +19,8 @@ if __name__ == "__main__":
snakemake = mock_snakemake("build_ammonia_production")
set_scenario_config(snakemake)
ammonia = pd.read_excel(
snakemake.input.usgs,
sheet_name="T12",

View File

@ -16,6 +16,8 @@ import pandas as pd
logger = logging.getLogger(__name__)
AVAILABLE_BIOMASS_YEARS = [2010, 2020, 2030, 2040, 2050]
from _helpers import configure_logging, set_scenario_config
def build_nuts_population_data(year=2013):
pop = pd.read_csv(
@ -221,6 +223,9 @@ if __name__ == "__main__":
planning_horizons=2050,
)
configure_logging(snakemake)
set_scenario_config(snakemake)
overnight = snakemake.config["foresight"] == "overnight"
params = snakemake.params.biomass
investment_year = int(snakemake.wildcards.planning_horizons)

View File

@ -47,7 +47,7 @@ import geopandas as gpd
import numpy as np
import pandas as pd
import pypsa
from _helpers import REGION_COLS, configure_logging
from _helpers import REGION_COLS, configure_logging, set_scenario_config
from scipy.spatial import Voronoi
from shapely.geometry import Polygon
@ -115,6 +115,7 @@ if __name__ == "__main__":
snakemake = mock_snakemake("build_bus_regions")
configure_logging(snakemake)
set_scenario_config(snakemake)
countries = snakemake.params.countries

View File

@ -11,6 +11,7 @@ import atlite
import geopandas as gpd
import pandas as pd
import xarray as xr
from _helpers import set_scenario_config
if __name__ == "__main__":
if "snakemake" not in globals():
@ -22,6 +23,8 @@ if __name__ == "__main__":
clusters=48,
)
set_scenario_config(snakemake)
cutout = atlite.Cutout(snakemake.input.cutout)
clustered_regions = (

View File

@ -14,6 +14,7 @@ https://doi.org/10.1039/C2EE22653G.
"""
import xarray as xr
from _helpers import set_scenario_config
def coefficient_of_performance(delta_T, source="air"):
@ -35,6 +36,8 @@ if __name__ == "__main__":
clusters=48,
)
set_scenario_config(snakemake)
for area in ["total", "urban", "rural"]:
for source in ["air", "soil"]:
source_T = xr.open_dataarray(snakemake.input[f"temp_{source}_{area}"])

View File

@ -8,7 +8,7 @@ import logging
import pandas as pd
import pypsa
from _helpers import configure_logging
from _helpers import configure_logging, set_scenario_config
from entsoe import EntsoePandasClient
from entsoe.exceptions import InvalidBusinessParameterError, NoMatchingDataError
from requests import HTTPError
@ -21,6 +21,7 @@ if __name__ == "__main__":
snakemake = mock_snakemake("build_cross_border_flows")
configure_logging(snakemake)
set_scenario_config(snakemake)
api_key = snakemake.config["private"]["keys"]["entsoe_api"]
client = EntsoePandasClient(api_key=api_key)

View File

@ -95,7 +95,7 @@ import logging
import atlite
import geopandas as gpd
import pandas as pd
from _helpers import configure_logging
from _helpers import configure_logging, set_scenario_config
logger = logging.getLogger(__name__)
@ -105,6 +105,7 @@ if __name__ == "__main__":
snakemake = mock_snakemake("build_cutout", cutout="europe-2013-era5")
configure_logging(snakemake)
set_scenario_config(snakemake)
cutout_params = snakemake.params.cutouts[snakemake.wildcards.cutout]

View File

@ -11,6 +11,7 @@ import geopandas as gpd
import numpy as np
import pandas as pd
import xarray as xr
from _helpers import set_scenario_config
from dask.distributed import Client, LocalCluster
if __name__ == "__main__":
@ -23,6 +24,7 @@ if __name__ == "__main__":
simpl="",
clusters=48,
)
set_scenario_config(snakemake)
nprocesses = int(snakemake.threads)
cluster = LocalCluster(n_workers=nprocesses, threads_per_worker=1)

View File

@ -9,6 +9,7 @@ Build district heat shares at each node, depending on investment year.
import logging
import pandas as pd
from _helpers import configure_logging, set_scenario_config
from prepare_sector_network import get
logger = logging.getLogger(__name__)
@ -24,6 +25,8 @@ if __name__ == "__main__":
clusters=48,
planning_horizons="2050",
)
configure_logging(snakemake)
set_scenario_config(snakemake)
investment_year = int(snakemake.wildcards.planning_horizons[-4:])

View File

@ -39,7 +39,7 @@ import logging
import numpy as np
import pandas as pd
from _helpers import configure_logging
from _helpers import configure_logging, set_scenario_config
from pandas import Timedelta as Delta
logger = logging.getLogger(__name__)
@ -261,6 +261,7 @@ if __name__ == "__main__":
snakemake = mock_snakemake("build_electricity_demand")
configure_logging(snakemake)
set_scenario_config(snakemake)
interpolate_limit = snakemake.params.load["interpolate_limit"]
countries = snakemake.params.countries

View File

@ -7,7 +7,7 @@
import logging
import pandas as pd
from _helpers import configure_logging
from _helpers import configure_logging, set_scenario_config
from entsoe import EntsoePandasClient
from entsoe.exceptions import NoMatchingDataError
@ -19,6 +19,7 @@ if __name__ == "__main__":
snakemake = mock_snakemake("build_cross_border_flows")
configure_logging(snakemake)
set_scenario_config(snakemake)
api_key = snakemake.config["private"]["keys"]["entsoe_api"]
client = EntsoePandasClient(api_key=api_key)

View File

@ -7,7 +7,7 @@
import logging
import pandas as pd
from _helpers import configure_logging
from _helpers import configure_logging, set_scenario_config
from entsoe import EntsoePandasClient
from entsoe.exceptions import NoMatchingDataError
@ -39,6 +39,7 @@ if __name__ == "__main__":
snakemake = mock_snakemake("build_electricity_production")
configure_logging(snakemake)
set_scenario_config(snakemake)
api_key = snakemake.config["private"]["keys"]["entsoe_api"]
client = EntsoePandasClient(api_key=api_key)

View File

@ -14,7 +14,7 @@ import country_converter as coco
import geopandas as gpd
import numpy as np
import pandas as pd
from _helpers import mute_print
from _helpers import configure_logging, mute_print, set_scenario_config
from tqdm import tqdm
cc = coco.CountryConverter()
@ -743,7 +743,8 @@ if __name__ == "__main__":
snakemake = mock_snakemake("build_energy_totals")
logging.basicConfig(level=snakemake.config["logging"]["level"])
configure_logging(snakemake)
set_scenario_config(snakemake)
params = snakemake.params.energy

View File

@ -9,6 +9,7 @@ horizon.
import country_converter as coco
import numpy as np
import pandas as pd
from _helpers import set_scenario_config
cc = coco.CountryConverter()
@ -126,5 +127,6 @@ if __name__ == "__main__":
clusters=48,
planning_horizons=2050,
)
set_scenario_config(snakemake)
build_existing_heating()

View File

@ -11,6 +11,7 @@ import logging
import geopandas as gpd
import pandas as pd
from _helpers import configure_logging, set_scenario_config
from cluster_gas_network import load_bus_regions
logger = logging.getLogger(__name__)
@ -134,7 +135,8 @@ if __name__ == "__main__":
clusters="128",
)
logging.basicConfig(level=snakemake.config["logging"]["level"])
configure_logging(snakemake)
set_scenario_config(snakemake)
regions = load_bus_regions(
snakemake.input.regions_onshore, snakemake.input.regions_offshore

View File

@ -11,6 +11,7 @@ import logging
import geopandas as gpd
import pandas as pd
from _helpers import configure_logging, set_scenario_config
from pypsa.geo import haversine_pts
from shapely.geometry import Point
@ -143,7 +144,8 @@ if __name__ == "__main__":
snakemake = mock_snakemake("build_gas_network")
logging.basicConfig(level=snakemake.config["logging"]["level"])
configure_logging(snakemake)
set_scenario_config(snakemake)
gas_network = load_dataset(snakemake.input.gas_network)

View File

@ -10,7 +10,7 @@ from itertools import product
import pandas as pd
import xarray as xr
from _helpers import generate_periodic_profiles
from _helpers import generate_periodic_profiles, set_scenario_config
if __name__ == "__main__":
if "snakemake" not in globals():
@ -22,6 +22,7 @@ if __name__ == "__main__":
simpl="",
clusters=48,
)
set_scenario_config(snakemake)
snapshots = pd.date_range(freq="h", **snakemake.params.snapshots)

View File

@ -65,7 +65,7 @@ import atlite
import country_converter as coco
import geopandas as gpd
import pandas as pd
from _helpers import configure_logging
from _helpers import configure_logging, set_scenario_config
cc = coco.CountryConverter()
@ -131,6 +131,7 @@ if __name__ == "__main__":
snakemake = mock_snakemake("build_hydro_profile")
configure_logging(snakemake)
set_scenario_config(snakemake)
params_hydro = snakemake.params.hydro
cutout = atlite.Cutout(snakemake.input.cutout)

View File

@ -13,6 +13,7 @@ from itertools import product
import country_converter as coco
import geopandas as gpd
import pandas as pd
from _helpers import configure_logging, set_scenario_config
logger = logging.getLogger(__name__)
cc = coco.CountryConverter()
@ -148,8 +149,8 @@ if __name__ == "__main__":
simpl="",
clusters=128,
)
logging.basicConfig(level=snakemake.config["logging"]["level"])
configure_logging(snakemake)
set_scenario_config(snakemake)
countries = snakemake.params.countries

View File

@ -11,6 +11,7 @@ from functools import partial
import country_converter as coco
import pandas as pd
from _helpers import set_scenario_config
from tqdm import tqdm
cc = coco.CountryConverter()
@ -174,6 +175,7 @@ if __name__ == "__main__":
from _helpers import mock_snakemake
snakemake = mock_snakemake("build_industrial_energy_demand_per_country_today")
set_scenario_config(snakemake)
params = snakemake.params.industry
year = params.get("reference_year", 2015)

View File

@ -7,6 +7,7 @@ Build industrial energy demand per model region.
"""
import pandas as pd
from _helpers import set_scenario_config
if __name__ == "__main__":
if "snakemake" not in globals():
@ -18,6 +19,7 @@ if __name__ == "__main__":
clusters=48,
planning_horizons=2030,
)
set_scenario_config(snakemake)
# import ratios
fn = snakemake.input.industry_sector_ratios

View File

@ -10,6 +10,7 @@ from itertools import product
import numpy as np
import pandas as pd
from _helpers import set_scenario_config
# map JRC/our sectors to hotmaps sector, where mapping exist
sector_mapping = {
@ -75,5 +76,6 @@ if __name__ == "__main__":
simpl="",
clusters=48,
)
set_scenario_config(snakemake)
build_nodal_industrial_energy_demand()

View File

@ -13,7 +13,7 @@ from functools import partial
import country_converter as coco
import numpy as np
import pandas as pd
from _helpers import mute_print
from _helpers import configure_logging, mute_print, set_scenario_config
from tqdm import tqdm
logger = logging.getLogger(__name__)
@ -278,8 +278,8 @@ if __name__ == "__main__":
from _helpers import mock_snakemake
snakemake = mock_snakemake("build_industrial_production_per_country")
logging.basicConfig(level=snakemake.config["logging"]["level"])
configure_logging(snakemake)
set_scenario_config(snakemake)
countries = snakemake.params.countries

View File

@ -7,6 +7,7 @@ Build future industrial production per country.
"""
import pandas as pd
from _helpers import set_scenario_config
from prepare_sector_network import get
if __name__ == "__main__":
@ -14,6 +15,7 @@ if __name__ == "__main__":
from _helpers import mock_snakemake
snakemake = mock_snakemake("build_industrial_production_per_country_tomorrow")
set_scenario_config(snakemake)
params = snakemake.params.industry

View File

@ -9,6 +9,7 @@ Build industrial production per model region.
from itertools import product
import pandas as pd
from _helpers import set_scenario_config
# map JRC/our sectors to hotmaps sector, where mapping exist
sector_mapping = {
@ -72,5 +73,6 @@ if __name__ == "__main__":
simpl="",
clusters=48,
)
set_scenario_config(snakemake)
build_nodal_industrial_production()

View File

@ -7,7 +7,7 @@ Build specific energy consumption by carrier and industries.
"""
import pandas as pd
from _helpers import mute_print
from _helpers import mute_print, set_scenario_config
# GWh/ktoe OR MWh/toe
toe_to_MWh = 11.630
@ -1464,6 +1464,7 @@ if __name__ == "__main__":
from _helpers import mock_snakemake
snakemake = mock_snakemake("build_industry_sector_ratios")
set_scenario_config(snakemake)
# TODO make params option
year = 2015

View File

@ -58,7 +58,7 @@ import numpy as np
import pandas as pd
import pypsa
import xarray as xr
from _helpers import configure_logging
from _helpers import configure_logging, set_scenario_config
from shapely.geometry import LineString as Line
from shapely.geometry import Point
@ -144,6 +144,7 @@ if __name__ == "__main__":
opts="Co2L-4H",
)
configure_logging(snakemake)
set_scenario_config(snakemake)
snapshots = snakemake.params.snapshots

View File

@ -43,7 +43,7 @@ Data was accessed at 16.5.2023
import logging
import pandas as pd
from _helpers import configure_logging
from _helpers import configure_logging, set_scenario_config
logger = logging.getLogger(__name__)
@ -111,6 +111,7 @@ if __name__ == "__main__":
snakemake = mock_snakemake("build_monthly_prices")
configure_logging(snakemake)
set_scenario_config(snakemake)
fuel_price = get_fuel_price()
fuel_price.to_csv(snakemake.output.fuel_price)

View File

@ -46,7 +46,7 @@ import logging
import atlite
import geopandas as gpd
import rasterio as rio
from _helpers import configure_logging
from _helpers import configure_logging, set_scenario_config
from rasterio.features import geometry_mask
from rasterio.warp import transform_bounds
@ -92,6 +92,7 @@ if __name__ == "__main__":
snakemake = mock_snakemake("build_natura_raster")
configure_logging(snakemake)
set_scenario_config(snakemake)
cutouts = snakemake.input.cutouts
xs, Xs, ys, Ys = zip(*(determine_cutout_xXyY(cutout) for cutout in cutouts))

View File

@ -13,6 +13,7 @@ import geopandas as gpd
import numpy as np
import pandas as pd
import xarray as xr
from _helpers import configure_logging, set_scenario_config
logger = logging.getLogger(__name__)
@ -22,7 +23,8 @@ if __name__ == "__main__":
snakemake = mock_snakemake("build_population_layouts")
logging.basicConfig(level=snakemake.config["logging"]["level"])
configure_logging(snakemake)
set_scenario_config(snakemake)
cutout = atlite.Cutout(snakemake.input.cutout)

View File

@ -7,6 +7,7 @@ Distribute country-level energy demands by population.
"""
import pandas as pd
from _helpers import set_scenario_config
if __name__ == "__main__":
if "snakemake" not in globals():
@ -17,6 +18,7 @@ if __name__ == "__main__":
simpl="",
clusters=48,
)
set_scenario_config(snakemake)
pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0)

View File

@ -91,7 +91,7 @@ import numpy as np
import pandas as pd
import powerplantmatching as pm
import pypsa
from _helpers import configure_logging
from _helpers import configure_logging, set_scenario_config
from powerplantmatching.export import map_country_bus
logger = logging.getLogger(__name__)
@ -165,6 +165,7 @@ if __name__ == "__main__":
snakemake = mock_snakemake("build_powerplants")
configure_logging(snakemake)
set_scenario_config(snakemake)
n = pypsa.Network(snakemake.input.base_network)
countries = snakemake.params.countries

View File

@ -188,7 +188,7 @@ import geopandas as gpd
import numpy as np
import pandas as pd
import xarray as xr
from _helpers import configure_logging
from _helpers import configure_logging, set_scenario_config
from dask.distributed import Client
from pypsa.geo import haversine
from shapely.geometry import LineString
@ -202,6 +202,7 @@ if __name__ == "__main__":
snakemake = mock_snakemake("build_renewable_profiles", technology="offwind-dc")
configure_logging(snakemake)
set_scenario_config(snakemake)
nprocesses = int(snakemake.threads)
noprogress = snakemake.config["run"].get("disable_progressbar", True)

View File

@ -68,6 +68,7 @@ The script has the following structure:
"""
import pandas as pd
import xarray as xr
from _helpers import set_scenario_config
# (i) --- FIXED PARAMETER / STANDARD VALUES -----------------------------------
@ -1053,6 +1054,7 @@ if __name__ == "__main__":
ll="v1.0",
sector_opts="Co2L0-168H-T-H-B-I-solar3-dist1",
)
set_scenario_config(snakemake)
# ******** config *********************************************************

View File

@ -24,6 +24,7 @@ onshore (>50km from sea), offshore (Figure 7).
import geopandas as gpd
import pandas as pd
from _helpers import set_scenario_config
def concat_gdf(gdf_list, crs="EPSG:4326"):
@ -77,6 +78,8 @@ if __name__ == "__main__":
"build_salt_cavern_potentials", simpl="", clusters="37"
)
set_scenario_config(snakemake)
fn_onshore = snakemake.input.regions_onshore
fn_offshore = snakemake.input.regions_offshore

View File

@ -10,6 +10,7 @@ database_en>`_.
import geopandas as gpd
import pandas as pd
from _helpers import set_scenario_config
def area(gdf):
@ -39,6 +40,8 @@ if __name__ == "__main__":
"build_sequestration_potentials", simpl="", clusters="181"
)
set_scenario_config(snakemake)
cf = snakemake.params.sequestration_potential
gdf = gpd.read_file(snakemake.input.sequestration_potential[0])

View File

@ -77,7 +77,7 @@ import geopandas as gpd
import numpy as np
import pandas as pd
import pycountry as pyc
from _helpers import configure_logging
from _helpers import configure_logging, set_scenario_config
from shapely.geometry import MultiPolygon, Polygon
logger = logging.getLogger(__name__)
@ -254,6 +254,7 @@ if __name__ == "__main__":
snakemake = mock_snakemake("build_shapes")
configure_logging(snakemake)
set_scenario_config(snakemake)
country_shapes = countries(snakemake.input.naturalearth, snakemake.params.countries)
country_shapes.reset_index().to_file(snakemake.output.country_shapes)

View File

@ -46,7 +46,7 @@ import zipfile
from pathlib import Path
import rioxarray
from _helpers import configure_logging
from _helpers import configure_logging, set_scenario_config
from build_natura_raster import determine_cutout_xXyY
logger = logging.getLogger(__name__)
@ -57,6 +57,7 @@ if __name__ == "__main__":
snakemake = mock_snakemake("build_ship_raster")
configure_logging(snakemake)
set_scenario_config(snakemake)
cutouts = snakemake.input.cutouts
xs, Xs, ys, Ys = zip(*(determine_cutout_xXyY(cutout) for cutout in cutouts))

View File

@ -11,6 +11,7 @@ import json
import geopandas as gpd
import pandas as pd
from _helpers import set_scenario_config
if __name__ == "__main__":
if "snakemake" not in globals():
@ -21,6 +22,7 @@ if __name__ == "__main__":
simpl="",
clusters=48,
)
set_scenario_config(snakemake)
scope = gpd.read_file(snakemake.input.scope).geometry[0]
regions = gpd.read_file(snakemake.input.regions).set_index("name")

View File

@ -11,6 +11,7 @@ import geopandas as gpd
import numpy as np
import pandas as pd
import xarray as xr
from _helpers import set_scenario_config
from dask.distributed import Client, LocalCluster
if __name__ == "__main__":
@ -22,6 +23,7 @@ if __name__ == "__main__":
simpl="",
clusters=48,
)
set_scenario_config(snakemake)
nprocesses = int(snakemake.threads)
cluster = LocalCluster(n_workers=nprocesses, threads_per_worker=1)

View File

@ -11,6 +11,7 @@ import geopandas as gpd
import numpy as np
import pandas as pd
import xarray as xr
from _helpers import set_scenario_config
from dask.distributed import Client, LocalCluster
if __name__ == "__main__":
@ -22,6 +23,7 @@ if __name__ == "__main__":
simpl="",
clusters=48,
)
set_scenario_config(snakemake)
nprocesses = int(snakemake.threads)
cluster = LocalCluster(n_workers=nprocesses, threads_per_worker=1)

View File

@ -13,7 +13,7 @@ import logging
import numpy as np
import pandas as pd
import xarray as xr
from _helpers import configure_logging, generate_periodic_profiles
from _helpers import configure_logging, generate_periodic_profiles, set_scenario_config
logger = logging.getLogger(__name__)
@ -171,6 +171,7 @@ if __name__ == "__main__":
clusters=48,
)
configure_logging(snakemake)
set_scenario_config(snakemake)
pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0)

View File

@ -10,6 +10,7 @@ import logging
import geopandas as gpd
import pandas as pd
from _helpers import configure_logging, set_scenario_config
from pypsa.geo import haversine_pts
from shapely import wkt
@ -105,8 +106,8 @@ if __name__ == "__main__":
from _helpers import mock_snakemake
snakemake = mock_snakemake("cluster_gas_network", simpl="", clusters="37")
logging.basicConfig(level=snakemake.config["logging"]["level"])
configure_logging(snakemake)
set_scenario_config(snakemake)
fn = snakemake.input.cleaned_gas_network
df = pd.read_csv(fn, index_col=0)

View File

@ -133,7 +133,7 @@ import numpy as np
import pandas as pd
import pypsa
import seaborn as sns
from _helpers import configure_logging, update_p_nom_max
from _helpers import configure_logging, set_scenario_config, update_p_nom_max
from add_electricity import load_costs
from packaging.version import Version, parse
from pypsa.clustering.spatial import (
@ -456,6 +456,7 @@ if __name__ == "__main__":
snakemake = mock_snakemake("cluster_network", simpl="", clusters="37")
configure_logging(snakemake)
set_scenario_config(snakemake)
params = snakemake.params
solver_name = snakemake.config["solving"]["solver"]["name"]

View File

@ -8,6 +8,7 @@ Copy used configuration files and important scripts for archiving.
import yaml
from _helpers import set_scenario_config
if __name__ == "__main__":
if "snakemake" not in globals():
@ -15,6 +16,8 @@ if __name__ == "__main__":
snakemake = mock_snakemake("copy_config")
set_scenario_config(snakemake)
with open(snakemake.output[0], "w") as yaml_file:
yaml.dump(
snakemake.config,

View File

@ -15,7 +15,7 @@ import fiona
import geopandas as gpd
import matplotlib.pyplot as plt
import numpy as np
from _helpers import configure_logging
from _helpers import configure_logging, set_scenario_config
from atlite.gis import shape_availability
from rasterio.plot import show
@ -38,6 +38,7 @@ if __name__ == "__main__":
"determine_availability_matrix_MD_UA", technology="solar"
)
configure_logging(snakemake)
set_scenario_config(snakemake)
nprocesses = None # snakemake.config["atlite"].get("nprocesses")
noprogress = not snakemake.config["atlite"].get("show_progress", True)

View File

@ -13,6 +13,7 @@ import sys
import numpy as np
import pandas as pd
import pypsa
from _helpers import configure_logging, set_scenario_config
from prepare_sector_network import prepare_costs
idx = pd.IndexSlice
@ -673,7 +674,8 @@ if __name__ == "__main__":
snakemake = mock_snakemake("make_summary")
logging.basicConfig(level=snakemake.config["logging"]["level"])
configure_logging(snakemake)
set_scenario_config(snakemake)
networks_dict = {
(cluster, ll, opt + sector_opt, planning_horizon): "results/"

View File

@ -12,6 +12,7 @@ other metrics.
import numpy as np
import pandas as pd
import pypsa
from _helpers import set_scenario_config
from make_summary import calculate_cfs # noqa: F401
from make_summary import calculate_nodal_cfs # noqa: F401
from make_summary import calculate_nodal_costs # noqa: F401
@ -722,6 +723,7 @@ if __name__ == "__main__":
from _helpers import mock_snakemake
snakemake = mock_snakemake("make_summary_perfect")
set_scenario_config(snakemake)
run = snakemake.config["run"]["name"]
if run != "":

View File

@ -13,7 +13,7 @@ import geopandas as gpd
import matplotlib.pyplot as plt
import pandas as pd
import pypsa
from _helpers import configure_logging
from _helpers import configure_logging, set_scenario_config
from plot_power_network import assign_location, load_projection
from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches
@ -237,6 +237,7 @@ if __name__ == "__main__":
)
configure_logging(snakemake)
set_scenario_config(snakemake)
n = pypsa.Network(snakemake.input.network)

View File

@ -13,7 +13,7 @@ import geopandas as gpd
import matplotlib.pyplot as plt
import pandas as pd
import pypsa
from _helpers import configure_logging
from _helpers import configure_logging, set_scenario_config
from plot_power_network import assign_location, load_projection
from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches
@ -257,6 +257,7 @@ if __name__ == "__main__":
)
configure_logging(snakemake)
set_scenario_config(snakemake)
n = pypsa.Network(snakemake.input.network)

View File

@ -14,7 +14,7 @@ import geopandas as gpd
import matplotlib.pyplot as plt
import pandas as pd
import pypsa
from _helpers import configure_logging
from _helpers import configure_logging, set_scenario_config
from plot_summary import preferred_order, rename_techs
from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches
@ -257,6 +257,7 @@ if __name__ == "__main__":
)
configure_logging(snakemake)
set_scenario_config(snakemake)
n = pypsa.Network(snakemake.input.network)

View File

@ -6,10 +6,10 @@
Plot clustered electricity transmission network.
"""
import cartopy.crs as ccrs
import geopandas as gpd
import matplotlib.pyplot as plt
import pypsa
from _helpers import set_scenario_config
from matplotlib.lines import Line2D
from plot_power_network import load_projection
from pypsa.plot import add_legend_lines
@ -23,6 +23,7 @@ if __name__ == "__main__":
clusters=128,
configfiles=["../../config/config.test.yaml"],
)
set_scenario_config(snakemake)
lw_factor = 2e3

View File

@ -13,7 +13,7 @@ import geopandas as gpd
import matplotlib.pyplot as plt
import pandas as pd
import pypsa
from _helpers import configure_logging
from _helpers import configure_logging, set_scenario_config
from plot_power_network import assign_location, load_projection, rename_techs_tyndp
from plot_summary import preferred_order
from pypsa.plot import add_legend_circles, add_legend_lines
@ -184,6 +184,7 @@ if __name__ == "__main__":
)
configure_logging(snakemake)
set_scenario_config(snakemake)
n = pypsa.Network(snakemake.input.network)

View File

@ -7,7 +7,7 @@
import matplotlib.pyplot as plt
import pypsa
import seaborn as sns
from _helpers import configure_logging
from _helpers import configure_logging, set_scenario_config
sns.set_theme("paper", style="whitegrid")
@ -24,6 +24,7 @@ if __name__ == "__main__":
ll="v1.0",
)
configure_logging(snakemake)
set_scenario_config(snakemake)
n = pypsa.Network(snakemake.input.network)
@ -58,7 +59,7 @@ if __name__ == "__main__":
fig, ax = plt.subplots()
ds = n.statistics.installed_capacity().dropna()
ds = ds.drop("Line")
ds = ds.drop(("Generator", "Load"))
ds = ds.drop(("Generator", "Load"), errors="ignore")
ds = ds / 1e3
ds.attrs["unit"] = "GW"
plot_static_per_carrier(ds, ax)
@ -67,7 +68,7 @@ if __name__ == "__main__":
fig, ax = plt.subplots()
ds = n.statistics.optimal_capacity()
ds = ds.drop("Line")
ds = ds.drop(("Generator", "Load"))
ds = ds.drop(("Generator", "Load"), errors="ignore")
ds = ds / 1e3
ds.attrs["unit"] = "GW"
plot_static_per_carrier(ds, ax)

View File

@ -11,6 +11,7 @@ import logging
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import pandas as pd
from _helpers import configure_logging, set_scenario_config
from prepare_sector_network import co2_emissions_year
logger = logging.getLogger(__name__)
@ -427,13 +428,13 @@ def historical_emissions(countries):
)
emissions = co2_totals.loc["electricity"]
if "T" in opts:
if options["transport"]:
emissions += co2_totals.loc[[i + " non-elec" for i in ["rail", "road"]]].sum()
if "H" in opts:
if options["heating"]:
emissions += co2_totals.loc[
[i + " non-elec" for i in ["residential", "services"]]
].sum()
if "I" in opts:
if options["industry"]:
emissions += co2_totals.loc[
[
"industrial non-elec",
@ -447,7 +448,7 @@ def historical_emissions(countries):
return emissions
def plot_carbon_budget_distribution(input_eurostat):
def plot_carbon_budget_distribution(input_eurostat, options):
"""
Plot historical carbon emissions in the EU and decarbonization path.
"""
@ -469,7 +470,7 @@ def plot_carbon_budget_distribution(input_eurostat):
e_1990 = co2_emissions_year(
countries,
input_eurostat,
opts,
options,
emissions_scope,
report_year,
input_co2,
@ -572,7 +573,8 @@ if __name__ == "__main__":
snakemake = mock_snakemake("plot_summary")
logging.basicConfig(level=snakemake.config["logging"]["level"])
configure_logging(snakemake)
set_scenario_config(snakemake)
n_header = 4
@ -582,7 +584,9 @@ if __name__ == "__main__":
plot_balances()
for sector_opts in snakemake.params.sector_opts:
opts = sector_opts.split("-")
if any("cb" in o for o in opts) or snakemake.config["foresight"] == "perfect":
plot_carbon_budget_distribution(snakemake.input.eurostat)
co2_budget = snakemake.params["co2_budget"]
if (
isinstance(co2_budget, str) and co2_budget.startswith("cb")
) or snakemake.params["foresight"] == "perfect":
options = snakemake.params.sector
plot_carbon_budget_distribution(snakemake.input.eurostat, options)

View File

@ -9,7 +9,7 @@ import matplotlib.pyplot as plt
import pandas as pd
import pypsa
import seaborn as sns
from _helpers import configure_logging
from _helpers import configure_logging, set_scenario_config
sns.set_theme("paper", style="whitegrid")
@ -187,6 +187,7 @@ if __name__ == "__main__":
ll="v1.0",
)
configure_logging(snakemake)
set_scenario_config(snakemake)
countries = snakemake.params.countries

View File

@ -8,7 +8,7 @@ import matplotlib.pyplot as plt
import pandas as pd
import pypsa
import seaborn as sns
from _helpers import configure_logging
from _helpers import configure_logging, set_scenario_config
sns.set_theme("paper", style="whitegrid")
@ -24,6 +24,7 @@ if __name__ == "__main__":
ll="v1.0",
)
configure_logging(snakemake)
set_scenario_config(snakemake)
n = pypsa.Network(snakemake.input.network)
n.loads.carrier = "load"

View File

@ -8,7 +8,7 @@ import matplotlib.pyplot as plt
import pandas as pd
import pypsa
import seaborn as sns
from _helpers import configure_logging
from _helpers import configure_logging, set_scenario_config
from pypsa.statistics import get_bus_and_carrier
sns.set_theme("paper", style="whitegrid")
@ -35,6 +35,7 @@ if __name__ == "__main__":
ll="v1.0",
)
configure_logging(snakemake)
set_scenario_config(snakemake)
n = pypsa.Network(snakemake.input.network)
n.loads.carrier = "load"

View File

@ -40,7 +40,7 @@ Description
import logging
import pandas as pd
from _helpers import configure_logging
from _helpers import configure_logging, set_scenario_config
logger = logging.getLogger(__name__)
@ -69,6 +69,7 @@ if __name__ == "__main__":
snakemake = mock_snakemake("prepare_links_p_nom", simpl="")
configure_logging(snakemake)
set_scenario_config(snakemake)
links_p_nom = pd.read_html(
"https://en.wikipedia.org/wiki/List_of_HVDC_projects", header=0, match="SwePol"

View File

@ -62,7 +62,11 @@ import logging
import numpy as np
import pandas as pd
import pypsa
from _helpers import configure_logging, find_opt, get_opt
from _helpers import (
configure_logging,
set_scenario_config,
update_config_from_wildcards,
)
from add_electricity import load_costs, update_transmission_costs
from pypsa.descriptors import expand_series
@ -71,6 +75,28 @@ idx = pd.IndexSlice
logger = logging.getLogger(__name__)
def maybe_adjust_costs_and_potentials(n, adjustments):
if not adjustments:
return
for attr, carrier_factor in adjustments.items():
for carrier, factor in carrier_factor.items():
# beware if factor is 0 and p_nom_max is np.inf, 0*np.inf is nan
if carrier == "AC": # lines do not have carrier
n.lines[attr] *= factor
continue
comps = {
"p_nom_max": {"Generator", "Link", "StorageUnit"},
"e_nom_max": {"Store"},
"capital_cost": {"Generator", "Link", "StorageUnit", "Store"},
"marginal_cost": {"Generator", "Link", "StorageUnit", "Store"},
}
for c in n.iterate_components(comps[attr]):
sel = c.df.index[c.df.carrier == carrier]
c.df.loc[sel, attr] *= factor
logger.info(f"changing {attr} for {carrier} by factor {factor}")
def add_co2limit(n, co2limit, Nyears=1.0):
n.add(
"GlobalConstraint",
@ -265,7 +291,7 @@ def set_line_nom_max(
n.lines["s_nom_max"] = n.lines["s_nom"] + s_nom_max_ext
if np.isfinite(p_nom_max_ext) and p_nom_max_ext > 0:
logger.info(f"Limiting line extensions to {p_nom_max_ext} MW")
logger.info(f"Limiting link extensions to {p_nom_max_ext} MW")
hvdc = n.links.index[n.links.carrier == "DC"]
n.links.loc[hvdc, "p_nom_max"] = n.links.loc[hvdc, "p_nom"] + p_nom_max_ext
@ -278,11 +304,11 @@ if __name__ == "__main__":
from _helpers import mock_snakemake
snakemake = mock_snakemake(
"prepare_network", simpl="", clusters="37", ll="v1.0", opts="Ept"
"prepare_network", simpl="", clusters="37", ll="v1.0", opts="Co2L-4H"
)
configure_logging(snakemake)
opts = snakemake.wildcards.opts.split("-")
set_scenario_config(snakemake)
update_config_from_wildcards(snakemake.config, snakemake.wildcards)
n = pypsa.Network(snakemake.input[0])
Nyears = n.snapshot_weightings.objective.sum() / 8760.0
@ -296,81 +322,35 @@ if __name__ == "__main__":
set_line_s_max_pu(n, snakemake.params.lines["s_max_pu"])
# temporal averaging
nhours_config = snakemake.params.snapshots.get("resolution", False)
nhours_wildcard = get_opt(opts, r"^\d+h$")
nhours = nhours_wildcard or nhours_config
if nhours:
n = average_every_nhours(n, nhours)
time_resolution = snakemake.params.time_resolution
is_string = isinstance(time_resolution, str)
if is_string and time_resolution.lower().endswith("h"):
n = average_every_nhours(n, time_resolution)
# segments with package tsam
time_seg_config = snakemake.params.snapshots.get("segmentation", False)
time_seg_wildcard = get_opt(opts, r"^\d+seg$")
time_seg = time_seg_wildcard or time_seg_config
if time_seg:
if is_string and time_resolution.lower().endswith("seg"):
solver_name = snakemake.config["solving"]["solver"]["name"]
n = apply_time_segmentation(n, time_seg.replace("seg", ""), solver_name)
segments = int(time_resolution.replace("seg", ""))
n = apply_time_segmentation(n, segments, solver_name)
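# time_resolution examples (hypothetical config values): "4H" averages to
# 4-hourly snapshots, "100seg" builds 100 tsam segments, and False leaves the
# temporal resolution unchanged.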
Co2L_config = snakemake.params.co2limit_enable
Co2L_wildcard, co2limit_wildcard = find_opt(opts, "Co2L")
if Co2L_wildcard or Co2L_config:
if co2limit_wildcard is not None:
co2limit = co2limit_wildcard * snakemake.params.co2base
add_co2limit(n, co2limit, Nyears)
logger.info("Setting CO2 limit according to wildcard value.")
else:
add_co2limit(n, snakemake.params.co2limit, Nyears)
logger.info("Setting CO2 limit according to config value.")
if snakemake.params.co2limit_enable:
add_co2limit(n, snakemake.params.co2limit, Nyears)
CH4L_config = snakemake.params.gaslimit_enable
CH4L_wildcard, gaslimit_wildcard = find_opt(opts, "CH4L")
if CH4L_wildcard or CH4L_config:
if gaslimit_wildcard is not None:
gaslimit = gaslimit_wildcard * 1e6
add_gaslimit(n, gaslimit, Nyears)
logger.info("Setting gas usage limit according to wildcard value.")
else:
add_gaslimit(n, snakemake.params.gaslimit, Nyears)
logger.info("Setting gas usage limit according to config value.")
if snakemake.params.gaslimit_enable:
add_gaslimit(n, snakemake.params.gaslimit, Nyears)
for o in opts:
if "+" not in o:
continue
oo = o.split("+")
suptechs = map(lambda c: c.split("-", 2)[0], n.carriers.index)
if oo[0].startswith(tuple(suptechs)):
carrier = oo[0]
# handles only p_nom_max as stores and lines have no potentials
attr_lookup = {"p": "p_nom_max", "c": "capital_cost", "m": "marginal_cost"}
attr = attr_lookup[oo[1][0]]
factor = float(oo[1][1:])
if carrier == "AC": # lines do not have carrier
n.lines[attr] *= factor
else:
comps = {"Generator", "Link", "StorageUnit", "Store"}
for c in n.iterate_components(comps):
sel = c.df.carrier.str.contains(carrier)
c.df.loc[sel, attr] *= factor
maybe_adjust_costs_and_potentials(n, snakemake.params["adjustments"])
emission_prices = snakemake.params.costs["emission_prices"]
Ept_config = emission_prices.get("co2_monthly_prices", False)
Ept_wildcard = "Ept" in opts
Ep_config = emission_prices.get("enable", False)
Ep_wildcard, co2_wildcard = find_opt(opts, "Ep")
if Ept_wildcard or Ept_config:
if emission_prices["co2_monthly_prices"]:
logger.info(
"Setting time dependent emission prices according spot market price"
)
add_dynamic_emission_prices(n)
elif Ep_wildcard or Ep_config:
if co2_wildcard is not None:
logger.info("Setting CO2 prices according to wildcard value.")
add_emission_prices(n, dict(co2=co2_wildcard))
else:
logger.info("Setting CO2 prices according to config value.")
add_emission_prices(
n, dict(co2=snakemake.params.costs["emission_prices"]["co2"])
)
elif emission_prices["enable"]:
add_emission_prices(
n, dict(co2=snakemake.params.costs["emission_prices"]["co2"])
)
ll_type, factor = snakemake.wildcards.ll[0], snakemake.wildcards.ll[1:]
set_transmission_limit(n, ll_type, factor, costs, Nyears)
@ -383,11 +363,8 @@ if __name__ == "__main__":
p_nom_max_ext=snakemake.params.links.get("max_extension", np.inf),
)
autarky_config = snakemake.params.autarky
if "ATK" in opts or autarky_config.get("enable", False):
only_crossborder = False
if "ATKc" in opts or autarky_config.get("by_country", False):
only_crossborder = True
if snakemake.params.autarky["enable"]:
only_crossborder = snakemake.params.autarky["by_country"]
enforce_autarky(n, only_crossborder=only_crossborder)
n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))

View File

@ -12,7 +12,11 @@ import re
import numpy as np
import pandas as pd
import pypsa
from _helpers import update_config_with_sector_opts
from _helpers import (
configure_logging,
set_scenario_config,
update_config_from_wildcards,
)
from add_existing_baseyear import add_build_year_to_new_assets
from pypsa.descriptors import expand_series
from pypsa.io import import_components_from_dataframe
@ -304,17 +308,14 @@ def set_all_phase_outs(n):
n.mremove("Link", remove_i)
def set_carbon_constraints(n, opts):
def set_carbon_constraints(n):
"""
Add global constraints for carbon emissions.
"""
budget = None
for o in opts:
# other budgets
m = re.match(r"^\d+p\d$", o, re.IGNORECASE)
if m is not None:
budget = snakemake.config["co2_budget"][m.group(0)] * 1e9
if budget is not None:
budget = snakemake.config["co2_budget"]
if budget and isinstance(budget, float):
budget *= 1e9 # convert to t CO2
logger.info(f"add carbon budget of {budget}")
n.add(
"GlobalConstraint",
@ -341,7 +342,7 @@ def set_carbon_constraints(n, opts):
)
# set minimum CO2 emission constraint to avoid too fast reduction
if "co2min" in opts:
if "co2min" in snakemake.wildcards.sector_opts.split("-"):
emissions_1990 = 4.53693
emissions_2019 = 3.344096
target_2030 = 0.45 * emissions_1990
@ -487,21 +488,6 @@ def apply_time_segmentation_perfect(
return n
def set_temporal_aggregation_SEG(n, opts, solver_name):
"""
Aggregate network temporally with tsam.
"""
for o in opts:
# segments with package tsam
m = re.match(r"^(\d+)seg$", o, re.IGNORECASE)
if m is not None:
segments = int(m[1])
logger.info(f"Use temporal segmentation with {segments} segments")
n = apply_time_segmentation_perfect(n, segments, solver_name=solver_name)
break
return n
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
@ -514,15 +500,13 @@ if __name__ == "__main__":
ll="v1.5",
sector_opts="1p7-4380H-T-H-B-I-A-dist1",
)
configure_logging(snakemake)
set_scenario_config(snakemake)
update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts)
update_config_from_wildcards(snakemake.config, snakemake.wildcards)
# parameters -----------------------------------------------------------
years = snakemake.config["scenario"]["planning_horizons"]
opts = snakemake.wildcards.sector_opts.split("-")
social_discountrate = snakemake.config["costs"]["social_discountrate"]
for o in opts:
if "sdr" in o:
social_discountrate = float(o.replace("sdr", "")) / 100
social_discountrate = snakemake.params.costs["social_discountrate"]
logger.info(
f"Concat networks of investment period {years} with social discount rate of {social_discountrate * 100}%"
@ -532,9 +516,10 @@ if __name__ == "__main__":
n = concat_networks(years)
# temporal aggregate
opts = snakemake.wildcards.sector_opts.split("-")
solver_name = snakemake.config["solving"]["solver"]["name"]
n = set_temporal_aggregation_SEG(n, opts, solver_name)
segments = snakemake.params.time_resolution
if isinstance(segments, (int, float)):
n = apply_time_segmentation_perfect(n, segments, solver_name=solver_name)
# adjust global constraints lv limit if the same for all years
n = adjust_lvlimit(n)
@ -550,8 +535,7 @@ if __name__ == "__main__":
add_H2_boilers(n)
# set carbon constraints
opts = snakemake.wildcards.sector_opts.split("-")
n = set_carbon_constraints(n, opts)
n = set_carbon_constraints(n)
# export network
n.export_to_netcdf(snakemake.output[0])

View File

@ -9,7 +9,6 @@ technologies for the buildings, transport and industry sectors.
import logging
import os
import re
from itertools import product
from types import SimpleNamespace
@ -18,11 +17,16 @@ import numpy as np
import pandas as pd
import pypsa
import xarray as xr
from _helpers import update_config_with_sector_opts
from _helpers import (
configure_logging,
set_scenario_config,
update_config_from_wildcards,
)
from add_electricity import calculate_annuity, sanitize_carriers, sanitize_locations
from build_energy_totals import build_co2_totals, build_eea_co2, build_eurostat_co2
from networkx.algorithms import complement
from networkx.algorithms.connectivity.edge_augmentation import k_edge_augmentation
from prepare_network import maybe_adjust_costs_and_potentials
from pypsa.geo import haversine_pts
from pypsa.io import import_components_from_dataframe
from scipy.stats import beta
@ -190,13 +194,13 @@ def define_spatial(nodes, options):
spatial = SimpleNamespace()
def emission_sectors_from_opts(opts):
def determine_emission_sectors(options):
sectors = ["electricity"]
if "T" in opts:
if options["transport"]:
sectors += ["rail non-elec", "road non-elec"]
if "H" in opts:
if options["heating"]:
sectors += ["residential non-elec", "services non-elec"]
if "I" in opts:
if options["industry"]:
sectors += [
"industrial non-elec",
"industrial processes",
@ -205,7 +209,7 @@ def emission_sectors_from_opts(opts):
"domestic navigation",
"international navigation",
]
if "A" in opts:
if options["agriculture"]:
sectors += ["agriculture"]
return sectors
@ -215,11 +219,36 @@ def get(item, investment_year=None):
"""
Check whether item depends on investment year; if so, return the value for that year, clipping to the nearest key outside the covered range and interpolating linearly between keys.
"""
return item[investment_year] if isinstance(item, dict) else item
if not isinstance(item, dict):
return item
elif investment_year in item.keys():
return item[investment_year]
else:
logger.warning(
f"Investment key {investment_year} not found in dictionary {item}."
)
keys = sorted(item.keys())
if investment_year < keys[0]:
logger.warning(f"Lower than minimum key. Taking minimum key {keys[0]}")
return item[keys[0]]
elif investment_year > keys[-1]:
logger.warning(f"Higher than maximum key. Taking maximum key {keys[0]}")
return item[keys[-1]]
else:
logger.warning(
"Interpolate linearly between the next lower and next higher year."
)
lower_key = max(k for k in keys if k < investment_year)
higher_key = min(k for k in keys if k > investment_year)
lower = item[lower_key]
higher = item[higher_key]
return lower + (higher - lower) * (investment_year - lower_key) / (
higher_key - lower_key
)
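# Illustrative behaviour of the extended get() (values hypothetical):
assert get(3.0, 2035) == 3.0  # not year-dependent
assert get({2030: 1.0, 2050: 0.0}, 2040) == 0.5  # linear interpolation
assert get({2030: 1.0, 2050: 0.0}, 2020) == 1.0  # clipped to the minimum key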
def co2_emissions_year(
countries, input_eurostat, opts, emissions_scope, report_year, input_co2, year
countries, input_eurostat, options, emissions_scope, report_year, input_co2, year
):
"""
Calculate CO2 emissions in one specific year (e.g. 1990 or 2018).
@ -237,7 +266,7 @@ def co2_emissions_year(
co2_totals = build_co2_totals(countries, eea_co2, eurostat_co2)
sectors = emission_sectors_from_opts(opts)
sectors = determine_emission_sectors(options)
co2_emissions = co2_totals.loc[countries, sectors].sum().sum()
@ -248,11 +277,12 @@ def co2_emissions_year(
# TODO: move to own rule with sector-opts wildcard?
def build_carbon_budget(o, input_eurostat, fn, emissions_scope, report_year):
def build_carbon_budget(
o, input_eurostat, fn, emissions_scope, report_year, input_co2, options
):
"""
Distribute carbon budget following beta or exponential transition path.
"""
# opts?
if "be" in o:
# beta decay
@ -268,7 +298,7 @@ def build_carbon_budget(o, input_eurostat, fn, emissions_scope, report_year):
e_1990 = co2_emissions_year(
countries,
input_eurostat,
opts,
options,
emissions_scope,
report_year,
input_co2,
@ -279,7 +309,7 @@ def build_carbon_budget(o, input_eurostat, fn, emissions_scope, report_year):
e_0 = co2_emissions_year(
countries,
input_eurostat,
opts,
options,
emissions_scope,
report_year,
input_co2,
@ -758,12 +788,12 @@ def add_dac(n, costs):
)
def add_co2limit(n, nyears=1.0, limit=0.0):
def add_co2limit(n, options, nyears=1.0, limit=0.0):
logger.info(f"Adding CO2 budget limit as per unit of 1990 levels of {limit}")
countries = snakemake.params.countries
sectors = emission_sectors_from_opts(opts)
sectors = determine_emission_sectors(options)
# convert Mt to tCO2
co2_totals = 1e6 * pd.read_csv(snakemake.input.co2_totals_name, index_col=0)
@ -2002,13 +2032,6 @@ def add_heat(n, costs):
if options["retrofitting"]["retro_endogen"]:
logger.info("Add retrofitting endogenously")
# resample heat demand temporal 'heat_demand_r' depending on in config
# specified temporal resolution, to not overestimate retrofitting
hours = list(filter(re.compile(r"^\d+h$", re.IGNORECASE).search, opts))
if len(hours) == 0:
hours = [n.snapshots[1] - n.snapshots[0]]
heat_demand_r = heat_demand.resample(hours[0]).mean()
# retrofitting data 'retro_data' with 'costs' [EUR/m^2] and heat
# demand 'dE' [per unit of original heat demand] for each country and
# different retrofitting strengths [additional insulation thickness in m]
@ -2026,12 +2049,12 @@ def add_heat(n, costs):
# share of space heat demand 'w_space' of total heat demand
w_space = {}
for sector in sectors:
w_space[sector] = heat_demand_r[sector + " space"] / (
heat_demand_r[sector + " space"] + heat_demand_r[sector + " water"]
w_space[sector] = heat_demand[sector + " space"] / (
heat_demand[sector + " space"] + heat_demand[sector + " water"]
)
w_space["tot"] = (
heat_demand_r["services space"] + heat_demand_r["residential space"]
) / heat_demand_r.T.groupby(level=[1]).sum().T
heat_demand["services space"] + heat_demand["residential space"]
) / heat_demand.T.groupby(level=[1]).sum().T
for name in n.loads[
n.loads.carrier.isin([x + " heat" for x in heat_systems])
@ -2061,7 +2084,7 @@ def add_heat(n, costs):
pop_layout.loc[node].fraction * floor_area.loc[ct, "value"] * 10**6
).loc[sec] * f
# total heat demand at node [MWh]
demand = n.loads_t.p_set[name].resample(hours[0]).mean()
demand = n.loads_t.p_set[name]
# space heat demand at node [MWh]
space_heat_demand = demand * w_space[sec][node]
@ -3061,6 +3084,8 @@ def add_industry(n, costs):
+ mwh_coal_per_mwh_coke * industrial_demand["coke"]
) / nhours
p_set.rename(lambda x: x + " coal for industry", inplace=True)
if not options["regional_coal_demand"]:
p_set = p_set.sum()
@ -3294,52 +3319,6 @@ def remove_h2_network(n):
n.stores.drop("EU H2 Store", inplace=True)
def maybe_adjust_costs_and_potentials(n, opts):
for o in opts:
flags = ["+e", "+p", "+m", "+c"]
if all(flag not in o for flag in flags):
continue
oo = o.split("+")
carrier_list = np.hstack(
(
n.generators.carrier.unique(),
n.links.carrier.unique(),
n.stores.carrier.unique(),
n.storage_units.carrier.unique(),
)
)
suptechs = map(lambda c: c.split("-", 2)[0], carrier_list)
if oo[0].startswith(tuple(suptechs)):
carrier = oo[0]
attr_lookup = {
"p": "p_nom_max",
"e": "e_nom_max",
"c": "capital_cost",
"m": "marginal_cost",
}
attr = attr_lookup[oo[1][0]]
factor = float(oo[1][1:])
# beware if factor is 0 and p_nom_max is np.inf, 0*np.inf is nan
if carrier == "AC": # lines do not have carrier
n.lines[attr] *= factor
else:
if attr == "p_nom_max":
comps = {"Generator", "Link", "StorageUnit"}
elif attr == "e_nom_max":
comps = {"Store"}
else:
comps = {"Generator", "Link", "StorageUnit", "Store"}
for c in n.iterate_components(comps):
if carrier == "solar":
sel = c.df.carrier.str.contains(
carrier
) & ~c.df.carrier.str.contains("solar rooftop")
else:
sel = c.df.carrier.str.contains(carrier)
c.df.loc[sel, attr] *= factor
logger.info(f"changing {attr} for {carrier} by factor {factor}")
def limit_individual_line_extension(n, maxext):
logger.info(f"Limiting new HVAC and HVDC extensions to {maxext} MW")
n.lines["s_nom_max"] = n.lines["s_nom"] + maxext
@ -3509,31 +3488,31 @@ def apply_time_segmentation(
return n
def set_temporal_aggregation(n, opts, solver_name):
def set_temporal_aggregation(n, resolution, solver_name):
"""
Aggregate network temporally.
"""
for o in opts:
# temporal averaging
m = re.match(r"^\d+h$", o, re.IGNORECASE)
if m is not None:
n = average_every_nhours(n, m.group(0))
break
# representative snapshots
m = re.match(r"(^\d+)sn$", o, re.IGNORECASE)
if m is not None:
sn = int(m[1])
logger.info(f"Use every {sn} snapshot as representative")
n.set_snapshots(n.snapshots[::sn])
n.snapshot_weightings *= sn
break
# segments with package tsam
m = re.match(r"^(\d+)seg$", o, re.IGNORECASE)
if m is not None:
segments = int(m[1])
logger.info(f"Use temporal segmentation with {segments} segments")
n = apply_time_segmentation(n, segments, solver_name=solver_name)
break
if not resolution:
return n
# representative snapshots
if "sn" in resolution.lower():
sn = int(resolution[:-2])
logger.info("Use every %s snapshot as representative", sn)
n.set_snapshots(n.snapshots[::sn])
n.snapshot_weightings *= sn
# segments with package tsam
elif "seg" in resolution.lower():
segments = int(resolution[:-3])
logger.info("Use temporal segmentation with %s segments", segments)
n = apply_time_segmentation(n, segments, solver_name=solver_name)
# temporal averaging
elif "h" in resolution.lower():
logger.info("Aggregate to frequency %s", resolution)
n = average_every_nhours(n, resolution)
return n
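[Editor's note: the rewritten function above replaces wildcard matching with a single `resolution` string from the config. A compact, runnable restatement of the dispatch for a few illustrative values — branch order matters, since "sn" and "seg" are checked before the generic hourly frequency:]

def describe_resolution(resolution: str) -> str:
    if not resolution:
        return "keep native temporal resolution"
    if "sn" in resolution.lower():
        return f"use every {int(resolution[:-2])}th snapshot, scale weightings"
    if "seg" in resolution.lower():
        return f"apply tsam segmentation with {int(resolution[:-3])} segments"
    return f"average snapshots to frequency {resolution}"

for r in ["", "25sn", "100seg", "3h"]:
    print(repr(r), "->", describe_resolution(r))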
@@ -3602,14 +3581,12 @@ if __name__ == "__main__":
planning_horizons="2030",
)
logging.basicConfig(level=snakemake.config["logging"]["level"])
update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts)
configure_logging(snakemake)
set_scenario_config(snakemake)
update_config_from_wildcards(snakemake.config, snakemake.wildcards)
options = snakemake.params.sector
opts = snakemake.wildcards.sector_opts.split("-")
investment_year = int(snakemake.wildcards.planning_horizons[-4:])
n = pypsa.Network(snakemake.input.network)
@@ -3647,56 +3624,34 @@ if __name__ == "__main__":
add_storage_and_grids(n, costs)
# TODO merge with opts cost adjustment below
for o in opts:
if o[:4] == "dist":
options["electricity_distribution_grid"] = True
options["electricity_distribution_grid_cost_factor"] = float(
o[4:].replace("p", ".").replace("m", "-")
)
if o == "biomasstransport":
options["biomass_transport"] = True
if "nodistrict" in opts:
options["district_heating"]["progress"] = 0.0
if "nowasteheat" in opts:
logger.info("Disabling waste heat.")
options["use_fischer_tropsch_waste_heat"] = False
options["use_methanolisation_waste_heat"] = False
options["use_haber_bosch_waste_heat"] = False
options["use_methanation_waste_heat"] = False
options["use_fuel_cell_waste_heat"] = False
options["use_electrolysis_waste_heat"] = False
if "T" in opts:
if options["transport"]:
add_land_transport(n, costs)
if "H" in opts:
if options["heating"]:
add_heat(n, costs)
if "B" in opts:
if options["biomass"]:
add_biomass(n, costs)
if options["ammonia"]:
add_ammonia(n, costs)
if "I" in opts:
if options["industry"]:
add_industry(n, costs)
if "H" in opts:
if options["heating"]:
add_waste_heat(n)
if "A" in opts: # requires H and I
if options["agriculture"]: # requires H and I
add_agriculture(n, costs)
if options["dac"]:
add_dac(n, costs)
if "decentral" in opts:
if not options["electricity_transmission_grid"]:
decentral(n)
if "noH2network" in opts:
if not options["H2_network"]:
remove_h2_network(n)
if options["co2network"]:
@@ -3706,51 +3661,39 @@ if __name__ == "__main__":
add_allam(n, costs)
solver_name = snakemake.config["solving"]["solver"]["name"]
n = set_temporal_aggregation(n, opts, solver_name)
resolution = snakemake.params.time_resolution
n = set_temporal_aggregation(n, resolution, solver_name)
limit_type = "config"
limit = get(snakemake.params.co2_budget, investment_year)
for o in opts:
if "cb" not in o:
continue
limit_type = "carbon budget"
co2_budget = snakemake.params.co2_budget
if isinstance(co2_budget, str) and co2_budget.startswith("cb"):
fn = "results/" + snakemake.params.RDIR + "/csvs/carbon_budget_distribution.csv"
if not os.path.exists(fn):
emissions_scope = snakemake.params.emissions_scope
report_year = snakemake.params.eurostat_report_year
input_co2 = snakemake.input.co2
build_carbon_budget(
o,
co2_budget,
snakemake.input.eurostat,
fn,
emissions_scope,
report_year,
input_co2,
options,
)
co2_cap = pd.read_csv(fn, index_col=0).squeeze()
limit = co2_cap.loc[investment_year]
break
for o in opts:
if "Co2L" not in o:
continue
limit_type = "wildcard"
limit = o[o.find("Co2L") + 4 :]
limit = float(limit.replace("p", ".").replace("m", "-"))
break
logger.info(f"Add CO2 limit from {limit_type}")
add_co2limit(n, nyears, limit)
else:
limit = get(co2_budget, investment_year)
add_co2limit(n, options, nyears, limit)
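[Editor's note: the rewritten block accepts `co2_budget` in two forms — a year-indexed mapping resolved via the `get` helper, or a string starting with "cb" that triggers `build_carbon_budget` and a lookup in the resulting CSV. A toy sketch of the branch; the values and the example string are illustrative only, with the exact grammar defined by `build_carbon_budget`:]

co2_budget = {2030: 0.45, 2040: 0.1, 2050: 0.0}  # toy fractions of a baseline
investment_year = 2030

if isinstance(co2_budget, str) and co2_budget.startswith("cb"):
    # e.g. a string like "cb40ex0": build carbon_budget_distribution.csv,
    # then take the row for investment_year as the emission limit
    pass
else:
    limit = co2_budget[investment_year]  # 0.45; the real code uses get()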
for o in opts:
if not o[:10] == "linemaxext":
continue
maxext = float(o[10:]) * 1e3
maxext = snakemake.params["lines"]["max_extension"]
if maxext is not None:
limit_individual_line_extension(n, maxext)
break
if options["electricity_distribution_grid"]:
insert_electricity_distribution_grid(n, costs)
maybe_adjust_costs_and_potentials(n, opts)
maybe_adjust_costs_and_potentials(n, snakemake.params["adjustments"])
if options["gas_distribution_grid"]:
insert_gas_distribution_costs(n, costs)