Merge remote-tracking branch 'upstream/master' into no-offwind-fix

Koen van Greevenbroek 2024-02-20 14:41:24 +01:00
commit 87088904ee
111 changed files with 2549 additions and 1777 deletions

.gitignore

@@ -20,9 +20,16 @@ gurobi.log
 /notebooks
 /data
 /cutouts
+/tmp
 doc/_build
+/scripts/old
+/scripts/create_scenarios.py
+/config/create_scenarios.py
+config/config.yaml
+config/scenarios.yaml
 config.yaml
 config/config.yaml
@@ -54,25 +61,15 @@ d1gam3xoknrgr2.cloudfront.net/
 *.nc
 *~
-/scripts/old
 *.pyc
-/cutouts
-/tmp
-/pypsa
 *.xlsx
-config.yaml
-doc/_build
 *.xls
 *.geojson
 *.ipynb
-data/costs_*
 merger-todos.md

.pre-commit-config.yaml

@@ -51,7 +51,7 @@ repos:
   # Formatting with "black" coding style
   - repo: https://github.com/psf/black-pre-commit-mirror
-    rev: 24.1.1
+    rev: 24.2.0
     hooks:
       # Format Python files
       - id: black

Snakefile

@@ -4,40 +4,50 @@
 from os.path import normpath, exists
 from shutil import copyfile, move, rmtree
+from pathlib import Path
+
+import yaml
 from snakemake.remote.HTTP import RemoteProvider as HTTPRemoteProvider
-
-HTTP = HTTPRemoteProvider()
-
 from snakemake.utils import min_version
 
+from scripts._helpers import path_provider
+
 min_version("7.7")
+HTTP = HTTPRemoteProvider()
 
-conf_file = os.path.join(workflow.current_basedir, "config/config.yaml")
-conf_default_file = os.path.join(workflow.current_basedir, "config/config.default.yaml")
-if not exists(conf_file) and exists(conf_default_file):
-    copyfile(conf_default_file, conf_file)
+default_files = {
+    "config/config.default.yaml": "config/config.yaml",
+    "config/scenarios.template.yaml": "config/scenarios.yaml",
+}
+for template, target in default_files.items():
+    target = os.path.join(workflow.current_basedir, target)
+    template = os.path.join(workflow.current_basedir, template)
+    if not exists(target) and exists(template):
+        copyfile(template, target)
 
 
 configfile: "config/config.default.yaml"
 configfile: "config/config.yaml"
 
-COSTS = f"data/costs_{config['costs']['year']}.csv"
-ATLITE_NPROCESSES = config["atlite"].get("nprocesses", 4)
-
-run = config.get("run", {})
-RDIR = run["name"] + "/" if run.get("name") else ""
-CDIR = RDIR if not run.get("shared_cutouts") else ""
-
-LOGS = "logs/" + RDIR
-BENCHMARKS = "benchmarks/" + RDIR
-if not (shared_resources := run.get("shared_resources")):
-    RESOURCES = "resources/" + RDIR
-elif isinstance(shared_resources, str):
-    RESOURCES = "resources/" + shared_resources + "/"
-else:
-    RESOURCES = "resources/"
+run = config["run"]
+scenarios = run.get("scenarios", {})
+if run["name"] and scenarios.get("enable"):
+    fn = Path(scenarios["file"])
+    scenarios = yaml.safe_load(fn.read_text())
+    RDIR = "{run}/"
+    if run["name"] == "all":
+        config["run"]["name"] = list(scenarios.keys())
+elif run["name"]:
+    RDIR = run["name"] + "/"
+else:
+    RDIR = ""
+
+logs = path_provider("logs/", RDIR, run["shared_resources"])
+benchmarks = path_provider("benchmarks/", RDIR, run["shared_resources"])
+resources = path_provider("resources/", RDIR, run["shared_resources"])
 
+CDIR = "" if run["shared_cutouts"] else RDIR
 RESULTS = "results/" + RDIR

@@ -80,10 +90,19 @@ if config["foresight"] == "perfect":
 rule all:
     input:
-        RESULTS + "graphs/costs.pdf",
+        expand(RESULTS + "graphs/costs.pdf", run=config["run"]["name"]),
     default_target: True
 
 
+rule create_scenarios:
+    output:
+        config["run"]["scenarios"]["file"],
+    conda:
+        "envs/retrieve.yaml"
+    script:
+        "config/create_scenarios.py"
+
+
 rule purge:
     run:
         import builtins

@@ -104,9 +123,9 @@ rule dag:
     message:
         "Creating DAG of workflow."
     output:
-        dot=RESOURCES + "dag.dot",
-        pdf=RESOURCES + "dag.pdf",
-        png=RESOURCES + "dag.png",
+        dot=resources("dag.dot"),
+        pdf=resources("dag.pdf"),
+        png=resources("dag.png"),
     conda:
         "envs/environment.yaml"
     shell:
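The ``logs``, ``benchmarks`` and ``resources`` helpers above are created by ``path_provider`` from ``scripts/_helpers.py``. A minimal sketch of how such a provider can work, assuming a simplified handling of ``shared_resources`` (the real helper also implements the special ``base`` mode and wildcard filtering):

import os
from functools import partial


def _static_path(prefix, rdir, shared_resources, path):
    # Simplified assumption: a truthy shared_resources drops the run
    # directory (or replaces it with a named subdirectory), so outputs
    # land in a location common to all runs.
    if shared_resources is True:
        rdir = ""
    elif isinstance(shared_resources, str):
        rdir = shared_resources + "/"
    return os.path.join(prefix + rdir, path)


def path_provider(prefix, rdir, shared_resources):
    # Returns a callable, so rules can write e.g. resources("networks/base.nc").
    return partial(_static_path, prefix, rdir, shared_resources)


resources = path_provider("resources/", "myrun/", False)
print(resources("networks/base.nc"))  # -> resources/myrun/networks/base.nc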

config/config.default.yaml

@@ -21,6 +21,9 @@ remote:
 # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#run
 run:
   name: ""
+  scenarios:
+    enable: false
+    file: config/scenarios.yaml
   disable_progressbar: false
   shared_resources: false
   shared_cutouts: true

@@ -59,9 +62,6 @@ snapshots:
   start: "2013-01-01"
   end: "2014-01-01"
   inclusive: 'left'
-  resolution: false
-  segmentation: false
-  #representative: false
 
 # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#enable
 enable:

@@ -366,6 +366,11 @@ existing_capacities:
 # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#sector
 sector:
+  transport: true
+  heating: true
+  biomass: true
+  industry: true
+  agriculture: true
   district_heating:
     potential: 0.6
     progress:

@@ -531,6 +536,7 @@ sector:
   use_methanation_waste_heat: true
   use_fuel_cell_waste_heat: true
   use_electrolysis_waste_heat: true
+  electricity_transmission_grid: true
   electricity_distribution_grid: true
   electricity_distribution_grid_cost_factor: 1.0
   electricity_grid_connection: true

@@ -712,6 +718,14 @@ clustering:
       committable: any
       ramp_limit_up: max
       ramp_limit_down: max
+  temporal:
+    resolution_elec: false
+    resolution_sector: false
+
+# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#adjustments
+adjustments:
+  electricity: false
+  sector: false
 
 # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#solving
 solving:

config/create_scenarios.py

@@ -0,0 +1,37 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2023-2024 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT

# This script helps to generate a scenarios.yaml file for PyPSA-Eur.
# You can modify the template to your needs and define all possible
# combinations of config values that should be considered.

if "snakemake" in globals():
    filename = snakemake.output[0]
else:
    filename = "../config/scenarios.yaml"

import itertools

# Insert your config values that should be altered in the template.
# Change `config_section` and `config_section2` to the actual config sections.
template = """
scenario{scenario_number}:
  config_section:
    config_key: {config_value}
  config_section2:
    config_key2: {config_value2}
"""

# Define all possible combinations of config values.
# This must define all config values that are used in the template.
config_values = dict(config_value=["true", "false"], config_value2=[1, 2, 3, 4])

combinations = [
    dict(zip(config_values.keys(), values))
    for values in itertools.product(*config_values.values())
]

with open(filename, "w") as f:
    for i, config in enumerate(combinations):
        f.write(template.format(scenario_number=i, **config))
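After running ``snakemake -j1 create_scenarios``, the generated file can be sanity-checked by loading it back; each top-level key is a scenario name usable under ``run: name:`` (the path below is illustrative):

import yaml

with open("config/scenarios.yaml") as f:  # illustrative path
    scenarios = yaml.safe_load(f)

# Each top-level key is a scenario name, e.g. ['scenario0', 'scenario1', ...]
print(list(scenarios))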

config/scenarios.template.yaml

@@ -0,0 +1,28 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT

# This file is used to define the scenarios that are run by snakemake. Each
# entry on the first level is a scenario. Each scenario can contain
# configuration overrides with respect to the config/config.yaml settings.
#
# Example
#
# custom-scenario: # name of the scenario
#   electricity:
#     renewable_carriers: [wind, solar] # override the list of renewable carriers

normal:
  electricity:
    renewable_carriers:
    - solar
    - onwind
    - offwind-ac
    - offwind-dc
    - hydro

no-offwind:
  electricity:
    renewable_carriers:
    - solar
    - onwind
    - hydro
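Each scenario entry is a nested override of ``config/config.yaml``. A minimal, illustrative sketch of how such overrides can be merged into a base configuration (the workflow's actual merging logic is not shown in this diff):

import copy


def apply_overrides(base: dict, overrides: dict) -> dict:
    # Recursively overlay scenario overrides on a base configuration.
    merged = copy.deepcopy(base)
    for key, value in overrides.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = apply_overrides(merged[key], value)
        else:
            merged[key] = value
    return merged


base = {"electricity": {"renewable_carriers": ["solar", "onwind", "offwind-ac"]}}
no_offwind = {"electricity": {"renewable_carriers": ["solar", "onwind", "hydro"]}}
print(apply_overrides(base, no_offwind))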


@@ -0,0 +1,60 @@
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: CC0-1.0
tutorial: true
run:
name:
- test-elec-no-offshore-wind
- test-elec-no-onshore-wind
scenarios:
enable: true
file: "config/test/scenarios.yaml"
disable_progressbar: true
shared_resources: base
shared_cutouts: true
scenario:
clusters:
- 5
opts:
- Co2L-24H
countries: ['BE']
snapshots:
start: "2013-03-01"
end: "2013-03-08"
electricity:
extendable_carriers:
Generator: [OCGT]
StorageUnit: [battery, H2]
Store: []
atlite:
default_cutout: be-03-2013-era5
cutouts:
be-03-2013-era5:
module: era5
x: [4., 15.]
y: [46., 56.]
time: ["2013-03-01", "2013-03-08"]
renewable:
onwind:
cutout: be-03-2013-era5
offwind-ac:
cutout: be-03-2013-era5
max_depth: false
offwind-dc:
cutout: be-03-2013-era5
max_depth: false
solar:
cutout: be-03-2013-era5
solving:
solver:
name: glpk
options: "glpk-default"

config/test/scenarios.yaml

@@ -0,0 +1,11 @@
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: CC0-1.0
test-elec-no-offshore-wind:
electricity:
renewable_carriers: [solar, onwind]
test-elec-no-onshore-wind:
electricity:
renewable_carriers: [solar, offwind-ac, offwind-dc]

doc/configtables/adjustments.csv

@@ -0,0 +1,8 @@
,Unit,Values,Description
adjustments,,,
-- electricity,bool or dict,,"Parameter adjustments for capital cost, marginal cost, and maximum capacities of carriers. Applied in :mod:`prepare_network.`"
-- -- {attr},,,"Attribute can be ``e_nom_opt``, ``p_nom_opt``, ``marginal_cost`` or ``capital_cost``"
-- -- -- {carrier},float,per-unit,"Any carrier of the network to which parameter adjustment factor should be applied."
-- sector,bool or dict,,"Parameter adjustments for capital cost, marginal cost, and maximum capacities of carriers. Applied in :mod:`prepare_sector_network.`"
-- -- {attr},,,"Attribute can be ``e_nom_opt``, ``p_nom_opt``, ``marginal_cost`` or ``capital_cost``"
-- -- -- {carrier},float,per-unit,"Any carrier of the network to which parameter adjustment factor should be applied."
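In effect, each per-unit factor scales one attribute of all components of the given carrier. An illustrative stand-in using a bare DataFrame instead of a full PyPSA network:

import pandas as pd

# Stand-in for n.generators of a PyPSA network.
generators = pd.DataFrame(
    {"carrier": ["onwind", "solar"], "capital_cost": [1000.0, 500.0]}
)

# Equivalent in spirit to:
#   adjustments:
#     electricity:
#       capital_cost:
#         onwind: 1.1
factor = 1.1
generators.loc[generators.carrier == "onwind", "capital_cost"] *= factor
print(generators)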

doc/configtables/clustering.csv

@@ -17,3 +17,6 @@ aggregation_strategies,,,
 -- -- {key},str,"{key} can be any of the component of the generator (str). Its value can be any that can be converted to pandas.Series using getattr(). For example one of {min, max, sum}.","Aggregates the component according to the given strategy. For example, if sum, then all values within each cluster are summed to represent the new generator."
 -- buses,,,
 -- -- {key},str,"{key} can be any of the component of the bus (str). Its value can be any that can be converted to pandas.Series using getattr(). For example one of {min, max, sum}.","Aggregates the component according to the given strategy. For example, if sum, then all values within each cluster are summed to represent the new bus."
+temporal,,,Options for temporal resolution
+-- resolution_elec,--,"{false,``nH``; i.e. ``2H``-``6H``}","Resample the time-resolution by averaging over every ``n`` snapshots in :mod:`prepare_network`. **Warning:** This option should currently only be used with electricity-only networks, not for sector-coupled networks."
+-- resolution_sector,--,"{false,``nH``; i.e. ``2H``-``6H``}","Resample the time-resolution by averaging over every ``n`` snapshots in :mod:`prepare_sector_network`."
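Averaging over every ``n`` snapshots corresponds to a plain pandas resample; a minimal illustration of what ``resolution_elec: 3H`` does to an hourly series:

import pandas as pd

idx = pd.date_range("2013-01-01", periods=24, freq="h")
load = pd.Series(range(24), index=idx)

# Resample the hourly series to 3-hourly averages.
print(load.resample("3h").mean())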


doc/configtables/run.csv

@@ -1,5 +1,8 @@
 ,Unit,Values,Description
-name,--,"any string","Specify a name for your run. Results will be stored under this name."
-disable_progrssbar,bool,"{true, false}","Switch to select whether progressbar should be disabled."
-shared_resources,bool,"{true, false}","Switch to select whether resources should be shared across runs."
+name,--,str/list,"Specify a name for your run. Results will be stored under this name. If ``scenario: enable:`` is set to ``true``, the name must contain a subset of scenario names defined in ``scenario: file:``. If the name is 'all', all defined scenarios will be run."
+scenarios,,,
+-- enable,bool,"{true, false}","Switch to select whether the workflow should generate scenarios based on ``file``."
+-- file,str,,"Path to the scenario yaml file. The scenario file contains config overrides for each scenario. In order to be taken into account, ``run: scenarios`` has to be set to ``true`` and ``run: name`` has to be a subset of the top-level keys given in the scenario file. In order to automatically create a `scenarios.yaml` file based on a combination of settings, alter and use the ``config/create_scenarios.py`` script in the ``config`` directory."
+disable_progressbar,bool,"{true, false}","Switch to select whether progressbar should be disabled."
+shared_resources,bool/str,,"Switch to select whether resources should be shared across runs. If a string is passed, this is used as a subdirectory name for shared resources. If set to 'base', only resources before creating the elec.nc file are shared."
 shared_cutouts,bool,"{true, false}","Switch to select whether cutouts should be shared across runs."


doc/configtables/sector-opts.csv

@@ -7,5 +7,5 @@ Trigger, Description, Definition, Status
 ``B``,Add biomass,,In active use
 ``I``,Add industry sector,,In active use
 ``A``,Add agriculture sector,,In active use
-``dist``+``n``,Add distribution grid with investment costs of ``n`` times costs in ``data/costs_{cost_year}.csv``,,In active use
+``dist``+``n``,Add distribution grid with investment costs of ``n`` times costs in ``resources/costs_{cost_year}.csv``,,In active use
 ``seq``+``n``,Sets the CO2 sequestration potential to ``n`` Mt CO2 per year,,In active use


doc/configtables/sector.csv

@@ -1,4 +1,9 @@
 ,Unit,Values,Description
+transport,--,"{true, false}",Flag to include transport sector.
+heating,--,"{true, false}",Flag to include heating sector.
+biomass,--,"{true, false}",Flag to include biomass sector.
+industry,--,"{true, false}",Flag to include industry sector.
+agriculture,--,"{true, false}",Flag to include agriculture sector.
 district_heating,--,,`prepare_sector_network.py <https://github.com/PyPSA/pypsa-eur-sec/blob/master/scripts/prepare_sector_network.py>`_
 -- potential,--,float,maximum fraction of urban demand which can be supplied by district heating
 -- progress,--,Dictionary with planning horizons as keys.,Increase of today's district heating demand to potential maximum district heating share. Progress = 0 means today's district heating share. Progress = 1 means maximum fraction of urban demand is supplied by district heating

@@ -109,6 +114,7 @@ min_part_load _methanolisation,per unit of p_nom ,float,The minimum unit dispatc
 use_fischer_tropsch _waste_heat,--,"{true, false}",Add option for using waste heat of Fischer Tropsch in district heating networks
 use_fuel_cell_waste_heat,--,"{true, false}",Add option for using waste heat of fuel cells in district heating networks
 use_electrolysis_waste _heat,--,"{true, false}",Add option for using waste heat of electrolysis in district heating networks
+electricity_transmission _grid,--,"{true, false}",Switch for enabling/disabling the electricity transmission grid.
 electricity_distribution _grid,--,"{true, false}",Add a simplified representation of the exchange capacity between transmission and distribution grid level through a link.
 electricity_distribution _grid_cost_factor,,,Multiplies the investment cost of the electricity distribution grid
 ,,,


doc/configtables/snapshots.csv

@@ -2,5 +2,3 @@
 start,--,str or datetime-like; e.g. YYYY-MM-DD,Left bound of date range
 end,--,str or datetime-like; e.g. YYYY-MM-DD,Right bound of date range
 inclusive,--,"One of {'neither', 'both', left, right}","Make the time interval closed to the ``left``, ``right``, or both sides ``both`` or neither side ``None``."
-resolution,--,"{false,``nH``; i.e. ``2H``-``6H``}","Resample the time-resolution by averaging over every ``n`` snapshots in :mod:`prepare_network`. **Warning:** This option should currently only be used with electricity-only networks, not for sector-coupled networks."
-segmentation,--,"{false,``n``; e.g. ``4380``}","Apply time series segmentation with `tsam <https://tsam.readthedocs.io/en/latest/index.html>`_ package to ``n`` adjacent snapshots of varying lengths based on capacity factors of varying renewables, hydro inflow and load in :mod:`prepare_network`. **Warning:** This option should currently only be used with electricity-only networks, not for sector-coupled networks."


doc/configuration.rst

@@ -561,6 +561,21 @@ The list of available biomass is given by the category in `ENSPRESO_BIOMASS <htt
    use ``min`` in ``p_nom_max:`` for more
    conservative assumptions.
 
+.. _adjustments_cf:
+
+``adjustments``
+===============
+
+.. literalinclude:: ../config/config.default.yaml
+   :language: yaml
+   :start-at: adjustments:
+   :end-before: # docs
+
+.. csv-table::
+   :header-rows: 1
+   :widths: 22,7,22,33
+   :file: configtables/adjustments.csv
+
 .. _solving_cf:
 
 ``solving``

doc/costs.rst

@@ -9,7 +9,7 @@ Techno-Economic Assumptions
 The database of cost assumptions is retrieved from the repository
 `PyPSA/technology-data <https://github.com/pypsa/technology-data>`_ and then
-saved to a file ``data/costs_{year}.csv``. The ``config/config.yaml`` provides options
+saved to a file ``resources/costs_{year}.csv``. The ``config/config.yaml`` provides options
 to choose a reference year and use a specific version of the repository.
 
 .. literalinclude:: ../config/config.default.yaml

@@ -50,7 +50,7 @@ Modifying Assumptions
 Some cost assumptions (e.g. marginal cost and capital cost) can be directly
 set in the ``config/config.yaml`` (cf. Section :ref:`costs_cf` in
 :ref:`config`). To change cost assumptions in more detail, make a copy of
-``data/costs_{year}.csv`` and reference the new cost file in the ``Snakefile``:
+``resources/costs_{year}.csv`` and reference the new cost file in the ``Snakefile``:
 
 .. literalinclude:: ../Snakefile
    :start-at: COSTS

doc/installation.rst

@@ -31,7 +31,7 @@ Install Python Dependencies
 PyPSA-Eur relies on a set of other Python packages to function.
 We recommend using the package manager `mamba <https://mamba.readthedocs.io/en/latest/>`_ to install them and manage your environments.
-For instructions for your operating system follow the ``mamba`` `installation guide <https://mamba.readthedocs.io/en/latest/installation.html>`_.
+For instructions for your operating system follow the ``mamba`` `installation guide <https://mamba.readthedocs.io/en/latest/installation/mamba-installation.html>`_.
 You can also use ``conda`` equivalently.
 
 The package requirements are curated in the `envs/environment.yaml <https://github.com/PyPSA/pypsa-eur/blob/master/envs/environment.yaml>`_ file.

doc/release_notes.rst

@@ -7,9 +7,77 @@
 Release Notes
 ##########################################
 
-.. Upcoming Release
-.. ================
-..
+Upcoming Release
+================
+
+* Linearly interpolate missing investment periods in year-dependent
+  configuration options.
+
+* Added new scenario management that supports the simultaneous execution of
+  multiple scenarios with a single ``snakemake`` call. For this purpose, a
+  ``scenarios.yaml`` file is introduced which contains customizable scenario
+  names with configuration overrides. To enable it, set ``run: scenarios:
+  enable: true`` and define the list of scenario names to run under ``run:
+  name:`` in the configuration file. The latter must be a subset of the
+  top-level keys in the scenario file.
+
+  - To get started, a scenarios template file ``config/scenarios.template.yaml``
+    is included in the repository, which is copied to ``config/scenarios.yaml``
+    on first use.
+
+  - The scenario file can be changed via ``run: scenarios: file:``.
+
+  - If scenario management is activated with ``run: scenarios: enable: true``,
+    a new wildcard ``{run}`` is introduced. This means that the configuration
+    settings may depend on the new ``{run}`` wildcard. Therefore, a new
+    ``config_provider()`` function is used in the ``Snakefile`` and ``.smk``
+    files, which takes wildcard values into account (a minimal sketch of such a
+    provider follows these notes). The calls to the ``config`` object have been
+    reduced in ``.smk`` files since there is no awareness of wildcard values
+    outside rule definitions.
+
+  - The scenario files can also be programmatically created using the template
+    script ``config/create_scenarios.py``. This script can be run with
+    ``snakemake -j1 create_scenarios`` and creates the scenarios file referenced
+    under ``run: scenarios: file:``.
+
+  - The setting ``run: name: all`` will run all scenarios in
+    ``config/scenarios.yaml``. Otherwise, it will run those passed as a list in
+    ``run: name:`` as long as ``run: scenarios: enable: true``.
+
+  - The setting ``run: shared_resources:`` indicates via a boolean whether the
+    resources should be encapsulated by the ``run: name:``. The special setting
+    ``run: shared_resources: base`` shares resources until ``add_electricity``
+    that do not contain wildcards other than ``{"technology", "year", "scope"}``.
+
+  - Added new configuration options for all ``{opts}`` and ``{sector_opts}``
+    wildcard values to create a unique configuration file (``config.yaml``) per
+    PyPSA network file. This is done with the help of a new function
+    ``update_config_from_wildcards()`` which parses configuration settings from
+    wildcards and updates the ``snakemake.config`` object. These updated
+    configuration settings are used in the scripts rather than directly parsed
+    values from ``snakemake.wildcards``.
+
+  - The cost data was moved from ``data/costs_{year}.csv`` to
+    ``resources/costs_{year}.csv`` since it depends on configuration settings.
+    The ``retrieve_cost_data`` rule was changed to call a Python script.
+
+  - Moved time clustering settings to ``clustering: temporal:`` from
+    ``snapshots:`` so that the latter is only used to define the
+    ``pandas.DatetimeIndex``, which simplifies the scenario management.
+
+  - Collection rules get a new wildcard ``run=config["run"]["name"]`` so they
+    can collect outputs across different scenarios.
+
+  - **Warning:** One caveat remains for the scenario management with myopic or
+    perfect foresight pathway optimisation. The first investment period must be
+    shared across all scenarios. The reason is that the ``wildcard_constraints``
+    defined for the rule ``add_existing_baseyear`` do not accept wildcard-aware
+    input functions (cf. `snakemake#2703
+    <https://github.com/snakemake/snakemake/issues/2703>`_).
+
+* The outputs of the rule ``retrieve_gas_infrastructure_data`` are no longer
+  marked as ``protected()`` as the download size is small.
 
 PyPSA-Eur 0.10.0 (19th February 2024)
 =====================================
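The ``config_provider()`` function mentioned above resolves configuration values lazily, once wildcard values are known. A heavily simplified sketch, assuming a plain nested ``config`` dict; the real helper additionally applies per-run scenario overrides selected via the ``{run}`` wildcard:

from functools import reduce

config = {"electricity": {"renewable_carriers": ["solar", "onwind"]}}  # example


def config_provider(*keys, default=None):
    # Return a function of the wildcards that looks up a nested config entry
    # at evaluation time. This sketch ignores the wildcards entirely.
    def getter(wildcards):
        try:
            return reduce(lambda c, k: c[k], keys, config)
        except (KeyError, TypeError):
            return default

    return getter


# Usage mirrors the .smk files, e.g. params: countries=config_provider("countries"):
print(config_provider("electricity", "renewable_carriers")(None))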

doc/retrieve.rst

@@ -91,7 +91,7 @@ None.
 **Outputs**
 
-- ``resources/electricity_demand.csv``
+- ``data/electricity_demand_raw.csv``
 
 Rule ``retrieve_cost_data``

rules/build_electricity.smk

@@ -8,7 +8,7 @@ if config["enable"].get("prepare_links_p_nom", False):
         output:
             "data/links_p_nom.csv",
         log:
-            LOGS + "prepare_links_p_nom.log",
+            logs("prepare_links_p_nom.log"),
         threads: 1
         resources:
             mem_mb=1500,

@@ -20,15 +20,15 @@
 rule build_electricity_demand:
     params:
-        snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]},
-        countries=config["countries"],
-        load=config["load"],
+        snapshots=config_provider("snapshots"),
+        countries=config_provider("countries"),
+        load=config_provider("load"),
     input:
         ancient("data/electricity_demand_raw.csv"),
     output:
-        RESOURCES + "electricity_demand.csv",
+        resources("electricity_demand.csv"),
     log:
-        LOGS + "build_electricity_demand.log",
+        logs("build_electricity_demand.log"),
     resources:
         mem_mb=5000,
     conda:

@@ -39,17 +39,17 @@ rule build_electricity_demand:
 rule build_powerplants:
     params:
-        powerplants_filter=config["electricity"]["powerplants_filter"],
-        custom_powerplants=config["electricity"]["custom_powerplants"],
-        everywhere_powerplants=config["electricity"]["everywhere_powerplants"],
-        countries=config["countries"],
+        powerplants_filter=config_provider("electricity", "powerplants_filter"),
+        custom_powerplants=config_provider("electricity", "custom_powerplants"),
+        everywhere_powerplants=config_provider("electricity", "everywhere_powerplants"),
+        countries=config_provider("countries"),
     input:
-        base_network=RESOURCES + "networks/base.nc",
+        base_network=resources("networks/base.nc"),
         custom_powerplants="data/custom_powerplants.csv",
     output:
-        RESOURCES + "powerplants.csv",
+        resources("powerplants.csv"),
     log:
-        LOGS + "build_powerplants.log",
+        logs("build_powerplants.log"),
     threads: 1
     resources:
         mem_mb=5000,

@@ -61,11 +61,11 @@ rule build_powerplants:
 rule base_network:
     params:
-        countries=config["countries"],
-        snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]},
-        lines=config["lines"],
-        links=config["links"],
-        transformers=config["transformers"],
+        countries=config_provider("countries"),
+        snapshots=config_provider("snapshots"),
+        lines=config_provider("lines"),
+        links=config_provider("links"),
+        transformers=config_provider("transformers"),
     input:
         eg_buses="data/entsoegridkit/buses.csv",
         eg_lines="data/entsoegridkit/lines.csv",

@@ -75,15 +75,15 @@ rule base_network:
         parameter_corrections="data/parameter_corrections.yaml",
         links_p_nom="data/links_p_nom.csv",
         links_tyndp="data/links_tyndp.csv",
-        country_shapes=RESOURCES + "country_shapes.geojson",
-        offshore_shapes=RESOURCES + "offshore_shapes.geojson",
-        europe_shape=RESOURCES + "europe_shape.geojson",
+        country_shapes=resources("country_shapes.geojson"),
+        offshore_shapes=resources("offshore_shapes.geojson"),
+        europe_shape=resources("europe_shape.geojson"),
     output:
-        RESOURCES + "networks/base.nc",
+        resources("networks/base.nc"),
     log:
-        LOGS + "base_network.log",
+        logs("base_network.log"),
     benchmark:
-        BENCHMARKS + "base_network"
+        benchmarks("base_network")
     threads: 1
     resources:
         mem_mb=1500,
@@ -95,7 +95,7 @@ rule base_network:
 rule build_shapes:
     params:
-        countries=config["countries"],
+        countries=config_provider("countries"),
     input:
         naturalearth=ancient("data/bundle/naturalearth/ne_10m_admin_0_countries.shp"),
         eez=ancient("data/bundle/eez/World_EEZ_v8_2014.shp"),

@@ -105,12 +105,12 @@ rule build_shapes:
         ch_cantons=ancient("data/bundle/ch_cantons.csv"),
         ch_popgdp=ancient("data/bundle/je-e-21.03.02.xls"),
     output:
-        country_shapes=RESOURCES + "country_shapes.geojson",
-        offshore_shapes=RESOURCES + "offshore_shapes.geojson",
-        europe_shape=RESOURCES + "europe_shape.geojson",
-        nuts3_shapes=RESOURCES + "nuts3_shapes.geojson",
+        country_shapes=resources("country_shapes.geojson"),
+        offshore_shapes=resources("offshore_shapes.geojson"),
+        europe_shape=resources("europe_shape.geojson"),
+        nuts3_shapes=resources("nuts3_shapes.geojson"),
     log:
-        LOGS + "build_shapes.log",
+        logs("build_shapes.log"),
     threads: 1
     resources:
         mem_mb=1500,

@@ -122,16 +122,16 @@ rule build_shapes:
 rule build_bus_regions:
     params:
-        countries=config["countries"],
+        countries=config_provider("countries"),
     input:
-        country_shapes=RESOURCES + "country_shapes.geojson",
-        offshore_shapes=RESOURCES + "offshore_shapes.geojson",
-        base_network=RESOURCES + "networks/base.nc",
+        country_shapes=resources("country_shapes.geojson"),
+        offshore_shapes=resources("offshore_shapes.geojson"),
+        base_network=resources("networks/base.nc"),
     output:
-        regions_onshore=RESOURCES + "regions_onshore.geojson",
-        regions_offshore=RESOURCES + "regions_offshore.geojson",
+        regions_onshore=resources("regions_onshore.geojson"),
+        regions_offshore=resources("regions_offshore.geojson"),
     log:
-        LOGS + "build_bus_regions.log",
+        logs("build_bus_regions.log"),
     threads: 1
     resources:
         mem_mb=1000,

@@ -145,20 +145,20 @@ if config["enable"].get("build_cutout", False):
     rule build_cutout:
         params:
-            snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]},
-            cutouts=config["atlite"]["cutouts"],
+            snapshots=config_provider("snapshots"),
+            cutouts=config_provider("atlite", "cutouts"),
         input:
-            regions_onshore=RESOURCES + "regions_onshore.geojson",
-            regions_offshore=RESOURCES + "regions_offshore.geojson",
+            regions_onshore=resources("regions_onshore.geojson"),
+            regions_offshore=resources("regions_offshore.geojson"),
         output:
            protected("cutouts/" + CDIR + "{cutout}.nc"),
         log:
-            "logs/" + CDIR + "build_cutout/{cutout}.log",
+            logs(CDIR + "build_cutout/{cutout}.log"),
         benchmark:
            "benchmarks/" + CDIR + "build_cutout_{cutout}"
-        threads: ATLITE_NPROCESSES
+        threads: config["atlite"].get("nprocesses", 4)
         resources:
-            mem_mb=ATLITE_NPROCESSES * 1000,
+            mem_mb=config["atlite"].get("nprocesses", 4) * 1000,
        conda:
            "../envs/environment.yaml"
        script:

@@ -170,13 +170,15 @@ if config["enable"].get("build_natura_raster", False):
     rule build_natura_raster:
         input:
             natura=ancient("data/bundle/natura/Natura2000_end2015.shp"),
-            cutouts=expand("cutouts/" + CDIR + "{cutouts}.nc", **config["atlite"]),
+            cutouts=lambda w: expand(
+                "cutouts/" + CDIR + "{cutouts}.nc", **config_provider("atlite")(w)
+            ),
         output:
-            RESOURCES + "natura.tiff",
+            resources("natura.tiff"),
         resources:
             mem_mb=5000,
         log:
-            LOGS + "build_natura_raster.log",
+            logs("build_natura_raster.log"),
         conda:
             "../envs/environment.yaml"
         script:

@@ -186,21 +188,21 @@ if config["enable"].get("build_natura_raster", False):
 rule build_ship_raster:
     input:
         ship_density="data/shipdensity_global.zip",
-        cutouts=expand(
+        cutouts=lambda w: expand(
             "cutouts/" + CDIR + "{cutout}.nc",
             cutout=[
-                config["renewable"][k]["cutout"]
-                for k in config["electricity"]["renewable_carriers"]
+                config_provider("renewable", k, "cutout")(w)
+                for k in config_provider("electricity", "renewable_carriers")(w)
             ],
         ),
     output:
-        RESOURCES + "shipdensity_raster.tif",
+        resources("shipdensity_raster.tif"),
     log:
-        LOGS + "build_ship_raster.log",
+        logs("build_ship_raster.log"),
     resources:
         mem_mb=5000,
     benchmark:
-        BENCHMARKS + "build_ship_raster"
+        benchmarks("build_ship_raster")
     conda:
         "../envs/environment.yaml"
     script:
@@ -214,33 +216,33 @@ rule determine_availability_matrix_MD_UA:
         wdpa_marine="data/WDPA_WDOECM_marine.gpkg",
         gebco=lambda w: (
             "data/bundle/GEBCO_2014_2D.nc"
-            if "max_depth" in config["renewable"][w.technology].keys()
+            if config_provider("renewable", w.technology)(w).get("max_depth")
             else []
         ),
         ship_density=lambda w: (
-            RESOURCES + "shipdensity_raster.tif"
-            if "ship_threshold" in config["renewable"][w.technology].keys()
+            resources("shipdensity_raster.tif")
+            if "ship_threshold" in config_provider("renewable", w.technology)(w).keys()
             else []
         ),
-        country_shapes=RESOURCES + "country_shapes.geojson",
-        offshore_shapes=RESOURCES + "offshore_shapes.geojson",
+        country_shapes=resources("country_shapes.geojson"),
+        offshore_shapes=resources("offshore_shapes.geojson"),
         regions=lambda w: (
-            RESOURCES + "regions_onshore.geojson"
+            resources("regions_onshore.geojson")
             if w.technology in ("onwind", "solar")
-            else RESOURCES + "regions_offshore.geojson"
+            else resources("regions_offshore.geojson")
         ),
         cutout=lambda w: "cutouts/"
         + CDIR
-        + config["renewable"][w.technology]["cutout"]
+        + config_provider("renewable", w.technology, "cutout")(w)
         + ".nc",
     output:
-        availability_matrix=RESOURCES + "availability_matrix_MD-UA_{technology}.nc",
-        availability_map=RESOURCES + "availability_matrix_MD-UA_{technology}.png",
+        availability_matrix=resources("availability_matrix_MD-UA_{technology}.nc"),
+        availability_map=resources("availability_matrix_MD-UA_{technology}.png"),
     log:
-        LOGS + "determine_availability_matrix_MD_UA_{technology}.log",
+        logs("determine_availability_matrix_MD_UA_{technology}.log"),
-    threads: ATLITE_NPROCESSES
+    threads: config["atlite"].get("nprocesses", 4)
     resources:
-        mem_mb=ATLITE_NPROCESSES * 5000,
+        mem_mb=config["atlite"].get("nprocesses", 4) * 5000,
     conda:
         "../envs/environment.yaml"
     script:

@@ -248,65 +250,67 @@ rule determine_availability_matrix_MD_UA:
 # Optional input when having Ukraine (UA) or Moldova (MD) in the countries list
-if {"UA", "MD"}.intersection(set(config["countries"])):
-    opt = {
-        "availability_matrix_MD_UA": RESOURCES
-        + "availability_matrix_MD-UA_{technology}.nc"
-    }
-else:
-    opt = {}
+def input_ua_md_availability_matrix(w):
+    countries = set(config_provider("countries")(w))
+    if {"UA", "MD"}.intersection(countries):
+        return {
+            "availability_matrix_MD_UA": resources(
+                "availability_matrix_MD-UA_{technology}.nc"
+            )
+        }
+    return {}
 
 
 rule build_renewable_profiles:
     params:
-        snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]},
-        renewable=config["renewable"],
+        snapshots=config_provider("snapshots"),
+        renewable=config_provider("renewable"),
     input:
-        **opt,
-        base_network=RESOURCES + "networks/base.nc",
+        unpack(input_ua_md_availability_matrix),
+        base_network=resources("networks/base.nc"),
         corine=ancient("data/bundle/corine/g250_clc06_V18_5.tif"),
         natura=lambda w: (
-            RESOURCES + "natura.tiff"
-            if config["renewable"][w.technology]["natura"]
+            resources("natura.tiff")
+            if config_provider("renewable", w.technology, "natura")(w)
             else []
         ),
         luisa=lambda w: (
             "data/LUISA_basemap_020321_50m.tif"
-            if config["renewable"][w.technology].get("luisa")
+            if config_provider("renewable", w.technology, "luisa")(w)
             else []
         ),
         gebco=ancient(
             lambda w: (
                 "data/bundle/GEBCO_2014_2D.nc"
-                if config["renewable"][w.technology].get("max_depth")
+                if config_provider("renewable", w.technology)(w).get("max_depth")
                 else []
             )
         ),
         ship_density=lambda w: (
-            RESOURCES + "shipdensity_raster.tif"
-            if config["renewable"][w.technology].get("ship_threshold", False)
+            resources("shipdensity_raster.tif")
+            if "ship_threshold" in config_provider("renewable", w.technology)(w).keys()
             else []
         ),
-        country_shapes=RESOURCES + "country_shapes.geojson",
-        offshore_shapes=RESOURCES + "offshore_shapes.geojson",
+        country_shapes=resources("country_shapes.geojson"),
+        offshore_shapes=resources("offshore_shapes.geojson"),
         regions=lambda w: (
-            RESOURCES + "regions_onshore.geojson"
+            resources("regions_onshore.geojson")
             if w.technology in ("onwind", "solar")
-            else RESOURCES + "regions_offshore.geojson"
+            else resources("regions_offshore.geojson")
         ),
         cutout=lambda w: "cutouts/"
         + CDIR
-        + config["renewable"][w.technology]["cutout"]
+        + config_provider("renewable", w.technology, "cutout")(w)
        + ".nc",
     output:
-        profile=RESOURCES + "profile_{technology}.nc",
+        profile=resources("profile_{technology}.nc"),
     log:
-        LOGS + "build_renewable_profile_{technology}.log",
+        logs("build_renewable_profile_{technology}.log"),
     benchmark:
-        BENCHMARKS + "build_renewable_profiles_{technology}"
+        benchmarks("build_renewable_profiles_{technology}")
-    threads: ATLITE_NPROCESSES
+    threads: config["atlite"].get("nprocesses", 4)
     resources:
-        mem_mb=ATLITE_NPROCESSES * 5000,
+        mem_mb=config["atlite"].get("nprocesses", 4) * 5000,
     wildcard_constraints:
         technology="(?!hydro).*",  # Any technology other than hydro
     conda:
@@ -320,10 +324,10 @@ rule build_monthly_prices:
         co2_price_raw="data/validation/emission-spot-primary-market-auction-report-2019-data.xls",
         fuel_price_raw="data/validation/energy-price-trends-xlsx-5619002.xlsx",
     output:
-        co2_price=RESOURCES + "co2_price.csv",
-        fuel_price=RESOURCES + "monthly_fuel_price.csv",
+        co2_price=resources("co2_price.csv"),
+        fuel_price=resources("monthly_fuel_price.csv"),
     log:
-        LOGS + "build_monthly_prices.log",
+        logs("build_monthly_prices.log"),
     threads: 1
     resources:
         mem_mb=5000,

@@ -335,16 +339,19 @@ rule build_monthly_prices:
 rule build_hydro_profile:
     params:
-        hydro=config["renewable"]["hydro"],
-        countries=config["countries"],
+        hydro=config_provider("renewable", "hydro"),
+        countries=config_provider("countries"),
     input:
-        country_shapes=RESOURCES + "country_shapes.geojson",
+        country_shapes=resources("country_shapes.geojson"),
         eia_hydro_generation="data/eia_hydro_annual_generation.csv",
-        cutout=f"cutouts/" + CDIR + config["renewable"]["hydro"]["cutout"] + ".nc",
+        cutout=lambda w: f"cutouts/"
+        + CDIR
+        + config_provider("renewable", "hydro", "cutout")(w)
+        + ".nc",
     output:
-        RESOURCES + "profile_hydro.nc",
+        resources("profile_hydro.nc"),
     log:
-        LOGS + "build_hydro_profile.log",
+        logs("build_hydro_profile.log"),
     resources:
         mem_mb=5000,
     conda:

@@ -353,79 +360,87 @@ rule build_hydro_profile:
         "../scripts/build_hydro_profile.py"
 
 
-if config["lines"]["dynamic_line_rating"]["activate"]:
-
 rule build_line_rating:
     params:
-        snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]},
+        snapshots=config_provider("snapshots"),
     input:
-        base_network=RESOURCES + "networks/base.nc",
-        cutout="cutouts/"
+        base_network=resources("networks/base.nc"),
+        cutout=lambda w: "cutouts/"
         + CDIR
-        + config["lines"]["dynamic_line_rating"]["cutout"]
+        + config_provider("lines", "dynamic_line_rating", "cutout")(w)
         + ".nc",
     output:
-        output=RESOURCES + "networks/line_rating.nc",
+        output=resources("networks/line_rating.nc"),
     log:
-        LOGS + "build_line_rating.log",
+        logs("build_line_rating.log"),
     benchmark:
-        BENCHMARKS + "build_line_rating"
+        benchmarks("build_line_rating")
-    threads: ATLITE_NPROCESSES
+    threads: config["atlite"].get("nprocesses", 4)
     resources:
-        mem_mb=ATLITE_NPROCESSES * 1000,
+        mem_mb=config["atlite"].get("nprocesses", 4) * 1000,
     conda:
         "../envs/environment.yaml"
     script:
         "../scripts/build_line_rating.py"
 
 
-rule add_electricity:
-    params:
-        length_factor=config["lines"]["length_factor"],
-        scaling_factor=config["load"]["scaling_factor"],
-        countries=config["countries"],
-        renewable=config["renewable"],
-        electricity=config["electricity"],
-        conventional=config["conventional"],
-        costs=config["costs"],
-    input:
-        **{
-            f"profile_{tech}": RESOURCES + f"profile_{tech}.nc"
-            for tech in config["electricity"]["renewable_carriers"]
-        },
-        **{
-            f"conventional_{carrier}_{attr}": fn
-            for carrier, d in config.get("conventional", {None: {}}).items()
-            if carrier in config["electricity"]["conventional_carriers"]
-            for attr, fn in d.items()
-            if str(fn).startswith("data/")
-        },
-        base_network=RESOURCES + "networks/base.nc",
-        line_rating=(
-            RESOURCES + "networks/line_rating.nc"
-            if config["lines"]["dynamic_line_rating"]["activate"]
-            else RESOURCES + "networks/base.nc"
+def input_profile_tech(w):
+    return {
+        f"profile_{tech}": resources(f"profile_{tech}.nc")
+        for tech in config_provider("electricity", "renewable_carriers")(w)
+    }
+
+
+def input_conventional(w):
+    return {
+        f"conventional_{carrier}_{attr}": fn
+        for carrier, d in config_provider("conventional", default={None: {}})(w).items()
+        if carrier in config_provider("electricity", "conventional_carriers")(w)
+        for attr, fn in d.items()
+        if str(fn).startswith("data/")
+    }
+
+
+rule add_electricity:
+    params:
+        length_factor=config_provider("lines", "length_factor"),
+        scaling_factor=config_provider("load", "scaling_factor"),
+        countries=config_provider("countries"),
+        renewable=config_provider("renewable"),
+        electricity=config_provider("electricity"),
+        conventional=config_provider("conventional"),
+        costs=config_provider("costs"),
+    input:
+        unpack(input_profile_tech),
+        unpack(input_conventional),
+        base_network=resources("networks/base.nc"),
+        line_rating=lambda w: (
+            resources("networks/line_rating.nc")
+            if config_provider("lines", "dynamic_line_rating", "activate")(w)
+            else resources("networks/base.nc")
         ),
-        tech_costs=COSTS,
-        regions=RESOURCES + "regions_onshore.geojson",
-        powerplants=RESOURCES + "powerplants.csv",
+        tech_costs=lambda w: resources(
+            f"costs_{config_provider('costs', 'year')(w)}.csv"
+        ),
+        regions=resources("regions_onshore.geojson"),
+        powerplants=resources("powerplants.csv"),
         hydro_capacities=ancient("data/bundle/hydro_capacities.csv"),
         geth_hydro_capacities="data/geth2015_hydro_capacities.csv",
         unit_commitment="data/unit_commitment.csv",
-        fuel_price=(
-            RESOURCES + "monthly_fuel_price.csv"
-            if config["conventional"]["dynamic_fuel_price"]
+        fuel_price=lambda w: (
+            resources("monthly_fuel_price.csv")
+            if config_provider("conventional", "dynamic_fuel_price")(w)
             else []
         ),
-        load=RESOURCES + "electricity_demand.csv",
-        nuts3_shapes=RESOURCES + "nuts3_shapes.geojson",
+        load=resources("electricity_demand.csv"),
+        nuts3_shapes=resources("nuts3_shapes.geojson"),
         ua_md_gdp="data/GDP_PPP_30arcsec_v3_mapped_default.csv",
     output:
-        RESOURCES + "networks/elec.nc",
+        resources("networks/elec.nc"),
     log:
-        LOGS + "add_electricity.log",
+        logs("add_electricity.log"),
     benchmark:
-        BENCHMARKS + "add_electricity"
+        benchmarks("add_electricity")
     threads: 1
     resources:
         mem_mb=10000,
@@ -437,31 +452,33 @@ rule add_electricity:
 rule simplify_network:
     params:
-        simplify_network=config["clustering"]["simplify_network"],
-        aggregation_strategies=config["clustering"].get("aggregation_strategies", {}),
-        focus_weights=config["clustering"].get(
-            "focus_weights", config.get("focus_weights")
-        ),
-        renewable_carriers=config["electricity"]["renewable_carriers"],
-        max_hours=config["electricity"]["max_hours"],
-        length_factor=config["lines"]["length_factor"],
-        p_max_pu=config["links"].get("p_max_pu", 1.0),
-        costs=config["costs"],
+        simplify_network=config_provider("clustering", "simplify_network"),
+        aggregation_strategies=config_provider(
+            "clustering", "aggregation_strategies", default={}
+        ),
+        focus_weights=config_provider("clustering", "focus_weights", default=None),
+        renewable_carriers=config_provider("electricity", "renewable_carriers"),
+        max_hours=config_provider("electricity", "max_hours"),
+        length_factor=config_provider("lines", "length_factor"),
+        p_max_pu=config_provider("links", "p_max_pu", default=1.0),
+        costs=config_provider("costs"),
     input:
-        network=RESOURCES + "networks/elec.nc",
-        tech_costs=COSTS,
-        regions_onshore=RESOURCES + "regions_onshore.geojson",
-        regions_offshore=RESOURCES + "regions_offshore.geojson",
+        network=resources("networks/elec.nc"),
+        tech_costs=lambda w: resources(
+            f"costs_{config_provider('costs', 'year')(w)}.csv"
+        ),
+        regions_onshore=resources("regions_onshore.geojson"),
+        regions_offshore=resources("regions_offshore.geojson"),
     output:
-        network=RESOURCES + "networks/elec_s{simpl}.nc",
-        regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}.geojson",
-        regions_offshore=RESOURCES + "regions_offshore_elec_s{simpl}.geojson",
-        busmap=RESOURCES + "busmap_elec_s{simpl}.csv",
-        connection_costs=RESOURCES + "connection_costs_s{simpl}.csv",
+        network=resources("networks/elec_s{simpl}.nc"),
+        regions_onshore=resources("regions_onshore_elec_s{simpl}.geojson"),
+        regions_offshore=resources("regions_offshore_elec_s{simpl}.geojson"),
+        busmap=resources("busmap_elec_s{simpl}.csv"),
+        connection_costs=resources("connection_costs_s{simpl}.csv"),
     log:
-        LOGS + "simplify_network/elec_s{simpl}.log",
+        logs("simplify_network/elec_s{simpl}.log"),
     benchmark:
-        BENCHMARKS + "simplify_network/elec_s{simpl}"
+        benchmarks("simplify_network/elec_s{simpl}")
     threads: 1
     resources:
         mem_mb=12000,

@@ -473,38 +490,42 @@ rule simplify_network:
 rule cluster_network:
     params:
-        cluster_network=config["clustering"]["cluster_network"],
-        aggregation_strategies=config["clustering"].get("aggregation_strategies", {}),
-        custom_busmap=config["enable"].get("custom_busmap", False),
-        focus_weights=config["clustering"].get(
-            "focus_weights", config.get("focus_weights")
-        ),
-        renewable_carriers=config["electricity"]["renewable_carriers"],
-        conventional_carriers=config["electricity"].get("conventional_carriers", []),
-        max_hours=config["electricity"]["max_hours"],
-        length_factor=config["lines"]["length_factor"],
-        costs=config["costs"],
+        cluster_network=config_provider("clustering", "cluster_network"),
+        aggregation_strategies=config_provider(
+            "clustering", "aggregation_strategies", default={}
+        ),
+        custom_busmap=config_provider("enable", "custom_busmap", default=False),
+        focus_weights=config_provider("clustering", "focus_weights", default=None),
+        renewable_carriers=config_provider("electricity", "renewable_carriers"),
+        conventional_carriers=config_provider(
+            "electricity", "conventional_carriers", default=[]
+        ),
+        max_hours=config_provider("electricity", "max_hours"),
+        length_factor=config_provider("lines", "length_factor"),
+        costs=config_provider("costs"),
     input:
-        network=RESOURCES + "networks/elec_s{simpl}.nc",
-        regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}.geojson",
-        regions_offshore=RESOURCES + "regions_offshore_elec_s{simpl}.geojson",
-        busmap=ancient(RESOURCES + "busmap_elec_s{simpl}.csv"),
-        custom_busmap=(
+        network=resources("networks/elec_s{simpl}.nc"),
+        regions_onshore=resources("regions_onshore_elec_s{simpl}.geojson"),
+        regions_offshore=resources("regions_offshore_elec_s{simpl}.geojson"),
+        busmap=ancient(resources("busmap_elec_s{simpl}.csv")),
+        custom_busmap=lambda w: (
             "data/custom_busmap_elec_s{simpl}_{clusters}.csv"
-            if config["enable"].get("custom_busmap", False)
+            if config_provider("enable", "custom_busmap", default=False)(w)
             else []
         ),
-        tech_costs=COSTS,
+        tech_costs=lambda w: resources(
+            f"costs_{config_provider('costs', 'year')(w)}.csv"
+        ),
     output:
-        network=RESOURCES + "networks/elec_s{simpl}_{clusters}.nc",
-        regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
-        regions_offshore=RESOURCES + "regions_offshore_elec_s{simpl}_{clusters}.geojson",
-        busmap=RESOURCES + "busmap_elec_s{simpl}_{clusters}.csv",
-        linemap=RESOURCES + "linemap_elec_s{simpl}_{clusters}.csv",
+        network=resources("networks/elec_s{simpl}_{clusters}.nc"),
+        regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"),
+        regions_offshore=resources("regions_offshore_elec_s{simpl}_{clusters}.geojson"),
+        busmap=resources("busmap_elec_s{simpl}_{clusters}.csv"),
+        linemap=resources("linemap_elec_s{simpl}_{clusters}.csv"),
     log:
-        LOGS + "cluster_network/elec_s{simpl}_{clusters}.log",
+        logs("cluster_network/elec_s{simpl}_{clusters}.log"),
     benchmark:
-        BENCHMARKS + "cluster_network/elec_s{simpl}_{clusters}"
+        benchmarks("cluster_network/elec_s{simpl}_{clusters}")
     threads: 1
     resources:
         mem_mb=10000,

@@ -516,18 +537,20 @@ rule cluster_network:
 rule add_extra_components:
     params:
-        extendable_carriers=config["electricity"]["extendable_carriers"],
-        max_hours=config["electricity"]["max_hours"],
-        costs=config["costs"],
+        extendable_carriers=config_provider("electricity", "extendable_carriers"),
+        max_hours=config_provider("electricity", "max_hours"),
+        costs=config_provider("costs"),
     input:
-        network=RESOURCES + "networks/elec_s{simpl}_{clusters}.nc",
-        tech_costs=COSTS,
+        network=resources("networks/elec_s{simpl}_{clusters}.nc"),
+        tech_costs=lambda w: resources(
+            f"costs_{config_provider('costs', 'year')(w)}.csv"
+        ),
     output:
-        RESOURCES + "networks/elec_s{simpl}_{clusters}_ec.nc",
+        resources("networks/elec_s{simpl}_{clusters}_ec.nc"),
     log:
-        LOGS + "add_extra_components/elec_s{simpl}_{clusters}.log",
+        logs("add_extra_components/elec_s{simpl}_{clusters}.log"),
     benchmark:
-        BENCHMARKS + "add_extra_components/elec_s{simpl}_{clusters}_ec"
+        benchmarks("add_extra_components/elec_s{simpl}_{clusters}_ec")
     threads: 1
     resources:
         mem_mb=4000,

@@ -539,30 +562,30 @@ rule add_extra_components:
 rule prepare_network:
     params:
-        snapshots={
-            "resolution": config["snapshots"].get("resolution", False),
+        time_resolution=config_provider("clustering", "temporal", "resolution_elec"),
+        links=config_provider("links"),
"segmentation": config["snapshots"].get("segmentation", False), lines=config_provider("lines"),
}, co2base=config_provider("electricity", "co2base"),
links=config["links"], co2limit_enable=config_provider("electricity", "co2limit_enable", default=False),
lines=config["lines"], co2limit=config_provider("electricity", "co2limit"),
co2base=config["electricity"]["co2base"], gaslimit_enable=config_provider("electricity", "gaslimit_enable", default=False),
co2limit_enable=config["electricity"].get("co2limit_enable", False), gaslimit=config_provider("electricity", "gaslimit"),
co2limit=config["electricity"]["co2limit"], max_hours=config_provider("electricity", "max_hours"),
gaslimit_enable=config["electricity"].get("gaslimit_enable", False), costs=config_provider("costs"),
gaslimit=config["electricity"].get("gaslimit"), adjustments=config_provider("adjustments", "electricity"),
max_hours=config["electricity"]["max_hours"], autarky=config_provider("electricity", "autarky", default={}),
costs=config["costs"],
autarky=config["electricity"].get("autarky", {}),
input: input:
RESOURCES + "networks/elec_s{simpl}_{clusters}_ec.nc", resources("networks/elec_s{simpl}_{clusters}_ec.nc"),
tech_costs=COSTS, tech_costs=lambda w: resources(
co2_price=lambda w: RESOURCES + "co2_price.csv" if "Ept" in w.opts else [], f"costs_{config_provider('costs', 'year')(w)}.csv"
),
co2_price=lambda w: resources("co2_price.csv") if "Ept" in w.opts else [],
output: output:
RESOURCES + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", resources("networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"),
log: log:
LOGS + "prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.log", logs("prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.log"),
benchmark: benchmark:
(BENCHMARKS + "prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}") (benchmarks("prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}"))
threads: 1 threads: 1
resources: resources:
mem_mb=4000, mem_mb=4000,

File diff suppressed because it is too large Load Diff


@ -15,21 +15,28 @@ localrules:
rule cluster_networks: rule cluster_networks:
input: input:
expand(RESOURCES + "networks/elec_s{simpl}_{clusters}.nc", **config["scenario"]), expand(
resources("networks/elec_s{simpl}_{clusters}.nc"),
**config["scenario"],
run=config["run"]["name"],
),
rule extra_components_networks: rule extra_components_networks:
input: input:
expand( expand(
RESOURCES + "networks/elec_s{simpl}_{clusters}_ec.nc", **config["scenario"] resources("networks/elec_s{simpl}_{clusters}_ec.nc"),
**config["scenario"],
run=config["run"]["name"],
), ),
rule prepare_elec_networks: rule prepare_elec_networks:
input: input:
expand( expand(
RESOURCES + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", resources("networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"),
**config["scenario"], **config["scenario"],
run=config["run"]["name"],
), ),
@ -39,6 +46,7 @@ rule prepare_sector_networks:
RESULTS RESULTS
+ "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
**config["scenario"], **config["scenario"],
run=config["run"]["name"],
), ),
@ -47,6 +55,7 @@ rule solve_elec_networks:
expand( expand(
RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
**config["scenario"], **config["scenario"],
run=config["run"]["name"],
), ),
@ -56,6 +65,7 @@ rule solve_sector_networks:
RESULTS RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
**config["scenario"], **config["scenario"],
run=config["run"]["name"],
), ),
@ -63,8 +73,9 @@ rule solve_sector_networks_perfect:
input: input:
expand( expand(
RESULTS RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc", + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf",
**config["scenario"], **config["scenario"],
run=config["run"]["name"],
), ),
@ -74,10 +85,12 @@ rule validate_elec_networks:
RESULTS RESULTS
+ "figures/.statistics_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}", + "figures/.statistics_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}",
**config["scenario"], **config["scenario"],
run=config["run"]["name"],
), ),
expand( expand(
RESULTS RESULTS
+ "figures/.validation_{kind}_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}", + "figures/.validation_{kind}_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}",
**config["scenario"], **config["scenario"],
run=config["run"]["name"],
kind=["production", "prices", "cross_border"], kind=["production", "prices", "cross_border"],
), ),


@ -2,17 +2,85 @@
# #
# SPDX-License-Identifier: MIT # SPDX-License-Identifier: MIT
import copy
from functools import partial, lru_cache
import os, sys, glob import os, sys, glob
path = workflow.source_path("../scripts/_helpers.py") path = workflow.source_path("../scripts/_helpers.py")
sys.path.insert(0, os.path.dirname(path)) sys.path.insert(0, os.path.dirname(path))
from _helpers import validate_checksum from _helpers import validate_checksum, update_config_from_wildcards
from snakemake.utils import update_config
def get_config(config, keys, default=None):
"""Retrieve a nested value from a dictionary using a tuple of keys."""
value = config
for key in keys:
if isinstance(value, list):
value = value[key]
else:
value = value.get(key, default)
if value == default:
return default
return value
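For illustration, a minimal sketch of how get_config resolves a tuple of keys; the config dictionary here is invented for the example:

example = {
"electricity": {"max_hours": {"battery": 6}},
"scenario": {"planning_horizons": [2030, 2040, 2050]},
}
assert get_config(example, ("electricity", "max_hours", "battery")) == 6
# integer keys index into lists, as in config_provider("scenario", "planning_horizons", 0)
assert get_config(example, ("scenario", "planning_horizons", 0)) == 2030
# missing keys fall back to the default
assert get_config(example, ("electricity", "co2limit")) is None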
def merge_configs(base_config, scenario_config):
"""Merge base config with a specific scenario without modifying the original."""
merged = copy.deepcopy(base_config)
update_config(merged, scenario_config)
return merged
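A quick sketch with toy values, showing that the merge is recursive and leaves the base config untouched:

base = {"solving": {"mem": 30000, "walltime": "12:00:00"}}
merged = merge_configs(base, {"solving": {"mem": 60000}})
assert merged["solving"] == {"mem": 60000, "walltime": "12:00:00"}
assert base["solving"]["mem"] == 30000  # the deepcopy protects the original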
@lru_cache
def scenario_config(scenario_name):
"""Retrieve a scenario config based on the overrides from the scenario file."""
return merge_configs(config, scenarios[scenario_name])
def static_getter(wildcards, keys, default):
"""Getter function for static config values."""
config_with_wildcards = update_config_from_wildcards(
config, wildcards, inplace=False
)
return get_config(config_with_wildcards, keys, default)
def dynamic_getter(wildcards, keys, default):
"""Getter function for dynamic config values based on scenario."""
if "run" not in wildcards.keys():
return get_config(config, keys, default)
scenario_name = wildcards.run
if scenario_name not in scenarios:
raise ValueError(
f"Scenario {scenario_name} not found in file {config['run']['scenario']['file']}."
)
config_with_scenario = scenario_config(scenario_name)
config_with_wildcards = update_config_from_wildcards(
config_with_scenario, wildcards, inplace=False
)
return get_config(config_with_wildcards, keys, default)
def config_provider(*keys, default=None):
"""Dynamically provide config values based on 'run' -> 'name'.
Usage in Snakemake rules would look something like:
params:
my_param=config_provider("key1", "key2", default="some_default_value")
"""
# Using functools.partial to freeze certain arguments in our getter functions.
if config["run"].get("scenarios", {}).get("enable", False):
return partial(dynamic_getter, keys=keys, default=default)
else:
return partial(static_getter, keys=keys, default=default)
def solver_threads(w): def solver_threads(w):
solver_options = config["solving"]["solver_options"] solver_options = config_provider("solving", "solver_options")(w)
option_set = config["solving"]["solver"]["options"] option_set = config_provider("solving", "solver", "options")(w)
threads = solver_options[option_set].get("threads", 4) threads = solver_options[option_set].get("threads", 4)
return threads return threads
@ -38,7 +106,9 @@ def memory(w):
def input_custom_extra_functionality(w): def input_custom_extra_functionality(w):
path = config["solving"]["options"].get("custom_extra_functionality", False) path = config_provider(
"solving", "options", "custom_extra_functionality", default=False
)(w)
if path: if path:
return os.path.join(os.path.dirname(workflow.snakefile), path) return os.path.join(os.path.dirname(workflow.snakefile), path)
return [] return []
@ -62,14 +132,15 @@ def has_internet_access(url="www.zenodo.org") -> bool:
def input_eurostat(w): def input_eurostat(w):
# 2016 includes BA, 2017 does not # 2016 includes BA, 2017 does not
report_year = config["energy"]["eurostat_report_year"] report_year = config_provider("energy", "eurostat_report_year")(w)
return f"data/bundle-sector/eurostat-energy_balances-june_{report_year}_edition" return f"data/bundle-sector/eurostat-energy_balances-june_{report_year}_edition"
def solved_previous_horizon(wildcards): def solved_previous_horizon(w):
planning_horizons = config["scenario"]["planning_horizons"] planning_horizons = config_provider("scenario", "planning_horizons")(w)
i = planning_horizons.index(int(wildcards.planning_horizons)) i = planning_horizons.index(int(w.planning_horizons))
planning_horizon_p = str(planning_horizons[i - 1]) planning_horizon_p = str(planning_horizons[i - 1])
return ( return (
RESULTS RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_" + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_"


@ -11,18 +11,19 @@ if config["foresight"] != "perfect":
rule plot_power_network_clustered: rule plot_power_network_clustered:
params: params:
plotting=config["plotting"], plotting=config_provider("plotting"),
input: input:
network=RESOURCES + "networks/elec_s{simpl}_{clusters}.nc", network=resources("networks/elec_s{simpl}_{clusters}.nc"),
regions_onshore=RESOURCES regions_onshore=resources(
+ "regions_onshore_elec_s{simpl}_{clusters}.geojson", "regions_onshore_elec_s{simpl}_{clusters}.geojson"
),
output: output:
map=RESULTS + "maps/power-network-s{simpl}-{clusters}.pdf", map=resources("maps/power-network-s{simpl}-{clusters}.pdf"),
threads: 1 threads: 1
resources: resources:
mem_mb=4000, mem_mb=4000,
benchmark: benchmark:
BENCHMARKS + "plot_power_network_clustered/elec_s{simpl}_{clusters}" benchmarks("plot_power_network_clustered/elec_s{simpl}_{clusters}")
conda: conda:
"../envs/environment.yaml" "../envs/environment.yaml"
script: script:
@ -30,11 +31,11 @@ if config["foresight"] != "perfect":
rule plot_power_network: rule plot_power_network:
params: params:
plotting=config["plotting"], plotting=config_provider("plotting"),
input: input:
network=RESULTS network=RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
regions=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", regions=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"),
output: output:
map=RESULTS map=RESULTS
+ "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf", + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf",
@ -42,14 +43,12 @@ if config["foresight"] != "perfect":
resources: resources:
mem_mb=10000, mem_mb=10000,
log: log:
( RESULTS
LOGS + "logs/plot_power_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
+ "plot_power_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log"
),
benchmark: benchmark:
( (
BENCHMARKS RESULTS
+ "plot_power_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" + "benchmarksplot_power_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
) )
conda: conda:
"../envs/environment.yaml" "../envs/environment.yaml"
@ -58,12 +57,12 @@ if config["foresight"] != "perfect":
rule plot_hydrogen_network: rule plot_hydrogen_network:
params: params:
plotting=config["plotting"], plotting=config_provider("plotting"),
foresight=config["foresight"], foresight=config_provider("foresight"),
input: input:
network=RESULTS network=RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
regions=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", regions=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"),
output: output:
map=RESULTS map=RESULTS
+ "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-h2_network_{planning_horizons}.pdf", + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-h2_network_{planning_horizons}.pdf",
@ -71,14 +70,12 @@ if config["foresight"] != "perfect":
resources: resources:
mem_mb=10000, mem_mb=10000,
log: log:
( RESULTS
LOGS + "logs/plot_hydrogen_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
+ "plot_hydrogen_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log"
),
benchmark: benchmark:
( (
BENCHMARKS RESULTS
+ "plot_hydrogen_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" + "benchmarks/plot_hydrogen_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
) )
conda: conda:
"../envs/environment.yaml" "../envs/environment.yaml"
@ -87,11 +84,11 @@ if config["foresight"] != "perfect":
rule plot_gas_network: rule plot_gas_network:
params: params:
plotting=config["plotting"], plotting=config_provider("plotting"),
input: input:
network=RESULTS network=RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
regions=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", regions=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"),
output: output:
map=RESULTS map=RESULTS
+ "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-ch4_network_{planning_horizons}.pdf", + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-ch4_network_{planning_horizons}.pdf",
@ -99,14 +96,12 @@ if config["foresight"] != "perfect":
resources: resources:
mem_mb=10000, mem_mb=10000,
log: log:
( RESULTS
LOGS + "logs/plot_gas_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
+ "plot_gas_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log"
),
benchmark: benchmark:
( (
BENCHMARKS RESULTS
+ "plot_gas_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" + "benchmarks/plot_gas_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
) )
conda: conda:
"../envs/environment.yaml" "../envs/environment.yaml"
@ -116,26 +111,26 @@ if config["foresight"] != "perfect":
if config["foresight"] == "perfect": if config["foresight"] == "perfect":
rule plot_power_network_perfect: def output_map_year(w):
params: return {
plotting=config["plotting"],
input:
network=RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
regions=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
output:
**{
f"map_{year}": RESULTS f"map_{year}": RESULTS
+ "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_" + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_"
+ f"{year}.pdf" + f"{year}.pdf"
for year in config["scenario"]["planning_horizons"] for year in config_provider("scenario", "planning_horizons")(w)
}, }
rule plot_power_network_perfect:
params:
plotting=config_provider("plotting"),
input:
network=RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
regions=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"),
output:
unpack(output_map_year),
threads: 2 threads: 2
resources: resources:
mem_mb=10000, mem_mb=10000,
benchmark:
BENCHMARKS
+"postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_benchmark"
conda: conda:
"../envs/environment.yaml" "../envs/environment.yaml"
script: script:
@ -150,8 +145,6 @@ rule copy_config:
threads: 1 threads: 1
resources: resources:
mem_mb=1000, mem_mb=1000,
benchmark:
BENCHMARKS + "copy_config"
conda: conda:
"../envs/environment.yaml" "../envs/environment.yaml"
script: script:
@ -160,52 +153,57 @@ rule copy_config:
rule make_summary: rule make_summary:
params: params:
foresight=config["foresight"], foresight=config_provider("foresight"),
costs=config["costs"], costs=config_provider("costs"),
snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, snapshots=config_provider("snapshots"),
scenario=config["scenario"], scenario=config_provider("scenario"),
RDIR=RDIR, RDIR=RDIR,
input: input:
expand(
RESULTS + "maps/power-network-s{simpl}-{clusters}.pdf",
**config["scenario"],
),
networks=expand( networks=expand(
RESULTS RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
**config["scenario"], **config["scenario"],
allow_missing=True,
), ),
costs=( costs=lambda w: (
"data/costs_{}.csv".format(config["costs"]["year"]) resources("costs_{}.csv".format(config_provider("costs", "year")(w)))
if config["foresight"] == "overnight" if config_provider("foresight")(w) == "overnight"
else "data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0]) else resources(
"costs_{}.csv".format(
config_provider("scenario", "planning_horizons", 0)(w)
)
)
), ),
ac_plot=expand( ac_plot=expand(
RESULTS + "maps/power-network-s{simpl}-{clusters}.pdf", resources("maps/power-network-s{simpl}-{clusters}.pdf"),
**config["scenario"], **config["scenario"],
allow_missing=True,
), ),
costs_plot=expand( costs_plot=expand(
RESULTS RESULTS
+ "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf", + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf",
**config["scenario"], **config["scenario"],
allow_missing=True,
), ),
h2_plot=expand( h2_plot=lambda w: expand(
( (
RESULTS RESULTS
+ "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-h2_network_{planning_horizons}.pdf" + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-h2_network_{planning_horizons}.pdf"
if config["sector"]["H2_network"] if config_provider("sector", "H2_network")(w)
else [] else []
), ),
**config["scenario"], **config["scenario"],
allow_missing=True,
), ),
ch4_plot=expand( ch4_plot=lambda w: expand(
( (
RESULTS RESULTS
+ "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-ch4_network_{planning_horizons}.pdf" + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-ch4_network_{planning_horizons}.pdf"
if config["sector"]["gas_network"] if config_provider("sector", "gas_network")(w)
else [] else []
), ),
**config["scenario"], **config["scenario"],
allow_missing=True,
), ),
output: output:
nodal_costs=RESULTS + "csvs/nodal_costs.csv", nodal_costs=RESULTS + "csvs/nodal_costs.csv",
@ -227,9 +225,7 @@ rule make_summary:
resources: resources:
mem_mb=10000, mem_mb=10000,
log: log:
LOGS + "make_summary.log", RESULTS + "logs/make_summary.log",
benchmark:
BENCHMARKS + "make_summary"
conda: conda:
"../envs/environment.yaml" "../envs/environment.yaml"
script: script:
@ -238,12 +234,14 @@ rule make_summary:
rule plot_summary: rule plot_summary:
params: params:
countries=config["countries"], countries=config_provider("countries"),
planning_horizons=config["scenario"]["planning_horizons"], planning_horizons=config_provider("scenario", "planning_horizons"),
sector_opts=config["scenario"]["sector_opts"], emissions_scope=config_provider("energy", "emissions"),
emissions_scope=config["energy"]["emissions"], eurostat_report_year=config_provider("energy", "eurostat_report_year"),
eurostat_report_year=config["energy"]["eurostat_report_year"], plotting=config_provider("plotting"),
plotting=config["plotting"], foresight=config_provider("foresight"),
co2_budget=config_provider("co2_budget"),
sector=config_provider("sector"),
RDIR=RDIR, RDIR=RDIR,
input: input:
costs=RESULTS + "csvs/costs.csv", costs=RESULTS + "csvs/costs.csv",
@ -259,9 +257,7 @@ rule plot_summary:
resources: resources:
mem_mb=10000, mem_mb=10000,
log: log:
LOGS + "plot_summary.log", RESULTS + "logs/plot_summary.log",
benchmark:
BENCHMARKS + "plot_summary"
conda: conda:
"../envs/environment.yaml" "../envs/environment.yaml"
script: script:
@ -283,7 +279,7 @@ STATISTICS_BARPLOTS = [
rule plot_elec_statistics: rule plot_elec_statistics:
params: params:
plotting=config["plotting"], plotting=config_provider("plotting"),
barplots=STATISTICS_BARPLOTS, barplots=STATISTICS_BARPLOTS,
input: input:
network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",


@ -32,7 +32,7 @@ if config["enable"]["retrieve"] and config["enable"].get("retrieve_databundle",
output: output:
protected(expand("data/bundle/{file}", file=datafiles)), protected(expand("data/bundle/{file}", file=datafiles)),
log: log:
LOGS + "retrieve_databundle.log", "logs/retrieve_databundle.log",
resources: resources:
mem_mb=1000, mem_mb=1000,
retries: 2 retries: 2
@ -50,7 +50,7 @@ if config["enable"].get("retrieve_irena"):
onwind="data/existing_infrastructure/onwind_capacity_IRENA.csv", onwind="data/existing_infrastructure/onwind_capacity_IRENA.csv",
solar="data/existing_infrastructure/solar_capacity_IRENA.csv", solar="data/existing_infrastructure/solar_capacity_IRENA.csv",
log: log:
LOGS + "retrieve_irena.log", logs("retrieve_irena.log"),
resources: resources:
mem_mb=1000, mem_mb=1000,
retries: 2 retries: 2
@ -83,23 +83,19 @@ if config["enable"]["retrieve"] and config["enable"].get("retrieve_cutout", True
if config["enable"]["retrieve"] and config["enable"].get("retrieve_cost_data", True): if config["enable"]["retrieve"] and config["enable"].get("retrieve_cost_data", True):
rule retrieve_cost_data: rule retrieve_cost_data:
input: params:
HTTP.remote( version=config_provider("costs", "version"),
"raw.githubusercontent.com/PyPSA/technology-data/{}/outputs/".format(
config["costs"]["version"]
)
+ "costs_{year}.csv",
keep_local=True,
),
output: output:
"data/costs_{year}.csv", resources("costs_{year}.csv"),
log: log:
LOGS + "retrieve_cost_data_{year}.log", logs("retrieve_cost_data_{year}.log"),
resources: resources:
mem_mb=1000, mem_mb=1000,
retries: 2 retries: 2
run: conda:
move(input[0], output[0]) "../envs/retrieve.yaml"
script:
"../scripts/retrieve_cost_data.py"
if config["enable"]["retrieve"] and config["enable"].get( if config["enable"]["retrieve"] and config["enable"].get(
@ -114,9 +110,9 @@ if config["enable"]["retrieve"] and config["enable"].get(
static=True, static=True,
), ),
output: output:
RESOURCES + "natura.tiff", resources("natura.tiff"),
log: log:
LOGS + "retrieve_natura_raster.log", logs("retrieve_natura_raster.log"),
resources: resources:
mem_mb=5000, mem_mb=5000,
retries: 2 retries: 2
@ -154,7 +150,7 @@ if config["enable"]["retrieve"] and config["enable"].get(
protected(expand("data/bundle-sector/{files}", files=datafiles)), protected(expand("data/bundle-sector/{files}", files=datafiles)),
*datafolders, *datafolders,
log: log:
LOGS + "retrieve_sector_databundle.log", "logs/retrieve_sector_databundle.log",
retries: 2 retries: 2
conda: conda:
"../envs/retrieve.yaml" "../envs/retrieve.yaml"
@ -173,11 +169,9 @@ if config["enable"]["retrieve"]:
rule retrieve_gas_infrastructure_data: rule retrieve_gas_infrastructure_data:
output: output:
protected( expand("data/gas_network/scigrid-gas/data/{files}", files=datafiles),
expand("data/gas_network/scigrid-gas/data/{files}", files=datafiles)
),
log: log:
LOGS + "retrieve_gas_infrastructure_data.log", "logs/retrieve_gas_infrastructure_data.log",
retries: 2 retries: 2
conda: conda:
"../envs/retrieve.yaml" "../envs/retrieve.yaml"
@ -193,10 +187,12 @@ if config["enable"]["retrieve"]:
output: output:
"data/electricity_demand_raw.csv", "data/electricity_demand_raw.csv",
log: log:
LOGS + "retrieve_electricity_demand.log", "logs/retrieve_electricity_demand.log",
resources: resources:
mem_mb=5000, mem_mb=5000,
retries: 2 retries: 2
conda:
"../envs/retrieve.yaml"
script: script:
"../scripts/retrieve_electricity_demand.py" "../scripts/retrieve_electricity_demand.py"
@ -213,7 +209,7 @@ if config["enable"]["retrieve"]:
output: output:
protected("data/shipdensity_global.zip"), protected("data/shipdensity_global.zip"),
log: log:
LOGS + "retrieve_ship_raster.log", "logs/retrieve_ship_raster.log",
resources: resources:
mem_mb=5000, mem_mb=5000,
retries: 2 retries: 2
@ -349,7 +345,7 @@ if config["enable"]["retrieve"]:
output: output:
"data/validation/emission-spot-primary-market-auction-report-2019-data.xls", "data/validation/emission-spot-primary-market-auction-report-2019-data.xls",
log: log:
LOGS + "retrieve_monthly_co2_prices.log", "logs/retrieve_monthly_co2_prices.log",
resources: resources:
mem_mb=5000, mem_mb=5000,
retries: 2 retries: 2
@ -363,7 +359,7 @@ if config["enable"]["retrieve"]:
output: output:
"data/validation/energy-price-trends-xlsx-5619002.xlsx", "data/validation/energy-price-trends-xlsx-5619002.xlsx",
log: log:
LOGS + "retrieve_monthly_fuel_prices.log", "logs/retrieve_monthly_fuel_prices.log",
resources: resources:
mem_mb=5000, mem_mb=5000,
retries: 2 retries: 2


@ -5,30 +5,31 @@
rule solve_network: rule solve_network:
params: params:
solving=config["solving"], solving=config_provider("solving"),
foresight=config["foresight"], foresight=config_provider("foresight"),
planning_horizons=config["scenario"]["planning_horizons"], planning_horizons=config_provider("scenario", "planning_horizons"),
co2_sequestration_potential=config["sector"].get( co2_sequestration_potential=config_provider(
"co2_sequestration_potential", 200 "sector", "co2_sequestration_potential", default=200
), ),
custom_extra_functionality=input_custom_extra_functionality, custom_extra_functionality=input_custom_extra_functionality,
input: input:
network=RESOURCES + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", network=resources("networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"),
config=RESULTS + "config.yaml", config=RESULTS + "config.yaml",
output: output:
network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
log: log:
solver=normpath( solver=normpath(
LOGS + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_solver.log" RESULTS
+ "logs/solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_solver.log"
), ),
python=LOGS python=RESULTS
+ "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_python.log", + "logs/solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_python.log",
benchmark: benchmark:
BENCHMARKS + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}" RESULTS + "benchmarks/solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}"
threads: solver_threads threads: solver_threads
resources: resources:
mem_mb=memory, mem_mb=memory,
walltime=config["solving"].get("walltime", "12:00:00"), walltime=config_provider("solving", "walltime", default="12:00:00"),
shadow: shadow:
"minimal" "minimal"
conda: conda:
@ -39,27 +40,27 @@ rule solve_network:
rule solve_operations_network: rule solve_operations_network:
params: params:
options=config["solving"]["options"], options=config_provider("solving", "options"),
input: input:
network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
output: output:
network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op.nc", network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op.nc",
log: log:
solver=normpath( solver=normpath(
LOGS RESULTS
+ "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_solver.log" + "logs/solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_solver.log"
), ),
python=LOGS python=RESULTS
+ "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_python.log", + "logs/solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_python.log",
benchmark: benchmark:
( (
BENCHMARKS RESULTS
+ "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}" + "benchmarks/solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}"
) )
threads: 4 threads: 4
resources: resources:
mem_mb=(lambda w: 10000 + 372 * int(w.clusters)), mem_mb=(lambda w: 10000 + 372 * int(w.clusters)),
walltime=config["solving"].get("walltime", "12:00:00"), walltime=config_provider("solving", "walltime", default="12:00:00"),
shadow: shadow:
"minimal" "minimal"
conda: conda:


@ -5,22 +5,27 @@
rule add_existing_baseyear: rule add_existing_baseyear:
params: params:
baseyear=config["scenario"]["planning_horizons"][0], baseyear=config_provider("scenario", "planning_horizons", 0),
sector=config["sector"], sector=config_provider("sector"),
existing_capacities=config["existing_capacities"], existing_capacities=config_provider("existing_capacities"),
costs=config["costs"], costs=config_provider("costs"),
input: input:
network=RESULTS network=RESULTS
+ "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
powerplants=RESOURCES + "powerplants.csv", powerplants=resources("powerplants.csv"),
busmap_s=RESOURCES + "busmap_elec_s{simpl}.csv", busmap_s=resources("busmap_elec_s{simpl}.csv"),
busmap=RESOURCES + "busmap_elec_s{simpl}_{clusters}.csv", busmap=resources("busmap_elec_s{simpl}_{clusters}.csv"),
clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv", clustered_pop_layout=resources("pop_layout_elec_s{simpl}_{clusters}.csv"),
costs="data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0]), costs=lambda w: resources(
cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc", "costs_{}.csv".format(
cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc", config_provider("scenario", "planning_horizons", 0)(w)
existing_heating_distribution=RESOURCES )
+ "existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv", ),
cop_soil_total=resources("cop_soil_total_elec_s{simpl}_{clusters}.nc"),
cop_air_total=resources("cop_air_total_elec_s{simpl}_{clusters}.nc"),
existing_heating_distribution=resources(
"existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv"
),
existing_solar="data/existing_infrastructure/solar_capacity_IRENA.csv", existing_solar="data/existing_infrastructure/solar_capacity_IRENA.csv",
existing_onwind="data/existing_infrastructure/onwind_capacity_IRENA.csv", existing_onwind="data/existing_infrastructure/onwind_capacity_IRENA.csv",
existing_offwind="data/existing_infrastructure/offwind_capacity_IRENA.csv", existing_offwind="data/existing_infrastructure/offwind_capacity_IRENA.csv",
@ -28,17 +33,20 @@ rule add_existing_baseyear:
RESULTS RESULTS
+ "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", + "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
wildcard_constraints: wildcard_constraints:
# TODO: The first planning_horizon needs to be aligned across scenarios
# snakemake does not support passing functions to wildcard_constraints
# reference: https://github.com/snakemake/snakemake/issues/2703
planning_horizons=config["scenario"]["planning_horizons"][0], #only applies to baseyear planning_horizons=config["scenario"]["planning_horizons"][0], #only applies to baseyear
threads: 1 threads: 1
resources: resources:
mem_mb=2000, mem_mb=2000,
log: log:
LOGS RESULTS
+ "add_existing_baseyear_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log", + "logs/add_existing_baseyear_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
benchmark: benchmark:
( (
BENCHMARKS RESULTS
+ "add_existing_baseyear/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" + "benchmarks/add_existing_baseyear/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
) )
conda: conda:
"../envs/environment.yaml" "../envs/environment.yaml"
@ -46,27 +54,33 @@ rule add_existing_baseyear:
"../scripts/add_existing_baseyear.py" "../scripts/add_existing_baseyear.py"
def input_profile_tech_brownfield(w):
return {
f"profile_{tech}": resources(f"profile_{tech}.nc")
for tech in config_provider("electricity", "renewable_carriers")(w)
if tech != "hydro"
}
rule add_brownfield: rule add_brownfield:
params: params:
H2_retrofit=config["sector"]["H2_retrofit"], H2_retrofit=config_provider("sector", "H2_retrofit"),
H2_retrofit_capacity_per_CH4=config["sector"]["H2_retrofit_capacity_per_CH4"], H2_retrofit_capacity_per_CH4=config_provider(
threshold_capacity=config["existing_capacities"]["threshold_capacity"], "sector", "H2_retrofit_capacity_per_CH4"
snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, ),
carriers=config["electricity"]["renewable_carriers"], threshold_capacity=config_provider("existing_capacities", " threshold_capacity"),
snapshots=config_provider("snapshots"),
carriers=config_provider("electricity", "renewable_carriers"),
input: input:
**{ unpack(input_profile_tech_brownfield),
f"profile_{tech}": RESOURCES + f"profile_{tech}.nc" simplify_busmap=resources("busmap_elec_s{simpl}.csv"),
for tech in config["electricity"]["renewable_carriers"] cluster_busmap=resources("busmap_elec_s{simpl}_{clusters}.csv"),
if tech != "hydro"
},
simplify_busmap=RESOURCES + "busmap_elec_s{simpl}.csv",
cluster_busmap=RESOURCES + "busmap_elec_s{simpl}_{clusters}.csv",
network=RESULTS network=RESULTS
+ "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
network_p=solved_previous_horizon, #solved network at previous time step network_p=solved_previous_horizon, #solved network at previous time step
costs="data/costs_{planning_horizons}.csv", costs=resources("costs_{planning_horizons}.csv"),
cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc", cop_soil_total=resources("cop_soil_total_elec_s{simpl}_{clusters}.nc"),
cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc", cop_air_total=resources("cop_air_total_elec_s{simpl}_{clusters}.nc"),
output: output:
RESULTS RESULTS
+ "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", + "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
@ -74,12 +88,12 @@ rule add_brownfield:
resources: resources:
mem_mb=10000, mem_mb=10000,
log: log:
LOGS RESULTS
+ "add_brownfield_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log", + "logs/add_brownfield_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
benchmark: benchmark:
( (
BENCHMARKS RESULTS
+ "add_brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" + "benchmarks/add_brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
) )
conda: conda:
"../envs/environment.yaml" "../envs/environment.yaml"
@ -92,17 +106,17 @@ ruleorder: add_existing_baseyear > add_brownfield
rule solve_sector_network_myopic: rule solve_sector_network_myopic:
params: params:
solving=config["solving"], solving=config_provider("solving"),
foresight=config["foresight"], foresight=config_provider("foresight"),
planning_horizons=config["scenario"]["planning_horizons"], planning_horizons=config_provider("scenario", "planning_horizons"),
co2_sequestration_potential=config["sector"].get( co2_sequestration_potential=config_provider(
"co2_sequestration_potential", 200 "sector", "co2_sequestration_potential", default=200
), ),
custom_extra_functionality=input_custom_extra_functionality, custom_extra_functionality=input_custom_extra_functionality,
input: input:
network=RESULTS network=RESULTS
+ "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", + "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
costs="data/costs_{planning_horizons}.csv", costs=resources("costs_{planning_horizons}.csv"),
config=RESULTS + "config.yaml", config=RESULTS + "config.yaml",
output: output:
RESULTS RESULTS
@ -110,18 +124,18 @@ rule solve_sector_network_myopic:
shadow: shadow:
"shallow" "shallow"
log: log:
solver=LOGS solver=RESULTS
+ "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_solver.log", + "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_solver.log",
python=LOGS python=RESULTS
+ "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log", + "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log",
threads: solver_threads threads: solver_threads
resources: resources:
mem_mb=config["solving"]["mem"], mem_mb=config_provider("solving", "mem"),
walltime=config["solving"].get("walltime", "12:00:00"), walltime=config_provider("solving", "walltime", default="12:00:00"),
benchmark: benchmark:
( (
BENCHMARKS RESULTS
+ "solve_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" + "benchmarks/solve_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
) )
conda: conda:
"../envs/environment.yaml" "../envs/environment.yaml"


@ -5,11 +5,11 @@
rule solve_sector_network: rule solve_sector_network:
params: params:
solving=config["solving"], solving=config_provider("solving"),
foresight=config["foresight"], foresight=config_provider("foresight"),
planning_horizons=config["scenario"]["planning_horizons"], planning_horizons=config_provider("scenario", "planning_horizons"),
co2_sequestration_potential=config["sector"].get( co2_sequestration_potential=config_provider(
"co2_sequestration_potential", 200 "sector", "co2_sequestration_potential", default=200
), ),
custom_extra_functionality=input_custom_extra_functionality, custom_extra_functionality=input_custom_extra_functionality,
input: input:
@ -30,13 +30,12 @@ rule solve_sector_network:
+ "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log", + "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log",
threads: solver_threads threads: solver_threads
resources: resources:
mem_mb=config["solving"]["mem"], mem_mb=config_provider("solving", "mem"),
walltime=config["solving"].get("walltime", "12:00:00"), walltime=config_provider("solving", "walltime", default="12:00:00"),
benchmark: benchmark:
( (
RESULTS RESULTS
+ BENCHMARKS + "benchmarks/solve_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
+ "solve_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
) )
conda: conda:
"../envs/environment.yaml" "../envs/environment.yaml"


@ -3,22 +3,27 @@
# SPDX-License-Identifier: MIT # SPDX-License-Identifier: MIT
rule add_existing_baseyear: rule add_existing_baseyear:
params: params:
baseyear=config["scenario"]["planning_horizons"][0], baseyear=config_provider("scenario", "planning_horizons", 0),
sector=config["sector"], sector=config_provider("sector"),
existing_capacities=config["existing_capacities"], existing_capacities=config_provider("existing_capacities"),
costs=config["costs"], costs=config_provider("costs"),
input: input:
network=RESULTS network=RESULTS
+ "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
powerplants=RESOURCES + "powerplants.csv", powerplants=resources("powerplants.csv"),
busmap_s=RESOURCES + "busmap_elec_s{simpl}.csv", busmap_s=resources("busmap_elec_s{simpl}.csv"),
busmap=RESOURCES + "busmap_elec_s{simpl}_{clusters}.csv", busmap=resources("busmap_elec_s{simpl}_{clusters}.csv"),
clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv", clustered_pop_layout=resources("pop_layout_elec_s{simpl}_{clusters}.csv"),
costs="data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0]), costs=lambda w: resources(
cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc", "costs_{}.csv".format(
cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc", config_provider("scenario", "planning_horizons", 0)(w)
existing_heating_distribution=RESOURCES )
+ "existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv", ),
cop_soil_total=resources("cop_soil_total_elec_s{simpl}_{clusters}.nc"),
cop_air_total=resources("cop_air_total_elec_s{simpl}_{clusters}.nc"),
existing_heating_distribution=resources(
"existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv"
),
existing_heating="data/existing_infrastructure/existing_heating_raw.csv", existing_heating="data/existing_infrastructure/existing_heating_raw.csv",
existing_solar="data/existing_infrastructure/solar_capacity_IRENA.csv", existing_solar="data/existing_infrastructure/solar_capacity_IRENA.csv",
existing_onwind="data/existing_infrastructure/onwind_capacity_IRENA.csv", existing_onwind="data/existing_infrastructure/onwind_capacity_IRENA.csv",
@ -32,12 +37,12 @@ rule add_existing_baseyear:
resources: resources:
mem_mb=2000, mem_mb=2000,
log: log:
LOGS logs(
+ "add_existing_baseyear_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log", "add_existing_baseyear_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log"
),
benchmark: benchmark:
( benchmarks(
BENCHMARKS "add_existing_baseyear/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
+ "add_existing_baseyear/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
) )
conda: conda:
"../envs/environment.yaml" "../envs/environment.yaml"
@ -45,19 +50,28 @@ rule add_existing_baseyear:
"../scripts/add_existing_baseyear.py" "../scripts/add_existing_baseyear.py"
rule prepare_perfect_foresight: def input_network_year(w):
input: return {
**{
f"network_{year}": RESULTS f"network_{year}": RESULTS
+ "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_" + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}"
+ f"{year}.nc" + f"_{year}.nc"
for year in config["scenario"]["planning_horizons"][1:] for year in config_provider("scenario", "planning_horizons")(w)[1:]
}, }
rule prepare_perfect_foresight:
params:
costs=config_provider("costs"),
time_resolution=config_provider("clustering", "temporal", "sector"),
input:
unpack(input_network_year),
brownfield_network=lambda w: ( brownfield_network=lambda w: (
RESULTS RESULTS
+ "prenetworks-brownfield/" + "prenetworks-brownfield/"
+ "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_" + "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_"
+ "{}.nc".format(str(config["scenario"]["planning_horizons"][0])) + "{}.nc".format(
str(config_provider("scenario", "planning_horizons", 0)(w))
)
), ),
output: output:
RESULTS RESULTS
@ -66,12 +80,12 @@ rule prepare_perfect_foresight:
resources: resources:
mem_mb=10000, mem_mb=10000,
log: log:
LOGS logs(
+ "prepare_perfect_foresight{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}.log", "prepare_perfect_foresight{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}.log"
),
benchmark: benchmark:
( benchmarks(
BENCHMARKS "prepare_perfect_foresight{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}"
+ "prepare_perfect_foresight{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}"
) )
conda: conda:
"../envs/environment.yaml" "../envs/environment.yaml"
@ -81,25 +95,25 @@ rule prepare_perfect_foresight:
rule solve_sector_network_perfect: rule solve_sector_network_perfect:
params: params:
solving=config["solving"], solving=config_provider("solving"),
foresight=config["foresight"], foresight=config_provider("foresight"),
sector=config["sector"], sector=config_provider("sector"),
planning_horizons=config["scenario"]["planning_horizons"], planning_horizons=config_provider("scenario", "planning_horizons"),
co2_sequestration_potential=config["sector"].get( co2_sequestration_potential=config_provider(
"co2_sequestration_potential", 200 "sector", "co2_sequestration_potential", default=200
), ),
custom_extra_functionality=input_custom_extra_functionality, custom_extra_functionality=input_custom_extra_functionality,
input: input:
network=RESULTS network=RESULTS
+ "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc", + "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
costs="data/costs_2030.csv", costs=resources("costs_2030.csv"),
config=RESULTS + "config.yaml", config=RESULTS + "config.yaml",
output: output:
RESULTS RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc", + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
threads: solver_threads threads: solver_threads
resources: resources:
mem_mb=config["solving"]["mem"], mem_mb=config_provider("solving", "mem"),
shadow: shadow:
"shallow" "shallow"
log: log:
@ -111,8 +125,8 @@ rule solve_sector_network_perfect:
+ "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_memory.log", + "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_memory.log",
benchmark: benchmark:
( (
BENCHMARKS RESULTS
+ "solve_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years}" + "benchmarks/solve_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years}"
) )
conda: conda:
"../envs/environment.yaml" "../envs/environment.yaml"
@ -120,18 +134,22 @@ rule solve_sector_network_perfect:
"../scripts/solve_network.py" "../scripts/solve_network.py"
rule make_summary_perfect: def input_networks_make_summary_perfect(w):
input: return {
**{
f"networks_{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}": RESULTS f"networks_{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}": RESULTS
+ f"postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc" + f"postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc"
for simpl in config["scenario"]["simpl"] for simpl in config_provider("scenario", "simpl")(w)
for clusters in config["scenario"]["clusters"] for clusters in config_provider("scenario", "clusters")(w)
for opts in config["scenario"]["opts"] for opts in config_provider("scenario", "opts")(w)
for sector_opts in config["scenario"]["sector_opts"] for sector_opts in config_provider("scenario", "sector_opts")(w)
for ll in config["scenario"]["ll"] for ll in config_provider("scenario", "ll")(w)
}, }
costs="data/costs_2020.csv",
rule make_summary_perfect:
input:
unpack(input_networks_make_summary_perfect),
costs=resources("costs_2020.csv"),
output: output:
nodal_costs=RESULTS + "csvs/nodal_costs.csv", nodal_costs=RESULTS + "csvs/nodal_costs.csv",
nodal_capacities=RESULTS + "csvs/nodal_capacities.csv", nodal_capacities=RESULTS + "csvs/nodal_capacities.csv",
@ -153,9 +171,9 @@ rule make_summary_perfect:
resources: resources:
mem_mb=10000, mem_mb=10000,
log: log:
LOGS + "make_summary_perfect.log", logs("make_summary_perfect.log"),
benchmark: benchmark:
(BENCHMARKS + "make_summary_perfect") benchmarks("make_summary_perfect")
conda: conda:
"../envs/environment.yaml" "../envs/environment.yaml"
script: script:


@ -17,12 +17,12 @@ rule build_electricity_production:
The data is used for validation of the optimization results. The data is used for validation of the optimization results.
""" """
params: params:
snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, snapshots=config_provider("snapshots"),
countries=config["countries"], countries=config_provider("countries"),
output: output:
RESOURCES + "historical_electricity_production.csv", resources("historical_electricity_production.csv"),
log: log:
LOGS + "build_electricity_production.log", logs("build_electricity_production.log"),
resources: resources:
mem_mb=5000, mem_mb=5000,
script: script:
@ -35,14 +35,14 @@ rule build_cross_border_flows:
The data is used for validation of the optimization results. The data is used for validation of the optimization results.
""" """
params: params:
snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, snapshots=config_provider("snapshots"),
countries=config["countries"], countries=config_provider("countries"),
input: input:
network=RESOURCES + "networks/base.nc", network=resources("networks/base.nc"),
output: output:
RESOURCES + "historical_cross_border_flows.csv", resources("historical_cross_border_flows.csv"),
log: log:
LOGS + "build_cross_border_flows.log", logs("build_cross_border_flows.log"),
resources: resources:
mem_mb=5000, mem_mb=5000,
script: script:
@ -55,12 +55,12 @@ rule build_electricity_prices:
The data is used for validation of the optimization results. The data is used for validation of the optimization results.
""" """
params: params:
snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, snapshots=config_provider("snapshots"),
countries=config["countries"], countries=config_provider("countries"),
output: output:
RESOURCES + "historical_electricity_prices.csv", resources("historical_electricity_prices.csv"),
log: log:
LOGS + "build_electricity_prices.log", logs("build_electricity_prices.log"),
resources: resources:
mem_mb=5000, mem_mb=5000,
script: script:
@ -70,7 +70,7 @@ rule build_electricity_prices:
rule plot_validation_electricity_production: rule plot_validation_electricity_production:
input: input:
network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
electricity_production=RESOURCES + "historical_electricity_production.csv", electricity_production=resources("historical_electricity_production.csv"),
output: output:
**{ **{
plot: RESULTS plot: RESULTS
@ -85,10 +85,10 @@ rule plot_validation_electricity_production:
rule plot_validation_cross_border_flows: rule plot_validation_cross_border_flows:
params: params:
countries=config["countries"], countries=config_provider("countries"),
input: input:
network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
cross_border_flows=RESOURCES + "historical_cross_border_flows.csv", cross_border_flows=resources("historical_cross_border_flows.csv"),
output: output:
**{ **{
plot: RESULTS plot: RESULTS
@ -104,7 +104,7 @@ rule plot_validation_cross_border_flows:
rule plot_validation_electricity_prices: rule plot_validation_electricity_prices:
input: input:
network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
electricity_prices=RESOURCES + "historical_electricity_prices.csv", electricity_prices=resources("historical_electricity_prices.csv"),
output: output:
**{ **{
plot: RESULTS plot: RESULTS

scripts/__init__.py Normal file

@ -0,0 +1,4 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT


@ -4,17 +4,20 @@
# SPDX-License-Identifier: MIT # SPDX-License-Identifier: MIT
import contextlib import contextlib
import copy
import hashlib import hashlib
import logging import logging
import os import os
import re import re
import urllib import urllib
from functools import partial
from pathlib import Path from pathlib import Path
import pandas as pd import pandas as pd
import pytz import pytz
import requests import requests
import yaml import yaml
from snakemake.utils import update_config
from tqdm import tqdm from tqdm import tqdm
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -22,6 +25,77 @@ logger = logging.getLogger(__name__)
REGION_COLS = ["geometry", "name", "x", "y", "country"] REGION_COLS = ["geometry", "name", "x", "y", "country"]
def get_run_path(fn, dir, rdir, shared_resources):
"""
Dynamically provide paths based on shared resources and filename.
Use this function for snakemake rule inputs or outputs that should be
optionally shared across runs or created individually for each run.
Parameters
----------
fn : str
The filename for the path to be generated.
dir : str
The base directory.
rdir : str
Relative directory for non-shared resources.
shared_resources : str or bool
Specifies which resources should be shared.
- If string is "base", special handling for shared "base" resources (see notes).
- If random string other than "base", this folder is used instead of the `rdir` keyword.
- If boolean, directly specifies if the resource is shared.
Returns
-------
str
Full path where the resource should be stored.
Notes
-----
Special case for "base" allows no wildcards other than "technology", "year"
and "scope" and excludes filenames starting with "networks/elec" or
"add_electricity". All other resources are shared.
"""
if shared_resources == "base":
pattern = r"\{([^{}]+)\}"
existing_wildcards = set(re.findall(pattern, fn))
irrelevant_wildcards = {"technology", "year", "scope"}
no_relevant_wildcards = not existing_wildcards - irrelevant_wildcards
no_elec_rule = not fn.startswith("networks/elec") and not fn.startswith(
"add_electricity"
)
is_shared = no_relevant_wildcards and no_elec_rule
elif isinstance(shared_resources, str):
rdir = shared_resources + "/"
is_shared = True
elif isinstance(shared_resources, bool):
is_shared = shared_resources
else:
raise ValueError(
"shared_resources must be a boolean, str, or 'base' for special handling."
)
if is_shared:
return f"{dir}{fn}"
else:
return f"{dir}{rdir}{fn}"
def path_provider(dir, rdir, shared_resources):
"""
Returns a partial function that dynamically provides paths based on shared
resources and the filename.
Returns
-------
partial function
A partial function that takes a filename as input and
returns the path to the file based on the shared_resources parameter.
"""
return partial(get_run_path, dir=dir, rdir=rdir, shared_resources=shared_resources)
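To make the "base" mode of get_run_path concrete, here are assumed example calls and the paths they would yield (values illustrative only, not from this commit):

    # shared: no wildcards beyond technology/year/scope, not an elec-network file
    get_run_path("costs_2030.csv", "resources/", "myrun/", shared_resources="base")
    # -> "resources/costs_2030.csv"

    # not shared: {simpl} is a run-specific wildcard and the prefix is networks/elec
    get_run_path("networks/elec_s{simpl}.nc", "resources/", "myrun/", shared_resources="base")
    # -> "resources/myrun/networks/elec_s{simpl}.nc"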
 def get_opt(opts, expr, flags=None):
     """
     Return the first option matching the regular expression.
@@ -43,9 +117,9 @@ def find_opt(opts, expr):
     """
     for o in opts:
         if expr in o:
-            m = re.findall("[0-9]*\.?[0-9]+$", o)
+            m = re.findall(r"m?\d+(?:[\.p]\d+)?", o)
             if len(m) > 0:
-                return True, float(m[0])
+                return True, float(m[-1].replace("p", ".").replace("m", "-"))
             else:
                 return True, None
     return False, None
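The widened regular expression lets wildcard suffixes encode a decimal point as "p" and a leading minus as "m", and taking m[-1] skips digits that belong to the option name itself (such as the "2" in "Co2L"). Assumed examples of the new behaviour:

    find_opt(["Co2L0p25", "3h"], "Co2L")  # -> (True, 0.25)
    find_opt(["Co2Lm0p05"], "Co2L")       # -> (True, -0.05)
    find_opt(["Ep", "BAU"], "Ep")         # -> (True, None), flag without a value
    find_opt(["SAFE"], "Co2L")            # -> (False, None)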
@@ -59,6 +133,21 @@ def mute_print():
         yield


+def set_scenario_config(snakemake):
+    scenario = snakemake.config["run"].get("scenarios", {})
+    if scenario.get("enable") and "run" in snakemake.wildcards.keys():
+        try:
+            with open(scenario["file"], "r") as f:
+                scenario_config = yaml.safe_load(f)
+        except FileNotFoundError:
+            # fallback for mock_snakemake
+            script_dir = Path(__file__).parent.resolve()
+            root_dir = script_dir.parent
+            with open(root_dir / scenario["file"], "r") as f:
+                scenario_config = yaml.safe_load(f)
+        update_config(snakemake.config, scenario_config[snakemake.wildcards.run])
+
+
 def configure_logging(snakemake, skip_handlers=False):
     """
     Configure the basic behaviour for the logging module.
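set_scenario_config therefore expects the scenarios file to map run names to partial config overrides, which update_config deep-merges into snakemake.config. A hypothetical equivalent of such a file, written as the dict yaml.safe_load would return (names and values assumed):

    scenario_config = {
        "low-co2": {"electricity": {"co2limit_enable": True}},
        "high-co2-price": {"costs": {"emission_prices": {"co2": 120}}},
    }
    # with wildcard run="low-co2", the entry scenario_config["low-co2"]
    # is deep-merged into snakemake.config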
@@ -235,7 +324,7 @@ def progress_retrieve(url, file, disable=False):

 def mock_snakemake(
     rulename,
     root_dir=None,
-    configfiles=[],
+    configfiles=None,
     submodule_dir="workflow/submodules/pypsa-eur",
     **wildcards,
 ):
@@ -289,7 +378,9 @@ def mock_snakemake(
         if os.path.exists(p):
             snakefile = p
             break
-    if isinstance(configfiles, str):
+    if configfiles is None:
+        configfiles = []
+    elif isinstance(configfiles, str):
         configfiles = [configfiles]

     workflow = sm.Workflow(
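Switching the default from [] to None is the usual guard against Python's mutable-default pitfall: a list default is created once at function definition and shared between calls. A generic illustration, not from the codebase:

    def append_to(x, acc=[]):  # one list object is reused for every call
        acc.append(x)
        return acc

    append_to(1)  # [1]
    append_to(2)  # [1, 2] -- state leaks across calls; acc=None avoids this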
@@ -311,7 +402,7 @@ def mock_snakemake(

     def make_accessable(*ios):
         for io in ios:
-            for i in range(len(io)):
+            for i, _ in enumerate(io):
                 io[i] = os.path.abspath(io[i])

     make_accessable(job.input, job.output, job.log)
@@ -379,14 +470,182 @@ def parse(infix):
         return {infix.pop(0): parse(infix)}


-def update_config_with_sector_opts(config, sector_opts):
-    from snakemake.utils import update_config
-
-    for o in sector_opts.split("-"):
-        if o.startswith("CF+"):
-            infix = o.split("+")[1:]
-            update_config(config, parse(infix))
+def update_config_from_wildcards(config, w, inplace=True):
+    """
+    Parses configuration settings from wildcards and updates the config.
+    """
+    if not inplace:
+        config = copy.deepcopy(config)
+
+    if w.get("opts"):
+        opts = w.opts.split("-")
+
+        if nhours := get_opt(opts, r"^\d+(h|seg)$"):
+            config["clustering"]["temporal"]["resolution_elec"] = nhours
+
+        co2l_enable, co2l_value = find_opt(opts, "Co2L")
+        if co2l_enable:
+            config["electricity"]["co2limit_enable"] = True
+            if co2l_value is not None:
+                config["electricity"]["co2limit"] = (
+                    co2l_value * config["electricity"]["co2base"]
+                )
+
+        gasl_enable, gasl_value = find_opt(opts, "CH4L")
+        if gasl_enable:
+            config["electricity"]["gaslimit_enable"] = True
+            if gasl_value is not None:
+                config["electricity"]["gaslimit"] = gasl_value * 1e6
+
+        if "Ept" in opts:
+            config["costs"]["emission_prices"]["co2_monthly_prices"] = True
+
+        ep_enable, ep_value = find_opt(opts, "Ep")
+        if ep_enable:
+            config["costs"]["emission_prices"]["enable"] = True
+            if ep_value is not None:
+                config["costs"]["emission_prices"]["co2"] = ep_value
+
+        if "ATK" in opts:
+            config["autarky"]["enable"] = True
+        if "ATKc" in opts:
+            config["autarky"]["by_country"] = True
+
+        attr_lookup = {
+            "p": "p_nom_max",
+            "e": "e_nom_max",
+            "c": "capital_cost",
+            "m": "marginal_cost",
+        }
+        for o in opts:
+            flags = ["+e", "+p", "+m", "+c"]
+            if all(flag not in o for flag in flags):
+                continue
+            carrier, attr_factor = o.split("+")
+            attr = attr_lookup[attr_factor[0]]
+            factor = float(attr_factor[1:])
+            if not isinstance(config["adjustments"]["electricity"], dict):
+                config["adjustments"]["electricity"] = dict()
+            update_config(
+                config["adjustments"]["electricity"], {attr: {carrier: factor}}
+            )
+
+    if w.get("sector_opts"):
+        opts = w.sector_opts.split("-")
+
+        if "T" in opts:
+            config["sector"]["transport"] = True
+
+        if "H" in opts:
+            config["sector"]["heating"] = True
+
+        if "B" in opts:
+            config["sector"]["biomass"] = True
+
+        if "I" in opts:
+            config["sector"]["industry"] = True
+
+        if "A" in opts:
+            config["sector"]["agriculture"] = True
+
+        if "CCL" in opts:
+            config["solving"]["constraints"]["CCL"] = True
+
+        eq_value = get_opt(opts, r"^EQ+\d*\.?\d+(c|)")
+        for o in opts:
+            if eq_value is not None:
+                config["solving"]["constraints"]["EQ"] = eq_value
+            elif "EQ" in o:
+                config["solving"]["constraints"]["EQ"] = True
+            break
+
+        if "BAU" in opts:
+            config["solving"]["constraints"]["BAU"] = True
+
+        if "SAFE" in opts:
+            config["solving"]["constraints"]["SAFE"] = True
+
+        if nhours := get_opt(opts, r"^\d+(h|sn|seg)$"):
+            config["clustering"]["temporal"]["resolution_sector"] = nhours
+
+        if "decentral" in opts:
+            config["sector"]["electricity_transmission_grid"] = False
+
+        if "noH2network" in opts:
+            config["sector"]["H2_network"] = False
+
+        if "nowasteheat" in opts:
+            config["sector"]["use_fischer_tropsch_waste_heat"] = False
+            config["sector"]["use_methanolisation_waste_heat"] = False
+            config["sector"]["use_haber_bosch_waste_heat"] = False
+            config["sector"]["use_methanation_waste_heat"] = False
+            config["sector"]["use_fuel_cell_waste_heat"] = False
+            config["sector"]["use_electrolysis_waste_heat"] = False
+
+        if "nodistrict" in opts:
+            config["sector"]["district_heating"]["progress"] = 0.0
+
+        dg_enable, dg_factor = find_opt(opts, "dist")
+        if dg_enable:
+            config["sector"]["electricity_distribution_grid"] = True
+            if dg_factor is not None:
+                config["sector"][
+                    "electricity_distribution_grid_cost_factor"
+                ] = dg_factor
+
+        if "biomasstransport" in opts:
+            config["sector"]["biomass_transport"] = True
+
+        _, maxext = find_opt(opts, "linemaxext")
+        if maxext is not None:
+            config["lines"]["max_extension"] = maxext * 1e3
+            config["links"]["max_extension"] = maxext * 1e3
+
+        _, co2l_value = find_opt(opts, "Co2L")
+        if co2l_value is not None:
+            config["co2_budget"] = float(co2l_value)
+
+        if co2_distribution := get_opt(opts, r"^(cb)\d+(\.\d+)?(ex|be)$"):
+            config["co2_budget"] = co2_distribution
+
+        if co2_budget := get_opt(opts, r"^(cb)\d+(\.\d+)?$"):
+            config["co2_budget"] = float(co2_budget[2:])
+
+        attr_lookup = {
+            "p": "p_nom_max",
+            "e": "e_nom_max",
+            "c": "capital_cost",
+            "m": "marginal_cost",
+        }
+        for o in opts:
+            flags = ["+e", "+p", "+m", "+c"]
+            if all(flag not in o for flag in flags):
+                continue
+            carrier, attr_factor = o.split("+")
+            attr = attr_lookup[attr_factor[0]]
+            factor = float(attr_factor[1:])
+            if not isinstance(config["adjustments"]["sector"], dict):
+                config["adjustments"]["sector"] = dict()
+            update_config(config["adjustments"]["sector"], {attr: {carrier: factor}})
+
+        _, sdr_value = find_opt(opts, "sdr")
+        if sdr_value is not None:
+            config["costs"]["social_discountrate"] = sdr_value / 100
+
+        _, seq_limit = find_opt(opts, "seq")
+        if seq_limit is not None:
+            config["sector"]["co2_sequestration_potential"] = seq_limit
+
+        # any config option can be represented in wildcard
+        for o in opts:
+            if o.startswith("CF+"):
+                infix = o.split("+")[1:]
+                update_config(config, parse(infix))
+
+    if not inplace:
+        return config


 def get_checksum_from_zenodo(file_url):
     parts = file_url.split("/")
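Taken together, update_config_from_wildcards lets a single wildcard string toggle and parameterise config entries. An assumed end-to-end example, with w standing in for snakemake's Wildcards object:

    # an opts wildcard of "Co2L0p25-3h-solar+c0.5" would lead to:
    #   electricity.co2limit_enable = True
    #   electricity.co2limit        = 0.25 * electricity.co2base
    #   clustering.temporal.resolution_elec = "3h"
    #   adjustments.electricity     = {"capital_cost": {"solar": 0.5}}
    # and any other config entry can be addressed generically with a
    # hypothetical "CF+" option, e.g. "CF+sector+v2g+false", which parse()
    # turns into a nested dict before update_config merges it in.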
scripts/add_brownfield.py
@@ -12,7 +12,11 @@ import numpy as np
 import pandas as pd
 import pypsa
 import xarray as xr
-from _helpers import update_config_with_sector_opts
+from _helpers import (
+    configure_logging,
+    set_scenario_config,
+    update_config_from_wildcards,
+)
 from add_existing_baseyear import add_build_year_to_new_assets
 from pypsa.clustering.spatial import normed_or_uniform
@@ -210,9 +214,10 @@ if __name__ == "__main__":
             planning_horizons=2030,
         )

-    logging.basicConfig(level=snakemake.config["logging"]["level"])
+    configure_logging(snakemake)
+    set_scenario_config(snakemake)

-    update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts)
+    update_config_from_wildcards(snakemake.config, snakemake.wildcards)

     logger.info(f"Preparing brownfield from the file {snakemake.input.network_p}")
scripts/add_electricity.py
@@ -93,7 +93,7 @@ import powerplantmatching as pm
 import pypsa
 import scipy.sparse as sparse
 import xarray as xr
-from _helpers import configure_logging, update_p_nom_max
+from _helpers import configure_logging, set_scenario_config, update_p_nom_max
 from powerplantmatching.export import map_country_bus
 from shapely.prepared import prep
@@ -790,6 +790,7 @@ if __name__ == "__main__":
         snakemake = mock_snakemake("add_electricity")
     configure_logging(snakemake)
+    set_scenario_config(snakemake)

     params = snakemake.params
scripts/add_existing_baseyear.py
@@ -15,7 +15,11 @@ import numpy as np
 import pandas as pd
 import pypsa
 import xarray as xr
-from _helpers import update_config_with_sector_opts
+from _helpers import (
+    configure_logging,
+    set_scenario_config,
+    update_config_from_wildcards,
+)
 from add_electricity import sanitize_carriers
 from prepare_sector_network import cluster_heat_buses, define_spatial, prepare_costs
@@ -552,12 +556,12 @@ if __name__ == "__main__":
             planning_horizons=2020,
         )

-    logging.basicConfig(level=snakemake.config["logging"]["level"])
+    configure_logging(snakemake)
+    set_scenario_config(snakemake)

-    update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts)
+    update_config_from_wildcards(snakemake.config, snakemake.wildcards)

     options = snakemake.params.sector
-    opts = snakemake.wildcards.sector_opts.split("-")

     baseyear = snakemake.params.baseyear
@@ -580,7 +584,7 @@ if __name__ == "__main__":
             n, grouping_years_power, costs, baseyear
         )

-    if "H" in opts:
+    if options["heating"]:
         time_dep_hp_cop = options["time_dep_hp_cop"]
         ashp_cop = (
             xr.open_dataarray(snakemake.input.cop_air_total)
scripts/add_extra_components.py
@@ -55,7 +55,7 @@ import logging
 import numpy as np
 import pandas as pd
 import pypsa
-from _helpers import configure_logging
+from _helpers import configure_logging, set_scenario_config
 from add_electricity import load_costs, sanitize_carriers, sanitize_locations

 idx = pd.IndexSlice
@@ -230,6 +230,7 @@ if __name__ == "__main__":
         snakemake = mock_snakemake("add_extra_components", simpl="", clusters=5)
     configure_logging(snakemake)
+    set_scenario_config(snakemake)

     n = pypsa.Network(snakemake.input.network)
     extendable_carriers = snakemake.params.extendable_carriers
scripts/base_network.py
@@ -77,7 +77,7 @@ import shapely
 import shapely.prepared
 import shapely.wkt
 import yaml
-from _helpers import configure_logging
+from _helpers import configure_logging, set_scenario_config
 from packaging.version import Version, parse
 from scipy import spatial
 from scipy.sparse import csgraph
@@ -769,6 +769,7 @@ if __name__ == "__main__":
         snakemake = mock_snakemake("base_network")
     configure_logging(snakemake)
+    set_scenario_config(snakemake)

     n = base_network(
         snakemake.input.eg_buses,
scripts/build_ammonia_production.py
@@ -8,6 +8,7 @@ Build historical annual ammonia production per country in ktonNH3/a.

 import country_converter as coco
 import pandas as pd
+from _helpers import set_scenario_config

 cc = coco.CountryConverter()
@@ -18,6 +19,8 @@ if __name__ == "__main__":
         snakemake = mock_snakemake("build_ammonia_production")

+    set_scenario_config(snakemake)
+
     ammonia = pd.read_excel(
         snakemake.input.usgs,
         sheet_name="T12",
scripts/build_biomass_potentials.py
@@ -16,6 +16,8 @@ import pandas as pd

 logger = logging.getLogger(__name__)
 AVAILABLE_BIOMASS_YEARS = [2010, 2020, 2030, 2040, 2050]
+from _helpers import configure_logging, set_scenario_config
+

 def build_nuts_population_data(year=2013):
     pop = pd.read_csv(
@@ -221,6 +223,9 @@ if __name__ == "__main__":
             planning_horizons=2050,
         )

+    configure_logging(snakemake)
+    set_scenario_config(snakemake)
+
     overnight = snakemake.config["foresight"] == "overnight"
     params = snakemake.params.biomass
     investment_year = int(snakemake.wildcards.planning_horizons)
scripts/build_bus_regions.py
@@ -47,7 +47,7 @@ import geopandas as gpd
 import numpy as np
 import pandas as pd
 import pypsa
-from _helpers import REGION_COLS, configure_logging
+from _helpers import REGION_COLS, configure_logging, set_scenario_config
 from scipy.spatial import Voronoi
 from shapely.geometry import Polygon
@@ -115,6 +115,7 @@ if __name__ == "__main__":
         snakemake = mock_snakemake("build_bus_regions")
     configure_logging(snakemake)
+    set_scenario_config(snakemake)

     countries = snakemake.params.countries
scripts/build_clustered_population_layouts.py
@@ -11,6 +11,7 @@ import atlite
 import geopandas as gpd
 import pandas as pd
 import xarray as xr
+from _helpers import set_scenario_config

 if __name__ == "__main__":
     if "snakemake" not in globals():
@@ -22,6 +23,8 @@ if __name__ == "__main__":
             clusters=48,
         )

+    set_scenario_config(snakemake)
+
     cutout = atlite.Cutout(snakemake.input.cutout)

     clustered_regions = (
scripts/build_cop_profiles.py
@@ -14,6 +14,7 @@ https://doi.org/10.1039/C2EE22653G.
 """

 import xarray as xr
+from _helpers import set_scenario_config


 def coefficient_of_performance(delta_T, source="air"):
@@ -35,6 +36,8 @@ if __name__ == "__main__":
             clusters=48,
         )

+    set_scenario_config(snakemake)
+
     for area in ["total", "urban", "rural"]:
         for source in ["air", "soil"]:
             source_T = xr.open_dataarray(snakemake.input[f"temp_{source}_{area}"])
scripts/build_cross_border_flows.py
@@ -8,7 +8,7 @@ import logging

 import pandas as pd
 import pypsa
-from _helpers import configure_logging
+from _helpers import configure_logging, set_scenario_config
 from entsoe import EntsoePandasClient
 from entsoe.exceptions import InvalidBusinessParameterError, NoMatchingDataError
 from requests import HTTPError
@@ -21,6 +21,7 @@ if __name__ == "__main__":
         snakemake = mock_snakemake("build_cross_border_flows")
     configure_logging(snakemake)
+    set_scenario_config(snakemake)

     api_key = snakemake.config["private"]["keys"]["entsoe_api"]
     client = EntsoePandasClient(api_key=api_key)
scripts/build_cutout.py
@@ -95,7 +95,7 @@ import logging

 import atlite
 import geopandas as gpd
 import pandas as pd
-from _helpers import configure_logging
+from _helpers import configure_logging, set_scenario_config

 logger = logging.getLogger(__name__)
@@ -105,6 +105,7 @@ if __name__ == "__main__":
         snakemake = mock_snakemake("build_cutout", cutout="europe-2013-era5")
     configure_logging(snakemake)
+    set_scenario_config(snakemake)

     cutout_params = snakemake.params.cutouts[snakemake.wildcards.cutout]
scripts/build_daily_heat_demand.py
@@ -11,6 +11,7 @@ import geopandas as gpd
 import numpy as np
 import pandas as pd
 import xarray as xr
+from _helpers import set_scenario_config
 from dask.distributed import Client, LocalCluster

 if __name__ == "__main__":
@@ -23,6 +24,7 @@ if __name__ == "__main__":
             simpl="",
             clusters=48,
         )
+    set_scenario_config(snakemake)

     nprocesses = int(snakemake.threads)
     cluster = LocalCluster(n_workers=nprocesses, threads_per_worker=1)
scripts/build_district_heat_share.py
@@ -9,6 +9,7 @@ Build district heat shares at each node, depending on investment year.
 import logging

 import pandas as pd
+from _helpers import configure_logging, set_scenario_config
 from prepare_sector_network import get

 logger = logging.getLogger(__name__)
@@ -24,6 +25,8 @@ if __name__ == "__main__":
             clusters=48,
             planning_horizons="2050",
         )
+    configure_logging(snakemake)
+    set_scenario_config(snakemake)

     investment_year = int(snakemake.wildcards.planning_horizons[-4:])
scripts/build_electricity_demand.py
@@ -39,7 +39,7 @@ import logging

 import numpy as np
 import pandas as pd
-from _helpers import configure_logging
+from _helpers import configure_logging, set_scenario_config
 from pandas import Timedelta as Delta

 logger = logging.getLogger(__name__)
@@ -261,6 +261,7 @@ if __name__ == "__main__":
         snakemake = mock_snakemake("build_electricity_demand")
     configure_logging(snakemake)
+    set_scenario_config(snakemake)

     interpolate_limit = snakemake.params.load["interpolate_limit"]
     countries = snakemake.params.countries
scripts/build_electricity_prices.py
@@ -7,7 +7,7 @@
 import logging

 import pandas as pd
-from _helpers import configure_logging
+from _helpers import configure_logging, set_scenario_config
 from entsoe import EntsoePandasClient
 from entsoe.exceptions import NoMatchingDataError
@@ -19,6 +19,7 @@ if __name__ == "__main__":
         snakemake = mock_snakemake("build_cross_border_flows")
     configure_logging(snakemake)
+    set_scenario_config(snakemake)

     api_key = snakemake.config["private"]["keys"]["entsoe_api"]
     client = EntsoePandasClient(api_key=api_key)
scripts/build_electricity_production.py
@@ -7,7 +7,7 @@
 import logging

 import pandas as pd
-from _helpers import configure_logging
+from _helpers import configure_logging, set_scenario_config
 from entsoe import EntsoePandasClient
 from entsoe.exceptions import NoMatchingDataError
@@ -39,6 +39,7 @@ if __name__ == "__main__":
         snakemake = mock_snakemake("build_electricity_production")
     configure_logging(snakemake)
+    set_scenario_config(snakemake)

     api_key = snakemake.config["private"]["keys"]["entsoe_api"]
     client = EntsoePandasClient(api_key=api_key)
scripts/build_energy_totals.py
@@ -14,7 +14,7 @@ import country_converter as coco
 import geopandas as gpd
 import numpy as np
 import pandas as pd
-from _helpers import mute_print
+from _helpers import configure_logging, mute_print, set_scenario_config
 from tqdm import tqdm

 cc = coco.CountryConverter()
@@ -743,7 +743,8 @@ if __name__ == "__main__":
         snakemake = mock_snakemake("build_energy_totals")

-    logging.basicConfig(level=snakemake.config["logging"]["level"])
+    configure_logging(snakemake)
+    set_scenario_config(snakemake)

     params = snakemake.params.energy
scripts/build_existing_heating_distribution.py
@@ -9,6 +9,7 @@ horizon.
 import country_converter as coco
 import numpy as np
 import pandas as pd
+from _helpers import set_scenario_config

 cc = coco.CountryConverter()
@@ -126,5 +127,6 @@ if __name__ == "__main__":
             clusters=48,
             planning_horizons=2050,
         )
+    set_scenario_config(snakemake)

     build_existing_heating()
scripts/build_gas_input_locations.py
@@ -11,6 +11,7 @@ import logging

 import geopandas as gpd
 import pandas as pd
+from _helpers import configure_logging, set_scenario_config
 from cluster_gas_network import load_bus_regions

 logger = logging.getLogger(__name__)
@@ -134,7 +135,8 @@ if __name__ == "__main__":
             clusters="128",
         )

-    logging.basicConfig(level=snakemake.config["logging"]["level"])
+    configure_logging(snakemake)
+    set_scenario_config(snakemake)

     regions = load_bus_regions(
         snakemake.input.regions_onshore, snakemake.input.regions_offshore
scripts/build_gas_network.py
@@ -11,6 +11,7 @@ import logging

 import geopandas as gpd
 import pandas as pd
+from _helpers import configure_logging, set_scenario_config
 from pypsa.geo import haversine_pts
 from shapely.geometry import Point
@@ -143,7 +144,8 @@ if __name__ == "__main__":
         snakemake = mock_snakemake("build_gas_network")

-    logging.basicConfig(level=snakemake.config["logging"]["level"])
+    configure_logging(snakemake)
+    set_scenario_config(snakemake)

     gas_network = load_dataset(snakemake.input.gas_network)
scripts/build_hourly_heat_demand.py
@@ -10,7 +10,7 @@ from itertools import product

 import pandas as pd
 import xarray as xr
-from _helpers import generate_periodic_profiles
+from _helpers import generate_periodic_profiles, set_scenario_config

 if __name__ == "__main__":
     if "snakemake" not in globals():
@@ -22,6 +22,7 @@ if __name__ == "__main__":
             simpl="",
             clusters=48,
         )
+    set_scenario_config(snakemake)

     snapshots = pd.date_range(freq="h", **snakemake.params.snapshots)
scripts/build_hydro_profile.py
@@ -65,7 +65,7 @@ import atlite
 import country_converter as coco
 import geopandas as gpd
 import pandas as pd
-from _helpers import configure_logging
+from _helpers import configure_logging, set_scenario_config

 cc = coco.CountryConverter()
@@ -131,6 +131,7 @@ if __name__ == "__main__":
         snakemake = mock_snakemake("build_hydro_profile")
     configure_logging(snakemake)
+    set_scenario_config(snakemake)

     params_hydro = snakemake.params.hydro
     cutout = atlite.Cutout(snakemake.input.cutout)
scripts/build_industrial_distribution_key.py
@@ -13,6 +13,7 @@ from itertools import product

 import country_converter as coco
 import geopandas as gpd
 import pandas as pd
+from _helpers import configure_logging, set_scenario_config

 logger = logging.getLogger(__name__)
 cc = coco.CountryConverter()
@@ -148,8 +149,8 @@ if __name__ == "__main__":
             simpl="",
             clusters=128,
         )

-    logging.basicConfig(level=snakemake.config["logging"]["level"])
+    configure_logging(snakemake)
+    set_scenario_config(snakemake)

     countries = snakemake.params.countries
scripts/build_industrial_energy_demand_per_country_today.py
@@ -11,6 +11,7 @@ from functools import partial

 import country_converter as coco
 import pandas as pd
+from _helpers import set_scenario_config
 from tqdm import tqdm

 cc = coco.CountryConverter()
@@ -174,6 +175,7 @@ if __name__ == "__main__":
         from _helpers import mock_snakemake

         snakemake = mock_snakemake("build_industrial_energy_demand_per_country_today")
+    set_scenario_config(snakemake)

     params = snakemake.params.industry
     year = params.get("reference_year", 2015)
scripts/build_industrial_energy_demand_per_node.py
@@ -7,6 +7,7 @@ Build industrial energy demand per model region.
 """

 import pandas as pd
+from _helpers import set_scenario_config

 if __name__ == "__main__":
     if "snakemake" not in globals():
@@ -18,6 +19,7 @@ if __name__ == "__main__":
             clusters=48,
             planning_horizons=2030,
         )
+    set_scenario_config(snakemake)

     # import ratios
     fn = snakemake.input.industry_sector_ratios
scripts/build_industrial_energy_demand_per_node_today.py
@@ -10,6 +10,7 @@ from itertools import product

 import numpy as np
 import pandas as pd
+from _helpers import set_scenario_config

 # map JRC/our sectors to hotmaps sector, where mapping exist
 sector_mapping = {
@@ -75,5 +76,6 @@ if __name__ == "__main__":
             simpl="",
             clusters=48,
         )
+    set_scenario_config(snakemake)

     build_nodal_industrial_energy_demand()
scripts/build_industrial_production_per_country.py
@@ -13,7 +13,7 @@ from functools import partial

 import country_converter as coco
 import numpy as np
 import pandas as pd
-from _helpers import mute_print
+from _helpers import configure_logging, mute_print, set_scenario_config
 from tqdm import tqdm

 logger = logging.getLogger(__name__)
@@ -278,8 +278,8 @@ if __name__ == "__main__":
         from _helpers import mock_snakemake

         snakemake = mock_snakemake("build_industrial_production_per_country")

-    logging.basicConfig(level=snakemake.config["logging"]["level"])
+    configure_logging(snakemake)
+    set_scenario_config(snakemake)

     countries = snakemake.params.countries
scripts/build_industrial_production_per_country_tomorrow.py
@@ -7,6 +7,7 @@ Build future industrial production per country.
 """

 import pandas as pd
+from _helpers import set_scenario_config
 from prepare_sector_network import get

 if __name__ == "__main__":
@@ -14,6 +15,7 @@ if __name__ == "__main__":
         from _helpers import mock_snakemake

         snakemake = mock_snakemake("build_industrial_production_per_country_tomorrow")
+    set_scenario_config(snakemake)

     params = snakemake.params.industry
scripts/build_industrial_production_per_node.py
@@ -9,6 +9,7 @@ Build industrial production per model region.
 from itertools import product

 import pandas as pd
+from _helpers import set_scenario_config

 # map JRC/our sectors to hotmaps sector, where mapping exist
 sector_mapping = {
@@ -72,5 +73,6 @@ if __name__ == "__main__":
             simpl="",
             clusters=48,
         )
+    set_scenario_config(snakemake)

     build_nodal_industrial_production()
scripts/build_industry_sector_ratios.py
@@ -7,7 +7,7 @@ Build specific energy consumption by carrier and industries.
 """

 import pandas as pd
-from _helpers import mute_print
+from _helpers import mute_print, set_scenario_config

 # GWh/ktoe OR MWh/toe
 toe_to_MWh = 11.630
@@ -1464,6 +1464,7 @@ if __name__ == "__main__":
         from _helpers import mock_snakemake

         snakemake = mock_snakemake("build_industry_sector_ratios")
+    set_scenario_config(snakemake)

     # TODO make params option
     year = 2015
scripts/build_line_rating.py
@@ -58,7 +58,7 @@ import numpy as np
 import pandas as pd
 import pypsa
 import xarray as xr
-from _helpers import configure_logging
+from _helpers import configure_logging, set_scenario_config
 from shapely.geometry import LineString as Line
 from shapely.geometry import Point
@@ -144,6 +144,7 @@ if __name__ == "__main__":
             opts="Co2L-4H",
         )
     configure_logging(snakemake)
+    set_scenario_config(snakemake)

     snapshots = snakemake.params.snapshots
scripts/build_monthly_prices.py
@@ -43,7 +43,7 @@ Data was accessed at 16.5.2023

 import logging

 import pandas as pd
-from _helpers import configure_logging
+from _helpers import configure_logging, set_scenario_config

 logger = logging.getLogger(__name__)
@@ -111,6 +111,7 @@ if __name__ == "__main__":
         snakemake = mock_snakemake("build_monthly_prices")
     configure_logging(snakemake)
+    set_scenario_config(snakemake)

     fuel_price = get_fuel_price()
     fuel_price.to_csv(snakemake.output.fuel_price)
scripts/build_natura_raster.py
@@ -46,7 +46,7 @@ import logging

 import atlite
 import geopandas as gpd
 import rasterio as rio
-from _helpers import configure_logging
+from _helpers import configure_logging, set_scenario_config
 from rasterio.features import geometry_mask
 from rasterio.warp import transform_bounds
@@ -92,6 +92,7 @@ if __name__ == "__main__":
         snakemake = mock_snakemake("build_natura_raster")
     configure_logging(snakemake)
+    set_scenario_config(snakemake)

     cutouts = snakemake.input.cutouts
     xs, Xs, ys, Ys = zip(*(determine_cutout_xXyY(cutout) for cutout in cutouts))
scripts/build_population_layouts.py
@@ -13,6 +13,7 @@ import geopandas as gpd
 import numpy as np
 import pandas as pd
 import xarray as xr
+from _helpers import configure_logging, set_scenario_config

 logger = logging.getLogger(__name__)
@@ -22,7 +23,8 @@ if __name__ == "__main__":
         snakemake = mock_snakemake("build_population_layouts")

-    logging.basicConfig(level=snakemake.config["logging"]["level"])
+    configure_logging(snakemake)
+    set_scenario_config(snakemake)

     cutout = atlite.Cutout(snakemake.input.cutout)
scripts/build_population_weighted_energy_totals.py
@@ -7,6 +7,7 @@ Distribute country-level energy demands by population.
 """

 import pandas as pd
+from _helpers import set_scenario_config

 if __name__ == "__main__":
     if "snakemake" not in globals():
@@ -17,6 +18,7 @@ if __name__ == "__main__":
             simpl="",
             clusters=48,
         )
+    set_scenario_config(snakemake)

     pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0)
scripts/build_powerplants.py
@@ -91,7 +91,7 @@ import numpy as np
 import pandas as pd
 import powerplantmatching as pm
 import pypsa
-from _helpers import configure_logging
+from _helpers import configure_logging, set_scenario_config
 from powerplantmatching.export import map_country_bus

 logger = logging.getLogger(__name__)
@@ -165,6 +165,7 @@ if __name__ == "__main__":
         snakemake = mock_snakemake("build_powerplants")
     configure_logging(snakemake)
+    set_scenario_config(snakemake)

     n = pypsa.Network(snakemake.input.base_network)
     countries = snakemake.params.countries
scripts/build_renewable_profiles.py
@@ -188,7 +188,7 @@ import geopandas as gpd
 import numpy as np
 import pandas as pd
 import xarray as xr
-from _helpers import configure_logging
+from _helpers import configure_logging, set_scenario_config
 from dask.distributed import Client
 from pypsa.geo import haversine
 from shapely.geometry import LineString
@@ -202,6 +202,7 @@ if __name__ == "__main__":
         snakemake = mock_snakemake("build_renewable_profiles", technology="offwind-dc")
     configure_logging(snakemake)
+    set_scenario_config(snakemake)

     nprocesses = int(snakemake.threads)
     noprogress = snakemake.config["run"].get("disable_progressbar", True)
scripts/build_retro_cost.py
@@ -68,6 +68,7 @@ The script has the following structure:
 """
 import pandas as pd
 import xarray as xr
+from _helpers import set_scenario_config

 # (i) --- FIXED PARAMETER / STANDARD VALUES -----------------------------------
@@ -1053,6 +1054,7 @@ if __name__ == "__main__":
             ll="v1.0",
             sector_opts="Co2L0-168H-T-H-B-I-solar3-dist1",
         )
+    set_scenario_config(snakemake)

     # ******** config *********************************************************
scripts/build_salt_cavern_potentials.py
@@ -24,6 +24,7 @@ onshore (>50km from sea), offshore (Figure 7).

 import geopandas as gpd
 import pandas as pd
+from _helpers import set_scenario_config


 def concat_gdf(gdf_list, crs="EPSG:4326"):
@@ -77,6 +78,8 @@ if __name__ == "__main__":
             "build_salt_cavern_potentials", simpl="", clusters="37"
         )

+    set_scenario_config(snakemake)
+
     fn_onshore = snakemake.input.regions_onshore
     fn_offshore = snakemake.input.regions_offshore
scripts/build_sequestration_potentials.py
@@ -10,6 +10,7 @@ database_en>`_.

 import geopandas as gpd
 import pandas as pd
+from _helpers import set_scenario_config


 def area(gdf):
@@ -39,6 +40,8 @@ if __name__ == "__main__":
             "build_sequestration_potentials", simpl="", clusters="181"
         )

+    set_scenario_config(snakemake)
+
     cf = snakemake.params.sequestration_potential
     gdf = gpd.read_file(snakemake.input.sequestration_potential[0])
scripts/build_shapes.py
@@ -77,7 +77,7 @@ import geopandas as gpd
 import numpy as np
 import pandas as pd
 import pycountry as pyc
-from _helpers import configure_logging
+from _helpers import configure_logging, set_scenario_config
 from shapely.geometry import MultiPolygon, Polygon

 logger = logging.getLogger(__name__)
@@ -254,6 +254,7 @@ if __name__ == "__main__":
         snakemake = mock_snakemake("build_shapes")
     configure_logging(snakemake)
+    set_scenario_config(snakemake)

     country_shapes = countries(snakemake.input.naturalearth, snakemake.params.countries)
     country_shapes.reset_index().to_file(snakemake.output.country_shapes)
scripts/build_ship_raster.py
@@ -46,7 +46,7 @@ import zipfile
 from pathlib import Path

 import rioxarray
-from _helpers import configure_logging
+from _helpers import configure_logging, set_scenario_config
 from build_natura_raster import determine_cutout_xXyY

 logger = logging.getLogger(__name__)
@@ -57,6 +57,7 @@ if __name__ == "__main__":
         snakemake = mock_snakemake("build_ship_raster")
     configure_logging(snakemake)
+    set_scenario_config(snakemake)

     cutouts = snakemake.input.cutouts
     xs, Xs, ys, Ys = zip(*(determine_cutout_xXyY(cutout) for cutout in cutouts))
scripts/build_shipping_demand.py
@@ -11,6 +11,7 @@ import json

 import geopandas as gpd
 import pandas as pd
+from _helpers import set_scenario_config

 if __name__ == "__main__":
     if "snakemake" not in globals():
@@ -21,6 +22,7 @@ if __name__ == "__main__":
             simpl="",
             clusters=48,
         )
+    set_scenario_config(snakemake)

     scope = gpd.read_file(snakemake.input.scope).geometry[0]
     regions = gpd.read_file(snakemake.input.regions).set_index("name")
scripts/build_solar_thermal_profiles.py
@@ -11,6 +11,7 @@ import geopandas as gpd
 import numpy as np
 import pandas as pd
 import xarray as xr
+from _helpers import set_scenario_config
 from dask.distributed import Client, LocalCluster

 if __name__ == "__main__":
@@ -22,6 +23,8 @@ if __name__ == "__main__":
             simpl="",
             clusters=48,
         )
+    set_scenario_config(snakemake)

     nprocesses = int(snakemake.threads)
     cluster = LocalCluster(n_workers=nprocesses, threads_per_worker=1)
scripts/build_temperature_profiles.py
@@ -11,6 +11,7 @@ import geopandas as gpd
 import numpy as np
 import pandas as pd
 import xarray as xr
+from _helpers import set_scenario_config
 from dask.distributed import Client, LocalCluster

 if __name__ == "__main__":
@@ -22,6 +23,8 @@ if __name__ == "__main__":
             simpl="",
             clusters=48,
         )
+    set_scenario_config(snakemake)

     nprocesses = int(snakemake.threads)
     cluster = LocalCluster(n_workers=nprocesses, threads_per_worker=1)
scripts/build_transport_demand.py
@@ -13,7 +13,7 @@ import logging

 import numpy as np
 import pandas as pd
 import xarray as xr
-from _helpers import configure_logging, generate_periodic_profiles
+from _helpers import configure_logging, generate_periodic_profiles, set_scenario_config

 logger = logging.getLogger(__name__)
@@ -171,6 +171,7 @@ if __name__ == "__main__":
             clusters=48,
         )
     configure_logging(snakemake)
+    set_scenario_config(snakemake)

     pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0)
scripts/cluster_gas_network.py
@@ -10,6 +10,7 @@ import logging

 import geopandas as gpd
 import pandas as pd
+from _helpers import configure_logging, set_scenario_config
 from pypsa.geo import haversine_pts
 from shapely import wkt
@@ -105,8 +106,8 @@ if __name__ == "__main__":
         from _helpers import mock_snakemake

         snakemake = mock_snakemake("cluster_gas_network", simpl="", clusters="37")

-    logging.basicConfig(level=snakemake.config["logging"]["level"])
+    configure_logging(snakemake)
+    set_scenario_config(snakemake)

     fn = snakemake.input.cleaned_gas_network
     df = pd.read_csv(fn, index_col=0)
scripts/cluster_network.py
@@ -133,7 +133,7 @@ import numpy as np
 import pandas as pd
 import pypsa
 import seaborn as sns
-from _helpers import configure_logging, update_p_nom_max
+from _helpers import configure_logging, set_scenario_config, update_p_nom_max
 from add_electricity import load_costs
 from packaging.version import Version, parse
 from pypsa.clustering.spatial import (
@@ -456,6 +456,7 @@ if __name__ == "__main__":
         snakemake = mock_snakemake("cluster_network", simpl="", clusters="37")
     configure_logging(snakemake)
+    set_scenario_config(snakemake)

     params = snakemake.params
     solver_name = snakemake.config["solving"]["solver"]["name"]
scripts/copy_config.py
@@ -8,6 +8,7 @@ Copy used configuration files and important scripts for archiving.

 import yaml
+from _helpers import set_scenario_config

 if __name__ == "__main__":
     if "snakemake" not in globals():
@@ -15,6 +16,8 @@ if __name__ == "__main__":
         snakemake = mock_snakemake("copy_config")

+    set_scenario_config(snakemake)
+
     with open(snakemake.output[0], "w") as yaml_file:
         yaml.dump(
             snakemake.config,
scripts/determine_availability_matrix_MD_UA.py
@@ -15,7 +15,7 @@ import fiona
 import geopandas as gpd
 import matplotlib.pyplot as plt
 import numpy as np
-from _helpers import configure_logging
+from _helpers import configure_logging, set_scenario_config
 from atlite.gis import shape_availability
 from rasterio.plot import show
@@ -38,6 +38,7 @@ if __name__ == "__main__":
             "determine_availability_matrix_MD_UA", technology="solar"
         )
     configure_logging(snakemake)
+    set_scenario_config(snakemake)

     nprocesses = None  # snakemake.config["atlite"].get("nprocesses")
     noprogress = not snakemake.config["atlite"].get("show_progress", True)
scripts/make_summary.py
@@ -13,6 +13,7 @@ import sys

 import numpy as np
 import pandas as pd
 import pypsa
+from _helpers import configure_logging, set_scenario_config
 from prepare_sector_network import prepare_costs

 idx = pd.IndexSlice
@@ -673,7 +674,8 @@ if __name__ == "__main__":
         snakemake = mock_snakemake("make_summary")

-    logging.basicConfig(level=snakemake.config["logging"]["level"])
+    configure_logging(snakemake)
+    set_scenario_config(snakemake)

     networks_dict = {
         (cluster, ll, opt + sector_opt, planning_horizon): "results/"
scripts/make_summary_perfect.py
@@ -12,6 +12,7 @@ other metrics.

 import numpy as np
 import pandas as pd
 import pypsa
+from _helpers import set_scenario_config
 from make_summary import calculate_cfs  # noqa: F401
 from make_summary import calculate_nodal_cfs  # noqa: F401
 from make_summary import calculate_nodal_costs  # noqa: F401
@@ -722,6 +723,7 @@ if __name__ == "__main__":
         from _helpers import mock_snakemake

         snakemake = mock_snakemake("make_summary_perfect")
+    set_scenario_config(snakemake)

     run = snakemake.config["run"]["name"]
     if run != "":
scripts/plot_gas_network.py
@@ -13,7 +13,7 @@ import geopandas as gpd
 import matplotlib.pyplot as plt
 import pandas as pd
 import pypsa
-from _helpers import configure_logging
+from _helpers import configure_logging, set_scenario_config
 from plot_power_network import assign_location, load_projection
 from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches
@@ -237,6 +237,7 @@ if __name__ == "__main__":
         )

     configure_logging(snakemake)
+    set_scenario_config(snakemake)

     n = pypsa.Network(snakemake.input.network)
scripts/plot_hydrogen_network.py
@@ -13,7 +13,7 @@ import geopandas as gpd
 import matplotlib.pyplot as plt
 import pandas as pd
 import pypsa
-from _helpers import configure_logging
+from _helpers import configure_logging, set_scenario_config
 from plot_power_network import assign_location, load_projection
 from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches
@@ -257,6 +257,7 @@ if __name__ == "__main__":
         )

     configure_logging(snakemake)
+    set_scenario_config(snakemake)

     n = pypsa.Network(snakemake.input.network)
scripts/plot_power_network.py
@@ -14,7 +14,7 @@ import geopandas as gpd
 import matplotlib.pyplot as plt
 import pandas as pd
 import pypsa
-from _helpers import configure_logging
+from _helpers import configure_logging, set_scenario_config
 from plot_summary import preferred_order, rename_techs
 from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches
@@ -257,6 +257,7 @@ if __name__ == "__main__":
         )

     configure_logging(snakemake)
+    set_scenario_config(snakemake)

     n = pypsa.Network(snakemake.input.network)
scripts/plot_power_network_clustered.py
@@ -6,10 +6,10 @@
 Plot clustered electricity transmission network.
 """

-import cartopy.crs as ccrs
 import geopandas as gpd
 import matplotlib.pyplot as plt
 import pypsa
+from _helpers import set_scenario_config
 from matplotlib.lines import Line2D
 from plot_power_network import load_projection
 from pypsa.plot import add_legend_lines
@@ -23,6 +23,7 @@ if __name__ == "__main__":
             clusters=128,
             configfiles=["../../config/config.test.yaml"],
         )
+    set_scenario_config(snakemake)

     lw_factor = 2e3
scripts/plot_power_network_perfect.py
@@ -13,7 +13,7 @@ import geopandas as gpd
 import matplotlib.pyplot as plt
 import pandas as pd
 import pypsa
-from _helpers import configure_logging
+from _helpers import configure_logging, set_scenario_config
 from plot_power_network import assign_location, load_projection, rename_techs_tyndp
 from plot_summary import preferred_order
 from pypsa.plot import add_legend_circles, add_legend_lines
@@ -184,6 +184,7 @@ if __name__ == "__main__":
         )
     configure_logging(snakemake)
+    set_scenario_config(snakemake)

     n = pypsa.Network(snakemake.input.network)
scripts/plot_statistics.py
@@ -7,7 +7,7 @@
 import matplotlib.pyplot as plt
 import pypsa
 import seaborn as sns
-from _helpers import configure_logging
+from _helpers import configure_logging, set_scenario_config

 sns.set_theme("paper", style="whitegrid")
@@ -24,6 +24,7 @@ if __name__ == "__main__":
             ll="v1.0",
         )
     configure_logging(snakemake)
+    set_scenario_config(snakemake)

     n = pypsa.Network(snakemake.input.network)
@@ -58,7 +59,7 @@ if __name__ == "__main__":
     fig, ax = plt.subplots()
     ds = n.statistics.installed_capacity().dropna()
     ds = ds.drop("Line")
-    ds = ds.drop(("Generator", "Load"))
+    ds = ds.drop(("Generator", "Load"), errors="ignore")
     ds = ds / 1e3
     ds.attrs["unit"] = "GW"
     plot_static_per_carrier(ds, ax)
@@ -67,7 +68,7 @@ if __name__ == "__main__":
     fig, ax = plt.subplots()
     ds = n.statistics.optimal_capacity()
     ds = ds.drop("Line")
-    ds = ds.drop(("Generator", "Load"))
+    ds = ds.drop(("Generator", "Load"), errors="ignore")
     ds = ds / 1e3
     ds.attrs["unit"] = "GW"
     plot_static_per_carrier(ds, ax)
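The added errors="ignore" makes the drop a no-op when ("Generator", "Load") is absent from the statistics MultiIndex, instead of raising a KeyError. A generic pandas illustration with assumed data:

    import pandas as pd

    s = pd.Series([1.0], index=pd.MultiIndex.from_tuples([("Generator", "gas")]))
    s.drop(("Generator", "Load"), errors="ignore")  # returned unchanged, no KeyError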
scripts/plot_summary.py
@@ -11,6 +11,7 @@ import logging

 import matplotlib.gridspec as gridspec
 import matplotlib.pyplot as plt
 import pandas as pd
+from _helpers import configure_logging, set_scenario_config
 from prepare_sector_network import co2_emissions_year

 logger = logging.getLogger(__name__)
@@ -427,13 +428,13 @@ def historical_emissions(countries):
     )

     emissions = co2_totals.loc["electricity"]
-    if "T" in opts:
+    if options["transport"]:
         emissions += co2_totals.loc[[i + " non-elec" for i in ["rail", "road"]]].sum()
-    if "H" in opts:
+    if options["heating"]:
         emissions += co2_totals.loc[
             [i + " non-elec" for i in ["residential", "services"]]
         ].sum()
-    if "I" in opts:
+    if options["industry"]:
         emissions += co2_totals.loc[
             [
                 "industrial non-elec",
@@ -447,7 +448,7 @@ def historical_emissions(countries):
     return emissions


-def plot_carbon_budget_distribution(input_eurostat):
+def plot_carbon_budget_distribution(input_eurostat, options):
     """
     Plot historical carbon emissions in the EU and decarbonization path.
     """
@@ -469,7 +470,7 @@ def plot_carbon_budget_distribution(input_eurostat):
     e_1990 = co2_emissions_year(
         countries,
         input_eurostat,
-        opts,
+        options,
         emissions_scope,
         report_year,
         input_co2,
@@ -572,7 +573,8 @@ if __name__ == "__main__":
         snakemake = mock_snakemake("plot_summary")

-    logging.basicConfig(level=snakemake.config["logging"]["level"])
+    configure_logging(snakemake)
+    set_scenario_config(snakemake)

     n_header = 4
@@ -582,7 +584,9 @@ if __name__ == "__main__":
     plot_balances()

-    for sector_opts in snakemake.params.sector_opts:
-        opts = sector_opts.split("-")
-        if any("cb" in o for o in opts) or snakemake.config["foresight"] == "perfect":
-            plot_carbon_budget_distribution(snakemake.input.eurostat)
+    co2_budget = snakemake.params["co2_budget"]
+    if (
+        isinstance(co2_budget, str) and co2_budget.startswith("cb")
+    ) or snakemake.params["foresight"] == "perfect":
+        options = snakemake.params.sector
+        plot_carbon_budget_distribution(snakemake.input.eurostat, options)

View File

@ -9,7 +9,7 @@ import matplotlib.pyplot as plt
import pandas as pd import pandas as pd
import pypsa import pypsa
import seaborn as sns import seaborn as sns
from _helpers import configure_logging from _helpers import configure_logging, set_scenario_config
sns.set_theme("paper", style="whitegrid") sns.set_theme("paper", style="whitegrid")
@ -187,6 +187,7 @@ if __name__ == "__main__":
ll="v1.0", ll="v1.0",
) )
configure_logging(snakemake) configure_logging(snakemake)
set_scenario_config(snakemake)
countries = snakemake.params.countries countries = snakemake.params.countries

View File

@ -8,7 +8,7 @@ import matplotlib.pyplot as plt
import pandas as pd import pandas as pd
import pypsa import pypsa
import seaborn as sns import seaborn as sns
from _helpers import configure_logging from _helpers import configure_logging, set_scenario_config
sns.set_theme("paper", style="whitegrid") sns.set_theme("paper", style="whitegrid")
@ -24,6 +24,7 @@ if __name__ == "__main__":
ll="v1.0", ll="v1.0",
) )
configure_logging(snakemake) configure_logging(snakemake)
set_scenario_config(snakemake)
n = pypsa.Network(snakemake.input.network) n = pypsa.Network(snakemake.input.network)
n.loads.carrier = "load" n.loads.carrier = "load"

View File

@ -8,7 +8,7 @@ import matplotlib.pyplot as plt
import pandas as pd import pandas as pd
import pypsa import pypsa
import seaborn as sns import seaborn as sns
from _helpers import configure_logging from _helpers import configure_logging, set_scenario_config
from pypsa.statistics import get_bus_and_carrier from pypsa.statistics import get_bus_and_carrier
sns.set_theme("paper", style="whitegrid") sns.set_theme("paper", style="whitegrid")
@ -35,6 +35,7 @@ if __name__ == "__main__":
ll="v1.0", ll="v1.0",
) )
configure_logging(snakemake) configure_logging(snakemake)
set_scenario_config(snakemake)
n = pypsa.Network(snakemake.input.network) n = pypsa.Network(snakemake.input.network)
n.loads.carrier = "load" n.loads.carrier = "load"

View File

@ -40,7 +40,7 @@ Description
import logging import logging
import pandas as pd import pandas as pd
from _helpers import configure_logging from _helpers import configure_logging, set_scenario_config
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -69,6 +69,7 @@ if __name__ == "__main__":
snakemake = mock_snakemake("prepare_links_p_nom", simpl="") snakemake = mock_snakemake("prepare_links_p_nom", simpl="")
configure_logging(snakemake) configure_logging(snakemake)
set_scenario_config(snakemake)
links_p_nom = pd.read_html( links_p_nom = pd.read_html(
"https://en.wikipedia.org/wiki/List_of_HVDC_projects", header=0, match="SwePol" "https://en.wikipedia.org/wiki/List_of_HVDC_projects", header=0, match="SwePol"

View File

@ -62,7 +62,11 @@ import logging
import numpy as np import numpy as np
import pandas as pd import pandas as pd
import pypsa import pypsa
from _helpers import configure_logging, find_opt, get_opt from _helpers import (
configure_logging,
set_scenario_config,
update_config_from_wildcards,
)
from add_electricity import load_costs, update_transmission_costs from add_electricity import load_costs, update_transmission_costs
from pypsa.descriptors import expand_series from pypsa.descriptors import expand_series
@ -71,6 +75,28 @@ idx = pd.IndexSlice
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
def maybe_adjust_costs_and_potentials(n, adjustments):
if not adjustments:
return
for attr, carrier_factor in adjustments.items():
for carrier, factor in carrier_factor.items():
# beware if factor is 0 and p_nom_max is np.inf, 0*np.inf is nan
if carrier == "AC": # lines do not have carrier
n.lines[attr] *= factor
continue
comps = {
"p_nom_max": {"Generator", "Link", "StorageUnit"},
"e_nom_max": {"Store"},
"capital_cost": {"Generator", "Link", "StorageUnit", "Store"},
"marginal_cost": {"Generator", "Link", "StorageUnit", "Store"},
}
for c in n.iterate_components(comps[attr]):
sel = c.df.index[c.df.carrier == carrier]
c.df.loc[sel, attr] *= factor
logger.info(f"changing {attr} for {carrier} by factor {factor}")
def add_co2limit(n, co2limit, Nyears=1.0): def add_co2limit(n, co2limit, Nyears=1.0):
n.add( n.add(
"GlobalConstraint", "GlobalConstraint",
@ -265,7 +291,7 @@ def set_line_nom_max(
n.lines["s_nom_max"] = n.lines["s_nom"] + s_nom_max_ext n.lines["s_nom_max"] = n.lines["s_nom"] + s_nom_max_ext
if np.isfinite(p_nom_max_ext) and p_nom_max_ext > 0: if np.isfinite(p_nom_max_ext) and p_nom_max_ext > 0:
logger.info(f"Limiting line extensions to {p_nom_max_ext} MW") logger.info(f"Limiting link extensions to {p_nom_max_ext} MW")
hvdc = n.links.index[n.links.carrier == "DC"] hvdc = n.links.index[n.links.carrier == "DC"]
n.links.loc[hvdc, "p_nom_max"] = n.links.loc[hvdc, "p_nom"] + p_nom_max_ext n.links.loc[hvdc, "p_nom_max"] = n.links.loc[hvdc, "p_nom"] + p_nom_max_ext
@ -278,11 +304,11 @@ if __name__ == "__main__":
from _helpers import mock_snakemake from _helpers import mock_snakemake
snakemake = mock_snakemake( snakemake = mock_snakemake(
"prepare_network", simpl="", clusters="37", ll="v1.0", opts="Ept" "prepare_network", simpl="", clusters="37", ll="v1.0", opts="Co2L-4H"
) )
configure_logging(snakemake) configure_logging(snakemake)
set_scenario_config(snakemake)
opts = snakemake.wildcards.opts.split("-") update_config_from_wildcards(snakemake.config, snakemake.wildcards)
n = pypsa.Network(snakemake.input[0]) n = pypsa.Network(snakemake.input[0])
Nyears = n.snapshot_weightings.objective.sum() / 8760.0 Nyears = n.snapshot_weightings.objective.sum() / 8760.0
@ -296,78 +322,32 @@ if __name__ == "__main__":
set_line_s_max_pu(n, snakemake.params.lines["s_max_pu"]) set_line_s_max_pu(n, snakemake.params.lines["s_max_pu"])
# temporal averaging # temporal averaging
nhours_config = snakemake.params.snapshots.get("resolution", False) time_resolution = snakemake.params.time_resolution
nhours_wildcard = get_opt(opts, r"^\d+h$") is_string = isinstance(time_resolution, str)
nhours = nhours_wildcard or nhours_config if is_string and time_resolution.lower().endswith("h"):
if nhours: n = average_every_nhours(n, time_resolution)
n = average_every_nhours(n, nhours)
# segments with package tsam # segments with package tsam
time_seg_config = snakemake.params.snapshots.get("segmentation", False) if is_string and time_resolution.lower().endswith("seg"):
time_seg_wildcard = get_opt(opts, r"^\d+seg$")
time_seg = time_seg_wildcard or time_seg_config
if time_seg:
solver_name = snakemake.config["solving"]["solver"]["name"] solver_name = snakemake.config["solving"]["solver"]["name"]
n = apply_time_segmentation(n, time_seg.replace("seg", ""), solver_name) segments = int(time_resolution.replace("seg", ""))
n = apply_time_segmentation(n, segments, solver_name)
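With the wildcard parsing gone, the temporal resolution is a single config value whose suffix selects the method. A sketch that mirrors the dispatch above (values are examples, not defaults):

    def parse_time_resolution(res):
        """Classify a time_resolution config value (sketch of the logic above)."""
        if isinstance(res, str) and res.lower().endswith("seg"):
            return ("segmentation", int(res[:-3]))  # tsam segments
        if isinstance(res, str) and res.lower().endswith("h"):
            return ("averaging", res)  # e.g. "3h" -> average_every_nhours
        return ("native", None)  # False/None keeps the native resolution

    assert parse_time_resolution("100seg") == ("segmentation", 100)
    assert parse_time_resolution("3h") == ("averaging", "3h")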
Co2L_config = snakemake.params.co2limit_enable if snakemake.params.co2limit_enable:
Co2L_wildcard, co2limit_wildcard = find_opt(opts, "Co2L")
if Co2L_wildcard or Co2L_config:
if co2limit_wildcard is not None:
co2limit = co2limit_wildcard * snakemake.params.co2base
add_co2limit(n, co2limit, Nyears)
logger.info("Setting CO2 limit according to wildcard value.")
else:
add_co2limit(n, snakemake.params.co2limit, Nyears) add_co2limit(n, snakemake.params.co2limit, Nyears)
logger.info("Setting CO2 limit according to config value.")
CH4L_config = snakemake.params.gaslimit_enable if snakemake.params.gaslimit_enable:
CH4L_wildcard, gaslimit_wildcard = find_opt(opts, "CH4L")
if CH4L_wildcard or CH4L_config:
if gaslimit_wildcard is not None:
gaslimit = gaslimit_wildcard * 1e6
add_gaslimit(n, gaslimit, Nyears)
logger.info("Setting gas usage limit according to wildcard value.")
else:
add_gaslimit(n, snakemake.params.gaslimit, Nyears) add_gaslimit(n, snakemake.params.gaslimit, Nyears)
logger.info("Setting gas usage limit according to config value.")
for o in opts: maybe_adjust_costs_and_potentials(n, snakemake.params["adjustments"])
if "+" not in o:
continue
oo = o.split("+")
suptechs = map(lambda c: c.split("-", 2)[0], n.carriers.index)
if oo[0].startswith(tuple(suptechs)):
carrier = oo[0]
# handles only p_nom_max as stores and lines have no potentials
attr_lookup = {"p": "p_nom_max", "c": "capital_cost", "m": "marginal_cost"}
attr = attr_lookup[oo[1][0]]
factor = float(oo[1][1:])
if carrier == "AC": # lines do not have carrier
n.lines[attr] *= factor
else:
comps = {"Generator", "Link", "StorageUnit", "Store"}
for c in n.iterate_components(comps):
sel = c.df.carrier.str.contains(carrier)
c.df.loc[sel, attr] *= factor
emission_prices = snakemake.params.costs["emission_prices"] emission_prices = snakemake.params.costs["emission_prices"]
Ept_config = emission_prices.get("co2_monthly_prices", False) if emission_prices["co2_monthly_prices"]:
Ept_wildcard = "Ept" in opts
Ep_config = emission_prices.get("enable", False)
Ep_wildcard, co2_wildcard = find_opt(opts, "Ep")
if Ept_wildcard or Ept_config:
logger.info( logger.info(
"Setting time dependent emission prices according spot market price" "Setting time dependent emission prices according spot market price"
) )
add_dynamic_emission_prices(n) add_dynamic_emission_prices(n)
elif Ep_wildcard or Ep_config: elif emission_prices["enable"]:
if co2_wildcard is not None:
logger.info("Setting CO2 prices according to wildcard value.")
add_emission_prices(n, dict(co2=co2_wildcard))
else:
logger.info("Setting CO2 prices according to config value.")
add_emission_prices( add_emission_prices(
n, dict(co2=snakemake.params.costs["emission_prices"]["co2"]) n, dict(co2=snakemake.params.costs["emission_prices"]["co2"])
) )
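The pruned branches mean emission-price behaviour is now driven entirely by the config. The keys read above, with illustrative values (the numbers are assumptions):

    emission_prices = {
        "co2_monthly_prices": False,  # True -> time-dependent prices from spot market data
        "enable": True,               # add a static CO2 price instead
        "co2": 25.0,                  # assumed EUR/tCO2
    }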
@ -383,11 +363,8 @@ if __name__ == "__main__":
p_nom_max_ext=snakemake.params.links.get("max_extension", np.inf), p_nom_max_ext=snakemake.params.links.get("max_extension", np.inf),
) )
autarky_config = snakemake.params.autarky if snakemake.params.autarky["enable"]:
if "ATK" in opts or autarky_config.get("enable", False): only_crossborder = snakemake.params.autarky["by_country"]
only_crossborder = False
if "ATKc" in opts or autarky_config.get("by_country", False):
only_crossborder = True
enforce_autarky(n, only_crossborder=only_crossborder) enforce_autarky(n, only_crossborder=only_crossborder)
n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards))) n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))

View File

@ -12,7 +12,11 @@ import re
import numpy as np import numpy as np
import pandas as pd import pandas as pd
import pypsa import pypsa
from _helpers import update_config_with_sector_opts from _helpers import (
configure_logging,
set_scenario_config,
update_config_from_wildcards,
)
from add_existing_baseyear import add_build_year_to_new_assets from add_existing_baseyear import add_build_year_to_new_assets
from pypsa.descriptors import expand_series from pypsa.descriptors import expand_series
from pypsa.io import import_components_from_dataframe from pypsa.io import import_components_from_dataframe
@ -304,17 +308,14 @@ def set_all_phase_outs(n):
n.mremove("Link", remove_i) n.mremove("Link", remove_i)
def set_carbon_constraints(n, opts): def set_carbon_constraints(n):
""" """
Add global constraints for carbon emissions. Add global constraints for carbon emissions.
""" """
budget = None budget = snakemake.config["co2_budget"]
for o in opts: if budget and isinstance(budget, float):
# other budgets budget *= 1e9 # convert to t CO2
m = re.match(r"^\d+p\d$", o, re.IGNORECASE)
if m is not None:
budget = snakemake.config["co2_budget"][m.group(0)] * 1e9
if budget is not None:
logger.info(f"add carbon budget of {budget}") logger.info(f"add carbon budget of {budget}")
n.add( n.add(
"GlobalConstraint", "GlobalConstraint",
@ -341,7 +342,7 @@ def set_carbon_constraints(n, opts):
) )
# set minimum CO2 emission constraint to avoid too fast reduction # set minimum CO2 emission constraint to avoid too fast reduction
if "co2min" in opts: if "co2min" in snakemake.wildcards.sector_opts.split("-"):
emissions_1990 = 4.53693 emissions_1990 = 4.53693
emissions_2019 = 3.344096 emissions_2019 = 3.344096
target_2030 = 0.45 * emissions_1990 target_2030 = 0.45 * emissions_1990
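Since co2_budget is now read directly from the config, a float is interpreted in Gt CO2 and scaled once. A worked example under that assumption:

    budget = 45.0   # hypothetical config value in Gt CO2
    budget *= 1e9   # 4.5e10 t CO2, the unit used by the GlobalConstraint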
@ -487,21 +488,6 @@ def apply_time_segmentation_perfect(
return n return n
def set_temporal_aggregation_SEG(n, opts, solver_name):
"""
Aggregate network temporally with tsam.
"""
for o in opts:
# segments with package tsam
m = re.match(r"^(\d+)seg$", o, re.IGNORECASE)
if m is not None:
segments = int(m[1])
logger.info(f"Use temporal segmentation with {segments} segments")
n = apply_time_segmentation_perfect(n, segments, solver_name=solver_name)
break
return n
if __name__ == "__main__": if __name__ == "__main__":
if "snakemake" not in globals(): if "snakemake" not in globals():
from _helpers import mock_snakemake from _helpers import mock_snakemake
@ -514,15 +500,13 @@ if __name__ == "__main__":
ll="v1.5", ll="v1.5",
sector_opts="1p7-4380H-T-H-B-I-A-dist1", sector_opts="1p7-4380H-T-H-B-I-A-dist1",
) )
configure_logging(snakemake)
set_scenario_config(snakemake)
update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts) update_config_from_wildcards(snakemake.config, snakemake.wildcards)
# parameters ----------------------------------------------------------- # parameters -----------------------------------------------------------
years = snakemake.config["scenario"]["planning_horizons"] years = snakemake.config["scenario"]["planning_horizons"]
opts = snakemake.wildcards.sector_opts.split("-") social_discountrate = snakemake.params.costs["social_discountrate"]
social_discountrate = snakemake.config["costs"]["social_discountrate"]
for o in opts:
if "sdr" in o:
social_discountrate = float(o.replace("sdr", "")) / 100
logger.info( logger.info(
f"Concat networks of investment period {years} with social discount rate of {social_discountrate * 100}%" f"Concat networks of investment period {years} with social discount rate of {social_discountrate * 100}%"
@ -532,9 +516,10 @@ if __name__ == "__main__":
n = concat_networks(years) n = concat_networks(years)
# temporal aggregate # temporal aggregate
opts = snakemake.wildcards.sector_opts.split("-")
solver_name = snakemake.config["solving"]["solver"]["name"] solver_name = snakemake.config["solving"]["solver"]["name"]
n = set_temporal_aggregation_SEG(n, opts, solver_name) segments = snakemake.params.time_resolution
if isinstance(segments, (int, float)):
n = apply_time_segmentation_perfect(n, segments, solver_name=solver_name)
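In the perfect-foresight path, time_resolution is expected as a bare segment count rather than the "…seg" string used in the sector workflow. A minimal sketch under that assumption:

    time_resolution = 100  # assumed: an integer segment count, not "100seg"
    # isinstance(time_resolution, (int, float)) -> True, so segmentation runs;
    # a string value would be skipped silently here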
# adjust global constraints lv limit if the same for all years # adjust global constraints lv limit if the same for all years
n = adjust_lvlimit(n) n = adjust_lvlimit(n)
@ -550,8 +535,7 @@ if __name__ == "__main__":
add_H2_boilers(n) add_H2_boilers(n)
# set carbon constraints # set carbon constraints
opts = snakemake.wildcards.sector_opts.split("-") n = set_carbon_constraints(n)
n = set_carbon_constraints(n, opts)
# export network # export network
n.export_to_netcdf(snakemake.output[0]) n.export_to_netcdf(snakemake.output[0])

View File

@ -9,7 +9,6 @@ technologies for the buildings, transport and industry sectors.
import logging import logging
import os import os
import re
from itertools import product from itertools import product
from types import SimpleNamespace from types import SimpleNamespace
@ -18,11 +17,16 @@ import numpy as np
import pandas as pd import pandas as pd
import pypsa import pypsa
import xarray as xr import xarray as xr
from _helpers import update_config_with_sector_opts from _helpers import (
configure_logging,
set_scenario_config,
update_config_from_wildcards,
)
from add_electricity import calculate_annuity, sanitize_carriers, sanitize_locations from add_electricity import calculate_annuity, sanitize_carriers, sanitize_locations
from build_energy_totals import build_co2_totals, build_eea_co2, build_eurostat_co2 from build_energy_totals import build_co2_totals, build_eea_co2, build_eurostat_co2
from networkx.algorithms import complement from networkx.algorithms import complement
from networkx.algorithms.connectivity.edge_augmentation import k_edge_augmentation from networkx.algorithms.connectivity.edge_augmentation import k_edge_augmentation
from prepare_network import maybe_adjust_costs_and_potentials
from pypsa.geo import haversine_pts from pypsa.geo import haversine_pts
from pypsa.io import import_components_from_dataframe from pypsa.io import import_components_from_dataframe
from scipy.stats import beta from scipy.stats import beta
@ -190,13 +194,13 @@ def define_spatial(nodes, options):
spatial = SimpleNamespace() spatial = SimpleNamespace()
def emission_sectors_from_opts(opts): def determine_emission_sectors(options):
sectors = ["electricity"] sectors = ["electricity"]
if "T" in opts: if options["transport"]:
sectors += ["rail non-elec", "road non-elec"] sectors += ["rail non-elec", "road non-elec"]
if "H" in opts: if options["heating"]:
sectors += ["residential non-elec", "services non-elec"] sectors += ["residential non-elec", "services non-elec"]
if "I" in opts: if options["industry"]:
sectors += [ sectors += [
"industrial non-elec", "industrial non-elec",
"industrial processes", "industrial processes",
@ -205,7 +209,7 @@ def emission_sectors_from_opts(opts):
"domestic navigation", "domestic navigation",
"international navigation", "international navigation",
] ]
if "A" in opts: if options["agriculture"]:
sectors += ["agriculture"] sectors += ["agriculture"]
return sectors return sectors
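Renamed from emission_sectors_from_opts, the function now keys off boolean options. For example (flag values chosen for illustration):

    options = {"transport": True, "heating": True,
               "industry": False, "agriculture": False}
    determine_emission_sectors(options)
    # -> ["electricity", "rail non-elec", "road non-elec",
    #     "residential non-elec", "services non-elec"]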
@ -215,11 +219,36 @@ def get(item, investment_year=None):
""" """
Check whether item depends on investment year. Check whether item depends on investment year.
""" """
return item[investment_year] if isinstance(item, dict) else item if not isinstance(item, dict):
return item
elif investment_year in item.keys():
return item[investment_year]
else:
logger.warning(
f"Investment key {investment_year} not found in dictionary {item}."
)
keys = sorted(item.keys())
if investment_year < keys[0]:
logger.warning(f"Lower than minimum key. Taking minimum key {keys[0]}")
return item[keys[0]]
elif investment_year > keys[-1]:
logger.warning(f"Higher than maximum key. Taking maximum key {keys[0]}")
return item[keys[-1]]
else:
logger.warning(
"Interpolate linearly between the next lower and next higher year."
)
lower_key = max(k for k in keys if k < investment_year)
higher_key = min(k for k in keys if k > investment_year)
lower = item[lower_key]
higher = item[higher_key]
return lower + (higher - lower) * (investment_year - lower_key) / (
higher_key - lower_key
)
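The extended get() now falls back gracefully for investment years missing from a per-year dictionary. A worked example:

    item = {2030: 100.0, 2050: 50.0}
    get(item, 2040)  # -> 75.0, linear interpolation between 2030 and 2050
    get(item, 2020)  # -> 100.0, clamped to the minimum key
    get(item, 2060)  # -> 50.0, clamped to the maximum key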
def co2_emissions_year( def co2_emissions_year(
countries, input_eurostat, opts, emissions_scope, report_year, input_co2, year countries, input_eurostat, options, emissions_scope, report_year, input_co2, year
): ):
""" """
Calculate CO2 emissions in one specific year (e.g. 1990 or 2018). Calculate CO2 emissions in one specific year (e.g. 1990 or 2018).
@ -237,7 +266,7 @@ def co2_emissions_year(
co2_totals = build_co2_totals(countries, eea_co2, eurostat_co2) co2_totals = build_co2_totals(countries, eea_co2, eurostat_co2)
sectors = emission_sectors_from_opts(opts) sectors = determine_emission_sectors(options)
co2_emissions = co2_totals.loc[countries, sectors].sum().sum() co2_emissions = co2_totals.loc[countries, sectors].sum().sum()
@ -248,11 +277,12 @@ def co2_emissions_year(
# TODO: move to own rule with sector-opts wildcard? # TODO: move to own rule with sector-opts wildcard?
def build_carbon_budget(o, input_eurostat, fn, emissions_scope, report_year): def build_carbon_budget(
o, input_eurostat, fn, emissions_scope, report_year, input_co2, options
):
""" """
Distribute carbon budget following beta or exponential transition path. Distribute carbon budget following beta or exponential transition path.
""" """
# opts?
if "be" in o: if "be" in o:
# beta decay # beta decay
@ -268,7 +298,7 @@ def build_carbon_budget(o, input_eurostat, fn, emissions_scope, report_year):
e_1990 = co2_emissions_year( e_1990 = co2_emissions_year(
countries, countries,
input_eurostat, input_eurostat,
opts, options,
emissions_scope, emissions_scope,
report_year, report_year,
input_co2, input_co2,
@ -279,7 +309,7 @@ def build_carbon_budget(o, input_eurostat, fn, emissions_scope, report_year):
e_0 = co2_emissions_year( e_0 = co2_emissions_year(
countries, countries,
input_eurostat, input_eurostat,
opts, options,
emissions_scope, emissions_scope,
report_year, report_year,
input_co2, input_co2,
@ -758,12 +788,12 @@ def add_dac(n, costs):
) )
def add_co2limit(n, nyears=1.0, limit=0.0): def add_co2limit(n, options, nyears=1.0, limit=0.0):
logger.info(f"Adding CO2 budget limit as per unit of 1990 levels of {limit}") logger.info(f"Adding CO2 budget limit as per unit of 1990 levels of {limit}")
countries = snakemake.params.countries countries = snakemake.params.countries
sectors = emission_sectors_from_opts(opts) sectors = determine_emission_sectors(options)
# convert Mt to tCO2 # convert Mt to tCO2
co2_totals = 1e6 * pd.read_csv(snakemake.input.co2_totals_name, index_col=0) co2_totals = 1e6 * pd.read_csv(snakemake.input.co2_totals_name, index_col=0)
@ -2002,13 +2032,6 @@ def add_heat(n, costs):
if options["retrofitting"]["retro_endogen"]: if options["retrofitting"]["retro_endogen"]:
logger.info("Add retrofitting endogenously") logger.info("Add retrofitting endogenously")
# resample heat demand temporal 'heat_demand_r' depending on in config
# specified temporal resolution, to not overestimate retrofitting
hours = list(filter(re.compile(r"^\d+h$", re.IGNORECASE).search, opts))
if len(hours) == 0:
hours = [n.snapshots[1] - n.snapshots[0]]
heat_demand_r = heat_demand.resample(hours[0]).mean()
# retrofitting data 'retro_data' with 'costs' [EUR/m^2] and heat # retrofitting data 'retro_data' with 'costs' [EUR/m^2] and heat
# demand 'dE' [per unit of original heat demand] for each country and # demand 'dE' [per unit of original heat demand] for each country and
# different retrofitting strengths [additional insulation thickness in m] # different retrofitting strengths [additional insulation thickness in m]
@ -2026,12 +2049,12 @@ def add_heat(n, costs):
# share of space heat demand 'w_space' of total heat demand # share of space heat demand 'w_space' of total heat demand
w_space = {} w_space = {}
for sector in sectors: for sector in sectors:
w_space[sector] = heat_demand_r[sector + " space"] / ( w_space[sector] = heat_demand[sector + " space"] / (
heat_demand_r[sector + " space"] + heat_demand_r[sector + " water"] heat_demand[sector + " space"] + heat_demand[sector + " water"]
) )
w_space["tot"] = ( w_space["tot"] = (
heat_demand_r["services space"] + heat_demand_r["residential space"] heat_demand["services space"] + heat_demand["residential space"]
) / heat_demand_r.T.groupby(level=[1]).sum().T ) / heat_demand.T.groupby(level=[1]).sum().T
for name in n.loads[ for name in n.loads[
n.loads.carrier.isin([x + " heat" for x in heat_systems]) n.loads.carrier.isin([x + " heat" for x in heat_systems])
@ -2061,7 +2084,7 @@ def add_heat(n, costs):
pop_layout.loc[node].fraction * floor_area.loc[ct, "value"] * 10**6 pop_layout.loc[node].fraction * floor_area.loc[ct, "value"] * 10**6
).loc[sec] * f ).loc[sec] * f
# total heat demand at node [MWh] # total heat demand at node [MWh]
demand = n.loads_t.p_set[name].resample(hours[0]).mean() demand = n.loads_t.p_set[name]
# space heat demand at node [MWh] # space heat demand at node [MWh]
space_heat_demand = demand * w_space[sec][node] space_heat_demand = demand * w_space[sec][node]
@ -3061,6 +3084,8 @@ def add_industry(n, costs):
+ mwh_coal_per_mwh_coke * industrial_demand["coke"] + mwh_coal_per_mwh_coke * industrial_demand["coke"]
) / nhours ) / nhours
p_set.rename(lambda x: x + " coal for industry", inplace=True)
if not options["regional_coal_demand"]: if not options["regional_coal_demand"]:
p_set = p_set.sum() p_set = p_set.sum()
@ -3294,52 +3319,6 @@ def remove_h2_network(n):
n.stores.drop("EU H2 Store", inplace=True) n.stores.drop("EU H2 Store", inplace=True)
def maybe_adjust_costs_and_potentials(n, opts):
for o in opts:
flags = ["+e", "+p", "+m", "+c"]
if all(flag not in o for flag in flags):
continue
oo = o.split("+")
carrier_list = np.hstack(
(
n.generators.carrier.unique(),
n.links.carrier.unique(),
n.stores.carrier.unique(),
n.storage_units.carrier.unique(),
)
)
suptechs = map(lambda c: c.split("-", 2)[0], carrier_list)
if oo[0].startswith(tuple(suptechs)):
carrier = oo[0]
attr_lookup = {
"p": "p_nom_max",
"e": "e_nom_max",
"c": "capital_cost",
"m": "marginal_cost",
}
attr = attr_lookup[oo[1][0]]
factor = float(oo[1][1:])
# beware if factor is 0 and p_nom_max is np.inf, 0*np.inf is nan
if carrier == "AC": # lines do not have carrier
n.lines[attr] *= factor
else:
if attr == "p_nom_max":
comps = {"Generator", "Link", "StorageUnit"}
elif attr == "e_nom_max":
comps = {"Store"}
else:
comps = {"Generator", "Link", "StorageUnit", "Store"}
for c in n.iterate_components(comps):
if carrier == "solar":
sel = c.df.carrier.str.contains(
carrier
) & ~c.df.carrier.str.contains("solar rooftop")
else:
sel = c.df.carrier.str.contains(carrier)
c.df.loc[sel, attr] *= factor
logger.info(f"changing {attr} for {carrier} by factor {factor}")
def limit_individual_line_extension(n, maxext): def limit_individual_line_extension(n, maxext):
logger.info(f"Limiting new HVAC and HVDC extensions to {maxext} MW") logger.info(f"Limiting new HVAC and HVDC extensions to {maxext} MW")
n.lines["s_nom_max"] = n.lines["s_nom"] + maxext n.lines["s_nom_max"] = n.lines["s_nom"] + maxext
@ -3509,31 +3488,31 @@ def apply_time_segmentation(
return n return n
def set_temporal_aggregation(n, opts, solver_name): def set_temporal_aggregation(n, resolution, solver_name):
""" """
Aggregate network temporally. Aggregate network temporally.
""" """
for o in opts: if not resolution:
# temporal averaging return n
m = re.match(r"^\d+h$", o, re.IGNORECASE)
if m is not None:
n = average_every_nhours(n, m.group(0))
break
# representative snapshots # representative snapshots
m = re.match(r"(^\d+)sn$", o, re.IGNORECASE) if "sn" in resolution.lower():
if m is not None: sn = int(resolution[:-2])
sn = int(m[1]) logger.info("Use every %s snapshot as representative", sn)
logger.info(f"Use every {sn} snapshot as representative")
n.set_snapshots(n.snapshots[::sn]) n.set_snapshots(n.snapshots[::sn])
n.snapshot_weightings *= sn n.snapshot_weightings *= sn
break
# segments with package tsam # segments with package tsam
m = re.match(r"^(\d+)seg$", o, re.IGNORECASE) elif "seg" in resolution.lower():
if m is not None: segments = int(resolution[:-3])
segments = int(m[1]) logger.info("Use temporal segmentation with %s segments", segments)
logger.info(f"Use temporal segmentation with {segments} segments")
n = apply_time_segmentation(n, segments, solver_name=solver_name) n = apply_time_segmentation(n, segments, solver_name=solver_name)
break
# temporal averaging
elif "h" in resolution.lower():
logger.info("Aggregate to frequency %s", resolution)
n = average_every_nhours(n, resolution)
return n return n
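The resolution string now selects between three reduction methods; illustrative calls (the values are examples):

    n = set_temporal_aggregation(n, "3h", solver_name)      # 3-hourly averaging
    n = set_temporal_aggregation(n, "8sn", solver_name)     # every 8th snapshot
    n = set_temporal_aggregation(n, "100seg", solver_name)  # 100 tsam segments
    n = set_temporal_aggregation(n, False, solver_name)     # no aggregation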
@ -3602,14 +3581,12 @@ if __name__ == "__main__":
planning_horizons="2030", planning_horizons="2030",
) )
logging.basicConfig(level=snakemake.config["logging"]["level"]) configure_logging(snakemake)
set_scenario_config(snakemake)
update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts) update_config_from_wildcards(snakemake.config, snakemake.wildcards)
options = snakemake.params.sector options = snakemake.params.sector
opts = snakemake.wildcards.sector_opts.split("-")
investment_year = int(snakemake.wildcards.planning_horizons[-4:]) investment_year = int(snakemake.wildcards.planning_horizons[-4:])
n = pypsa.Network(snakemake.input.network) n = pypsa.Network(snakemake.input.network)
@ -3647,56 +3624,34 @@ if __name__ == "__main__":
add_storage_and_grids(n, costs) add_storage_and_grids(n, costs)
# TODO merge with opts cost adjustment below if options["transport"]:
for o in opts:
if o[:4] == "dist":
options["electricity_distribution_grid"] = True
options["electricity_distribution_grid_cost_factor"] = float(
o[4:].replace("p", ".").replace("m", "-")
)
if o == "biomasstransport":
options["biomass_transport"] = True
if "nodistrict" in opts:
options["district_heating"]["progress"] = 0.0
if "nowasteheat" in opts:
logger.info("Disabling waste heat.")
options["use_fischer_tropsch_waste_heat"] = False
options["use_methanolisation_waste_heat"] = False
options["use_haber_bosch_waste_heat"] = False
options["use_methanation_waste_heat"] = False
options["use_fuel_cell_waste_heat"] = False
options["use_electrolysis_waste_heat"] = False
if "T" in opts:
add_land_transport(n, costs) add_land_transport(n, costs)
if "H" in opts: if options["heating"]:
add_heat(n, costs) add_heat(n, costs)
if "B" in opts: if options["biomass"]:
add_biomass(n, costs) add_biomass(n, costs)
if options["ammonia"]: if options["ammonia"]:
add_ammonia(n, costs) add_ammonia(n, costs)
if "I" in opts: if options["industry"]:
add_industry(n, costs) add_industry(n, costs)
if "H" in opts: if options["heating"]:
add_waste_heat(n) add_waste_heat(n)
if "A" in opts: # requires H and I if options["agriculture"]: # requires H and I
add_agriculture(n, costs) add_agriculture(n, costs)
if options["dac"]: if options["dac"]:
add_dac(n, costs) add_dac(n, costs)
if "decentral" in opts: if not options["electricity_transmission_grid"]:
decentral(n) decentral(n)
if "noH2network" in opts: if not options["H2_network"]:
remove_h2_network(n) remove_h2_network(n)
if options["co2network"]: if options["co2network"]:
@ -3706,51 +3661,39 @@ if __name__ == "__main__":
add_allam(n, costs) add_allam(n, costs)
solver_name = snakemake.config["solving"]["solver"]["name"] solver_name = snakemake.config["solving"]["solver"]["name"]
n = set_temporal_aggregation(n, opts, solver_name) resolution = snakemake.params.time_resolution
n = set_temporal_aggregation(n, resolution, solver_name)
limit_type = "config" co2_budget = snakemake.params.co2_budget
limit = get(snakemake.params.co2_budget, investment_year) if isinstance(co2_budget, str) and co2_budget.startswith("cb"):
for o in opts:
if "cb" not in o:
continue
limit_type = "carbon budget"
fn = "results/" + snakemake.params.RDIR + "/csvs/carbon_budget_distribution.csv" fn = "results/" + snakemake.params.RDIR + "/csvs/carbon_budget_distribution.csv"
if not os.path.exists(fn): if not os.path.exists(fn):
emissions_scope = snakemake.params.emissions_scope emissions_scope = snakemake.params.emissions_scope
report_year = snakemake.params.eurostat_report_year report_year = snakemake.params.eurostat_report_year
input_co2 = snakemake.input.co2 input_co2 = snakemake.input.co2
build_carbon_budget( build_carbon_budget(
o, co2_budget,
snakemake.input.eurostat, snakemake.input.eurostat,
fn, fn,
emissions_scope, emissions_scope,
report_year, report_year,
input_co2, input_co2,
options,
) )
co2_cap = pd.read_csv(fn, index_col=0).squeeze() co2_cap = pd.read_csv(fn, index_col=0).squeeze()
limit = co2_cap.loc[investment_year] limit = co2_cap.loc[investment_year]
break else:
for o in opts: limit = get(co2_budget, investment_year)
if "Co2L" not in o: add_co2limit(n, options, nyears, limit)
continue
limit_type = "wildcard"
limit = o[o.find("Co2L") + 4 :]
limit = float(limit.replace("p", ".").replace("m", "-"))
break
logger.info(f"Add CO2 limit from {limit_type}")
add_co2limit(n, nyears, limit)
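The CO2 limit now comes from a single co2_budget config value: a "cb…" string triggers the carbon-budget distribution (with "be"/"ex" in the suffix selecting a beta or exponential path, per build_carbon_budget above), anything else is resolved via get(). A sketch of plausible config values (the exact string format is an assumption here):

    co2_budget = "cb45ex0"                # budget-style: distributed over years
    co2_budget = 0.45                     # scalar: fraction of 1990 emissions
    co2_budget = {2030: 0.45, 2050: 0.0}  # per-year dict resolved via get()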
for o in opts: maxext = snakemake.params["lines"]["max_extension"]
if not o[:10] == "linemaxext": if maxext is not None:
continue
maxext = float(o[10:]) * 1e3
limit_individual_line_extension(n, maxext) limit_individual_line_extension(n, maxext)
break
if options["electricity_distribution_grid"]: if options["electricity_distribution_grid"]:
insert_electricity_distribution_grid(n, costs) insert_electricity_distribution_grid(n, costs)
maybe_adjust_costs_and_potentials(n, opts) maybe_adjust_costs_and_potentials(n, snakemake.params["adjustments"])
if options["gas_distribution_grid"]: if options["gas_distribution_grid"]:
insert_gas_distribution_costs(n, costs) insert_gas_distribution_costs(n, costs)

View File

@ -0,0 +1,44 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2024 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Retrieve cost data from ``technology-data``.
"""
import logging
from pathlib import Path
from _helpers import configure_logging, progress_retrieve, set_scenario_config
logger = logging.getLogger(__name__)
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake("retrieve_cost_data", year=2030)
rootpath = ".."
else:
rootpath = "."
configure_logging(snakemake)
set_scenario_config(snakemake)
version = snakemake.params.version
baseurl = (
f"https://raw.githubusercontent.com/PyPSA/technology-data/{version}/outputs/"
)
filepath = Path(snakemake.output[0])
url = baseurl + filepath.name
to_fn = Path(rootpath) / filepath
logger.info(f"Downloading technology data from '{url}'.")
disable_progress = snakemake.config["run"].get("disable_progressbar", False)
progress_retrieve(url, to_fn, disable=disable_progress)
logger.info(f"Technology data available at at {to_fn}")

Some files were not shown because too many files have changed in this diff.