Merge remote-tracking branch 'origin/master' into multiyear

Fabian Neumann 2023-07-26 10:23:32 +02:00
commit ddd1840981
93 changed files with 1712 additions and 1285 deletions

View File

@ -3,3 +3,6 @@ contact_links:
- name: PyPSA Mailing List - name: PyPSA Mailing List
url: https://groups.google.com/forum/#!forum/pypsa url: https://groups.google.com/forum/#!forum/pypsa
about: Please ask and answer general usage questions here. about: Please ask and answer general usage questions here.
- name: Stackoverflow
url: https://stackoverflow.com/questions/tagged/pypsa
about: Please ask and answer code-related questions here.

View File

@ -19,7 +19,6 @@ on:
- cron: "0 5 * * TUE" - cron: "0 5 * * TUE"
env: env:
CONDA_CACHE_NUMBER: 1 # Change this value to manually reset the environment cache
DATA_CACHE_NUMBER: 2 DATA_CACHE_NUMBER: 2
jobs: jobs:
@ -27,22 +26,12 @@ jobs:
strategy: strategy:
fail-fast: false fail-fast: false
max-parallel: 3
matrix: matrix:
include: os:
# Matrix required to handle caching with Mambaforge - ubuntu-latest
- os: ubuntu-latest - macos-latest
label: ubuntu-latest - windows-latest
prefix: /usr/share/miniconda3/envs/pypsa-eur
- os: macos-latest
label: macos-latest
prefix: /Users/runner/miniconda3/envs/pypsa-eur
- os: windows-latest
label: windows-latest
prefix: C:\Miniconda3\envs\pypsa-eur
name: ${{ matrix.label }}
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
@ -60,24 +49,25 @@ jobs:
- name: Add solver to environment - name: Add solver to environment
run: | run: |
echo -e "- glpk\n- ipopt<3.13.3" >> envs/environment.yaml echo -e "- glpk\n- ipopt<3.13.3" >> envs/environment.yaml
if: ${{ matrix.label }} == 'windows-latest' if: ${{ matrix.os }} == 'windows-latest'
- name: Add solver to environment - name: Add solver to environment
run: | run: |
echo -e "- glpk\n- ipopt" >> envs/environment.yaml echo -e "- glpk\n- ipopt" >> envs/environment.yaml
if: ${{ matrix.label }} != 'windows-latest' if: ${{ matrix.os }} != 'windows-latest'
- name: Setup Mambaforge - name: Setup micromamba
uses: conda-incubator/setup-miniconda@v2 uses: mamba-org/setup-micromamba@v1
with: with:
miniforge-variant: Mambaforge micromamba-version: latest
miniforge-version: latest environment-file: envs/environment.yaml
activate-environment: pypsa-eur log-level: debug
use-mamba: true init-shell: bash
cache-environment: true
cache-downloads: true
- name: Set cache dates - name: Set cache dates
run: | run: |
echo "DATE=$(date +'%Y%m%d')" >> $GITHUB_ENV
echo "WEEK=$(date +'%Y%U')" >> $GITHUB_ENV echo "WEEK=$(date +'%Y%U')" >> $GITHUB_ENV
- name: Cache data and cutouts folders - name: Cache data and cutouts folders
@ -88,21 +78,8 @@ jobs:
cutouts cutouts
key: data-cutouts-${{ env.WEEK }}-${{ env.DATA_CACHE_NUMBER }} key: data-cutouts-${{ env.WEEK }}-${{ env.DATA_CACHE_NUMBER }}
- name: Create environment cache
uses: actions/cache@v3
id: cache
with:
path: ${{ matrix.prefix }}
key: ${{ matrix.label }}-conda-${{ env.DATE }}-${{ env.CONDA_CACHE_NUMBER }}
- name: Update environment due to outdated or unavailable cache
run: mamba env update -n pypsa-eur -f envs/environment.yaml
if: steps.cache.outputs.cache-hit != 'true'
- name: Test snakemake workflow - name: Test snakemake workflow
run: | run: |
conda activate pypsa-eur
conda list
snakemake -call solve_elec_networks --configfile config/test/config.electricity.yaml --rerun-triggers=mtime snakemake -call solve_elec_networks --configfile config/test/config.electricity.yaml --rerun-triggers=mtime
snakemake -call all --configfile config/test/config.overnight.yaml --rerun-triggers=mtime snakemake -call all --configfile config/test/config.overnight.yaml --rerun-triggers=mtime
snakemake -call all --configfile config/test/config.myopic.yaml --rerun-triggers=mtime snakemake -call all --configfile config/test/config.myopic.yaml --rerun-triggers=mtime

View File

@ -30,7 +30,7 @@ repos:
# Find common spelling mistakes in comments and docstrings # Find common spelling mistakes in comments and docstrings
- repo: https://github.com/codespell-project/codespell - repo: https://github.com/codespell-project/codespell
rev: v2.2.4 rev: v2.2.5
hooks: hooks:
- id: codespell - id: codespell
args: ['--ignore-regex="(\b[A-Z]+\b)"', '--ignore-words-list=fom,appartment,bage,ore,setis,tabacco,berfore'] # Ignore capital case words, e.g. country codes args: ['--ignore-regex="(\b[A-Z]+\b)"', '--ignore-words-list=fom,appartment,bage,ore,setis,tabacco,berfore'] # Ignore capital case words, e.g. country codes
@ -39,7 +39,7 @@ repos:
# Make docstrings PEP 257 compliant # Make docstrings PEP 257 compliant
- repo: https://github.com/PyCQA/docformatter - repo: https://github.com/PyCQA/docformatter
rev: v1.6.3 rev: v1.7.5
hooks: hooks:
- id: docformatter - id: docformatter
args: ["--in-place", "--make-summary-multi-line", "--pre-summary-newline"] args: ["--in-place", "--make-summary-multi-line", "--pre-summary-newline"]
@ -51,7 +51,7 @@ repos:
# Formatting with "black" coding style # Formatting with "black" coding style
- repo: https://github.com/psf/black - repo: https://github.com/psf/black
rev: 23.3.0 rev: 23.7.0
hooks: hooks:
# Format Python files # Format Python files
- id: black - id: black
@ -67,7 +67,7 @@ repos:
# Do YAML formatting (before the linter checks it for misses) # Do YAML formatting (before the linter checks it for misses)
- repo: https://github.com/macisamuele/language-formatters-pre-commit-hooks - repo: https://github.com/macisamuele/language-formatters-pre-commit-hooks
rev: v2.8.0 rev: v2.10.0
hooks: hooks:
- id: pretty-format-yaml - id: pretty-format-yaml
args: [--autofix, --indent, "2", --preserve-quotes] args: [--autofix, --indent, "2", --preserve-quotes]
@ -87,6 +87,6 @@ repos:
# Check for FSFE REUSE compliance (licensing) # Check for FSFE REUSE compliance (licensing)
- repo: https://github.com/fsfe/reuse-tool - repo: https://github.com/fsfe/reuse-tool
rev: v1.1.2 rev: v2.1.0
hooks: hooks:
- id: reuse - id: reuse

View File

@ -4,8 +4,14 @@
version: 2 version: 2
build:
os: ubuntu-22.04
tools:
python: "3.11"
apt_packages:
- graphviz
python: python:
version: 3.8
install: install:
- requirements: doc/requirements.txt - requirements: doc/requirements.txt
system_packages: true system_packages: false

View File

@ -11,6 +11,7 @@ SPDX-License-Identifier: CC-BY-4.0
[![Zenodo PyPSA-Eur-Sec](https://zenodo.org/badge/DOI/10.5281/zenodo.3938042.svg)](https://doi.org/10.5281/zenodo.3938042) [![Zenodo PyPSA-Eur-Sec](https://zenodo.org/badge/DOI/10.5281/zenodo.3938042.svg)](https://doi.org/10.5281/zenodo.3938042)
[![Snakemake](https://img.shields.io/badge/snakemake-≥5.0.0-brightgreen.svg?style=flat)](https://snakemake.readthedocs.io) [![Snakemake](https://img.shields.io/badge/snakemake-≥5.0.0-brightgreen.svg?style=flat)](https://snakemake.readthedocs.io)
[![REUSE status](https://api.reuse.software/badge/github.com/pypsa/pypsa-eur)](https://api.reuse.software/info/github.com/pypsa/pypsa-eur) [![REUSE status](https://api.reuse.software/badge/github.com/pypsa/pypsa-eur)](https://api.reuse.software/info/github.com/pypsa/pypsa-eur)
[![Stack Exchange questions](https://img.shields.io/stackexchange/stackoverflow/t/pypsa)](https://stackoverflow.com/questions/tagged/pypsa)
# PyPSA-Eur: A Sector-Coupled Open Optimisation Model of the European Energy System # PyPSA-Eur: A Sector-Coupled Open Optimisation Model of the European Energy System
@ -90,6 +91,14 @@ to 50-200 nodes.
Already-built versions of the model can be found in the accompanying [Zenodo Already-built versions of the model can be found in the accompanying [Zenodo
repository](https://doi.org/10.5281/zenodo.3601881). repository](https://doi.org/10.5281/zenodo.3601881).
# Contributing and Support
We strongly welcome anyone interested in contributing to this project. If you have any ideas or suggestions or encounter problems, feel free to file issues or open pull requests on GitHub.
- For code-related **questions**, please post on [Stack Overflow](https://stackoverflow.com/questions/tagged/pypsa).
- For non-programming and more general questions, please refer to the [mailing list](https://groups.google.com/group/pypsa).
- To **discuss** with other PyPSA users, organise projects, share news and get in touch with the community, you can use the [Discord server](https://discord.com/invite/AnuJBk23FU).
- For **bugs and feature requests**, please use the [PyPSA-Eur GitHub Issues page](https://github.com/PyPSA/pypsa-eur/issues).
# Licence # Licence
The code in PyPSA-Eur is released as free software under the The code in PyPSA-Eur is released as free software under the

View File

@ -2,6 +2,7 @@
# #
# SPDX-License-Identifier: CC0-1.0 # SPDX-License-Identifier: CC0-1.0
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#top-level-configuration
version: 0.8.0 version: 0.8.0
tutorial: false tutorial: false
@ -9,65 +10,52 @@ logging:
level: INFO level: INFO
format: '%(levelname)s:%(name)s:%(message)s' format: '%(levelname)s:%(name)s:%(message)s'
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#run
run: run:
name: "" # use this to keep track of runs with different settings name: ""
disable_progressbar: false # set to true to disable the progressbar disable_progressbar: false
shared_resources: false # set to true to share the default resources across runs shared_resources: false
shared_cutouts: true # set to true to share the default cutout(s) across runs shared_cutouts: true
foresight: overnight # options are overnight, myopic, perfect (perfect is not yet implemented) # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#foresight
# if you use myopic or perfect foresight, set the investment years in "planning_horizons" below foresight: overnight
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#scenario
# Wildcard docs in https://pypsa-eur.readthedocs.io/en/latest/wildcards.html
scenario: scenario:
weather_year: [''] # for backwards compatibility weather_year: [''] # for backwards compatibility
simpl: simpl:
- '' - ''
ll: # allowed transmission line volume expansion, can be any float >= 1.0 with a prefix v|c (today) or "copt" ll:
- v1.0
- v1.5 - v1.5
clusters: # number of nodes in Europe, any integer between 37 (1 node per country-zone) and several hundred clusters:
- 37 - 37
- 128 - 128
- 256 - 256
- 512 - 512
- 1024 - 1024
opts: # only relevant for PyPSA-Eur opts:
- '' - ''
sector_opts: # this is where the main scenario settings are sector_opts:
- Co2L0-3H-T-H-B-I-A-solar+p3-dist1 - Co2L0-3H-T-H-B-I-A-solar+p3-dist1
# to really understand the options here, look in scripts/prepare_sector_network.py planning_horizons:
# Co2Lx specifies the CO2 target in x% of the 1990 values; default will give default (5%);
# Co2L0p25 will give 25% CO2 emissions; Co2Lm0p05 will give 5% negative emissions
# xH is the temporal resolution; 3H is 3-hourly, i.e. one snapshot every 3 hours
# single letters are sectors: T for land transport, H for building heating,
# B for biomass supply, I for industry, shipping and aviation,
# A for agriculture, forestry and fishing
# solar+c0.5 reduces the capital cost of solar to 50\% of reference value
# solar+p3 multiplies the available installable potential by factor 3
# seq400 sets the potential of CO2 sequestration to 400 Mt CO2 per year
# dist{n} includes distribution grids with investment cost of n times cost in data/costs.csv
# for myopic/perfect foresight cb states the carbon budget in GtCO2 (cumulative
# emissions throughout the transition path in the timeframe determined by the
# planning_horizons), be:beta decay; ex:exponential decay
# cb40ex0 distributes a carbon budget of 40 GtCO2 following an exponential
# decay with initial growth rate 0
planning_horizons: # investment years for myopic and perfect; for overnight, year of cost assumptions can be different and is defined under 'costs'
- 2050
# for example, set to
# - 2020 # - 2020
# - 2030 # - 2030
# - 2040 # - 2040
# - 2050 - 2050
# for myopic foresight
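Note: the explanatory comments removed above still describe how the sector_opts wildcard is composed. As a rough illustration only (the authoritative parsing lives in scripts/prepare_sector_network.py), the string is a hyphen-separated list of switches:

import re

# Illustration based on the comments deleted above; not the project's actual parser.
opts = "Co2L0-3H-T-H-B-I-A-solar+p3-dist1".split("-")

co2_target = [o for o in opts if o.startswith("Co2L")]             # ['Co2L0']: 0% of 1990 CO2 emissions
resolution = [o for o in opts if re.fullmatch(r"\d+H", o)]         # ['3H']: one snapshot every 3 hours
sectors = [o for o in opts if o in {"T", "H", "B", "I", "A"}]      # land transport, heating, biomass, industry, agriculture
tweaks = [o for o in opts if "+" in o or o.startswith("dist")]     # ['solar+p3', 'dist1']: potential/cost modifiers
print(co2_target, resolution, sectors, tweaks)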
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#countries
countries: ['AL', 'AT', 'BA', 'BE', 'BG', 'CH', 'CZ', 'DE', 'DK', 'EE', 'ES', 'FI', 'FR', 'GB', 'GR', 'HR', 'HU', 'IE', 'IT', 'LT', 'LU', 'LV', 'ME', 'MK', 'NL', 'NO', 'PL', 'PT', 'RO', 'RS', 'SE', 'SI', 'SK'] countries: ['AL', 'AT', 'BA', 'BE', 'BG', 'CH', 'CZ', 'DE', 'DK', 'EE', 'ES', 'FI', 'FR', 'GB', 'GR', 'HR', 'HU', 'IE', 'IT', 'LT', 'LU', 'LV', 'ME', 'MK', 'NL', 'NO', 'PL', 'PT', 'RO', 'RS', 'SE', 'SI', 'SK']
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#snapshots
snapshots: snapshots:
start: "2013-01-01" start: "2013-01-01"
end: "2014-01-01" end: "2014-01-01"
inclusive: 'left' # include start, not end inclusive: 'left'
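For orientation, inclusive: 'left' keeps the start of the interval and drops the end. A minimal sketch, assuming the snapshot index is expanded with pandas as the start/end/inclusive keys suggest:

import pandas as pd

# Minimal sketch (assumption): hourly snapshots built from the values above.
# inclusive="left" keeps 2013-01-01 00:00 but drops 2014-01-01 00:00.
snapshots = pd.date_range("2013-01-01", "2014-01-01", freq="h", inclusive="left")
print(len(snapshots))  # 8760 hourly snapshots for the non-leap year 2013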
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#enable
enable: enable:
retrieve: auto
prepare_links_p_nom: false prepare_links_p_nom: false
retrieve_databundle: true retrieve_databundle: true
retrieve_sector_databundle: true retrieve_sector_databundle: true
@ -81,9 +69,7 @@ enable:
custom_busmap: false custom_busmap: false
drop_leap_days: true drop_leap_days: true
# CO2 budget as a fraction of 1990 emissions # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#co2-budget
# this is over-ridden if CO2Lx is set in sector_opts
# this is also over-ridden if cb is set in sector_opts
co2_budget: co2_budget:
2020: 0.701 2020: 0.701
2025: 0.524 2025: 0.524
@ -93,18 +79,19 @@ co2_budget:
2045: 0.032 2045: 0.032
2050: 0.000 2050: 0.000
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#electricity
electricity: electricity:
voltages: [220., 300., 380.] voltages: [220., 300., 380.]
gaslimit: false # global gas usage limit of X MWh_th gaslimit: false
co2limit: 7.75e+7 # 0.05 * 3.1e9*0.5 co2limit: 7.75e+7
co2base: 1.487e+9 co2base: 1.487e+9
agg_p_nom_limits: data/agg_p_nom_minmax.csv agg_p_nom_limits: data/agg_p_nom_minmax.csv
operational_reserve: # like https://genxproject.github.io/GenX/dev/core/#Reserves operational_reserve:
activate: false activate: false
epsilon_load: 0.02 # share of total load epsilon_load: 0.02
epsilon_vres: 0.02 # share of total renewable supply epsilon_vres: 0.02
contingency: 4000 # fixed capacity in MW contingency: 4000
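The inline derivation of co2limit is dropped above; for reference, the removed comment's arithmetic reproduces the configured value:

# Reproduces the arithmetic from the removed inline comment "0.05 * 3.1e9*0.5":
# 5% of roughly 3.1 GtCO2 of 1990 emissions, with the factor 0.5 presumably
# reflecting the share attributed to the electricity sector.
co2limit = 0.05 * 3.1e9 * 0.5
print(f"{co2limit:.3e}")  # 7.750e+07 tCO2-eq/a, matching the value above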
max_hours: max_hours:
battery: 6 battery: 6
@ -116,9 +103,7 @@ electricity:
Store: [battery, H2] Store: [battery, H2]
Link: [] # H2 pipeline Link: [] # H2 pipeline
# use pandas query strings here, e.g. Country not in ['Germany']
powerplants_filter: (DateOut >= 2022 or DateOut != DateOut) powerplants_filter: (DateOut >= 2022 or DateOut != DateOut)
# use pandas query strings here, e.g. Country in ['Germany']
custom_powerplants: false custom_powerplants: false
conventional_carriers: [nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass] conventional_carriers: [nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass]
@ -126,25 +111,19 @@ electricity:
estimate_renewable_capacities: estimate_renewable_capacities:
enable: true enable: true
# Add capacities from OPSD data
from_opsd: true from_opsd: true
# Renewable capacities are based on existing capacities reported by IRENA
year: 2020 year: 2020
# Artificially limit maximum capacities to factor * (IRENA capacities),
# i.e. 110% of <years>'s capacities => expansion_limit: 1.1
# false: Use estimated renewable potentials determine by the workflow
expansion_limit: false expansion_limit: false
technology_mapping: technology_mapping:
# Wind is the Fueltype in powerplantmatching, onwind, offwind-{ac,dc} the carrier in PyPSA-Eur
Offshore: [offwind-ac, offwind-dc] Offshore: [offwind-ac, offwind-dc]
Onshore: [onwind] Onshore: [onwind]
PV: [solar] PV: [solar]
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#atlite
atlite: atlite:
default_cutout: europe-2013-era5 default_cutout: europe-2013-era5
nprocesses: 4 nprocesses: 4
show_progress: false # false saves time show_progress: false
cutouts: cutouts:
# use 'base' to determine geographical bounds and time span from config # use 'base' to determine geographical bounds and time span from config
# base: # base:
@ -167,20 +146,16 @@ atlite:
sarah_dir: sarah_dir:
features: [influx, temperature] features: [influx, temperature]
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#renewable
renewable: renewable:
onwind: onwind:
cutout: europe-2013-era5 cutout: europe-2013-era5
resource: resource:
method: wind method: wind
turbine: Vestas_V112_3MW turbine: Vestas_V112_3MW
capacity_per_sqkm: 3 # ScholzPhd Tab 4.3.1: 10MW/km^2 and assuming 30% fraction of the already restricted capacity_per_sqkm: 3
# area is available for installation of wind generators due to competing land use and likely public
# acceptance issues.
# correction_factor: 0.93 # correction_factor: 0.93
corine: corine:
# Scholz, Y. (2012). Renewable energy based electricity supply at low costs
# development of the REMix model and application for Europe. ( p.42 / p.28)
grid_codes: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 32] grid_codes: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 32]
distance: 1000 distance: 1000
distance_grid_codes: [1, 2, 3, 4, 5, 6] distance_grid_codes: [1, 2, 3, 4, 5, 6]
@ -193,13 +168,8 @@ renewable:
resource: resource:
method: wind method: wind
turbine: NREL_ReferenceTurbine_5MW_offshore turbine: NREL_ReferenceTurbine_5MW_offshore
capacity_per_sqkm: 2 # ScholzPhd Tab 4.3.1: 10MW/km^2 and assuming 20% fraction of the already restricted capacity_per_sqkm: 2
# area is available for installation of wind generators due to competing land use and likely public
# acceptance issues.
correction_factor: 0.8855 correction_factor: 0.8855
# proxy for wake losses
# from 10.1016/j.energy.2018.08.153
# until done more rigorously in #153
corine: [44, 255] corine: [44, 255]
natura: true natura: true
ship_threshold: 400 ship_threshold: 400
@ -213,13 +183,8 @@ renewable:
resource: resource:
method: wind method: wind
turbine: NREL_ReferenceTurbine_5MW_offshore turbine: NREL_ReferenceTurbine_5MW_offshore
capacity_per_sqkm: 2 # ScholzPhd Tab 4.3.1: 10MW/km^2 and assuming 20% fraction of the already restricted capacity_per_sqkm: 2
# area is available for installation of wind generators due to competing land use and likely public
# acceptance issues.
correction_factor: 0.8855 correction_factor: 0.8855
# proxy for wake losses
# from 10.1016/j.energy.2018.08.153
# until done more rigorously in #153
corine: [44, 255] corine: [44, 255]
natura: true natura: true
ship_threshold: 400 ship_threshold: 400
@ -236,14 +201,7 @@ renewable:
orientation: orientation:
slope: 35. slope: 35.
azimuth: 180. azimuth: 180.
capacity_per_sqkm: 1.7 # ScholzPhd Tab 4.3.1: 170 MW/km^2 and assuming 1% of the area can be used for solar PV panels capacity_per_sqkm: 1.7
# Correction factor determined by comparing uncorrected area-weighted full-load hours to those
# published in Supplementary Data to
# Pietzcker, Robert Carl, et al. "Using the sun to decarbonize the power
# sector -- The economic potential of photovoltaics and concentrating solar
# power." Applied Energy 135 (2014): 704-720.
# This correction factor of 0.854337 may be in order if using reanalysis data.
# for discussion refer to https://github.com/PyPSA/pypsa-eur/pull/304
# correction_factor: 0.854337 # correction_factor: 0.854337
corine: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 26, 31, 32] corine: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 26, 31, 32]
natura: true natura: true
@ -260,10 +218,12 @@ renewable:
eia_correct_by_capacity: false eia_correct_by_capacity: false
eia_approximate_missing: false eia_approximate_missing: false
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#conventional
conventional: conventional:
nuclear: nuclear:
p_max_pu: "data/nuclear_p_max_pu.csv" # float of file name p_max_pu: "data/nuclear_p_max_pu.csv" # float of file name
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#lines
lines: lines:
types: types:
220.: "Al/St 240/40 2-bundle 220.0" 220.: "Al/St 240/40 2-bundle 220.0"
@ -271,28 +231,35 @@ lines:
380.: "Al/St 240/40 4-bundle 380.0" 380.: "Al/St 240/40 4-bundle 380.0"
s_max_pu: 0.7 s_max_pu: 0.7
s_nom_max: .inf s_nom_max: .inf
max_extension: .inf
length_factor: 1.25 length_factor: 1.25
under_construction: 'zero' # 'zero': set capacity to zero, 'remove': remove, 'keep': with full capacity under_construction: 'zero' # 'zero': set capacity to zero, 'remove': remove, 'keep': with full capacity
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#links
links: links:
p_max_pu: 1.0 p_max_pu: 1.0
p_nom_max: .inf p_nom_max: .inf
max_extension: .inf
include_tyndp: true include_tyndp: true
under_construction: 'zero' # 'zero': set capacity to zero, 'remove': remove, 'keep': with full capacity under_construction: 'zero' # 'zero': set capacity to zero, 'remove': remove, 'keep': with full capacity
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#transformers
transformers: transformers:
x: 0.1 x: 0.1
s_nom: 2000. s_nom: 2000.
type: '' type: ''
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#load
load: load:
power_statistics: true # only for files from <2019; set false in order to get ENTSOE transparency data power_statistics: true
interpolate_limit: 3 # data gaps up until this size are interpolated linearly interpolate_limit: 3
time_shift_for_large_gaps: 1w # data gaps up until this size are copied by copying from time_shift_for_large_gaps: 1w
manual_adjustments: true # false manual_adjustments: true # false
scaling_factor: 1.0 scaling_factor: 1.0
fixed_year: false # false or year (e.g. 2013) fixed_year: false # false or year (e.g. 2013)
# docs
# TODO: PyPSA-Eur merge issue in prepare_sector_network.py
# regulate what components with which carriers are kept from PyPSA-Eur; # regulate what components with which carriers are kept from PyPSA-Eur;
# some technologies are removed because they are implemented differently # some technologies are removed because they are implemented differently
# (e.g. battery or H2 storage) or have different year-dependent costs # (e.g. battery or H2 storage) or have different year-dependent costs
@ -313,12 +280,14 @@ pypsa_eur:
- hydro - hydro
Store: [] Store: []
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#energy
energy: energy:
energy_totals_year: 2011 energy_totals_year: 2011
base_emissions_year: 1990 base_emissions_year: 1990
eurostat_report_year: 2016 eurostat_report_year: 2016
emissions: CO2 # "CO2" or "All greenhouse gases - (CO2 equivalent)" emissions: CO2
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#biomass
biomass: biomass:
year: 2030 year: 2030
scenario: ENS_Med scenario: ENS_Med
@ -344,14 +313,14 @@ biomass:
- Manure solid, liquid - Manure solid, liquid
- Sludge - Sludge
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#solar-thermal
solar_thermal: solar_thermal:
clearsky_model: simple # should be "simple" or "enhanced"? clearsky_model: simple # should be "simple" or "enhanced"?
orientation: orientation:
slope: 45. slope: 45.
azimuth: 180. azimuth: 180.
# only relevant for foresight = myopic or perfect # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#existing-capacities
existing_capacities: existing_capacities:
grouping_years_power: [1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2020, 2025, 2030] grouping_years_power: [1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2020, 2025, 2030]
grouping_years_heat: [1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2019] # these should not extend 2020 grouping_years_heat: [1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2019] # these should not extend 2020
@ -362,37 +331,34 @@ existing_capacities:
- oil - oil
- uranium - uranium
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#sector
sector: sector:
district_heating: district_heating:
potential: 0.6 # maximum fraction of urban demand which can be supplied by district heating potential: 0.6
# increase of today's district heating demand to potential maximum district heating share
# progress = 0 means today's district heating share, progress = 1 means maximum fraction of urban demand is supplied by district heating
progress: progress:
2020: 0.0 2020: 0.0
2030: 0.3 2030: 0.3
2040: 0.6 2040: 0.6
2050: 1.0 2050: 1.0
district_heating_loss: 0.15 district_heating_loss: 0.15
cluster_heat_buses: false # cluster residential and service heat buses to one to save memory cluster_heat_buses: false
bev_dsm_restriction_value: 0.75 #Set to 0 for no restriction on BEV DSM bev_dsm_restriction_value: 0.75
bev_dsm_restriction_time: 7 #Time at which SOC of BEV has to be dsm_restriction_value bev_dsm_restriction_time: 7
transport_heating_deadband_upper: 20. transport_heating_deadband_upper: 20.
transport_heating_deadband_lower: 15. transport_heating_deadband_lower: 15.
ICE_lower_degree_factor: 0.375 #in per cent increase in fuel consumption per degree above deadband ICE_lower_degree_factor: 0.375
ICE_upper_degree_factor: 1.6 ICE_upper_degree_factor: 1.6
EV_lower_degree_factor: 0.98 EV_lower_degree_factor: 0.98
EV_upper_degree_factor: 0.63 EV_upper_degree_factor: 0.63
bev_dsm: true #turns on EV battery bev_dsm: true
bev_availability: 0.5 #How many cars do smart charging bev_availability: 0.5
bev_energy: 0.05 #average battery size in MWh bev_energy: 0.05
bev_charge_efficiency: 0.9 #BEV (dis-)charging efficiency bev_charge_efficiency: 0.9
bev_plug_to_wheel_efficiency: 0.2 #kWh/km from EPA https://www.fueleconomy.gov/feg/ for Tesla Model S bev_plug_to_wheel_efficiency: 0.2
bev_charge_rate: 0.011 #3-phase charger with 11 kW bev_charge_rate: 0.011
bev_avail_max: 0.95 bev_avail_max: 0.95
bev_avail_mean: 0.8 bev_avail_mean: 0.8
v2g: true #allows feed-in to grid from EV battery v2g: true
#what is not EV or FCEV is oil-fuelled ICE
land_transport_fuel_cell_share: land_transport_fuel_cell_share:
2020: 0 2020: 0
2030: 0.05 2030: 0.05
@ -412,12 +378,12 @@ sector:
transport_internal_combustion_efficiency: 0.3 transport_internal_combustion_efficiency: 0.3
agriculture_machinery_electric_share: 0 agriculture_machinery_electric_share: 0
agriculture_machinery_oil_share: 1 agriculture_machinery_oil_share: 1
agriculture_machinery_fuel_efficiency: 0.7 # fuel oil per use agriculture_machinery_fuel_efficiency: 0.7
agriculture_machinery_electric_efficiency: 0.3 # electricity per use agriculture_machinery_electric_efficiency: 0.3
MWh_MeOH_per_MWh_H2: 0.8787 # in LHV, source: DECHEMA (2017): Low carbon energy and feedstock for the European chemical industry , pg. 64. MWh_MeOH_per_MWh_H2: 0.8787
MWh_MeOH_per_tCO2: 4.0321 # in LHV, source: DECHEMA (2017): Low carbon energy and feedstock for the European chemical industry , pg. 64. MWh_MeOH_per_tCO2: 4.0321
MWh_MeOH_per_MWh_e: 3.6907 # in LHV, source: DECHEMA (2017): Low carbon energy and feedstock for the European chemical industry , pg. 64. MWh_MeOH_per_MWh_e: 3.6907
shipping_hydrogen_liquefaction: false # whether to consider liquefaction costs for shipping H2 demands shipping_hydrogen_liquefaction: false
shipping_hydrogen_share: shipping_hydrogen_share:
2020: 0 2020: 0
2030: 0 2030: 0
@ -433,18 +399,14 @@ sector:
2030: 0.7 2030: 0.7
2040: 0.3 2040: 0.3
2050: 0 2050: 0
shipping_methanol_efficiency: 0.46 # 10-15% higher https://www.iea-amf.org/app/webroot/files/file/Annex%20Reports/AMF_Annex_56.pdf, https://users.ugent.be/~lsileghe/documents/extended_abstract.pdf shipping_methanol_efficiency: 0.46
shipping_oil_efficiency: 0.40 #For conversion of fuel oil to propulsion in 2011 shipping_oil_efficiency: 0.40
aviation_demand_factor: 1. # relative aviation demand compared to today aviation_demand_factor: 1.
HVC_demand_factor: 1. # relative HVC demand compared to today HVC_demand_factor: 1.
time_dep_hp_cop: true #time dependent heat pump coefficient of performance time_dep_hp_cop: true
heat_pump_sink_T: 55. # Celsius, based on DTU / large area radiators; used in build_cop_profiles.py heat_pump_sink_T: 55.
# conservatively high to cover hot water and space heating in poorly-insulated buildings reduce_space_heat_exogenously: true
reduce_space_heat_exogenously: true # reduces space heat demand by a given factor (applied before losses in DH) reduce_space_heat_exogenously_factor:
# this can represent e.g. building renovation, building demolition, or if
# the factor is negative: increasing floor area, increased thermal comfort, population growth
reduce_space_heat_exogenously_factor: # per unit reduction in space heat demand
# the default factors are determined by the LTS scenario from http://tool.european-calculator.eu/app/buildings/building-types-area/?levers=1ddd4444421213bdbbbddd44444ffffff11f411111221111211l212221
2020: 0.10 # this results in a space heat demand reduction of 10% 2020: 0.10 # this results in a space heat demand reduction of 10%
2025: 0.09 # first heat demand increases compared to 2020 because of larger floor area per capita 2025: 0.09 # first heat demand increases compared to 2020 because of larger floor area per capita
2030: 0.09 2030: 0.09
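Relating to time_dep_hp_cop and heat_pump_sink_T a few lines above: a back-of-the-envelope sketch of a time-dependent COP, assuming the quadratic regression from Staffell et al. (2012) on which build_cop_profiles.py is commonly described as being based (treat the coefficients here as an assumption):

def cop_air_source(source_T, sink_T=55.0):
    # Assumed regression (Staffell et al. 2012) for air-sourced heat pumps;
    # sink_T defaults to the 55 Celsius heat_pump_sink_T configured above.
    delta_T = sink_T - source_T
    return 6.81 - 0.121 * delta_T + 0.000630 * delta_T**2

print(cop_air_source(10.0))  # milder outside air gives a higher COP (~2.64)
print(cop_air_source(-5.0))  # cold outside air gives a lower COP (~1.82)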
@ -452,15 +414,15 @@ sector:
2040: 0.16 2040: 0.16
2045: 0.21 2045: 0.21
2050: 0.29 2050: 0.29
retrofitting: # co-optimises building renovation to reduce space heat demand retrofitting:
retro_endogen: false # co-optimise space heat savings retro_endogen: false
cost_factor: 1.0 # weight costs for building renovation cost_factor: 1.0
interest_rate: 0.04 # for investment in building components interest_rate: 0.04
annualise_cost: true # annualise the investment costs annualise_cost: true
tax_weighting: false # weight costs depending on taxes in countries tax_weighting: false
construction_index: true # weight costs depending on labour/material costs per country construction_index: true
tes: true tes: true
tes_tau: # 180 day time constant for centralised, 3 day for decentralised tes_tau:
decentral: 3 decentral: 3
central: 180 central: 180
boilers: true boilers: true
@ -481,50 +443,48 @@ sector:
hydrogen_turbine: false hydrogen_turbine: false
SMR: true SMR: true
regional_co2_sequestration_potential: regional_co2_sequestration_potential:
enable: false # enable regionally resolved geological co2 storage potential enable: false
attribute: 'conservative estimate Mt' attribute: 'conservative estimate Mt'
include_onshore: false # include onshore sequestration potentials include_onshore: false
min_size: 3 # Gt, sites with lower potential will be excluded min_size: 3
max_size: 25 # Gt, max sequestration potential for any one site, TODO research suitable value max_size: 25
years_of_storage: 25 # years until potential exhausted at optimised annual rate years_of_storage: 25
co2_sequestration_potential: 200 #MtCO2/a sequestration potential for Europe co2_sequestration_potential: 200
co2_sequestration_cost: 10 #EUR/tCO2 for sequestration of CO2 co2_sequestration_cost: 10
co2_spatial: false co2_spatial: false
co2network: false co2network: false
cc_fraction: 0.9 # default fraction of CO2 captured with post-combustion capture cc_fraction: 0.9
hydrogen_underground_storage: true hydrogen_underground_storage: true
hydrogen_underground_storage_locations: hydrogen_underground_storage_locations:
# - onshore # more than 50 km from sea # - onshore # more than 50 km from sea
- nearshore # within 50 km of sea - nearshore # within 50 km of sea
# - offshore # - offshore
ammonia: false # can be false (no NH3 carrier), true (copperplated NH3), "regional" (regionalised NH3 without network) ammonia: false
min_part_load_fischer_tropsch: 0.9 # p_min_pu min_part_load_fischer_tropsch: 0.9
min_part_load_methanolisation: 0.5 # p_min_pu min_part_load_methanolisation: 0.5
use_fischer_tropsch_waste_heat: true use_fischer_tropsch_waste_heat: true
use_fuel_cell_waste_heat: true use_fuel_cell_waste_heat: true
use_electrolysis_waste_heat: false use_electrolysis_waste_heat: false
electricity_distribution_grid: true electricity_distribution_grid: true
electricity_distribution_grid_cost_factor: 1.0 #multiplies cost in data/costs.csv electricity_distribution_grid_cost_factor: 1.0
electricity_grid_connection: true # only applies to onshore wind and utility PV electricity_grid_connection: true
H2_network: true H2_network: true
gas_network: false gas_network: false
H2_retrofit: false # if set to True existing gas pipes can be retrofitted to H2 pipes H2_retrofit: false
# according to hydrogen backbone strategy (April, 2020) p.15 H2_retrofit_capacity_per_CH4: 0.6
# https://gasforclimate2050.eu/wp-content/uploads/2020/07/2020_European-Hydrogen-Backbone_Report.pdf gas_network_connectivity_upgrade: 1
# 60% of original natural gas capacity could be used in cost-optimal case as H2 capacity
H2_retrofit_capacity_per_CH4: 0.6 # ratio for H2 capacity per original CH4 capacity of retrofitted pipelines
gas_network_connectivity_upgrade: 1 # https://networkx.org/documentation/stable/reference/algorithms/generated/networkx.algorithms.connectivity.edge_augmentation.k_edge_augmentation.html#networkx.algorithms.connectivity.edge_augmentation.k_edge_augmentation
gas_distribution_grid: true gas_distribution_grid: true
gas_distribution_grid_cost_factor: 1.0 #multiplies cost in data/costs.csv gas_distribution_grid_cost_factor: 1.0
biomass_spatial: false # regionally resolve biomass (e.g. potentials) biomass_spatial: false
biomass_transport: false # allow transport of solid biomass between nodes biomass_transport: false
conventional_generation: # generator : carrier conventional_generation:
OCGT: gas OCGT: gas
biomass_to_liquid: false biomass_to_liquid: false
biosng: false biosng: false
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#industry
industry: industry:
St_primary_fraction: # fraction of steel produced via primary route versus secondary route (scrap+EAF); today fraction is 0.6 St_primary_fraction:
2020: 0.6 2020: 0.6
2025: 0.55 2025: 0.55
2030: 0.5 2030: 0.5
@ -532,7 +492,7 @@ industry:
2040: 0.4 2040: 0.4
2045: 0.35 2045: 0.35
2050: 0.3 2050: 0.3
DRI_fraction: # fraction of the primary route converted to DRI + EAF DRI_fraction:
2020: 0 2020: 0
2025: 0 2025: 0
2030: 0.05 2030: 0.05
@ -540,9 +500,9 @@ industry:
2040: 0.4 2040: 0.4
2045: 0.7 2045: 0.7
2050: 1 2050: 1
H2_DRI: 1.7 #H2 consumption in Direct Reduced Iron (DRI), MWh_H2,LHV/ton_Steel from 51kgH2/tSt in Vogl et al (2018) doi:10.1016/j.jclepro.2018.08.279 H2_DRI: 1.7
elec_DRI: 0.322 #electricity consumption in Direct Reduced Iron (DRI) shaft, MWh/tSt HYBRIT brochure https://ssabwebsitecdn.azureedge.net/-/media/hybrit/files/hybrit_brochure.pdf elec_DRI: 0.322
Al_primary_fraction: # fraction of aluminium produced via the primary route versus scrap; today fraction is 0.4 Al_primary_fraction:
2020: 0.4 2020: 0.4
2025: 0.375 2025: 0.375
2030: 0.35 2030: 0.35
@ -550,35 +510,33 @@ industry:
2040: 0.3 2040: 0.3
2045: 0.25 2045: 0.25
2050: 0.2 2050: 0.2
MWh_NH3_per_tNH3: 5.166 # LHV MWh_NH3_per_tNH3: 5.166
MWh_CH4_per_tNH3_SMR: 10.8 # 2012's demand from https://ec.europa.eu/docsroom/documents/4165/attachments/1/translations/en/renditions/pdf MWh_CH4_per_tNH3_SMR: 10.8
MWh_elec_per_tNH3_SMR: 0.7 # same source, assuming 94-6% split methane-elec of total energy demand 11.5 MWh/tNH3 MWh_elec_per_tNH3_SMR: 0.7
MWh_H2_per_tNH3_electrolysis: 6.5 # from https://doi.org/10.1016/j.joule.2018.04.017, around 0.197 tH2/tHN3 (>3/17 since some H2 lost and used for energy) MWh_H2_per_tNH3_electrolysis: 6.5
MWh_elec_per_tNH3_electrolysis: 1.17 # from https://doi.org/10.1016/j.joule.2018.04.017 Table 13 (air separation and HB) MWh_elec_per_tNH3_electrolysis: 1.17
MWh_NH3_per_MWh_H2_cracker: 1.46 # https://github.com/euronion/trace/blob/44a5ff8401762edbef80eff9cfe5a47c8d3c8be4/data/efficiencies.csv MWh_NH3_per_MWh_H2_cracker: 1.46 # https://github.com/euronion/trace/blob/44a5ff8401762edbef80eff9cfe5a47c8d3c8be4/data/efficiencies.csv
NH3_process_emissions: 24.5 # in MtCO2/a from SMR for H2 production for NH3 from UNFCCC for 2015 for EU28 NH3_process_emissions: 24.5
petrochemical_process_emissions: 25.5 # in MtCO2/a for petrochemical and other from UNFCCC for 2015 for EU28 petrochemical_process_emissions: 25.5
HVC_primary_fraction: 1. # fraction of today's HVC produced via primary route HVC_primary_fraction: 1.
HVC_mechanical_recycling_fraction: 0. # fraction of today's HVC produced via mechanical recycling HVC_mechanical_recycling_fraction: 0.
HVC_chemical_recycling_fraction: 0. # fraction of today's HVC produced via chemical recycling HVC_chemical_recycling_fraction: 0.
HVC_production_today: 52. # MtHVC/a from DECHEMA (2017), Figure 16, page 107; includes ethylene, propylene and BTX HVC_production_today: 52.
MWh_elec_per_tHVC_mechanical_recycling: 0.547 # from SI of https://doi.org/10.1016/j.resconrec.2020.105010, Table S5, for HDPE, PP, PS, PET. LDPE would be 0.756. MWh_elec_per_tHVC_mechanical_recycling: 0.547
MWh_elec_per_tHVC_chemical_recycling: 6.9 # Material Economics (2019), page 125; based on pyrolysis and electric steam cracking MWh_elec_per_tHVC_chemical_recycling: 6.9
chlorine_production_today: 9.58 # MtCl/a from DECHEMA (2017), Table 7, page 43 chlorine_production_today: 9.58
MWh_elec_per_tCl: 3.6 # DECHEMA (2017), Table 6, page 43 MWh_elec_per_tCl: 3.6
MWh_H2_per_tCl: -0.9372 # DECHEMA (2017), page 43; negative since hydrogen produced in chloralkali process MWh_H2_per_tCl: -0.9372
methanol_production_today: 1.5 # MtMeOH/a from DECHEMA (2017), page 62 methanol_production_today: 1.5
MWh_elec_per_tMeOH: 0.167 # DECHEMA (2017), Table 14, page 65 MWh_elec_per_tMeOH: 0.167
MWh_CH4_per_tMeOH: 10.25 # DECHEMA (2017), Table 14, page 65 MWh_CH4_per_tMeOH: 10.25
hotmaps_locate_missing: false hotmaps_locate_missing: false
reference_year: 2015 reference_year: 2015
# references:
# DECHEMA (2017): https://dechema.de/dechema_media/Downloads/Positionspapiere/Technology_study_Low_carbon_energy_and_feedstock_for_the_European_chemical_industry-p-20002750.pdf
# Material Economics (2019): https://materialeconomics.com/latest-updates/industrial-transformation-2050
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#costs
costs: costs:
year: 2030 year: 2030
version: v0.5.0 version: v0.6.0
rooftop_share: 0.14 # based on the potentials, assuming (0.1 kW/m2 and 10 m2/person) rooftop_share: 0.14 # based on the potentials, assuming (0.1 kW/m2 and 10 m2/person)
fill_values: fill_values:
FOM: 0 FOM: 0
@ -602,14 +560,15 @@ costs:
fuel cell: 0. fuel cell: 0.
battery: 0. battery: 0.
battery inverter: 0. battery inverter: 0.
emission_prices: # in currency per tonne emission, only used with the option Ep emission_prices:
co2: 0. co2: 0.
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#clustering
clustering: clustering:
simplify_network: simplify_network:
to_substations: false # network is simplified to nodes with positive or negative power injection (i.e. substations or offwind connections) to_substations: false
algorithm: kmeans # choose from: [hac, kmeans] algorithm: kmeans # choose from: [hac, kmeans]
feature: solar+onwind-time # only for hac. choose from: [solar+onwind-time, solar+onwind-cap, solar-time, solar-cap, solar+offwind-cap] etc. feature: solar+onwind-time
exclude_carriers: [] exclude_carriers: []
remove_stubs: true remove_stubs: true
remove_stubs_across_borders: true remove_stubs_across_borders: true
@ -619,7 +578,7 @@ clustering:
exclude_carriers: [] exclude_carriers: []
aggregation_strategies: aggregation_strategies:
generators: generators:
p_nom_max: sum # use "min" for more conservative assumptions p_nom_max: sum
p_nom_min: sum p_nom_min: sum
p_min_pu: mean p_min_pu: mean
marginal_cost: mean marginal_cost: mean
@ -628,12 +587,13 @@ clustering:
ramp_limit_down: max ramp_limit_down: max
efficiency: mean efficiency: mean
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#solving
solving: solving:
#tmpdir: "path/to/tmp" #tmpdir: "path/to/tmp"
options: options:
formulation: kirchhoff
clip_p_max_pu: 1.e-2 clip_p_max_pu: 1.e-2
load_shedding: false load_shedding: false
transmission_losses: 0
noisy_costs: true noisy_costs: true
skip_iterations: true skip_iterations: true
track_iterations: false track_iterations: false
@ -717,6 +677,7 @@ operations:
co2_price: 500 # EUR/t co2_price: 500 # EUR/t
co2_sequestation_limit: 200 # Mt/a co2_sequestation_limit: 200 # Mt/a
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#plotting
plotting: plotting:
map: map:
boundaries: [-11, 30, 34, 71] boundaries: [-11, 30, 34, 71]
@ -731,48 +692,6 @@ plotting:
energy_max: 20000 energy_max: 20000
energy_min: -20000 energy_min: -20000
energy_threshold: 50. energy_threshold: 50.
vre_techs:
- onwind
- offwind-ac
- offwind-dc
- solar
- ror
renewable_storage_techs:
- PHS
- hydro
conv_techs:
- OCGT
- CCGT
- Nuclear
- Coal
storage_techs:
- hydro+PHS
- battery
- H2
load_carriers:
- AC load
AC_carriers:
- AC line
- AC transformer
link_carriers:
- DC line
- Converter AC-DC
heat_links:
- heat pump
- resistive heater
- CHP heat
- CHP electric
- gas boiler
- central heat pump
- central resistive heater
- central CHP heat
- central CHP electric
- central gas boiler
heat_generators:
- gas boiler
- central gas boiler
- solar thermal collector
- central solar thermal collector
nice_names: nice_names:
OCGT: "Open-Cycle Gas" OCGT: "Open-Cycle Gas"
@ -813,6 +732,11 @@ plotting:
solar: "#f9d002" solar: "#f9d002"
solar PV: "#f9d002" solar PV: "#f9d002"
solar thermal: '#ffbf2b' solar thermal: '#ffbf2b'
residential rural solar thermal: '#f1c069'
services rural solar thermal: '#eabf61'
residential urban decentral solar thermal: '#e5bc5a'
services urban decentral solar thermal: '#dfb953'
urban central solar thermal: '#d7b24c'
solar rooftop: '#ffea80' solar rooftop: '#ffea80'
# gas # gas
OCGT: '#e0986c' OCGT: '#e0986c'
@ -821,9 +745,15 @@ plotting:
gas boiler: '#db6a25' gas boiler: '#db6a25'
gas boilers: '#db6a25' gas boilers: '#db6a25'
gas boiler marginal: '#db6a25' gas boiler marginal: '#db6a25'
residential rural gas boiler: '#d4722e'
residential urban decentral gas boiler: '#cb7a36'
services rural gas boiler: '#c4813f'
services urban decentral gas boiler: '#ba8947'
urban central gas boiler: '#b0904f'
gas: '#e05b09' gas: '#e05b09'
fossil gas: '#e05b09' fossil gas: '#e05b09'
natural gas: '#e05b09' natural gas: '#e05b09'
biogas to gas: '#e36311'
CCGT: '#a85522' CCGT: '#a85522'
CCGT marginal: '#a85522' CCGT marginal: '#a85522'
allam: '#B98F76' allam: '#B98F76'
@ -836,6 +766,11 @@ plotting:
# oil # oil
oil: '#c9c9c9' oil: '#c9c9c9'
oil boiler: '#adadad' oil boiler: '#adadad'
residential rural oil boiler: '#a9a9a9'
services rural oil boiler: '#a5a5a5'
residential urban decentral oil boiler: '#a1a1a1'
urban central oil boiler: '#9d9d9d'
services urban decentral oil boiler: '#999999'
agriculture machinery oil: '#949494' agriculture machinery oil: '#949494'
shipping oil: "#808080" shipping oil: "#808080"
land transport oil: '#afafaf' land transport oil: '#afafaf'
@ -861,13 +796,20 @@ plotting:
solid biomass for industry CC: '#47411c' solid biomass for industry CC: '#47411c'
solid biomass for industry co2 from atmosphere: '#736412' solid biomass for industry co2 from atmosphere: '#736412'
solid biomass for industry co2 to stored: '#47411c' solid biomass for industry co2 to stored: '#47411c'
urban central solid biomass CHP: '#9d9042'
urban central solid biomass CHP CC: '#6c5d28'
biomass boiler: '#8A9A5B' biomass boiler: '#8A9A5B'
residential rural biomass boiler: '#a1a066'
residential urban decentral biomass boiler: '#b0b87b'
services rural biomass boiler: '#c6cf98'
services urban decentral biomass boiler: '#dde5b5'
biomass to liquid: '#32CD32' biomass to liquid: '#32CD32'
BioSNG: '#123456' BioSNG: '#123456'
# power transmission # power transmission
lines: '#6c9459' lines: '#6c9459'
transmission lines: '#6c9459' transmission lines: '#6c9459'
electricity distribution grid: '#97ad8c' electricity distribution grid: '#97ad8c'
low voltage: '#97ad8c'
# electricity demand # electricity demand
Electric load: '#110d63' Electric load: '#110d63'
electric demand: '#110d63' electric demand: '#110d63'
@ -878,24 +820,48 @@ plotting:
# battery + EVs # battery + EVs
battery: '#ace37f' battery: '#ace37f'
battery storage: '#ace37f' battery storage: '#ace37f'
battery charger: '#88a75b'
battery discharger: '#5d4e29'
home battery: '#80c944' home battery: '#80c944'
home battery storage: '#80c944' home battery storage: '#80c944'
home battery charger: '#5e8032'
home battery discharger: '#3c5221'
BEV charger: '#baf238' BEV charger: '#baf238'
V2G: '#e5ffa8' V2G: '#e5ffa8'
land transport EV: '#baf238' land transport EV: '#baf238'
Li ion: '#baf238' Li ion: '#baf238'
# hot water storage # hot water storage
water tanks: '#e69487' water tanks: '#e69487'
residential rural water tanks: '#f7b7a3'
services rural water tanks: '#f3afa3'
residential urban decentral water tanks: '#f2b2a3'
services urban decentral water tanks: '#f1b4a4'
urban central water tanks: '#e9977d'
hot water storage: '#e69487' hot water storage: '#e69487'
hot water charging: '#e69487' hot water charging: '#e8998b'
hot water discharging: '#e69487' urban central water tanks charger: '#b57a67'
residential rural water tanks charger: '#b4887c'
residential urban decentral water tanks charger: '#b39995'
services rural water tanks charger: '#b3abb0'
services urban decentral water tanks charger: '#b3becc'
hot water discharging: '#e99c8e'
urban central water tanks discharger: '#b9816e'
residential rural water tanks discharger: '#ba9685'
residential urban decentral water tanks discharger: '#baac9e'
services rural water tanks discharger: '#bbc2b8'
services urban decentral water tanks discharger: '#bdd8d3'
# heat demand # heat demand
Heat load: '#cc1f1f' Heat load: '#cc1f1f'
heat: '#cc1f1f' heat: '#cc1f1f'
heat demand: '#cc1f1f' heat demand: '#cc1f1f'
rural heat: '#ff5c5c' rural heat: '#ff5c5c'
residential rural heat: '#ff7c7c'
services rural heat: '#ff9c9c'
central heat: '#cc1f1f' central heat: '#cc1f1f'
urban central heat: '#d15959'
decentral heat: '#750606' decentral heat: '#750606'
residential urban decentral heat: '#a33c3c'
services urban decentral heat: '#cc1f1f'
low-temperature heat for industry: '#8f2727' low-temperature heat for industry: '#8f2727'
process heat: '#ff0000' process heat: '#ff0000'
agriculture heat: '#d9a5a5' agriculture heat: '#d9a5a5'
@ -903,14 +869,26 @@ plotting:
heat pumps: '#2fb537' heat pumps: '#2fb537'
heat pump: '#2fb537' heat pump: '#2fb537'
air heat pump: '#36eb41' air heat pump: '#36eb41'
residential urban decentral air heat pump: '#48f74f'
services urban decentral air heat pump: '#5af95d'
urban central air heat pump: '#6cfb6b'
ground heat pump: '#2fb537' ground heat pump: '#2fb537'
residential rural ground heat pump: '#48f74f'
services rural ground heat pump: '#5af95d'
Ambient: '#98eb9d' Ambient: '#98eb9d'
CHP: '#8a5751' CHP: '#8a5751'
urban central gas CHP: '#8d5e56'
CHP CC: '#634643' CHP CC: '#634643'
urban central gas CHP CC: '#6e4e4c'
CHP heat: '#8a5751' CHP heat: '#8a5751'
CHP electric: '#8a5751' CHP electric: '#8a5751'
district heating: '#e8beac' district heating: '#e8beac'
resistive heater: '#d8f9b8' resistive heater: '#d8f9b8'
residential rural resistive heater: '#bef5b5'
residential urban decentral resistive heater: '#b2f1a9'
services rural resistive heater: '#a5ed9d'
services urban decentral resistive heater: '#98e991'
urban central resistive heater: '#8cdf85'
retrofitting: '#8487e8' retrofitting: '#8487e8'
building retrofitting: '#8487e8' building retrofitting: '#8487e8'
# hydrogen # hydrogen
@ -922,13 +900,16 @@ plotting:
SMR CC: '#4f1745' SMR CC: '#4f1745'
H2 liquefaction: '#d647bd' H2 liquefaction: '#d647bd'
hydrogen storage: '#bf13a0' hydrogen storage: '#bf13a0'
H2 Store: '#bf13a0'
H2 storage: '#bf13a0' H2 storage: '#bf13a0'
land transport fuel cell: '#6b3161' land transport fuel cell: '#6b3161'
H2 pipeline: '#f081dc' H2 pipeline: '#f081dc'
H2 pipeline retrofitted: '#ba99b5' H2 pipeline retrofitted: '#ba99b5'
H2 Fuel Cell: '#c251ae' H2 Fuel Cell: '#c251ae'
H2 fuel cell: '#c251ae'
H2 turbine: '#991f83' H2 turbine: '#991f83'
H2 Electrolysis: '#ff29d9' H2 Electrolysis: '#ff29d9'
H2 electrolysis: '#ff29d9'
# ammonia # ammonia
NH3: '#46caf0' NH3: '#46caf0'
ammonia: '#46caf0' ammonia: '#46caf0'
@ -977,9 +958,11 @@ plotting:
waste: '#e3d37d' waste: '#e3d37d'
other: '#000000' other: '#000000'
geothermal: '#ba91b1' geothermal: '#ba91b1'
AC: "#70af1d"
AC-AC: "#70af1d" AC-AC: "#70af1d"
AC line: "#70af1d" AC line: "#70af1d"
links: "#8a1caf" links: "#8a1caf"
HVDC links: "#8a1caf" HVDC links: "#8a1caf"
DC: "#8a1caf"
DC-DC: "#8a1caf" DC-DC: "#8a1caf"
DC link: "#8a1caf" DC link: "#8a1caf"

View File

@ -31,6 +31,14 @@ snapshots:
end: "2013-03-08" end: "2013-03-08"
electricity: electricity:
co2limit: 100.e+6
extendable_carriers:
Generator: [OCGT]
StorageUnit: [battery]
Store: [H2]
Link: [H2 pipeline]
renewable_carriers: [solar, onwind, offwind-ac, offwind-dc] renewable_carriers: [solar, onwind, offwind-ac, offwind-dc]
atlite: atlite:

View File

@ -28,6 +28,14 @@ snapshots:
end: "2013-03-08" end: "2013-03-08"
electricity: electricity:
co2limit: 100.e+6
extendable_carriers:
Generator: [OCGT]
StorageUnit: [battery]
Store: [H2]
Link: [H2 pipeline]
renewable_carriers: [solar, onwind, offwind-ac, offwind-dc] renewable_carriers: [solar, onwind, offwind-ac, offwind-dc]
atlite: atlite:

View File

@ -1,3 +0,0 @@
attribute,type,unit,default,description,status
location,string,n/a,n/a,Reference to original electricity bus,Input (optional)
unit,string,n/a,MWh,Unit of the bus (descriptive only), Input (optional)

View File

@ -1,4 +0,0 @@
attribute,type,unit,default,description,status
carrier,string,n/a,n/a,carrier,Input (optional)
lifetime,float,years,inf,lifetime,Input (optional)
build_year,int,year ,0,build year,Input (optional)

View File

@ -1,13 +0,0 @@
attribute,type,unit,default,description,status
bus2,string,n/a,n/a,2nd bus,Input (optional)
bus3,string,n/a,n/a,3rd bus,Input (optional)
bus4,string,n/a,n/a,4th bus,Input (optional)
efficiency2,static or series,per unit,1,2nd bus efficiency,Input (optional)
efficiency3,static or series,per unit,1,3rd bus efficiency,Input (optional)
efficiency4,static or series,per unit,1,4th bus efficiency,Input (optional)
p2,series,MW,0,2nd bus output,Output
p3,series,MW,0,3rd bus output,Output
p4,series,MW,0,4th bus output,Output
carrier,string,n/a,n/a,carrier,Input (optional)
lifetime,float,years,inf,lifetime,Input (optional)
build_year,int,year ,0,build year,Input (optional)

View File

@ -1,2 +0,0 @@
attribute,type,unit,default,description,status
carrier,string,n/a,n/a,carrier,Input (optional)

View File

@ -1,4 +0,0 @@
attribute,type,unit,default,description,status
carrier,string,n/a,n/a,carrier,Input (optional)
lifetime,float,years,inf,lifetime,Input (optional)
build_year,int,year ,0,build year,Input (optional)
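The CSVs deleted above are the per-component attribute overrides (extra link buses and efficiencies, carrier, lifetime, build_year, bus location/unit) that PyPSA-Eur used to inject into PyPSA; their removal suggests a recent PyPSA release now ships these attributes natively. A minimal sketch of the override mechanism, for orientation only:

import pypsa
from pypsa.descriptors import Dict

# Sketch of how such CSVs were typically applied (an illustration, not the
# project's helper): extend the standard component attributes, then pass the
# overrides when constructing the network.
overrides = Dict({k: v.copy() for k, v in pypsa.components.component_attrs.items()})
overrides["Link"].loc["bus2"] = ["string", "n/a", "n/a", "2nd bus", "Input (optional)"]
overrides["Link"].loc["efficiency2"] = ["static or series", "per unit", 1.0, "2nd bus efficiency", "Input (optional)"]
overrides["Link"].loc["p2"] = ["series", "MW", 0.0, "2nd bus output", "Output"]

n = pypsa.Network(override_component_attrs=overrides)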

View File

@ -36,6 +36,7 @@ sys.path.insert(0, os.path.abspath("../scripts"))
extensions = [ extensions = [
#'sphinx.ext.autodoc', #'sphinx.ext.autodoc',
#'sphinx.ext.autosummary', #'sphinx.ext.autosummary',
"myst_parser",
"sphinx.ext.autosectionlabel", "sphinx.ext.autosectionlabel",
"sphinx.ext.intersphinx", "sphinx.ext.intersphinx",
"sphinx.ext.todo", "sphinx.ext.todo",

View File

@ -0,0 +1,7 @@
,Unit,Values,Description
year ,--,"{2010, 2020, 2030, 2040, 2050}",Year for which to retrieve biomass potential according to the assumptions of the `JRC ENSPRESO <https://data.jrc.ec.europa.eu/dataset/74ed5a04-7d74-4807-9eab-b94774309d9f>`_ .
scenario ,--,"{""ENS_Low"", ""ENS_Med"", ""ENS_High""}",Scenario for which to retrieve biomass potential. The scenario definition can be seen in `ENSPRESO_BIOMASS <https://cidportal.jrc.ec.europa.eu/ftp/jrc-opendata/ENSPRESO/ENSPRESO_BIOMASS.xlsx>`_
classes ,,,
-- solid biomass,--,Array of biomass commodities,The commodities that are included as solid biomass
-- not included,--,Array of biomass commodities,The commodities that are not included as a biomass potential
-- biogas,--,Array of biomass commodities,The commodities that are included as biogas

View File

@ -0,0 +1,2 @@
,Unit,Values,Description
co2_budget,--,Dictionary with planning horizons as keys.,CO2 budget as a fraction of 1990 emissions. Overwritten if ``CO2Lx`` or ``cb`` are set in ``{sector_opts}`` wildcard.

View File

@ -0,0 +1,2 @@
,Unit,Values,Description
countries,--,"Subset of {'AL', 'AT', 'BA', 'BE', 'BG', 'CH', 'CZ', 'DE', 'DK', 'EE', 'ES', 'FI', 'FR', 'GB', 'GR', 'HR', 'HU', 'IE', 'IT', 'LT', 'LU', 'LV', 'ME', 'MK', 'NL', 'NO', 'PL', 'PT', 'RO', 'RS', 'SE', 'SI', 'SK'}","European countries defined by their `Two-letter country codes (ISO 3166-1) <https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2>`_ which should be included in the energy system model."

View File

@ -1,29 +1,36 @@
,Unit,Values,Description ,Unit,Values,Description
voltages,kV,"Any subset of {220., 300., 380.}",Voltage levels to consider voltages,kV,"Any subset of {220., 300., 380.}",Voltage levels to consider
gaslimit,MWhth,"float or false",Global gas usage limit gaslimit,MWhth,float or false,Global gas usage limit
co2limit,:math:`t_{CO_2-eq}/a`,float,Cap on total annual system carbon dioxide emissions co2limit,:math:`t_{CO_2-eq}/a`,float,Cap on total annual system carbon dioxide emissions
co2base,:math:`t_{CO_2-eq}/a`,float,Reference value of total annual system carbon dioxide emissions if relative emission reduction target is specified in ``{opts}`` wildcard. co2base,:math:`t_{CO_2-eq}/a`,float,Reference value of total annual system carbon dioxide emissions if relative emission reduction target is specified in ``{opts}`` wildcard.
agg_p_nom_limits,file,path,Reference to ``.csv`` file specifying per carrier generator nominal capacity constraints for individual countries if ``'CCL'`` is in ``{opts}`` wildcard. Defaults to ``data/agg_p_nom_minmax.csv``. agg_p_nom_limits,file,path,Reference to ``.csv`` file specifying per carrier generator nominal capacity constraints for individual countries if ``'CCL'`` is in ``{opts}`` wildcard. Defaults to ``data/agg_p_nom_minmax.csv``.
operational_reserve,,,"Settings for reserve requirements following like `GenX <https://genxproject.github.io/GenX/dev/core/#Reserves>`_" operational_reserve,,,Settings for reserve requirements following `GenX <https://genxproject.github.io/GenX/dev/core/#Reserves>`_
-- activate,bool,"true or false","Whether to take operational reserve requirements into account during optimisation" ,,,
-- epsilon_load,--,float,share of total load -- activate,bool,true or false,Whether to take operational reserve requirements into account during optimisation
-- epsilon_vres,--,float,share of total renewable supply -- epsilon_load,--,float,share of total load
-- contingency,MW,float,fixed reserve capacity -- epsilon_vres,--,float,share of total renewable supply
max_hours,,, -- contingency,MW,float,fixed reserve capacity
-- battery,h,float,Maximum state of charge capacity of the battery in terms of hours at full output capacity ``p_nom``. Cf. `PyPSA documentation <https://pypsa.readthedocs.io/en/latest/components.html#storage-unit>`_. max_hours,,,
-- H2,h,float,Maximum state of charge capacity of the hydrogen storage in terms of hours at full output capacity ``p_nom``. Cf. `PyPSA documentation <https://pypsa.readthedocs.io/en/latest/components.html#storage-unit>`_. -- battery,h,float,Maximum state of charge capacity of the battery in terms of hours at full output capacity ``p_nom``. Cf. `PyPSA documentation <https://pypsa.readthedocs.io/en/latest/components.html#storage-unit>`_.
extendable_carriers,,, -- H2,h,float,Maximum state of charge capacity of the hydrogen storage in terms of hours at full output capacity ``p_nom``. Cf. `PyPSA documentation <https://pypsa.readthedocs.io/en/latest/components.html#storage-unit>`_.
-- Generator,--,"Any extendable carrier","Defines existing or non-existing conventional and renewable power plants to be extendable during the optimization. Conventional generators can only be built/expanded where already existent today. If a listed conventional carrier is not included in the ``conventional_carriers`` list, the lower limit of the capacity expansion is set to 0." extendable_carriers,,,
-- StorageUnit,--,"Any subset of {'battery','H2'}",Adds extendable storage units (battery and/or hydrogen) at every node/bus after clustering without capacity limits and with zero initial capacity. -- Generator,--,Any extendable carrier,"Defines existing or non-existing conventional and renewable power plants to be extendable during the optimization. Conventional generators can only be built/expanded where already existent today. If a listed conventional carrier is not included in the ``conventional_carriers`` list, the lower limit of the capacity expansion is set to 0."
-- Store,--,"Any subset of {'battery','H2'}",Adds extendable storage units (battery and/or hydrogen) at every node/bus after clustering without capacity limits and with zero initial capacity. -- StorageUnit,--,"Any subset of {'battery','H2'}",Adds extendable storage units (battery and/or hydrogen) at every node/bus after clustering without capacity limits and with zero initial capacity.
-- Link,--,Any subset of {'H2 pipeline'},Adds extendable links (H2 pipelines only) at every connection where there are lines or HVDC links without capacity limits and with zero initial capacity. Hydrogen pipelines require hydrogen storage to be modelled as ``Store``. -- Store,--,"Any subset of {'battery','H2'}",Adds extendable storage units (battery and/or hydrogen) at every node/bus after clustering without capacity limits and with zero initial capacity.
powerplants_filter,--,"use `pandas.query <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.query.html>`_ strings here, e.g. Country not in ['Germany']",Filter query for the default powerplant database. -- Link,--,Any subset of {'H2 pipeline'},Adds extendable links (H2 pipelines only) at every connection where there are lines or HVDC links without capacity limits and with zero initial capacity. Hydrogen pipelines require hydrogen storage to be modelled as ``Store``.
custom_powerplants,--,"use `pandas.query <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.query.html>`_ strings here, e.g. Country in ['Germany']",Filter query for the custom powerplant database. powerplants_filter,--,"use `pandas.query <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.query.html>`_ strings here, e.g. ``Country not in ['Germany']``",Filter query for the default powerplant database.
conventional_carriers,--,"Any subset of {nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass}","List of conventional power plants to include in the model from ``resources/powerplants.csv``. If an included carrier is also listed in `extendable_carriers`, the capacity is taken as a lower bound." ,,,
renewable_carriers,--,"Any subset of {solar, onwind, offwind-ac, offwind-dc, hydro}",List of renewable generators to include in the model. custom_powerplants,--,"use `pandas.query <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.query.html>`_ strings here, e.g. ``Country in ['Germany']``",Filter query for the custom powerplant database.
estimate_renewable_capacities,,, ,,,
-- enable,,bool,"Activate routine to estimate renewable capacities" conventional_carriers,--,"Any subset of {nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass}","List of conventional power plants to include in the model from ``resources/powerplants.csv``. If an included carrier is also listed in ``extendable_carriers``, the capacity is taken as a lower bound."
-- from_opsd,--,bool,"Add capacities from OPSD data" ,,,
-- year,--,bool,"Renewable capacities are based on existing capacities reported by IRENA for the specified year" renewable_carriers,--,"Any subset of {solar, onwind, offwind-ac, offwind-dc, hydro}",List of renewable generators to include in the model.
-- expansion_limit,--,float or false,"Artificially limit maximum capacities to factor * (IRENA capacities), i.e. 110% of <years>'s capacities => expansion_limit: 1.1 false: Use estimated renewable potentials determine by the workflow" estimate_renewable_capacities,,,
-- technology_mapping,,,"Mapping between powerplantmatching and PyPSA-Eur technology names" -- enable,,bool,Activate routine to estimate renewable capacities
-- from_opsd,--,bool,Add renewable capacities from `OPSD database <https://data.open-power-system-data.org/renewable_power_plants/2020-08-25>`_. The value is deprecated but can still be used.
-- year,--,bool,Renewable capacities are based on existing capacities reported by IRENA (IRENASTAT) for the specified year
-- expansion_limit,--,float or false,"Artificially limit maximum IRENA capacities to a factor. For example, an ``expansion_limit: 1.1`` means 110% of the reported capacities. If false is chosen, the estimated renewable potentials determined by the workflow are used."
-- technology_mapping,,,Mapping between PyPSA-Eur and powerplantmatching technology names
-- -- Offshore,--,"Any subset of {offwind-ac, offwind-dc}","List of PyPSA-Eur carriers that is considered as (IRENA, OPSD) onshore technology."
-- -- Offshore,--,{onwind},"List of PyPSA-Eur carriers that is considered as (IRENA, OPSD) offshore technology."
-- -- PV,--,{solar},"List of PyPSA-Eur carriers that is considered as (IRENA, OPSD) PV technology."
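As a sketch, a few of these ``electricity`` options might be set like this in ``config.yaml`` (values are illustrative, not the shipped defaults):

    electricity:
      voltages: [220., 300., 380.]
      co2limit: 7.75e+7          # tCO2-eq per year
      extendable_carriers:
        Generator: [solar, onwind, offwind-ac, OCGT]
        StorageUnit: [battery]
        Store: [H2]
        Link: [H2 pipeline]
      renewable_carriers: [solar, onwind, offwind-ac, offwind-dc, hydro]
      estimate_renewable_capacities:
        enable: true
        year: 2020
        expansion_limit: false
        technology_mapping:
          Offshore: [offwind-ac, offwind-dc]
          Onshore: [onwind]
          PV: [solar]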


View File

@ -1,4 +1,5 @@
,Unit,Values,Description ,Unit,Values,Description
enable,str or bool,"{auto, true, false}","Switch to include (true) or exclude (false) the retrieve_* rules of snakemake in the workflow; 'auto' sets it to true or false depending on whether an internet connection is available, so that snakemake does not fail for lack of connectivity."
prepare_links_p_nom,bool,"{true, false}","Switch to retrieve current HVDC projects from `Wikipedia <https://en.wikipedia.org/wiki/List_of_HVDC_projects>`_" prepare_links_p_nom,bool,"{true, false}","Switch to retrieve current HVDC projects from `Wikipedia <https://en.wikipedia.org/wiki/List_of_HVDC_projects>`_"
retrieve_databundle,bool,"{true, false}","Switch to retrieve databundle from zenodo via the rule :mod:`retrieve_databundle` or whether to keep a custom databundle located in the corresponding folder." retrieve_databundle,bool,"{true, false}","Switch to retrieve databundle from zenodo via the rule :mod:`retrieve_databundle` or whether to keep a custom databundle located in the corresponding folder."
retrieve_sector_databundle,bool,"{true, false}","Switch to retrieve sector databundle from zenodo via the rule :mod:`retrieve_sector_databundle` or whether to keep a custom databundle located in the corresponding folder." retrieve_sector_databundle,bool,"{true, false}","Switch to retrieve sector databundle from zenodo via the rule :mod:`retrieve_sector_databundle` or whether to keep a custom databundle located in the corresponding folder."
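A hedged sketch of the corresponding ``enable`` section (the ``retrieve`` sub-key name for the auto/true/false switch is an assumption; the other keys follow the rows above, values illustrative):

    enable:
      retrieve: auto               # assumed key name for the auto/true/false switch
      prepare_links_p_nom: false
      retrieve_databundle: true
      retrieve_sector_databundle: true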


View File

@ -0,0 +1,7 @@
,Unit,Values,Description
energy_totals_year ,--,"{1990,1995,2000,2005,2010,2011,…} ",The year for the sector energy use. The year must be available in the Eurostat report
base_emissions_year ,--,"YYYY; e.g. 1990","The base year for the sector emissions. See `European Environment Agency (EEA) <https://www.eea.europa.eu/data-and-maps/data/national-emissions-reported-to-the-unfccc-and-to-the-eu-greenhouse-gas-monitoring-mechanism-16>`_."
eurostat_report_year ,--,"{2016,2017,2018}","The publication year of the Eurostat report. 2016 includes Bosnia and Herzegovina, 2017 does not"
emissions ,--,"{CO2, All greenhouse gases - (CO2 equivalent)}","Specify which sectoral emissions are taken into account. Data derived from EEA. Currently only CO2 is implemented."
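An illustrative ``energy`` section consistent with these rows might look as follows (values are examples only, not defaults):

    energy:
      energy_totals_year: 2011
      base_emissions_year: 1990
      eurostat_report_year: 2018
      emissions: CO2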

View File

@ -0,0 +1,6 @@
,Unit,Values,Description
grouping_years_power ,--,A list of years,Intervals to group existing capacities for power
grouping_years_heat ,--,A list of years below 2020,Intervals to group existing capacities for heat
threshold_capacity ,MW,float,Capacities of generators and links below this threshold are removed during add_existing_capacities
conventional_carriers ,--,"Any subset of {uranium, coal, lignite, oil} ",List of conventional power plants to include in the sectoral network
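A sketch of an ``existing_capacities`` section (grouping intervals, threshold and carrier list are illustrative):

    existing_capacities:
      grouping_years_power: [1960, 1970, 1980, 1990, 2000, 2010, 2020]
      grouping_years_heat: [1980, 1990, 2000, 2010, 2019]   # list of years below 2020
      threshold_capacity: 10        # MW
      conventional_carriers: [lignite, coal, oil, uranium]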

View File

@ -0,0 +1,2 @@
,Unit,Values,Description
foresight,string,"{overnight, myopic, perfect}","See :ref:`Foresight Options` for detailed explanations."
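In ``config.yaml`` this is a single top-level key, e.g.:

    foresight: overnight   # or 'myopic' / 'perfect'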

View File

@ -0,0 +1,31 @@
,Unit,Values,Description
St_primary_fraction,--,Dictionary with planning horizons as keys.,The fraction of steel produced via primary route versus secondary route (scrap+EAF). Current fraction is 0.6
DRI_fraction,--,Dictionary with planning horizons as keys.,The fraction of the primary route DRI + EAF
,,,
H2_DRI,--,float,The hydrogen consumption of Direct Reduced Iron (DRI) in MWh_H2 (LHV) per ton of steel derived from 51 kgH2/tSt in `Vogl et al (2018) <https://doi.org/10.1016/j.jclepro.2018.08.279>`_
elec_DRI,MWh/tSt,float,The electricity consumed in Direct Reduced Iron (DRI) shaft. From `HYBRIT brochure <https://ssabwebsitecdn.azureedge.net/-/media/hybrit/files/hybrit_brochure.pdf>`_
Al_primary_fraction,--,Dictionary with planning horizons as keys.,The fraction of aluminium produced via the primary route versus scrap. Current fraction is 0.4
MWh_NH3_per_tNH3,LHV,float,The energy amount per ton of ammonia.
MWh_CH4_per_tNH3_SMR,--,float,The energy amount of methane needed to produce a ton of ammonia using steam methane reforming (SMR). Value derived from 2012's demand from `Center for European Policy Studies (2008) <https://ec.europa.eu/docsroom/documents/4165/attachments/1/translations/en/renditions/pdf>`_
MWh_elec_per_tNH3_SMR,--,float,"The energy amount of electricity needed to produce a ton of ammonia using steam methane reforming (SMR). Same source, assuming a 94%/6% methane/electricity split of the total energy demand of 11.5 MWh/tNH3"
Mwh_H2_per_tNH3 _electrolysis,--,float,"The energy amount of hydrogen needed to produce a ton of ammonia using the Haber-Bosch process. From `Wang et al (2018) <https://doi.org/10.1016/j.joule.2018.04.017>`_, base value assumed around 0.197 tH2/tNH3 (>3/17 since some H2 is lost and used for energy)"
Mwh_elec_per_tNH3 _electrolysis,--,float,"The energy amount of electricity needed to produce a ton of ammonia using the Haber-Bosch process. From `Wang et al (2018) <https://doi.org/10.1016/j.joule.2018.04.017>`_, Table 13 (air separation and HB)"
Mwh_NH3_per_MWh _H2_cracker,--,float,The energy amount of ammonia needed to produce an energy amount of hydrogen using an ammonia cracker
NH3_process_emissions,MtCO2/a,float,The emission of ammonia production from steam methane reforming (SMR). From UNFCCC for 2015 for EU28
petrochemical_process _emissions,MtCO2/a,float,The emission of petrochemical production. From UNFCCC for 2015 for EU28
HVC_primary_fraction,--,float,The fraction of high value chemicals (HVC) produced via primary route
HVC_mechanical_recycling _fraction,--,float,The fraction of high value chemicals (HVC) produced using mechanical recycling
HVC_chemical_recycling _fraction,--,float,The fraction of high value chemicals (HVC) produced using chemical recycling
,,,
HVC_production_today,MtHVC/a,float,"The amount of high value chemicals (HVC) produced. This includes ethylene, propylene and BTX. From `DECHEMA (2017) <https://dechema.de/dechema_media/Downloads/Positionspapiere/Technology_study_Low_carbon_energy_and_feedstock_for_the_European_chemical_industry-p-20002750.pdf>`_, Figure 16, page 107"
Mwh_elec_per_tHVC _mechanical_recycling,MWh/tHVC,float,"The energy amount of electricity needed to produce a ton of high value chemical (HVC) using mechanical recycling. From SI of `Meys et al (2020) <https://doi.org/10.1016/j.resconrec.2020.105010>`_, Table S5, for HDPE, PP, PS, PET. LDPE would be 0.756."
Mwh_elec_per_tHVC _chemical_recycling,MWh/tHVC,float,"The energy amount of electricity needed to produce a ton of high value chemical (HVC) using chemical recycling. The default value is based on pyrolysis and electric steam cracking. From `Material Economics (2019) <https://materialeconomics.com/latest-updates/industrial-transformation-2050>`_, page 125"
,,,
chlorine_production _today,MtCl/a,float,"The amount of chlorine produced. From `DECHEMA (2017) <https://dechema.de/dechema_media/Downloads/Positionspapiere/Technology_study_Low_carbon_energy_and_feedstock_for_the_European_chemical_industry-p-20002750.pdf>`_, Table 7, page 43"
MWh_elec_per_tCl,MWh/tCl,float,"The energy amount of electricity needed to produce a ton of chlorine. From `DECHEMA (2017) <https://dechema.de/dechema_media/Downloads/Positionspapiere/Technology_study_Low_carbon_energy_and_feedstock_for_the_European_chemical_industry-p-20002750.pdf>`_, Table 6 page 43"
MWh_H2_per_tCl,MWhH2/tCl,float,"The energy amount of hydrogen needed to produce a ton of chlorine. The value is negative since hydrogen is produced in the chlor-alkali process. From `DECHEMA (2017) <https://dechema.de/dechema_media/Downloads/Positionspapiere/Technology_study_Low_carbon_energy_and_feedstock_for_the_European_chemical_industry-p-20002750.pdf>`_, page 43"
methanol_production _today,MtMeOH/a,float,"The amount of methanol produced. From `DECHEMA (2017) <https://dechema.de/dechema_media/Downloads/Positionspapiere/Technology_study_Low_carbon_energy_and_feedstock_for_the_European_chemical_industry-p-20002750.pdf>`_, page 62"
MWh_elec_per_tMeOH,MWh/tMeOH,float,"The energy amount of electricity needed to produce a ton of methanol. From `DECHEMA (2017) <https://dechema.de/dechema_media/Downloads/Positionspapiere/Technology_study_Low_carbon_energy_and_feedstock_for_the_European_chemical_industry-p-20002750.pdf>`_, Table 14, page 65"
MWh_CH4_per_tMeOH,MWhCH4/tMeOH,float,"The energy amount of methane needed to produce a ton of methanol. From `DECHEMA (2017) <https://dechema.de/dechema_media/Downloads/Positionspapiere/Technology_study_Low_carbon_energy_and_feedstock_for_the_European_chemical_industry-p-20002750.pdf>`_, Table 14, page 65"
hotmaps_locate_missing,--,"{true,false}",Locate industrial sites without valid locations based on city and countries.
reference_year,year,YYYY,The year used as the baseline for industrial energy demand and production. Data extracted from `JRC-IDEES 2015 <https://data.jrc.ec.europa.eu/dataset/jrc-10110-10001>`_
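A hedged sketch of a few ``industry`` entries combining scalar values and planning-horizon dictionaries (numbers are illustrative placeholders, not the documented defaults):

    industry:
      St_primary_fraction:
        2030: 0.6
        2040: 0.5
        2050: 0.4
      DRI_fraction:
        2030: 0.05
        2050: 1.0
      HVC_primary_fraction: 1.0
      hotmaps_locate_missing: false
      reference_year: 2015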

View File

@ -2,5 +2,6 @@
types,--,"Values should specify a `line type in PyPSA <https://pypsa.readthedocs.io/en/latest/components.html#line-types>`_. Keys should specify the corresponding voltage level (e.g. 220., 300. and 380. kV)","Specifies line types to assume for the different voltage levels of the ENTSO-E grid extraction. Should normally handle voltage levels 220, 300, and 380 kV" types,--,"Values should specify a `line type in PyPSA <https://pypsa.readthedocs.io/en/latest/components.html#line-types>`_. Keys should specify the corresponding voltage level (e.g. 220., 300. and 380. kV)","Specifies line types to assume for the different voltage levels of the ENTSO-E grid extraction. Should normally handle voltage levels 220, 300, and 380 kV"
s_max_pu,--,"Value in [0.,1.]","Correction factor for line capacities (``s_nom``) to approximate :math:`N-1` security and reserve capacity for reactive power flows" s_max_pu,--,"Value in [0.,1.]","Correction factor for line capacities (``s_nom``) to approximate :math:`N-1` security and reserve capacity for reactive power flows"
s_nom_max,MW,"float","Global upper limit for the maximum capacity of each extendable line." s_nom_max,MW,"float","Global upper limit for the maximum capacity of each extendable line."
max_extension,MW,"float","Upper limit for the extended capacity of each extendable line."
length_factor,--,float,"Correction factor to account for the fact that buses are *not* connected by lines through air-line distance." length_factor,--,float,"Correction factor to account for the fact that buses are *not* connected by lines through air-line distance."
under_construction,--,"One of {'zero': set capacity to zero, 'remove': remove completely, 'keep': keep with full capacity}","Specifies how to handle lines which are currently under construction." under_construction,--,"One of {'zero': set capacity to zero, 'remove': remove completely, 'keep': keep with full capacity}","Specifies how to handle lines which are currently under construction."
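For example, a ``lines`` section might look as follows (illustrative values; ``.inf`` means no global limit, and the line type names are standard PyPSA line types):

    lines:
      types:
        220.: "Al/St 240/40 2-bundle 220.0"
        300.: "Al/St 240/40 3-bundle 300.0"
        380.: "Al/St 240/40 4-bundle 380.0"
      s_max_pu: 0.7
      s_nom_max: .inf
      max_extension: 20000          # MW
      length_factor: 1.25
      under_construction: 'zero'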


View File

@ -1,5 +1,6 @@
,Unit,Values,Description ,Unit,Values,Description
p_max_pu,--,"Value in [0.,1.]","Correction factor for link capacities ``p_nom``." p_max_pu,--,"Value in [0.,1.]","Correction factor for link capacities ``p_nom``."
p_nom_max,MW,"float","Global upper limit for the maximum capacity of each extendable DC link." p_nom_max,MW,"float","Global upper limit for the maximum capacity of each extendable DC link."
max_extension,MW,"float","Upper limit for the extended capacity of each extendable DC link."
include_tyndp,bool,"{'true', 'false'}","Specifies whether to add HVDC link projects from the `TYNDP 2018 <https://tyndp.entsoe.eu/tyndp2018/projects/>`_ which are at least in permitting." include_tyndp,bool,"{'true', 'false'}","Specifies whether to add HVDC link projects from the `TYNDP 2018 <https://tyndp.entsoe.eu/tyndp2018/projects/>`_ which are at least in permitting."
under_construction,--,"One of {'zero': set capacity to zero, 'remove': remove completely, 'keep': keep with full capacity}","Specifies how to handle lines which are currently under construction." under_construction,--,"One of {'zero': set capacity to zero, 'remove': remove completely, 'keep': keep with full capacity}","Specifies how to handle lines which are currently under construction."
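An analogous ``links`` section could be sketched as (illustrative values):

    links:
      p_max_pu: 1.0
      p_nom_max: .inf
      max_extension: 30000          # MW
      include_tyndp: true
      under_construction: 'zero'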


View File

@ -1,10 +1,10 @@
,Unit,Values,Description ,Unit,Values,Description
map,,, map,,,
-- boundaries,°,"[x1,x2,y1,y2]","Boundaries of the map plots in degrees latitude (y) and longitude (x)" -- boundaries,°,"[x1,x2,y1,y2]",Boundaries of the map plots in degrees latitude (y) and longitude (x)
costs_max,bn Euro,float,"Upper y-axis limit in cost bar plots." costs_max,bn Euro,float,Upper y-axis limit in cost bar plots.
costs_threshold,bn Euro,float,"Threshold below which technologies will not be shown in cost bar plots." costs_threshold,bn Euro,float,Threshold below which technologies will not be shown in cost bar plots.
energy_max,TWh,float,"Upper y-axis limit in energy bar plots." energy_max,TWh,float,Upper y-axis limit in energy bar plots.
energy_min,TWh,float,"Lower y-axis limit in energy bar plots." energy_min,TWh,float,Lower y-axis limit in energy bar plots.
energy_threshold,TWh,float,"Threshold below which technologies will not be shown in energy bar plots." energy_threshold,TWh,float,Threshold below which technologies will not be shown in energy bar plots.
tech_colors,--,"carrier -> HEX colour code","Mapping from network ``carrier`` to a colour (`HEX colour code <https://en.wikipedia.org/wiki/Web_colors#Hex_triplet>`_)." tech_colors,--,carrier -> HEX colour code,Mapping from network ``carrier`` to a colour (`HEX colour code <https://en.wikipedia.org/wiki/Web_colors#Hex_triplet>`_).
nice_names,--,"str -> str","Mapping from network ``carrier`` to a more readable name." nice_names,--,str -> str,Mapping from network ``carrier`` to a more readable name.
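A minimal sketch of the ``plotting`` section (boundaries, limits and colours are examples only):

    plotting:
      map:
        boundaries: [-11, 30, 34, 71]   # [x1, x2, y1, y2] in degrees
      costs_max: 800        # bn Euro
      costs_threshold: 1
      energy_max: 20000     # TWh
      energy_min: -20000
      energy_threshold: 50
      tech_colors:
        onwind: "#235ebc"
        solar: "#f9d002"
      nice_names:
        OCGT: "Open-Cycle Gas"
        offwind-ac: "Offshore Wind (AC)"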


View File

@ -1,5 +1,5 @@
Trigger, Description, Definition, Status Trigger, Description, Definition, Status
``nH``, i.e. ``2H``-``6H``, Resample the time-resolution by averaging over every ``n`` snapshots, ``prepare_network``: `average_every_nhours() <https://github.com/PyPSA/pypsa-eur/blob/6b964540ed39d44079cdabddee8333f486d0cd63/scripts/prepare_network.py#L110>`_ and its `caller <https://github.com/PyPSA/pypsa-eur/blob/6b964540ed39d44079cdabddee8333f486d0cd63/scripts/prepare_network.py#L146>`__), In active use ``nH``, i.e. ``2H``-``6H``, "Resample the time-resolution by averaging over every ``n`` snapshots, ``prepare_network``: `average_every_nhours() <https://github.com/PyPSA/pypsa-eur/blob/6b964540ed39d44079cdabddee8333f486d0cd63/scripts/prepare_network.py#L110>`_ and its `caller <https://github.com/PyPSA/pypsa-eur/blob/6b964540ed39d44079cdabddee8333f486d0cd63/scripts/prepare_network.py#L146>`__)", In active use
``Co2L``, Add an overall absolute carbon-dioxide emissions limit configured in ``electricity: co2limit``. If a float is appended an overall emission limit relative to the emission level given in ``electricity: co2base`` is added (e.g. ``Co2L0.05`` limits emissions to 5% of what is given in ``electricity: co2base``), ``prepare_network``: `add_co2limit() <https://github.com/PyPSA/pypsa-eur/blob/6b964540ed39d44079cdabddee8333f486d0cd63/scripts/prepare_network.py#L19>`_ and its `caller <https://github.com/PyPSA/pypsa-eur/blob/6b964540ed39d44079cdabddee8333f486d0cd63/scripts/prepare_network.py#L154>`__, In active use ``Co2L``, Add an overall absolute carbon-dioxide emissions limit configured in ``electricity: co2limit``. If a float is appended an overall emission limit relative to the emission level given in ``electricity: co2base`` is added (e.g. ``Co2L0.05`` limits emissions to 5% of what is given in ``electricity: co2base``), ``prepare_network``: `add_co2limit() <https://github.com/PyPSA/pypsa-eur/blob/6b964540ed39d44079cdabddee8333f486d0cd63/scripts/prepare_network.py#L19>`_ and its `caller <https://github.com/PyPSA/pypsa-eur/blob/6b964540ed39d44079cdabddee8333f486d0cd63/scripts/prepare_network.py#L154>`__, In active use
``carrier+{c|p|m}factor``,"Alter the capital cost (``c``), installable potential (``p``) or marginal costs (``m``) of a carrier by a factor. Example: ``solar+c0.5`` reduces the capital cost of solar to 50\% of original values.", ``prepare_network``, In active use ``carrier+{c|p|m}factor``,"Alter the capital cost (``c``), installable potential (``p``) or marginal costs (``m``) of a carrier by a factor. Example: ``solar+c0.5`` reduces the capital cost of solar to 50\% of original values.", ``prepare_network``, In active use
``T``,Add land transport sector,,In active use ``T``,Add land transport sector,,In active use
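These triggers are combined with dashes in the ``{opts}`` wildcard of the ``scenario`` section, e.g. (a hypothetical combination):

    scenario:
      opts: [Co2L0.05-3H-solar+c0.5]   # 5% of co2base, 3-hourly resolution, solar capital cost halved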


122 doc/configtables/sector.csv Normal file
View File

@ -0,0 +1,122 @@
,Unit,Values,Description
district_heating,--,,`prepare_sector_network.py <https://github.com/PyPSA/pypsa-eur-sec/blob/master/scripts/prepare_sector_network.py>`_
-- potential,--,float,maximum fraction of urban demand which can be supplied by district heating
-- progress,--,Dictionary with planning horizons as keys., Increase of today's district heating demand to potential maximum district heating share. Progress = 0 means today's district heating share. Progress = 1 means maximum fraction of urban demand is supplied by district heating
-- district_heating_loss,--,float,Share increase in district heat demand in urban central due to heat losses
cluster_heat_buses,--,"{true, false}",Cluster residential and service heat buses in `prepare_sector_network.py <https://github.com/PyPSA/pypsa-eur-sec/blob/master/scripts/prepare_sector_network.py>`_ to one to save memory.
,,,
bev_dsm_restriction _value,--,float,Adds a lower state of charge (SOC) limit for battery electric vehicles (BEV) so that they can manage their own energy demand (DSM). Located in `build_transport_demand.py <https://github.com/PyPSA/pypsa-eur-sec/blob/master/scripts/build_transport_demand.py>`_. Set to 0 for no restriction on BEV DSM
bev_dsm_restriction _time,--,float,Time at which the SOC of the BEV has to reach the dsm_restriction_value
transport_heating _deadband_upper,°C,float,"The maximum temperature in the vehicle. At higher temperatures, the energy required for cooling in the vehicle increases."
transport_heating _deadband_lower,°C,float,"The minimum temperature in the vehicle. At lower temperatures, the energy required for heating in the vehicle increases."
,,,
ICE_lower_degree_factor,--,float,Share increase in energy demand in internal combustion engine (ICE) for each degree difference between the cold environment and the minimum temperature.
ICE_upper_degree_factor,--,float,Share increase in energy demand in internal combustion engine (ICE) for each degree difference between the hot environment and the maximum temperature.
EV_lower_degree_factor,--,float,Share increase in energy demand in electric vehicles (EV) for each degree difference between the cold environment and the minimum temperature.
EV_upper_degree_factor,--,float,Share increase in energy demand in electric vehicles (EV) for each degree difference between the hot environment and the maximum temperature.
bev_dsm,--,"{true, false}",Add the option for battery electric vehicles (BEV) to participate in demand-side management (DSM)
,,,
bev_availability,--,float,The share for battery electric vehicles (BEV) that are able to do demand side management (DSM)
bev_energy,--,float,The average size of battery electric vehicles (BEV) in MWh
bev_charge_efficiency,--,float,Battery electric vehicles (BEV) charge and discharge efficiency
bev_plug_to_wheel _efficiency,km/kWh,float,The distance battery electric vehicles (BEV) can travel in km per kWh of energy charged into the battery. Base value comes from `Tesla Model S <https://www.fueleconomy.gov/feg/>`_
bev_charge_rate,MWh,float,The power consumption for one electric vehicle (EV) in MWh. Value derived from 3-phase charger with 11 kW.
bev_avail_max,--,float,The maximum share plugged-in availability for passenger electric vehicles.
bev_avail_mean,--,float,The average share plugged-in availability for passenger electric vehicles.
v2g,--,"{true, false}",Allows feed-in to grid from EV battery
land_transport_fuel_cell _share,--,Dictionary with planning horizons as keys.,The share of vehicles that uses fuel cells in a given year
land_transport_electric _share,--,Dictionary with planning horizons as keys.,The share of vehicles that uses electric vehicles (EV) in a given year
land_transport_ice _share,--,Dictionary with planning horizons as keys.,The share of vehicles that uses internal combustion engines (ICE) in a given year. What is not EV or FCEV is oil-fuelled ICE.
transport_fuel_cell _efficiency,--,float,The H2 conversion efficiencies of fuel cells in transport
transport_internal _combustion_efficiency,--,float,The oil conversion efficiencies of internal combustion engine (ICE) in transport
agriculture_machinery _electric_share,--,float,The share for agricultural machinery that uses electricity
agriculture_machinery _oil_share,--,float,The share for agricultural machinery that uses oil
agriculture_machinery _fuel_efficiency,--,float,The efficiency of oil-powered machinery in the conversion of oil to meet agricultural needs.
agriculture_machinery _electric_efficiency,--,float,The efficiency of electric-powered machinery in the conversion of electricity to meet agricultural needs.
Mwh_MeOH_per_MWh_H2,LHV,float,"The energy amount of the produced methanol per energy amount of hydrogen. From `DECHEMA (2017) <https://dechema.de/dechema_media/Downloads/Positionspapiere/Technology_study_Low_carbon_energy_and_feedstock_for_the_European_chemical_industry-p-20002750.pdf>`_, page 64."
MWh_MeOH_per_tCO2,LHV,float,"The energy amount of the produced methanol per ton of CO2. From `DECHEMA (2017) <https://dechema.de/dechema_media/Downloads/Positionspapiere/Technology_study_Low_carbon_energy_and_feedstock_for_the_European_chemical_industry-p-20002750.pdf>`_, page 64."
MWh_MeOH_per_MWh_e,LHV,float,"The energy amount of the produced methanol per energy amount of electricity. From `DECHEMA (2017) <https://dechema.de/dechema_media/Downloads/Positionspapiere/Technology_study_Low_carbon_energy_and_feedstock_for_the_European_chemical_industry-p-20002750.pdf>`_, page 64."
shipping_hydrogen _liquefaction,--,"{true, false}",Whether to include liquefaction costs for hydrogen demand in shipping.
,,,
shipping_hydrogen_share,--,Dictionary with planning horizons as keys.,The share of ships powered by hydrogen in a given year
shipping_methanol_share,--,Dictionary with planning horizons as keys.,The share of ships powered by methanol in a given year
shipping_oil_share,--,Dictionary with planning horizons as keys.,The share of ships powered by oil in a given year
shipping_methanol _efficiency,--,float,The efficiency of methanol-powered ships in the conversion of methanol to meet shipping needs (propulsion). The efficiency increase from oil can be 10-15% higher according to the `IEA <https://www.iea-amf.org/app/webroot/files/file/Annex%20Reports/AMF_Annex_56.pdf>`_
,,,
shipping_oil_efficiency,--,float,The efficiency of oil-powered ships in the conversion of oil to meet shipping needs (propulsion). Base value derived from 2011
aviation_demand_factor,--,float,The proportion of demand for aviation compared to today's consumption
HVC_demand_factor,--,float,The proportion of demand for high-value chemicals compared to today's consumption
,,,
time_dep_hp_cop,--,"{true, false}",Consider the time dependent coefficient of performance (COP) of the heat pump
heat_pump_sink_T,°C,float,The temperature of the heat sink used in heat pumps based on DTU / large area radiators. The value is conservatively high to cover hot water and space heating in poorly-insulated buildings
reduce_space_heat _exogenously,--,"{true, false}",Influence on space heating demand by a certain factor (applied before losses in district heating).
reduce_space_heat _exogenously_factor,--,Dictionary with planning horizons as keys.,"A positive factor can mean renovation or demolition of a building. If the factor is negative, it can mean an increase in floor area, increased thermal comfort, population growth. The default factors are determined by the `Eurocalc Homes and buildings decarbonization scenario <http://tool.european-calculator.eu/app/buildings/building-types-area/?levers=1ddd4444421213bdbbbddd44444ffffff11f411111221111211l212221>`_"
retrofitting,,,
-- retro_endogen,--,"{true, false}",Add retrofitting as an endogenous system which co-optimises space heat savings.
-- cost_factor,--,float,Weight costs for building renovation
-- interest_rate,--,float,The interest rate for investment in building components
-- annualise_cost,--,"{true, false}",Annualise the investment costs of retrofitting
-- tax_weighting,--,"{true, false}",Weight the costs of retrofitting depending on taxes in countries
-- construction_index,--,"{true, false}",Weight the costs of retrofitting depending on labour/material costs per country
tes,--,"{true, false}",Add option for storing thermal energy in large water pits associated with district heating systems and individual thermal energy storage (TES)
tes_tau,,,The time constant used to calculate the decay of thermal energy in thermal energy storage (TES): :math:`1 - e^{-1/(24\tau)}`.
-- decentral,days,float,The time constant in decentralized thermal energy storage (TES)
-- central,days,float,The time constant in centralized thermal energy storage (TES)
boilers,--,"{true, false}",Add option for transforming electricity into heat using resistive heater
oil_boilers,--,"{true, false}",Add option for transforming oil into heat using boilers
biomass_boiler,--,"{true, false}",Add option for transforming biomass into heat using boilers
chp,--,"{true, false}",Add option for using Combined Heat and Power (CHP)
micro_chp,--,"{true, false}",Add option for using Combined Heat and Power (CHP) for decentral areas.
solar_thermal,--,"{true, false}",Add option for using solar thermal to generate heat.
solar_cf_correction,--,float,The correction factor for the value provided by the solar thermal profile calculations
marginal_cost_storage,currency/MWh ,float,The marginal cost of discharging batteries in distributed grids
methanation,--,"{true, false}",Add option for transforming hydrogen and CO2 into methane using methanation.
helmeth,--,"{true, false}",Add option for transforming power into gas using HELMETH (Integrated High-Temperature ELectrolysis and METHanation for Effective Power to Gas Conversion)
coal_cc,--,"{true, false}",Add option for coal CHPs with carbon capture
dac,--,"{true, false}",Add option for Direct Air Capture (DAC)
co2_vent,--,"{true, false}",Add option to vent CO2 from storage to the atmosphere.
allam_cycle,--,"{true, false}",Add option to include `Allam cycle gas power plants <https://en.wikipedia.org/wiki/Allam_power_cycle>`_
hydrogen_fuel_cell,--,"{true, false}",Add option to include hydrogen fuel cell for re-electrification. Assuming OCGT technology costs
hydrogen_turbine,--,"{true, false}",Add option to include hydrogen turbine for re-electrification. Assuming OCGT technology costs
SMR,--,"{true, false}",Add option for transforming natural gas into hydrogen and CO2 using Steam Methane Reforming (SMR)
regional_co2 _sequestration_potential,,,
-- enable,--,"{true, false}",Add option for regionally-resolved geological carbon dioxide sequestration potentials based on `CO2StoP <https://setis.ec.europa.eu/european-co2-storage-database_en>`_.
-- attribute,--,string,Name of the attribute for the sequestration potential
-- include_onshore,--,"{true, false}",Add options for including onshore sequestration potentials
-- min_size,Gt ,float,Any sites with lower potential than this value will be excluded
-- max_size,Gt ,float,The maximum sequestration potential for any one site.
-- years_of_storage,years,float,The number of years until the potential is exhausted at the optimised annual rate
co2_sequestration_potential,MtCO2/a,float,The potential of sequestering CO2 in Europe per year
co2_sequestration_cost,currency/tCO2,float,The cost of sequestering a ton of CO2
co2_spatial,--,"{true, false}","Add option to spatially resolve carrier representing stored carbon dioxide. This allows for more detailed modelling of CCUTS, e.g. regarding the capturing of industrial process emissions, usage as feedstock for electrofuels, transport of carbon dioxide, and geological sequestration sites."
,,,
co2network,--,"{true, false}",Add option for planning a new carbon dioxide transmission network
,,,
cc_fraction,--,float,The default fraction of CO2 captured with post-combustion capture
hydrogen_underground _storage,--,"{true, false}",Add options for storing hydrogen underground. Storage potential depends regionally.
hydrogen_underground _storage_locations,,"{onshore, nearshore, offshore}","The location where hydrogen underground storage can be located. Onshore, nearshore, offshore means it must be located more than 50 km away from the sea, within 50 km of the sea, or within the sea itself respectively."
,,,
ammonia,--,"{true, false, regional}","Add ammonia as a carrier. It can be either true (copperplated NH3), false (no NH3 carrier) or ""regional"" (regionalised NH3 without network)"
min_part_load_fischer _tropsch,per unit of p_nom ,float,The minimum unit dispatch (``p_min_pu``) for the Fischer-Tropsch process
min_part_load _methanolisation,per unit of p_nom ,float,The minimum unit dispatch (``p_min_pu``) for the methanolisation process
,,,
use_fischer_tropsch _waste_heat,--,"{true, false}",Add option for using waste heat of Fischer Tropsch in district heating networks
use_fuel_cell_waste_heat,--,"{true, false}",Add option for using waste heat of fuel cells in district heating networks
use_electrolysis_waste _heat,--,"{true, false}",Add option for using waste heat of electrolysis in district heating networks
electricity_distribution _grid,--,"{true, false}",Add a simplified representation of the exchange capacity between transmission and distribution grid level through a link.
electricity_distribution _grid_cost_factor,,,Multiplies the investment cost of the electricity distribution grid
,,,
electricity_grid _connection,--,"{true, false}",Add the cost of electricity grid connection for onshore wind and solar
H2_network,--,"{true, false}",Add option for new hydrogen pipelines
gas_network,--,"{true, false}","Add existing natural gas infrastructure, incl. LNG terminals, production and entry-points. The existing gas network is added with a lossless transport model. A length-weighted `k-edge augmentation algorithm <https://networkx.org/documentation/stable/reference/algorithms/generated/networkx.algorithms.connectivity.edge_augmentation.k_edge_augmentation.html#networkx.algorithms.connectivity.edge_augmentation.k_edge_augmentation>`_ can be run to add new candidate gas pipelines such that all regions of the model can be connected to the gas network. When activated, all the gas demands are regionally disaggregated as well."
H2_retrofit,--,"{true, false}",Add option for retrofitting existing pipelines to transport hydrogen.
H2_retrofit_capacity _per_CH4,--,float,"The ratio of H2 capacity per original CH4 capacity of retrofitted pipelines. According to the `European Hydrogen Backbone (April, 2020) p.15 <https://gasforclimate2050.eu/wp-content/uploads/2020/07/2020_European-Hydrogen-Backbone_Report.pdf>`_, 60% of the original natural gas capacity could be used as H2 capacity in the cost-optimal case."
gas_network_connectivity _upgrade ,--,float,The desired edge connectivity (k) in the length-weighted `k-edge augmentation algorithm <https://networkx.org/documentation/stable/reference/algorithms/generated/networkx.algorithms.connectivity.edge_augmentation.k_edge_augmentation.html#networkx.algorithms.connectivity.edge_augmentation.k_edge_augmentation>`_ used for the gas network
gas_distribution_grid,--,"{true, false}",Add a gas distribution grid
gas_distribution_grid _cost_factor,,,Multiplier for the investment cost of the gas distribution grid
,,,
biomass_spatial,--,"{true, false}",Add option for resolving biomass demand regionally
biomass_transport,--,"{true, false}",Add option for transporting solid biomass between nodes
conventional_generation,,,Add a more detailed description of conventional carriers. Any power generation requires the consumption of fuel from nodes representing that fuel.
biomass_to_liquid,--,"{true, false}",Add option for transforming solid biomass into liquid fuel with the same properties as oil
biosng,--,"{true, false}",Add option for transforming solid biomass into synthesis gas with the same properties as natural gas
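The ``gas_network`` and ``gas_network_connectivity_upgrade`` options above rely on a length-weighted k-edge augmentation. A minimal, hedged sketch of the idea with ``networkx`` follows; the region names, lengths and candidate pipelines are made-up assumptions, not the actual PyPSA-Eur implementation.
.. code:: python
# Illustrative sketch (assumed data, not the PyPSA-Eur code): propose candidate
# gas pipelines with a length-weighted k-edge augmentation so that all regions
# can be connected to the existing gas network.
import networkx as nx
G = nx.Graph()
G.add_edge("DE0", "DE1", length=150)   # existing pipelines, lengths in km
G.add_edge("DE1", "FR0", length=300)
G.add_node("ES0")                      # region without a gas connection yet
# candidate pipelines as (u, v, length); shorter candidates are preferred
candidates = [("FR0", "ES0", 550), ("DE0", "ES0", 1800)]
# k=1 requests a connected network; larger k would add redundant connections
new_pipes = list(nx.k_edge_augmentation(G, k=1, avail=candidates))
print(new_pipes)  # expected: the shorter FR0-ES0 candidate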

View File

@ -0,0 +1,6 @@
,Unit,Values,Description
clearsky_model ,--,"{simple, enhanced}",Type of clearsky model for diffuse irradiation
orientation ,--,"{units of degrees, latitude_optimal}",Panel orientation with slope and azimuth
-- azimuth,units of degrees,float,The angle between the North and the sun with panels on the local horizon
-- slope,units of degrees,float,The angle between the ground and the panels

View File

@ -1,7 +1,7 @@
,Unit,Values,Description ,Unit,Values,Description
options,,, options,,,
-- formulation,--,"Any of {'angles', 'kirchhoff', 'cycles', 'ptdf'}","Specifies which variant of linearized power flow formulations to use in the optimisation problem. Recommended is 'kirchhoff'. Explained in `this article <https://arxiv.org/abs/1704.01881>`_."
-- load_shedding,bool/float,"{'true','false', float}","Add generators with very high marginal cost to simulate load shedding and avoid problem infeasibilities. If load shedding is a float, it denotes the marginal cost in EUR/kWh." -- load_shedding,bool/float,"{'true','false', float}","Add generators with very high marginal cost to simulate load shedding and avoid problem infeasibilities. If load shedding is a float, it denotes the marginal cost in EUR/kWh."
-- transmission_losses,int,"[0-9]","Add piecewise linear approximation of transmission losses based on n tangents. Defaults to 0, which means losses are ignored."
-- noisy_costs,bool,"{'true','false'}","Add random noise to marginal cost of generators by :math:`\mathcal{U}(0.009,0.011)` and capital cost of lines and links by :math:`\mathcal{U}(0.09,0.11)`." -- noisy_costs,bool,"{'true','false'}","Add random noise to marginal cost of generators by :math:`\mathcal{U}(0.009,0.011)` and capital cost of lines and links by :math:`\mathcal{U}(0.09,0.11)`."
-- min_iterations,--,int,"Minimum number of solving iterations in between which resistance and reactance (``x/r``) are updated for branches according to ``s_nom_opt`` of the previous run." -- min_iterations,--,int,"Minimum number of solving iterations in between which resistance and reactance (``x/r``) are updated for branches according to ``s_nom_opt`` of the previous run."
-- max_iterations,--,int,"Maximum number of solving iterations in between which resistance and reactance (``x/r``) are updated for branches according to ``s_nom_opt`` of the previous run." -- max_iterations,--,int,"Maximum number of solving iterations in between which resistance and reactance (``x/r``) are updated for branches according to ``s_nom_opt`` of the previous run."
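The ``transmission_losses`` option above approximates quadratic series losses with n tangents. A minimal sketch of that idea, assuming illustrative resistance and rating values rather than the PyPSA implementation:
.. code:: python
# Illustrative sketch only: approximate quadratic losses r*f^2 from below with
# n tangents, as hinted at by the "transmission_losses" option (assumed values).
import numpy as np
r, s_nom, n = 0.01, 100.0, 2                 # p.u. resistance, rating in MW, tangents
support = s_nom * np.arange(1, n + 1) / n    # flows at which tangents touch the parabola
for f in np.linspace(0.0, s_nom, 5):
    tangents = [2 * r * f0 * f - r * f0**2 for f0 in support]
    approx = max(max(tangents), 0.0)         # tangent envelope, exact at the support points
    print(f"flow={f:6.1f}  exact={r * f**2:7.2f}  approx={approx:7.2f}")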


View File

@ -4,6 +4,7 @@ tutorial,bool,"{true, false}","Switch to retrieve the tutorial data set instead
logging,,, logging,,,
-- level,--,"Any of {'INFO', 'WARNING', 'ERROR'}","Restrict console outputs to all infos, warning or errors only" -- level,--,"Any of {'INFO', 'WARNING', 'ERROR'}","Restrict console outputs to all infos, warning or errors only"
-- format,--,"","Custom format for log messages. See `LogRecord <https://docs.python.org/3/library/logging.html#logging.LogRecord>`_ attributes." -- format,--,"","Custom format for log messages. See `LogRecord <https://docs.python.org/3/library/logging.html#logging.LogRecord>`_ attributes."
<<<<<<< HEAD
foresight,string,"{overnight, myopic, perfect}","Defaults to overnight scenarios." foresight,string,"{overnight, myopic, perfect}","Defaults to overnight scenarios."
countries,--,"Subset of {'AL', 'AT', 'BA', 'BE', 'BG', 'CH', 'CZ', 'DE', 'DK', 'EE', 'ES', 'FI', 'FR', 'GB', 'GR', 'HR', 'HU', 'IE', 'IT', 'LT', 'LU', 'LV', 'ME', 'MK', 'NL', 'NO', 'PL', 'PT', 'RO', 'RS', 'SE', 'SI', 'SK'}","European countries defined by their `Two-letter country codes (ISO 3166-1) <https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2>`_ which should be included in the energy system model." countries,--,"Subset of {'AL', 'AT', 'BA', 'BE', 'BG', 'CH', 'CZ', 'DE', 'DK', 'EE', 'ES', 'FI', 'FR', 'GB', 'GR', 'HR', 'HU', 'IE', 'IT', 'LT', 'LU', 'LV', 'ME', 'MK', 'NL', 'NO', 'PL', 'PT', 'RO', 'RS', 'SE', 'SI', 'SK'}","European countries defined by their `Two-letter country codes (ISO 3166-1) <https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2>`_ which should be included in the energy system model."
focus_weights,--,"Keys should be two-digit country codes (e.g. DE) and values should range between 0 and 1","Ratio of total clusters for particular countries. the remaining weight is distributed according to mean load. An example: ``focus_weights: 'DE': 0.6 'FR': 0.2``." focus_weights,--,"Keys should be two-digit country codes (e.g. DE) and values should range between 0 and 1","Ratio of total clusters for particular countries. the remaining weight is distributed according to mean load. An example: ``focus_weights: 'DE': 0.6 'FR': 0.2``."
@ -20,3 +21,5 @@ enable,,,
======= =======
co2_budget,--,"Dictionary with planning horizons as keys.","CO2 budget as a fraction of 1990 emissions. Overwritten if ``CO2Lx`` or ``cb`` are set in ``{sector_opts}`` wildcard" co2_budget,--,"Dictionary with planning horizons as keys.","CO2 budget as a fraction of 1990 emissions. Overwritten if ``CO2Lx`` or ``cb`` are set in ``{sector_opts}`` wildcard"
>>>>>>> master >>>>>>> master
=======
>>>>>>> origin/master


View File

@ -18,15 +18,16 @@ Top-level configuration
.. literalinclude:: ../config/config.default.yaml .. literalinclude:: ../config/config.default.yaml
:language: yaml :language: yaml
:lines: 5-11,18-19,62,80-90 :start-at: version:
:end-before: # docs
.. csv-table:: .. csv-table::
:header-rows: 1 :header-rows: 1
:widths: 25,7,22,30 :widths: 22,7,22,33
:file: configtables/toplevel.csv :file: configtables/toplevel.csv
.. _scenario: .. _run_cf:
``run`` ``run``
======= =======
@ -40,13 +41,34 @@ The ``run`` section is used for running and storing scenarios with different con
.. literalinclude:: ../config/config.default.yaml .. literalinclude:: ../config/config.default.yaml
:language: yaml :language: yaml
:start-at: run: :start-at: run:
:end-before: foresight: :end-before: # docs
.. csv-table:: .. csv-table::
:header-rows: 1 :header-rows: 1
:widths: 25,7,22,30 :widths: 22,7,22,33
:file: configtables/run.csv :file: configtables/run.csv
.. _foresight_cf:
``foresight``
=============
.. literalinclude:: ../config/config.default.yaml
:language: yaml
:start-at: foresight:
:end-at: foresight:
.. csv-table::
:header-rows: 1
:widths: 22,7,22,33
:file: configtables/foresight.csv
.. note::
If you use myopic or perfect foresight, the planning horizon in
:ref:`planning_horizons` in scenario has to be set.
.. _scenario:
``scenario`` ``scenario``
============ ============
@ -83,13 +105,28 @@ An exemplary dependency graph (starting from the simplification rules) then look
.. literalinclude:: ../config/config.default.yaml .. literalinclude:: ../config/config.default.yaml
:language: yaml :language: yaml
:start-at: scenario: :start-at: scenario:
:end-before: countries: :end-before: # docs
.. csv-table:: .. csv-table::
:header-rows: 1 :header-rows: 1
:widths: 25,7,22,30 :widths: 22,7,22,33
:file: configtables/scenario.csv :file: configtables/scenario.csv
.. _countries:
``countries``
=============
.. literalinclude:: ../config/config.default.yaml
:language: yaml
:start-at: countries:
:end-before: # docs
.. csv-table::
:header-rows: 1
:widths: 22,7,22,33
:file: configtables/countries.csv
.. _snapshots_cf: .. _snapshots_cf:
``snapshots`` ``snapshots``
@ -100,11 +137,11 @@ Specifies the temporal range to build an energy system model for as arguments to
.. literalinclude:: ../config/config.default.yaml .. literalinclude:: ../config/config.default.yaml
:language: yaml :language: yaml
:start-at: snapshots: :start-at: snapshots:
:end-before: enable: :end-before: # docs
.. csv-table:: .. csv-table::
:header-rows: 1 :header-rows: 1
:widths: 25,7,22,30 :widths: 22,7,22,33
:file: configtables/snapshots.csv :file: configtables/snapshots.csv
.. _enable_cf: .. _enable_cf:
@ -117,13 +154,32 @@ Switches for some rules and optional features.
.. literalinclude:: ../config/config.default.yaml .. literalinclude:: ../config/config.default.yaml
:language: yaml :language: yaml
:start-at: enable: :start-at: enable:
:end-before: co2_budget: :end-before: # docs
.. csv-table:: .. csv-table::
:header-rows: 1 :header-rows: 1
:widths: 25,7,22,30 :widths: 22,7,22,33
:file: configtables/enable.csv :file: configtables/enable.csv
.. _CO2_budget_cf:
``co2 budget``
==============
.. literalinclude:: ../config/config.default.yaml
:language: yaml
:start-at: co2_budget:
:end-before: # docs
.. csv-table::
:header-rows: 1
:widths: 22,7,22,33
:file: configtables/co2_budget.csv
.. note::
This parameter is overridden if ``CO2Lx`` or ``cb`` is set in
``sector_opts``.
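For orientation only, a hedged example of such a dictionary in Python, with made-up values rather than the project defaults:
.. code:: python
# Illustrative only: CO2 budget per planning horizon as a fraction of 1990
# emissions; the numbers below are assumptions, not PyPSA-Eur defaults.
co2_budget = {2030: 0.45, 2040: 0.10, 2050: 0.0}
emissions_1990 = 1000.0  # MtCO2/a, assumed reference value for illustration
caps = {year: share * emissions_1990 for year, share in co2_budget.items()}
print(caps)  # {2030: 450.0, 2040: 100.0, 2050: 0.0}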
.. _electricity_cf: .. _electricity_cf:
``electricity`` ``electricity``
@ -132,11 +188,11 @@ Switches for some rules and optional features.
.. literalinclude:: ../config/config.default.yaml .. literalinclude:: ../config/config.default.yaml
:language: yaml :language: yaml
:start-at: electricity: :start-at: electricity:
:end-before: atlite: :end-before: # docs
.. csv-table:: .. csv-table::
:header-rows: 1 :header-rows: 1
:widths: 25,7,22,30 :widths: 22,7,22,33
:file: configtables/electricity.csv :file: configtables/electricity.csv
.. _atlite_cf: .. _atlite_cf:
@ -149,11 +205,11 @@ Define and specify the ``atlite.Cutout`` used for calculating renewable potentia
.. literalinclude:: ../config/config.default.yaml .. literalinclude:: ../config/config.default.yaml
:language: yaml :language: yaml
:start-at: atlite: :start-at: atlite:
:end-before: renewable: :end-before: # docs
.. csv-table:: .. csv-table::
:header-rows: 1 :header-rows: 1
:widths: 25,7,22,30 :widths: 22,7,22,33
:file: configtables/atlite.csv :file: configtables/atlite.csv
.. _renewable_cf: .. _renewable_cf:
@ -171,9 +227,18 @@ Define and specify the ``atlite.Cutout`` used for calculating renewable potentia
.. csv-table:: .. csv-table::
:header-rows: 1 :header-rows: 1
:widths: 25,7,22,30 :widths: 22,7,22,33
:file: configtables/onwind.csv :file: configtables/onwind.csv
.. note::
Notes on ``capacity_per_sqkm``. ScholzPhd Tab 4.3.1: 10MW/km^2 and assuming 30% fraction of the already restricted
area is available for installation of wind generators due to competing land use and likely public
acceptance issues.
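The arithmetic of this note can be spelled out in a short, hedged example (values taken from the note above):
.. code:: python
# Worked example of the note above: effective onshore wind power density after
# restricting the technically available area (10 MW/km^2, 30% usable share).
turbine_density = 10.0   # MW/km^2, ScholzPhd Tab 4.3.1
available_share = 0.30   # share of the already restricted area assumed usable
print(turbine_density * available_share, "MW/km^2")  # 3.0 MW/km^2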
.. note::
The default choice for corine ``grid_codes`` was based on Scholz, Y. (2012). Renewable energy based electricity supply at low costs:
development of the REMix model and application for Europe (p.42 / p.28).
``offwind-ac`` ``offwind-ac``
-------------- --------------
@ -184,9 +249,19 @@ Define and specify the ``atlite.Cutout`` used for calculating renewable potentia
.. csv-table:: .. csv-table::
:header-rows: 1 :header-rows: 1
:widths: 25,7,22,30 :widths: 22,7,22,33
:file: configtables/offwind-ac.csv :file: configtables/offwind-ac.csv
.. note::
Notes on ``capacity_per_sqkm``. ScholzPhd Tab 4.3.1: 10MW/km^2 and assuming 20% fraction of the already restricted
area is available for installation of wind generators due to competing land use and likely public
acceptance issues.
.. note::
Notes on ``correction_factor``. Correction due to proxy for wake losses
from 10.1016/j.energy.2018.08.153
until done more rigorously in #153
``offwind-dc`` ``offwind-dc``
--------------- ---------------
@ -197,9 +272,13 @@ Define and specify the ``atlite.Cutout`` used for calculating renewable potentia
.. csv-table:: .. csv-table::
:header-rows: 1 :header-rows: 1
:widths: 25,7,22,30 :widths: 22,7,22,33
:file: configtables/offwind-dc.csv :file: configtables/offwind-dc.csv
.. note::
Both ``offwind-ac`` and ``offwind-dc`` use the same assumptions for
``capacity_per_sqkm`` and ``correction_factor``.
``solar`` ``solar``
--------------- ---------------
@ -210,20 +289,29 @@ Define and specify the ``atlite.Cutout`` used for calculating renewable potentia
.. csv-table:: .. csv-table::
:header-rows: 1 :header-rows: 1
:widths: 25,7,22,30 :widths: 22,7,22,33
:file: configtables/solar.csv :file: configtables/solar.csv
.. note::
Notes on ``capacity_per_sqkm``. ScholzPhd Tab 4.3.1: 170 MW/km^2 and assuming 1% of the area can be used for solar PV panels.
Correction factor determined by comparing uncorrected area-weighted full-load hours to those
published in Supplementary Data to Pietzcker, Robert Carl, et al. "Using the sun to decarbonize the power
sector -- The economic potential of photovoltaics and concentrating solar
power." Applied Energy 135 (2014): 704-720.
This correction factor of 0.854337 may be in order if using reanalysis data.
For discussion, refer to `this issue <https://github.com/PyPSA/pypsa-eur/issues/285>`_.
``hydro`` ``hydro``
--------------- ---------------
.. literalinclude:: ../config/config.default.yaml .. literalinclude:: ../config/config.default.yaml
:language: yaml :language: yaml
:start-at: hydro: :start-at: hydro:
:end-before: conventional: :end-before: # docs
.. csv-table:: .. csv-table::
:header-rows: 1 :header-rows: 1
:widths: 25,7,22,30 :widths: 22,7,22,33
:file: configtables/hydro.csv :file: configtables/hydro.csv
.. _lines_cf: .. _lines_cf:
@ -241,11 +329,11 @@ overwrite the existing values.
.. literalinclude:: ../config/config.default.yaml .. literalinclude:: ../config/config.default.yaml
:language: yaml :language: yaml
:start-at: conventional: :start-at: conventional:
:end-before: lines: :end-before: # docs
.. csv-table:: .. csv-table::
:header-rows: 1 :header-rows: 1
:widths: 25,7,22,30 :widths: 22,7,22,33
:file: configtables/conventional.csv :file: configtables/conventional.csv
``lines`` ``lines``
@ -254,11 +342,11 @@ overwrite the existing values.
.. literalinclude:: ../config/config.default.yaml .. literalinclude:: ../config/config.default.yaml
:language: yaml :language: yaml
:start-at: lines: :start-at: lines:
:end-before: links: :end-before: # docs
.. csv-table:: .. csv-table::
:header-rows: 1 :header-rows: 1
:widths: 25,7,22,30 :widths: 22,7,22,33
:file: configtables/lines.csv :file: configtables/lines.csv
.. _links_cf: .. _links_cf:
@ -269,11 +357,11 @@ overwrite the existing values.
.. literalinclude:: ../config/config.default.yaml .. literalinclude:: ../config/config.default.yaml
:language: yaml :language: yaml
:start-at: links: :start-at: links:
:end-before: transformers: :end-before: # docs
.. csv-table:: .. csv-table::
:header-rows: 1 :header-rows: 1
:widths: 25,7,22,30 :widths: 22,7,22,33
:file: configtables/links.csv :file: configtables/links.csv
.. _transformers_cf: .. _transformers_cf:
@ -284,11 +372,11 @@ overwrite the existing values.
.. literalinclude:: ../config/config.default.yaml .. literalinclude:: ../config/config.default.yaml
:language: yaml :language: yaml
:start-at: transformers: :start-at: transformers:
:end-before: load: :end-before: # docs
.. csv-table:: .. csv-table::
:header-rows: 1 :header-rows: 1
:widths: 25,7,22,30 :widths: 22,7,22,33
:file: configtables/transformers.csv :file: configtables/transformers.csv
.. _load_cf: .. _load_cf:
@ -299,45 +387,13 @@ overwrite the existing values.
.. literalinclude:: ../config/config.default.yaml .. literalinclude:: ../config/config.default.yaml
:language: yaml :language: yaml
:start-after: type: :start-after: type:
:end-at: scaling_factor: :end-before: # docs
.. csv-table:: .. csv-table::
:header-rows: 1 :header-rows: 1
:widths: 25,7,22,30 :widths: 22,7,22,33
:file: configtables/load.csv :file: configtables/load.csv
.. _costs_cf:
``costs``
=============
.. literalinclude:: ../config/config.default.yaml
:language: yaml
:start-at: costs:
:end-before: clustering:
.. csv-table::
:header-rows: 1
:widths: 25,7,22,30
:file: configtables/costs.csv
.. _clustering_cf:
``clustering``
==============
.. literalinclude:: ../config/config.default.yaml
:language: yaml
:start-at: clustering:
:end-before: solving:
.. csv-table::
:header-rows: 1
:widths: 25,7,22,30
:file: configtables/clustering.csv
.. _energy_cf: .. _energy_cf:
``energy`` ``energy``
@ -346,14 +402,15 @@ overwrite the existing values.
.. note:: .. note::
Only used for sector-coupling studies. Only used for sector-coupling studies.
.. warning::
More comprehensive documentation for this segment will be released soon.
.. literalinclude:: ../config/config.default.yaml .. literalinclude:: ../config/config.default.yaml
:language: yaml :language: yaml
:start-at: energy: :start-at: energy:
:end-before: biomass: :end-before: # docs
.. csv-table::
:header-rows: 1
:widths: 22,7,22,33
:file: configtables/energy.csv
.. _biomass_cf: .. _biomass_cf:
@ -363,13 +420,35 @@ overwrite the existing values.
.. note:: .. note::
Only used for sector-coupling studies. Only used for sector-coupling studies.
.. warning::
More comprehensive documentation for this segment will be released soon.
.. literalinclude:: ../config/config.default.yaml .. literalinclude:: ../config/config.default.yaml
:language: yaml :language: yaml
:start-at: biomass: :start-at: biomass:
:end-before: solar_thermal: :end-before: # docs
.. csv-table::
:header-rows: 1
:widths: 22,7,22,33
:file: configtables/biomass.csv
The list of available biomass is given by the categories in `ENSPRESO_BIOMASS <https://cidportal.jrc.ec.europa.eu/ftp/jrc-opendata/ENSPRESO/ENSPRESO_BIOMASS.xlsx>`_, namely:
- Agricultural waste
- Manure solid, liquid
- Residues from landscape care
- Bioethanol barley, wheat, grain maize, oats, other cereals and rye
- Sugar from sugar beet
- Miscanthus, switchgrass, RCG
- Willow
- Poplar
- Sunflower, soya seed
- Rape seed
- Fuelwood residues
- FuelwoodRW
- C&P_RW
- Secondary Forestry residues - woodchips
- Sawdust
- Municipal waste
- Sludge
.. _solar_thermal_cf: .. _solar_thermal_cf:
@ -379,13 +458,15 @@ overwrite the existing values.
.. note:: .. note::
Only used for sector-coupling studies. Only used for sector-coupling studies.
.. warning::
More comprehensive documentation for this segment will be released soon.
.. literalinclude:: ../config/config.default.yaml .. literalinclude:: ../config/config.default.yaml
:language: yaml :language: yaml
:start-at: solar_thermal: :start-at: solar_thermal:
:end-before: existing_capacities: :end-before: # docs
.. csv-table::
:header-rows: 1
:widths: 22,7,22,33
:file: configtables/solar-thermal.csv
.. _existing_capacities_cf: .. _existing_capacities_cf:
@ -393,15 +474,17 @@ overwrite the existing values.
======================= =======================
.. note:: .. note::
Only used for sector-coupling studies. Only used for sector-coupling studies. The values for grouping years are only used in myopic or perfect foresight scenarios.
.. warning::
More comprehensive documentation for this segment will be released soon.
.. literalinclude:: ../config/config.default.yaml .. literalinclude:: ../config/config.default.yaml
:language: yaml :language: yaml
:start-at: existing_capacities: :start-at: existing_capacities:
:end-before: sector: :end-before: # docs
.. csv-table::
:header-rows: 1
:widths: 22,7,22,33
:file: configtables/existing_capacities.csv
.. _sector_cf: .. _sector_cf:
@ -411,13 +494,15 @@ overwrite the existing values.
.. note:: .. note::
Only used for sector-coupling studies. Only used for sector-coupling studies.
.. warning::
More comprehensive documentation for this segment will be released soon.
.. literalinclude:: ../config/config.default.yaml .. literalinclude:: ../config/config.default.yaml
:language: yaml :language: yaml
:start-at: sector: :start-at: sector:
:end-before: industry: :end-before: # docs
.. csv-table::
:header-rows: 1
:widths: 22,7,22,33
:file: configtables/sector.csv
.. _industry_cf: .. _industry_cf:
@ -427,13 +512,57 @@ overwrite the existing values.
.. note:: .. note::
Only used for sector-coupling studies. Only used for sector-coupling studies.
.. warning::
More comprehensive documentation for this segment will be released soon.
.. literalinclude:: ../config/config.default.yaml .. literalinclude:: ../config/config.default.yaml
:language: yaml :language: yaml
:start-at: industry: :start-at: industry:
:end-before: costs: :end-before: # docs
.. csv-table::
:header-rows: 1
:widths: 22,7,22,33
:file: configtables/industry.csv
.. _costs_cf:
``costs``
=============
.. literalinclude:: ../config/config.default.yaml
:language: yaml
:start-at: costs:
:end-before: # docs
.. csv-table::
:header-rows: 1
:widths: 22,7,22,33
:file: configtables/costs.csv
.. note::
``rooftop_share:`` is based on the potentials, assuming
0.1 kW/m2 and 10 m2/person.
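A hedged back-of-the-envelope reading of this note:
.. code:: python
# Worked example of the assumption above: rooftop PV potential per person.
kw_per_m2 = 0.1        # kW of PV capacity per m^2 of rooftop
m2_per_person = 10.0   # usable rooftop area per person
print(kw_per_m2 * m2_per_person, "kW per person")  # 1.0 kW per person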
.. _clustering_cf:
``clustering``
==============
.. literalinclude:: ../config/config.default.yaml
:language: yaml
:start-at: clustering:
:end-before: # docs
.. csv-table::
:header-rows: 1
:widths: 22,7,22,33
:file: configtables/clustering.csv
.. note::
``feature:`` in ``simplify_network:``
is only relevant if ``hac`` is chosen in ``algorithm``.
.. tip::
Use ``min`` in ``p_nom_max:`` for more
conservative assumptions.
.. _solving_cf: .. _solving_cf:
@ -443,16 +572,11 @@ overwrite the existing values.
.. literalinclude:: ../config/config.default.yaml .. literalinclude:: ../config/config.default.yaml
:language: yaml :language: yaml
:start-at: solving: :start-at: solving:
:end-before: plotting: :end-before: # docs
.. csv-table:: .. csv-table::
:header-rows: 1 :header-rows: 1
:widths: 25,7,22,30 :widths: 22,7,22,33
:file: configtables/solving.csv
.. csv-table::
:header-rows: 1
:widths: 25,7,22,30
:file: configtables/solving.csv :file: configtables/solving.csv
.. _plotting_cf: .. _plotting_cf:
@ -469,5 +593,5 @@ overwrite the existing values.
.. csv-table:: .. csv-table::
:header-rows: 1 :header-rows: 1
:widths: 25,7,22,30 :widths: 22,7,22,33
:file: configtables/plotting.csv :file: configtables/plotting.csv

View File

@ -12,7 +12,7 @@ The database of cost assumptions is retrieved from the repository
saved to a file ``data/costs_{year}.csv``. The ``config/config.yaml`` provides options saved to a file ``data/costs_{year}.csv``. The ``config/config.yaml`` provides options
to choose a reference year and use a specific version of the repository. to choose a reference year and use a specific version of the repository.
.. literalinclude:: ../config.default.yaml .. literalinclude:: ../config/config.default.yaml
:language: yaml :language: yaml
:start-at: costs: :start-at: costs:
:end-at: version: :end-at: version:

View File

@ -87,8 +87,12 @@ evolve with the myopic approach:
vehicle-to-grid services. vehicle-to-grid services.
- The annual biomass potential (default year and scenario for which potential is - The annual biomass potential (default year and scenario for which potential is
taken is 2030, defined `here taken is 2030, as defined in config)
<https://github.com/PyPSA/pypsa-eur-sec/blob/413254e241fb37f55b41caba7264644805ad8e97/config.default.yaml#L109>`_)
.. literalinclude:: ../config/test/config.myopic.yaml
:language: yaml
:start-at: biomass:
:end-at: year:
Configuration Configuration
@ -108,7 +112,7 @@ optimized. For a myopic optimization, this is equivalent to the investment year.
To set the investment years which are sequentially simulated for the myopic To set the investment years which are sequentially simulated for the myopic
investment planning, select for example: investment planning, select for example:
.. literalinclude:: ../test/config.myopic.yaml .. literalinclude:: ../config/test/config.myopic.yaml
:language: yaml :language: yaml
:start-at: planning_horizons: :start-at: planning_horizons:
:end-before: countries: :end-before: countries:
@ -203,6 +207,7 @@ The myopic code solves the network for the time steps included in
network comprises additional generator, storage, and link capacities with network comprises additional generator, storage, and link capacities with
p_nom_extendable=True. The non-solved network is saved in p_nom_extendable=True. The non-solved network is saved in
``results/run_name/networks/prenetworks-brownfield``. ``results/run_name/networks/prenetworks-brownfield``.
The base year is the first element in ``planning_horizons``. Step 1 is The base year is the first element in ``planning_horizons``. Step 1 is
implemented with the rule add_baseyear for the base year and with the rule implemented with the rule add_baseyear for the base year and with the rule
add_brownfield for the remaining planning_horizons. add_brownfield for the remaining planning_horizons.

View File

@ -31,7 +31,9 @@ PyPSA-Eur: A Sector-Coupled Open Optimisation Model of the European Energy Syste
:target: https://api.reuse.software/info/github.com/pypsa/pypsa-eur :target: https://api.reuse.software/info/github.com/pypsa/pypsa-eur
:alt: REUSE :alt: REUSE
| .. image:: https://img.shields.io/stackexchange/stackoverflow/t/pypsa
:target: https://stackoverflow.com/questions/tagged/pypsa
:alt: Stackoverflow
PyPSA-Eur is an open model dataset of the European energy system at the PyPSA-Eur is an open model dataset of the European energy system at the
transmission network level that covers the full ENTSO-E area. It covers demand transmission network level that covers the full ENTSO-E area. It covers demand
@ -222,7 +224,10 @@ The included ``.nc`` files are PyPSA network files which can be imported with Py
n = pypsa.Network(filename) n = pypsa.Network(filename)
Operating Systems
=================
The PyPSA-Eur workflow is continuously tested for Linux, macOS and Windows (WSL only).
.. toctree:: .. toctree::
@ -274,4 +279,5 @@ The included ``.nc`` files are PyPSA network files which can be imported with Py
licenses licenses
limitations limitations
contributing contributing
support
publications publications

View File

@ -89,7 +89,7 @@ Rule ``build_powerplants``
.. _electricity_demand: .. _electricity_demand:
Rule ``build_electricity_demand`` Rule ``build_electricity_demand``
============================= ==================================
.. automodule:: build_electricity_demand .. automodule:: build_electricity_demand

View File

@ -10,19 +10,45 @@ Release Notes
Upcoming Release Upcoming Release
================ ================
* ``param:`` sections in rule definitions are added to track changed settings in ``config.yaml``. The goal is to automatically re-execute rules whose parameters have changed. See `Non-file parameters for rules <https://snakemake.readthedocs.io/en/stable/snakefiles/rules.html#non-file-parameters-for-rules>`_ in the snakemake documentation.
* **Important:** The configuration files are now located in the ``config`` directory. This counts for ``config.default.yaml``, ``config.yaml`` as well as the test configuration files which are now located in ``config/test``. Config files that are still in the root directory will be ignored. * **Important:** The configuration files are now located in the ``config`` directory. This counts for ``config.default.yaml``, ``config.yaml`` as well as the test configuration files which are now located in ``config/test``. Config files that are still in the root directory will be ignored.
* Bugfix: Correct typo in the CPLEX solver configuration in ``config.default.yaml``. * Bugfix: Correct typo in the CPLEX solver configuration in ``config.default.yaml``.
* Bugfix: Error in ``add_electricity`` where carriers were added multiple times to the network, resulting in a non-unique carriers error.
* Renamed script file from PyPSA-EUR ``build_load_data`` to ``build_electricity_demand`` and ``retrieve_load_data`` to ``retrieve_electricity_demand``. * Renamed script file from PyPSA-EUR ``build_load_data`` to ``build_electricity_demand`` and ``retrieve_load_data`` to ``retrieve_electricity_demand``.
* Fix the readthedocs documentation build.
* Add plain hydrogen turbine as additional re-electrification option besides * Add plain hydrogen turbine as additional re-electrification option besides
hydrogen fuel cell. Add switches for both re-electrification options under hydrogen fuel cell. Add switches for both re-electrification options under
``sector: hydrogen_turbine:`` and ``sector: hydrogen_fuel_cell:``. ``sector: hydrogen_turbine:`` and ``sector: hydrogen_fuel_cell:``.
* A new function named ``sanitize_carrier`` ensures that all unique carrier names are present in the network's carriers attribute, and adds nice names and colors for each carrier according to the provided configuration dictionary.
* Additional ``tech_color`` entries are added to include previously unlisted carriers.
* Remove ``vresutils`` dependency.
* Added configuration options ``lines: max_extension:`` and ``links:
max_extension:`` to control the maximum capacity addition per line or link in
MW.
* Add option to include a piecewise linear approximation of transmission losses,
e.g. by setting ``solving: options: transmission_losses: 2`` for an
approximation with two tangents.
* Handling networks with links with multiple inputs/outputs no longer requires
to override component attributes.
* Added configuration option ``enable: retrieve:`` to control whether data
retrieval rules from snakemake are enabled or not. The default setting ``auto``
will automatically detect and enable/disable the rules based on internet connectivity.
PyPSA-Eur 0.8.0 (18th March 2023) PyPSA-Eur 0.8.0 (18th March 2023)
================================= =================================

View File

@ -2,12 +2,13 @@
# #
# SPDX-License-Identifier: CC0-1.0 # SPDX-License-Identifier: CC0-1.0
setuptools
sphinx sphinx
sphinx_book_theme sphinx_book_theme
sphinxcontrib-bibtex sphinxcontrib-bibtex
myst-parser # recommark is deprecated, https://stackoverflow.com/a/71660856/13573820
pypsa pypsa
vresutils>=0.3.1
powerplantmatching>=0.5.5 powerplantmatching>=0.5.5
atlite>=0.2.9 atlite>=0.2.9
dask[distributed] dask[distributed]

View File

@ -133,12 +133,12 @@ The coefficient of performance (COP) of air- and ground-sourced heat pumps depen
For the sink water temperature Tsink we assume 55 °C [`Config <https://github.com/PyPSA/pypsa-eur-sec/blob/3daff49c9999ba7ca7534df4e587e1d516044fc3/config.default.yaml#L207>`_ file]. For the time- and location-dependent source temperatures Tsource, we rely on the `ERA5 <https://doi.org/10.1002/qj.3803>`_ reanalysis weather data. The temperature differences are converted into COP time series using results from a regression analysis performed in the study by `Stafell et al. <https://pubs.rsc.org/en/content/articlelanding/2012/EE/c2ee22653g>`_. For air-sourced heat pumps (ASHP), we use the function: For the sink water temperature Tsink we assume 55 °C [`Config <https://github.com/PyPSA/pypsa-eur-sec/blob/3daff49c9999ba7ca7534df4e587e1d516044fc3/config.default.yaml#L207>`_ file]. For the time- and location-dependent source temperatures Tsource, we rely on the `ERA5 <https://doi.org/10.1002/qj.3803>`_ reanalysis weather data. The temperature differences are converted into COP time series using results from a regression analysis performed in the study by `Stafell et al. <https://pubs.rsc.org/en/content/articlelanding/2012/EE/c2ee22653g>`_. For air-sourced heat pumps (ASHP), we use the function:
.. math:: .. math::
COP (\Delta T) = 6.81 + 0.121\Delta T + 0.000630\Delta T^2 COP (\Delta T) = 6.81 - 0.121\Delta T + 0.000630\Delta T^2
for ground-sourced heat pumps (GSHP), we use the function: for ground-sourced heat pumps (GSHP), we use the function:
.. math:: .. math::
COP(\Delta T) = 8.77 + 0.150\Delta T + 0.000734\Delta T^2 COP(\Delta T) = 8.77 - 0.150\Delta T + 0.000734\Delta T^2
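As a hedged worked example (not part of the workflow), evaluating the ASHP regression for the assumed 55 °C sink and a 5 °C air source gives:
.. code:: python
# Worked example of the regression above (illustrative): ASHP COP for a 55 °C
# sink and a 5 °C air source, i.e. a temperature difference of 50 K.
delta_t = 55 - 5
cop_ashp = 6.81 - 0.121 * delta_t + 0.000630 * delta_t**2
print(round(cop_ashp, 2))  # roughly 2.3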
**Resistive heaters** **Resistive heaters**

14
doc/support.rst Normal file
View File

@ -0,0 +1,14 @@
..
SPDX-FileCopyrightText: 2019-2023 The PyPSA-Eur Authors
SPDX-License-Identifier: CC-BY-4.0
#######################
Support
#######################
* In case of code-related **questions**, please post on `stack overflow <https://stackoverflow.com/questions/tagged/pypsa>`_.
* For non-programming related and more general questions please refer to the `mailing list <https://groups.google.com/group/pypsa>`_.
* To **discuss** with other PyPSA users, organise projects, share news, and get in touch with the community you can use the `discord server <https://discord.gg/AnuJBk23FU>`_.
* For **bugs and feature requests**, please use the `issue tracker <https://github.com/PyPSA/pypsa-eur/issues>`_.
* We strongly welcome anyone interested in providing **contributions** to this project. If you have any ideas, suggestions or encounter problems, feel invited to file issues or make pull requests on `Github <https://github.com/PyPSA/PyPSA>`_. For further information on how to contribute, please refer to :ref:`contributing`.

View File

@ -32,7 +32,7 @@ configuration, execute
.. code:: bash .. code:: bash
:class: full-width :class: full-width
snakemake -call results/test-elec/networks/elec_s_6_ec_lcopt_Co2L-24H.nc --configfile test/config.electricity.yaml snakemake -call results/test-elec/networks/elec_s_6_ec_lcopt_Co2L-24H.nc --configfile config/test/config.electricity.yaml
This configuration is set to download a reduced data set via the rules :mod:`retrieve_databundle`, This configuration is set to download a reduced data set via the rules :mod:`retrieve_databundle`,
:mod:`retrieve_natura_raster`, :mod:`retrieve_cutout`. :mod:`retrieve_natura_raster`, :mod:`retrieve_cutout`.
@ -43,21 +43,21 @@ How to configure runs?
The model can be adapted to only include selected countries (e.g. Belgium) instead of all European countries to limit the spatial scope. The model can be adapted to only include selected countries (e.g. Belgium) instead of all European countries to limit the spatial scope.
.. literalinclude:: ../test/config.electricity.yaml .. literalinclude:: ../config/test/config.electricity.yaml
:language: yaml :language: yaml
:start-at: countries: :start-at: countries:
:end-before: snapshots: :end-before: snapshots:
Likewise, the example's temporal scope can be restricted (e.g. to a single week). Likewise, the example's temporal scope can be restricted (e.g. to a single week).
.. literalinclude:: ../test/config.electricity.yaml .. literalinclude:: ../config/test/config.electricity.yaml
:language: yaml :language: yaml
:start-at: snapshots: :start-at: snapshots:
:end-before: electricity: :end-before: electricity:
It is also possible to allow less or more carbon-dioxide emissions. Here, we limit the emissions of Belgium to 100 Mt per year. It is also possible to allow less or more carbon-dioxide emissions. Here, we limit the emissions of Belgium to 100 Mt per year.
.. literalinclude:: ../test/config.electricity.yaml .. literalinclude:: ../config/test/config.electricity.yaml
:language: yaml :language: yaml
:start-at: electricity: :start-at: electricity:
:end-before: extendable_carriers: :end-before: extendable_carriers:
@ -65,7 +65,7 @@ It is also possible to allow less or more carbon-dioxide emissions. Here, we lim
PyPSA-Eur also includes a database of existing conventional powerplants. PyPSA-Eur also includes a database of existing conventional powerplants.
We can select which types of existing powerplants we like to be extendable: We can select which types of existing powerplants we like to be extendable:
.. literalinclude:: ../test/config.electricity.yaml .. literalinclude:: ../config/test/config.electricity.yaml
:language: yaml :language: yaml
:start-at: extendable_carriers: :start-at: extendable_carriers:
:end-before: renewable_carriers: :end-before: renewable_carriers:
@ -74,7 +74,7 @@ To accurately model the temporal and spatial availability of renewables such as
wind and solar energy, we rely on historical weather data. It is advisable to wind and solar energy, we rely on historical weather data. It is advisable to
adapt the required range of coordinates to the selection of countries. adapt the required range of coordinates to the selection of countries.
.. literalinclude:: ../test/config.electricity.yaml .. literalinclude:: ../config/test/config.electricity.yaml
:language: yaml :language: yaml
:start-at: atlite: :start-at: atlite:
:end-before: renewable: :end-before: renewable:
@ -83,7 +83,7 @@ We can also decide which weather data source should be used to calculate
potentials and capacity factor time-series for each carrier. For example, we may potentials and capacity factor time-series for each carrier. For example, we may
want to use the ERA-5 dataset for solar and not the default SARAH-2 dataset. want to use the ERA-5 dataset for solar and not the default SARAH-2 dataset.
.. literalinclude:: ../test/config.electricity.yaml .. literalinclude:: ../config/test/config.electricity.yaml
:language: yaml :language: yaml
:start-at: solar: :start-at: solar:
:end-at: cutout: :end-at: cutout:
@ -91,7 +91,7 @@ want to use the ERA-5 dataset for solar and not the default SARAH-2 dataset.
Finally, it is possible to pick a solver. For instance, this tutorial uses the Finally, it is possible to pick a solver. For instance, this tutorial uses the
open-source solver GLPK. open-source solver GLPK.
.. literalinclude:: ../test/config.electricity.yaml .. literalinclude:: ../config/test/config.electricity.yaml
:language: yaml :language: yaml
:start-at: solver: :start-at: solver:
:end-before: plotting: :end-before: plotting:
@ -115,7 +115,7 @@ clustered down to 6 buses and every 24 hours aggregated to one snapshot. The com
.. code:: bash .. code:: bash
snakemake -call results/test-elec/networks/elec_s_6_ec_lcopt_Co2L-24H.nc --configfile test/config.electricity.yaml snakemake -call results/test-elec/networks/elec_s_6_ec_lcopt_Co2L-24H.nc --configfile config/test/config.electricity.yaml
orders ``snakemake`` to run the rule :mod:`solve_network` that produces the solved network and stores it in ``results/networks`` with the name ``elec_s_6_ec_lcopt_Co2L-24H.nc``: orders ``snakemake`` to run the rule :mod:`solve_network` that produces the solved network and stores it in ``results/networks`` with the name ``elec_s_6_ec_lcopt_Co2L-24H.nc``:
@ -288,18 +288,18 @@ You can produce any output file occurring in the ``Snakefile`` by running
For example, you can explore the evolution of the PyPSA networks by running For example, you can explore the evolution of the PyPSA networks by running
#. ``snakemake resources/networks/base.nc -call --configfile test/config.electricity.yaml`` #. ``snakemake resources/networks/base.nc -call --configfile config/test/config.electricity.yaml``
#. ``snakemake resources/networks/elec.nc -call --configfile test/config.electricity.yaml`` #. ``snakemake resources/networks/elec.nc -call --configfile config/test/config.electricity.yaml``
#. ``snakemake resources/networks/elec_s.nc -call --configfile test/config.electricity.yaml`` #. ``snakemake resources/networks/elec_s.nc -call --configfile config/test/config.electricity.yaml``
#. ``snakemake resources/networks/elec_s_6.nc -call --configfile test/config.electricity.yaml`` #. ``snakemake resources/networks/elec_s_6.nc -call --configfile config/test/config.electricity.yaml``
#. ``snakemake resources/networks/elec_s_6_ec_lcopt_Co2L-24H.nc -call --configfile test/config.electricity.yaml`` #. ``snakemake resources/networks/elec_s_6_ec_lcopt_Co2L-24H.nc -call --configfile config/test/config.electricity.yaml``
To run all combinations of wildcard values provided in the ``config/config.yaml`` under ``scenario:``, To run all combinations of wildcard values provided in the ``config/config.yaml`` under ``scenario:``,
you can use the collection rule ``solve_elec_networks``. you can use the collection rule ``solve_elec_networks``.
.. code:: bash .. code:: bash
snakemake -call solve_elec_networks --configfile test/config.electricity.yaml snakemake -call solve_elec_networks --configfile config/test/config.electricity.yaml
If you now feel confident and want to tackle runs with larger temporal and If you now feel confident and want to tackle runs with larger temporal and
spatial scope, clean-up the repository and after modifying the ``config/config.yaml`` file spatial scope, clean-up the repository and after modifying the ``config/config.yaml`` file

View File

@ -35,7 +35,7 @@ configuration options. In the example below, we say that the gas network should
be added and spatially resolved. We also say that the existing gas network may be added and spatially resolved. We also say that the existing gas network may
be retrofitted to transport hydrogen instead. be retrofitted to transport hydrogen instead.
.. literalinclude:: ../test/config.overnight.yaml .. literalinclude:: ../config/test/config.overnight.yaml
:language: yaml :language: yaml
:start-at: sector: :start-at: sector:
:end-before: solving: :end-before: solving:
@ -45,7 +45,7 @@ Documentation for all options will be added successively to :ref:`config`.
Scenarios can be defined like for electricity-only studies, but with additional Scenarios can be defined like for electricity-only studies, but with additional
wildcard options. wildcard options.
.. literalinclude:: ../test/config.overnight.yaml .. literalinclude:: ../config/test/config.overnight.yaml
:language: yaml :language: yaml
:start-at: scenario: :start-at: scenario:
:end-before: countries: :end-before: countries:
@ -59,7 +59,7 @@ To run an overnight / greenfield scenario with the specifications above, run
.. code:: bash .. code:: bash
snakemake -call --configfile test/config.overnight.yaml all snakemake -call --configfile config/test/config.overnight.yaml all
which will result in the following *additional* jobs ``snakemake`` wants to run which will result in the following *additional* jobs ``snakemake`` wants to run
on top of those already included in the electricity-only tutorial: on top of those already included in the electricity-only tutorial:
@ -294,7 +294,7 @@ Scenarios can be defined like for electricity-only studies, but with additional
wildcard options. For the myopic foresight mode, the ``{planning_horizons}`` wildcard wildcard options. For the myopic foresight mode, the ``{planning_horizons}`` wildcard
defines the sequence of investment horizons. defines the sequence of investment horizons.
.. literalinclude:: ../test/config.myopic.yaml .. literalinclude:: ../config/test/config.myopic.yaml
:language: yaml :language: yaml
:start-at: scenario: :start-at: scenario:
:end-before: countries: :end-before: countries:
@ -304,7 +304,7 @@ For allowed wildcard values, refer to :ref:`wildcards`.
In the myopic foresight mode, you can tweak for instance exogenously given transition paths, like the one for In the myopic foresight mode, you can tweak for instance exogenously given transition paths, like the one for
the share of primary steel production we change below: the share of primary steel production we change below:
.. literalinclude:: ../test/config.myopic.yaml .. literalinclude:: ../config/test/config.myopic.yaml
:language: yaml :language: yaml
:start-at: industry: :start-at: industry:
:end-before: solving: :end-before: solving:
@ -318,7 +318,7 @@ To run a myopic foresight scenario with the specifications above, run
.. code:: bash .. code:: bash
snakemake -call --configfile test/config.myopic.yaml all snakemake -call --configfile config/test/config.myopic.yaml all
which will result in the following *additional* jobs ``snakemake`` wants to run: which will result in the following *additional* jobs ``snakemake`` wants to run:

View File

@ -117,6 +117,23 @@ The ``{sector_opts}`` wildcard
.. warning:: .. warning::
More comprehensive documentation for this wildcard will be added soon. More comprehensive documentation for this wildcard will be added soon.
To fully understand the available options, have a look at ``scripts/prepare_sector_network.py``; a worked example of one wildcard value is sketched below.
# Co2Lx specifies the CO2 target as x% of 1990 emissions; a plain Co2L falls back to the default (5%);
# Co2L0p25 will give 25% CO2 emissions; Co2Lm0p05 will give 5% negative emissions
# xH is the temporal resolution; 3H is 3-hourly, i.e. one snapshot every 3 hours
# single letters are sectors: T for land transport, H for building heating,
# B for biomass supply, I for industry, shipping and aviation,
# A for agriculture, forestry and fishing
# solar+c0.5 reduces the capital cost of solar to 50% of the reference value
# solar+p3 multiplies the available installable potential by factor 3
# seq400 sets the potential of CO2 sequestration to 400 Mt CO2 per year
# dist{n} includes distribution grids with investment cost of n times cost in data/costs.csv
# for myopic/perfect foresight, cb states the carbon budget in GtCO2 (cumulative
# emissions throughout the transition path in the timeframe determined by
# planning_horizons); be: beta decay, ex: exponential decay
# cb40ex0 distributes a carbon budget of 40 GtCO2 following an exponential
# decay with initial growth rate 0
The ``{sector_opts}`` wildcard is only used for sector-coupling studies. The ``{sector_opts}`` wildcard is only used for sector-coupling studies.
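For orientation, several of these options are chained into a single wildcard value with ``-`` separators. Below is a minimal, purely illustrative decomposition of one such value (the actual parsing happens in ``scripts/prepare_sector_network.py``; the string is an example, not a default):

.. code:: python

    opts = "Co2L0-3H-T-H-B-I-A-solar+p3-dist1".split("-")
    # ['Co2L0', '3H', 'T', 'H', 'B', 'I', 'A', 'solar+p3', 'dist1']
    # Co2L0      -> CO2 emissions limited to 0% of 1990 levels
    # 3H         -> 3-hourly temporal resolution
    # T H B I A  -> couple land transport, heating, biomass, industry, agriculture
    # solar+p3   -> multiply the installable solar potential by 3
    # dist1      -> add distribution grids at 1x the cost given in data/costs.csv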

View File

@ -226,7 +226,7 @@ dependencies:
- nspr=4.35 - nspr=4.35
- nss=3.88 - nss=3.88
- numexpr=2.8.3 - numexpr=2.8.3
- numpy=1.23.5 - numpy=1.24
- openjdk=17.0.3 - openjdk=17.0.3
- openjpeg=2.5.0 - openjpeg=2.5.0
- openpyxl=3.1.0 - openpyxl=3.1.0
@ -378,4 +378,3 @@ dependencies:
- highspy==1.5.0.dev0 - highspy==1.5.0.dev0
- pybind11==2.10.3 - pybind11==2.10.3
- tsam==2.2.2 - tsam==2.2.2
- vresutils==0.3.1

View File

@ -10,7 +10,6 @@ dependencies:
- python>=3.8 - python>=3.8
- pip - pip
- pypsa>=0.21.3
- atlite>=0.2.9 - atlite>=0.2.9
- dask - dask
@ -25,7 +24,7 @@ dependencies:
- pytables - pytables
- lxml - lxml
- powerplantmatching>=0.5.5 - powerplantmatching>=0.5.5
- numpy<1.24 - numpy
- pandas>=1.4 - pandas>=1.4
- geopandas>=0.11.0 - geopandas>=0.11.0
- xarray - xarray
@ -55,5 +54,5 @@ dependencies:
- rasterio!=1.2.10 - rasterio!=1.2.10
- pip: - pip:
- vresutils>=0.3.1
- tsam>=1.1.0 - tsam>=1.1.0
- git+https://github.com/PyPSA/PyPSA.git@master

View File

@ -21,12 +21,16 @@ if config["enable"].get("prepare_links_p_nom", False):
if config["enable"].get("retrieve_opsd_load_data", True): if config["enable"].get("retrieve_opsd_load_data", True):
rule build_electricity_demand: rule build_electricity_demand:
params:
snapshots=config["snapshots"],
countries=config["countries"],
load=config["load"],
input: input:
ancient("data/load_raw.csv"), ancient("data/load_raw.csv"),
output: output:
RESOURCES + "load{weather_year}.csv", RESOURCES + "load.csv",
log: log:
LOGS + "build_electricity_demand{weather_year}.log", LOGS + "build_electricity_demand.log",
resources: resources:
mem_mb=5000, mem_mb=5000,
conda: conda:
@ -52,7 +56,12 @@ if config["enable"].get("retrieve_artificial_load_data", False):
"../scripts/build_artificial_load_data.py" "../scripts/build_artificial_load_data.py"
rule build_powerplants: rule build_powerplants:
params:
powerplants_filter=config["electricity"]["powerplants_filter"],
custom_powerplants=config["electricity"]["custom_powerplants"],
countries=config["countries"],
input: input:
base_network=RESOURCES + "networks/base.nc", base_network=RESOURCES + "networks/base.nc",
custom_powerplants="data/custom_powerplants.csv", custom_powerplants="data/custom_powerplants.csv",
@ -70,6 +79,9 @@ rule build_powerplants:
rule base_network: rule base_network:
params:
countries=config["countries"],
snapshots=config["snapshots"],
input: input:
eg_buses="data/entsoegridkit/buses.csv", eg_buses="data/entsoegridkit/buses.csv",
eg_lines="data/entsoegridkit/lines.csv", eg_lines="data/entsoegridkit/lines.csv",
@ -98,6 +110,8 @@ rule base_network:
rule build_shapes: rule build_shapes:
params:
countries=config["countries"],
input: input:
naturalearth=ancient("data/bundle/naturalearth/ne_10m_admin_0_countries.shp"), naturalearth=ancient("data/bundle/naturalearth/ne_10m_admin_0_countries.shp"),
eez=ancient("data/bundle/eez/World_EEZ_v8_2014.shp"), eez=ancient("data/bundle/eez/World_EEZ_v8_2014.shp"),
@ -123,6 +137,8 @@ rule build_shapes:
rule build_bus_regions: rule build_bus_regions:
params:
countries=config["countries"],
input: input:
country_shapes=RESOURCES + "country_shapes.geojson", country_shapes=RESOURCES + "country_shapes.geojson",
offshore_shapes=RESOURCES + "offshore_shapes.geojson", offshore_shapes=RESOURCES + "offshore_shapes.geojson",
@ -146,6 +162,9 @@ if config["enable"].get("build_cutout", False):
ruleorder: build_cutout_year > build_cutout ruleorder: build_cutout_year > build_cutout
rule build_cutout: rule build_cutout:
params:
snapshots=config["snapshots"],
cutouts=config["atlite"]["cutouts"],
input: input:
regions_onshore=RESOURCES + "regions_onshore.geojson", regions_onshore=RESOURCES + "regions_onshore.geojson",
regions_offshore=RESOURCES + "regions_offshore.geojson", regions_offshore=RESOURCES + "regions_offshore.geojson",
@ -224,6 +243,8 @@ rule build_ship_raster:
rule build_renewable_profiles: rule build_renewable_profiles:
params:
renewable=config["renewable"],
input: input:
base_network=RESOURCES + "networks/base.nc", base_network=RESOURCES + "networks/base.nc",
corine=ancient("data/bundle/corine/g250_clc06_V18_5.tif"), corine=ancient("data/bundle/corine/g250_clc06_V18_5.tif"),
@ -273,6 +294,9 @@ rule build_renewable_profiles:
rule build_hydro_profile: rule build_hydro_profile:
params:
hydro=config["renewable"]["hydro"],
countries=config["countries"],
input: input:
country_shapes=RESOURCES + "country_shapes.geojson", country_shapes=RESOURCES + "country_shapes.geojson",
eia_hydro_generation="data/eia_hydro_annual_generation.csv", eia_hydro_generation="data/eia_hydro_annual_generation.csv",
@ -293,6 +317,14 @@ rule build_hydro_profile:
rule add_electricity: rule add_electricity:
params:
length_factor=config["lines"]["length_factor"],
scaling_factor=config["load"]["scaling_factor"],
countries=config["countries"],
renewable=config["renewable"],
electricity=config["electricity"],
conventional=config.get("conventional", {}),
costs=config["costs"],
input: input:
**{ **{
f"profile_{tech}": RESOURCES + f"profile{weather_year}_{tech}.nc" f"profile_{tech}": RESOURCES + f"profile{weather_year}_{tech}.nc"
@ -328,6 +360,15 @@ rule add_electricity:
rule simplify_network: rule simplify_network:
params:
simplify_network=config["clustering"]["simplify_network"],
aggregation_strategies=config["clustering"].get("aggregation_strategies", {}),
focus_weights=config.get("focus_weights", None),
renewable_carriers=config["electricity"]["renewable_carriers"],
max_hours=config["electricity"]["max_hours"],
length_factor=config["lines"]["length_factor"],
p_max_pu=config["links"].get("p_max_pu", 1.0),
costs=config["costs"],
input: input:
network=RESOURCES + "networks/elec{weather_year}.nc", network=RESOURCES + "networks/elec{weather_year}.nc",
tech_costs=COSTS, tech_costs=COSTS,
@ -355,6 +396,16 @@ rule simplify_network:
rule cluster_network: rule cluster_network:
params:
cluster_network=config["clustering"]["cluster_network"],
aggregation_strategies=config["clustering"].get("aggregation_strategies", {}),
custom_busmap=config["enable"].get("custom_busmap", False),
focus_weights=config.get("focus_weights", None),
renewable_carriers=config["electricity"]["renewable_carriers"],
conventional_carriers=config["electricity"].get("conventional_carriers", []),
max_hours=config["electricity"]["max_hours"],
length_factor=config["lines"]["length_factor"],
costs=config["costs"],
input: input:
network=RESOURCES + "networks/elec{weather_year}_s{simpl}.nc", network=RESOURCES + "networks/elec{weather_year}_s{simpl}.nc",
regions_onshore=RESOURCES regions_onshore=RESOURCES
@ -390,6 +441,10 @@ rule cluster_network:
rule add_extra_components: rule add_extra_components:
params:
extendable_carriers=config["electricity"]["extendable_carriers"],
max_hours=config["electricity"]["max_hours"],
costs=config["costs"],
input: input:
network=RESOURCES + "networks/elec{weather_year}_s{simpl}_{clusters}.nc", network=RESOURCES + "networks/elec{weather_year}_s{simpl}_{clusters}.nc",
tech_costs=COSTS, tech_costs=COSTS,
@ -409,6 +464,14 @@ rule add_extra_components:
rule prepare_network: rule prepare_network:
params:
links=config["links"],
lines=config["lines"],
co2base=config["electricity"]["co2base"],
co2limit=config["electricity"]["co2limit"],
gaslimit=config["electricity"].get("gaslimit"),
max_hours=config["electricity"]["max_hours"],
costs=config["costs"],
input: input:
RESOURCES + "networks/elec{weather_year}_s{simpl}_{clusters}_ec.nc", RESOURCES + "networks/elec{weather_year}_s{simpl}_{clusters}_ec.nc",
tech_costs=COSTS, tech_costs=COSTS,
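# The params: blocks added throughout this file follow a single pattern:
# configuration entries a rule depends on are declared on the rule itself, so
# that Snakemake can rerun the rule when those values change and the
# corresponding script reads them via snakemake.params rather than reaching
# into snakemake.config. A minimal sketch of the pattern (rule, file and
# script names are illustrative only, not part of the workflow):
#
#     rule build_example:
#         params:
#             countries=config["countries"],
#         input:
#             "resources/example_input.csv",
#         output:
#             "resources/example_output.csv",
#         script:
#             "../scripts/build_example.py"
#
#     # in the hypothetical scripts/build_example.py
#     countries = snakemake.params.countries  # not snakemake.config["countries"]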

View File

@ -140,6 +140,8 @@ if not (config["sector"]["gas_network"] or config["sector"]["H2_retrofit"]):
rule build_heat_demands: rule build_heat_demands:
params:
snapshots=config["snapshots"],
input: input:
pop_layout=RESOURCES + "pop_layout{weather_year}_{scope}.nc", pop_layout=RESOURCES + "pop_layout{weather_year}_{scope}.nc",
regions_onshore=RESOURCES + "regions_onshore_elec{weather_year}_s{simpl}_{clusters}.geojson", regions_onshore=RESOURCES + "regions_onshore_elec{weather_year}_s{simpl}_{clusters}.geojson",
@ -160,6 +162,8 @@ rule build_heat_demands:
rule build_temperature_profiles: rule build_temperature_profiles:
params:
snapshots=config["snapshots"],
input: input:
pop_layout=RESOURCES + "pop_layout{weather_year}_{scope}.nc", pop_layout=RESOURCES + "pop_layout{weather_year}_{scope}.nc",
regions_onshore=RESOURCES + "regions_onshore_elec{weather_year}_s{simpl}_{clusters}.geojson", regions_onshore=RESOURCES + "regions_onshore_elec{weather_year}_s{simpl}_{clusters}.geojson",
@ -181,6 +185,8 @@ rule build_temperature_profiles:
rule build_cop_profiles: rule build_cop_profiles:
params:
heat_pump_sink_T=config["sector"]["heat_pump_sink_T"],
input: input:
temp_soil_total=RESOURCES + "temp_soil_total_elec{weather_year}_s{simpl}_{clusters}.nc", temp_soil_total=RESOURCES + "temp_soil_total_elec{weather_year}_s{simpl}_{clusters}.nc",
temp_soil_rural=RESOURCES + "temp_soil_rural_elec{weather_year}_s{simpl}_{clusters}.nc", temp_soil_rural=RESOURCES + "temp_soil_rural_elec{weather_year}_s{simpl}_{clusters}.nc",
@ -208,6 +214,9 @@ rule build_cop_profiles:
rule build_solar_thermal_profiles: rule build_solar_thermal_profiles:
params:
snapshots=config["snapshots"],
solar_thermal=config["solar_thermal"],
input: input:
pop_layout=RESOURCES + "pop_layout{weather_year}_{scope}.nc", pop_layout=RESOURCES + "pop_layout{weather_year}_{scope}.nc",
regions_onshore=RESOURCES + "regions_onshore_elec{weather_year}_s{simpl}_{clusters}.geojson", regions_onshore=RESOURCES + "regions_onshore_elec{weather_year}_s{simpl}_{clusters}.geojson",
@ -228,6 +237,9 @@ rule build_solar_thermal_profiles:
rule build_energy_totals: rule build_energy_totals:
params:
countries=config["countries"],
energy=config["energy"],
input: input:
nuts3_shapes=RESOURCES + "nuts3_shapes.geojson", nuts3_shapes=RESOURCES + "nuts3_shapes.geojson",
co2="data/eea/UNFCCC_v23.csv", co2="data/eea/UNFCCC_v23.csv",
@ -271,6 +283,8 @@ rule build_heat_totals:
rule build_biomass_potentials: rule build_biomass_potentials:
params:
biomass=config["biomass"],
input: input:
enspreso_biomass=HTTP.remote( enspreso_biomass=HTTP.remote(
"https://cidportal.jrc.ec.europa.eu/ftp/jrc-opendata/ENSPRESO/ENSPRESO_BIOMASS.xlsx", "https://cidportal.jrc.ec.europa.eu/ftp/jrc-opendata/ENSPRESO/ENSPRESO_BIOMASS.xlsx",
@ -333,6 +347,10 @@ if not config["sector"]["biomass_transport"]:
if config["sector"]["regional_co2_sequestration_potential"]["enable"]: if config["sector"]["regional_co2_sequestration_potential"]["enable"]:
rule build_sequestration_potentials: rule build_sequestration_potentials:
params:
sequestration_potential=config["sector"][
"regional_co2_sequestration_potential"
],
input: input:
sequestration_potential=HTTP.remote( sequestration_potential=HTTP.remote(
"https://raw.githubusercontent.com/ericzhou571/Co2Storage/main/resources/complete_map_2020_unit_Mt.geojson", "https://raw.githubusercontent.com/ericzhou571/Co2Storage/main/resources/complete_map_2020_unit_Mt.geojson",
@ -386,6 +404,8 @@ rule build_salt_cavern_potentials:
rule build_ammonia_production: rule build_ammonia_production:
params:
countries=config["countries"],
input: input:
usgs="data/myb1-2017-nitro.xls", usgs="data/myb1-2017-nitro.xls",
output: output:
@ -404,6 +424,9 @@ rule build_ammonia_production:
rule build_industry_sector_ratios: rule build_industry_sector_ratios:
params:
industry=config["industry"],
ammonia=config["sector"].get("ammonia", False),
input: input:
ammonia_production=RESOURCES + "ammonia_production.csv", ammonia_production=RESOURCES + "ammonia_production.csv",
idees="data/jrc-idees-2015", idees="data/jrc-idees-2015",
@ -423,6 +446,9 @@ rule build_industry_sector_ratios:
rule build_industrial_production_per_country: rule build_industrial_production_per_country:
params:
industry=config["industry"],
countries=config["countries"],
input: input:
ammonia_production=RESOURCES + "ammonia_production.csv", ammonia_production=RESOURCES + "ammonia_production.csv",
jrc="data/jrc-idees-2015", jrc="data/jrc-idees-2015",
@ -444,6 +470,8 @@ rule build_industrial_production_per_country:
rule build_industrial_production_per_country_tomorrow: rule build_industrial_production_per_country_tomorrow:
params:
industry=config["industry"],
input: input:
industrial_production_per_country=RESOURCES industrial_production_per_country=RESOURCES
+ "industrial_production_per_country.csv", + "industrial_production_per_country.csv",
@ -468,6 +496,9 @@ rule build_industrial_production_per_country_tomorrow:
rule build_industrial_distribution_key: rule build_industrial_distribution_key:
params:
hotmaps_locate_missing=config["industry"].get("hotmaps_locate_missing", False),
countries=config["countries"],
input: input:
regions_onshore=RESOURCES + "regions_onshore_elec{weather_year}_s{simpl}_{clusters}.geojson", regions_onshore=RESOURCES + "regions_onshore_elec{weather_year}_s{simpl}_{clusters}.geojson",
clustered_pop_layout=RESOURCES + "pop_layout_elec{weather_year}_s{simpl}_{clusters}.csv", clustered_pop_layout=RESOURCES + "pop_layout_elec{weather_year}_s{simpl}_{clusters}.csv",
@ -542,6 +573,9 @@ rule build_industrial_energy_demand_per_node:
rule build_industrial_energy_demand_per_country_today: rule build_industrial_energy_demand_per_country_today:
params:
countries=config["countries"],
industry=config["industry"],
input: input:
jrc="data/jrc-idees-2015", jrc="data/jrc-idees-2015",
ammonia_production=RESOURCES + "ammonia_production.csv", ammonia_production=RESOURCES + "ammonia_production.csv",
@ -588,6 +622,9 @@ rule build_industrial_energy_demand_per_node_today:
if config["sector"]["retrofitting"]["retro_endogen"]: if config["sector"]["retrofitting"]["retro_endogen"]:
rule build_retro_cost: rule build_retro_cost:
params:
retrofitting=config["sector"]["retrofitting"],
countries=config["countries"],
input: input:
building_stock="data/retro/data_building_stock.csv", building_stock="data/retro/data_building_stock.csv",
data_tabula="data/retro/tabula-calculator-calcsetbuilding.csv", data_tabula="data/retro/tabula-calculator-calcsetbuilding.csv",
@ -658,6 +695,9 @@ rule build_shipping_demand:
rule build_transport_demand: rule build_transport_demand:
params:
snapshots=config["snapshots"],
sector=config["sector"],
input: input:
clustered_pop_layout=RESOURCES + "pop_layout_elec{weather_year}_s{simpl}_{clusters}.csv", clustered_pop_layout=RESOURCES + "pop_layout_elec{weather_year}_s{simpl}_{clusters}.csv",
pop_weighted_energy_totals=RESOURCES pop_weighted_energy_totals=RESOURCES
@ -684,14 +724,27 @@ rule build_transport_demand:
rule prepare_sector_network: rule prepare_sector_network:
params: params:
co2_budget=config["co2_budget"],
conventional_carriers=config["existing_capacities"]["conventional_carriers"],
foresight=config["foresight"],
costs=config["costs"],
sector=config["sector"],
industry=config["industry"],
pypsa_eur=config["pypsa_eur"],
length_factor=config["lines"]["length_factor"],
planning_horizons=config["scenario"]["planning_horizons"],
countries=config["countries"],
emissions_scope=config["energy"]["emissions"],
eurostat_report_year=config["energy"]["eurostat_report_year"],
RDIR=RDIR, RDIR=RDIR,
input: input:
**build_retro_cost_output, **build_retro_cost_output,
**build_biomass_transport_costs_output, **build_biomass_transport_costs_output,
**gas_infrastructure, **gas_infrastructure,
**build_sequestration_potentials_output, **build_sequestration_potentials_output,
overrides="data/override_component_attrs",
network=RESOURCES + "networks/elec{weather_year}_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", network=RESOURCES + "networks/elec{weather_year}_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
energy_totals_name=RESOURCES + "energy_totals.csv",
eurostat=input_eurostat,
pop_weighted_energy_totals=RESOURCES pop_weighted_energy_totals=RESOURCES
+ "pop_weighted_energy_totals{weather_year}_s{simpl}_{clusters}.csv", + "pop_weighted_energy_totals{weather_year}_s{simpl}_{clusters}.csv",
pop_weighted_heat_totals=RESOURCES pop_weighted_heat_totals=RESOURCES

View File

@ -23,6 +23,28 @@ def memory(w):
return int(factor * (10000 + 195 * int(w.clusters))) return int(factor * (10000 + 195 * int(w.clusters)))
# Check whether the workflow has internet access by requesting the HEAD of the specified url
def has_internet_access(url="www.zenodo.org") -> bool:
import http.client as http_client
# based on answer and comments from
# https://stackoverflow.com/a/29854274/11318472
conn = http_client.HTTPConnection(url, timeout=5) # need access to zenodo anyway
try:
conn.request("HEAD", "/")
return True
except Exception:
return False
finally:
conn.close()
def input_eurostat(w):
# 2016 includes BA, 2017 does not
report_year = config["energy"]["eurostat_report_year"]
return f"data/eurostat-energy_balances-june_{report_year}_edition"
def solved_previous_horizon(wildcards): def solved_previous_horizon(wildcards):
planning_horizons = config["scenario"]["planning_horizons"] planning_horizons = config["scenario"]["planning_horizons"]
i = planning_horizons.index(int(wildcards.planning_horizons)) i = planning_horizons.index(int(wildcards.planning_horizons))

View File

@ -9,8 +9,10 @@ localrules:
rule plot_network: rule plot_network:
params:
foresight=config["foresight"],
plotting=config["plotting"],
input: input:
overrides="data/override_component_attrs",
network=RESULTS network=RESULTS
+ "postnetworks/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", + "postnetworks/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
regions=RESOURCES regions=RESOURCES
@ -68,9 +70,12 @@ rule copy_conda_env:
rule make_summary: rule make_summary:
params: params:
foresight=config["foresight"],
costs=config["costs"],
snapshots=config["snapshots"],
scenario=config["scenario"],
RDIR=RDIR, RDIR=RDIR,
input: input:
overrides="data/override_component_attrs",
networks=expand( networks=expand(
RESULTS RESULTS
+ "postnetworks/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", + "postnetworks/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
@ -115,6 +120,10 @@ rule make_summary:
rule plot_summary: rule plot_summary:
params: params:
countries=config["countries"],
planning_horizons=config["scenario"]["planning_horizons"],
sector_opts=config["scenario"]["sector_opts"],
plotting=config["plotting"],
RDIR=RDIR, RDIR=RDIR,
input: input:
costs=RESULTS + "csvs/costs.csv", costs=RESULTS + "csvs/costs.csv",

View File

@ -2,7 +2,14 @@
# #
# SPDX-License-Identifier: MIT # SPDX-License-Identifier: MIT
if config["enable"].get("retrieve_databundle", True): if config["enable"].get("retrieve", "auto") == "auto":
config["enable"]["retrieve"] = has_internet_access()
if config["enable"]["retrieve"] is False:
print("Datafile downloads disabled in config[retrieve] or no internet access.")
if config["enable"]["retrieve"] and config["enable"].get("retrieve_databundle", True):
datafiles = [ datafiles = [
"ch_cantons.csv", "ch_cantons.csv",
"je-e-21.03.02.xls", "je-e-21.03.02.xls",
@ -32,7 +39,7 @@ if config["enable"].get("retrieve_databundle", True):
"../scripts/retrieve_databundle.py" "../scripts/retrieve_databundle.py"
if config["enable"].get("retrieve_cutout", True): if config["enable"]["retrieve"] and config["enable"].get("retrieve_cutout", True):
rule retrieve_cutout: rule retrieve_cutout:
input: input:
@ -51,7 +58,7 @@ if config["enable"].get("retrieve_cutout", True):
move(input[0], output[0]) move(input[0], output[0])
if config["enable"].get("retrieve_cost_data", True): if config["enable"]["retrieve"] and config["enable"].get("retrieve_cost_data", True):
rule retrieve_cost_data: rule retrieve_cost_data:
input: input:
@ -73,7 +80,9 @@ if config["enable"].get("retrieve_cost_data", True):
move(input[0], output[0]) move(input[0], output[0])
if config["enable"].get("retrieve_natura_raster", True): if config["enable"]["retrieve"] and config["enable"].get(
"retrieve_natura_raster", True
):
rule retrieve_natura_raster: rule retrieve_natura_raster:
input: input:
@ -93,7 +102,9 @@ if config["enable"].get("retrieve_natura_raster", True):
move(input[0], output[0]) move(input[0], output[0])
if config["enable"].get("retrieve_sector_databundle", True): if config["enable"]["retrieve"] and config["enable"].get(
"retrieve_sector_databundle", True
):
datafiles = [ datafiles = [
"data/eea/UNFCCC_v23.csv", "data/eea/UNFCCC_v23.csv",
"data/switzerland-sfoe/switzerland-new_format.csv", "data/switzerland-sfoe/switzerland-new_format.csv",
@ -119,7 +130,9 @@ if config["enable"].get("retrieve_sector_databundle", True):
"../scripts/retrieve_sector_databundle.py" "../scripts/retrieve_sector_databundle.py"
if config["sector"]["gas_network"] or config["sector"]["H2_retrofit"]: if config["enable"]["retrieve"] and (
config["sector"]["gas_network"] or config["sector"]["H2_retrofit"]
):
datafiles = [ datafiles = [
"IGGIELGN_LNGs.geojson", "IGGIELGN_LNGs.geojson",
"IGGIELGN_BorderPoints.geojson", "IGGIELGN_BorderPoints.geojson",
@ -138,7 +151,7 @@ if config["sector"]["gas_network"] or config["sector"]["H2_retrofit"]:
script: script:
"../scripts/retrieve_gas_infrastructure_data.py" "../scripts/retrieve_gas_infrastructure_data.py"
if config["enable"].get("retrieve_opsd_load_data", True): if config["enable"]["retrieve"] and config["enable"].get("retrieve_opsd_load_data", True):
rule retrieve_electricity_demand: rule retrieve_electricity_demand:
input: input:
@ -158,7 +171,7 @@ if config["enable"].get("retrieve_opsd_load_data", True):
move(input[0], output[0]) move(input[0], output[0])
if config["enable"].get('retrieve_artificial_load_data', False): if config["enable"]["retrieve"] and config["enable"].get('retrieve_artificial_load_data', False):
rule retrieve_artificial_load_data: rule retrieve_artificial_load_data:
input: HTTP.remote("https://zenodo.org/record/7070438/files/demand_hourly.csv", keep_local=True, static=True) input: HTTP.remote("https://zenodo.org/record/7070438/files/demand_hourly.csv", keep_local=True, static=True)
@ -169,19 +182,21 @@ if config["enable"].get('retrieve_artificial_load_data', False):
run: move(input[0], output[0]) run: move(input[0], output[0])
rule retrieve_ship_raster: if config["enable"]["retrieve"]:
input:
HTTP.remote( rule retrieve_ship_raster:
"https://zenodo.org/record/6953563/files/shipdensity_global.zip", input:
keep_local=True, HTTP.remote(
static=True, "https://zenodo.org/record/6953563/files/shipdensity_global.zip",
), keep_local=True,
output: static=True,
"data/shipdensity_global.zip", ),
log: output:
LOGS + "retrieve_ship_raster.log", "data/shipdensity_global.zip",
resources: log:
mem_mb=5000, LOGS + "retrieve_ship_raster.log",
retries: 2 resources:
run: mem_mb=5000,
move(input[0], output[0]) retries: 2
run:
move(input[0], output[0])
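# Sketch: the download rules above are all gated by one switch; the effective
# logic is roughly the following (the fetch_* names are illustrative, the
# config keys are the ones used above, and enable: retrieve: may be true,
# false or "auto"):
retrieve = config["enable"].get("retrieve", "auto")
if retrieve == "auto":
    retrieve = has_internet_access()  # helper defined in the common rules file
# every data source keeps its own opt-out on top of the global switch
fetch_cutouts = retrieve and config["enable"].get("retrieve_cutout", True)
fetch_cost_data = retrieve and config["enable"].get("retrieve_cost_data", True)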

View File

@ -4,6 +4,13 @@
rule solve_network: rule solve_network:
params:
solving=config["solving"],
foresight=config["foresight"],
planning_horizons=config["scenario"]["planning_horizons"],
co2_sequestration_potential=config["sector"].get(
"co2_sequestration_potential", 200
),
input: input:
network=RESOURCES network=RESOURCES
+ "networks/elec{weather_year}_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", + "networks/elec{weather_year}_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
@ -17,8 +24,6 @@ rule solve_network:
), ),
python=LOGS python=LOGS
+ "solve_network/elec{weather_year}_s{simpl}_{clusters}_ec_l{ll}_{opts}_python.log", + "solve_network/elec{weather_year}_s{simpl}_{clusters}_ec_l{ll}_{opts}_python.log",
memory=LOGS
+ "solve_network/elec{weather_year}_s{simpl}_{clusters}_ec_l{ll}_{opts}_memory.log",
benchmark: benchmark:
( (
BENCHMARKS BENCHMARKS
@ -36,6 +41,8 @@ rule solve_network:
rule solve_operations_network: rule solve_operations_network:
params:
options=config["solving"]["options"],
input: input:
network=RESULTS network=RESULTS
+ "networks/elec{weather_year}_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", + "networks/elec{weather_year}_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
@ -49,8 +56,6 @@ rule solve_operations_network:
), ),
python=LOGS python=LOGS
+ "solve_operations_network/elec{weather_year}_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_python.log", + "solve_operations_network/elec{weather_year}_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_python.log",
memory=LOGS
+ "solve_operations_network/elec{weather_year}_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_memory.log",
benchmark: benchmark:
( (
BENCHMARKS BENCHMARKS

View File

@ -4,8 +4,12 @@
rule add_existing_baseyear: rule add_existing_baseyear:
params:
baseyear=config["scenario"]["planning_horizons"][0],
sector=config["sector"],
existing_capacities=config["existing_capacities"],
costs=config["costs"],
input: input:
overrides="data/override_component_attrs",
network=RESULTS network=RESULTS
+ "prenetworks/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", + "prenetworks/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
powerplants=RESOURCES + "powerplants.csv", powerplants=RESOURCES + "powerplants.csv",
@ -45,8 +49,11 @@ rule add_existing_baseyear:
rule add_brownfield: rule add_brownfield:
params:
H2_retrofit=config["sector"]["H2_retrofit"],
H2_retrofit_capacity_per_CH4=config["sector"]["H2_retrofit_capacity_per_CH4"],
threshold_capacity=config["existing_capacities"]["threshold_capacity"],
input: input:
overrides="data/override_component_attrs",
network=RESULTS network=RESULTS
+ "prenetworks/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", + "prenetworks/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
network_p=solved_previous_horizon, #solved network at previous time step network_p=solved_previous_horizon, #solved network at previous time step
@ -79,8 +86,14 @@ ruleorder: add_existing_baseyear > add_brownfield
rule solve_sector_network_myopic: rule solve_sector_network_myopic:
params:
solving=config["solving"],
foresight=config["foresight"],
planning_horizons=config["scenario"]["planning_horizons"],
co2_sequestration_potential=config["sector"].get(
"co2_sequestration_potential", 200
),
input: input:
overrides="data/override_component_attrs",
network=RESULTS network=RESULTS
+ "prenetworks-brownfield/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", + "prenetworks-brownfield/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
costs="data/costs_{planning_horizons}.csv", costs="data/costs_{planning_horizons}.csv",
@ -95,8 +108,6 @@ rule solve_sector_network_myopic:
+ "elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_solver.log", + "elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_solver.log",
python=LOGS python=LOGS
+ "elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log", + "elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log",
memory=LOGS
+ "elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_memory.log",
threads: 4 threads: 4
resources: resources:
mem_mb=config["solving"]["mem"], mem_mb=config["solving"]["mem"],

View File

@ -4,8 +4,14 @@
rule solve_sector_network: rule solve_sector_network:
params:
solving=config["solving"],
foresight=config["foresight"],
planning_horizons=config["scenario"]["planning_horizons"],
co2_sequestration_potential=config["sector"].get(
"co2_sequestration_potential", 200
),
input: input:
overrides="data/override_component_attrs",
network=RESULTS network=RESULTS
+ "prenetworks/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", + "prenetworks/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
costs="data/costs_{}.csv".format(config["costs"]["year"]), costs="data/costs_{}.csv".format(config["costs"]["year"]),
@ -21,8 +27,6 @@ rule solve_sector_network:
+ "elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_solver.log", + "elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_solver.log",
python=LOGS python=LOGS
+ "elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log", + "elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log",
memory=LOGS
+ "elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_memory.log",
threads: config["solving"]["solver"].get("threads", 4) threads: config["solving"]["solver"].get("threads", 4)
resources: resources:
mem_mb=config["solving"]["mem"], mem_mb=config["solving"]["mem"],

View File

@ -72,92 +72,6 @@ def configure_logging(snakemake, skip_handlers=False):
logging.basicConfig(**kwargs) logging.basicConfig(**kwargs)
def load_network(import_name=None, custom_components=None):
"""
Helper for importing a pypsa.Network with additional custom components.
Parameters
----------
import_name : str
As in pypsa.Network(import_name)
custom_components : dict
Dictionary listing custom components.
For using ``snakemake.config['override_components']``
in ``config/config.yaml`` define:
.. code:: yaml
override_components:
ShadowPrice:
component: ["shadow_prices","Shadow price for a global constraint.",np.nan]
attributes:
name: ["string","n/a","n/a","Unique name","Input (required)"]
value: ["float","n/a",0.,"shadow value","Output"]
Returns
-------
pypsa.Network
"""
import pypsa
from pypsa.descriptors import Dict
override_components = None
override_component_attrs = None
if custom_components is not None:
override_components = pypsa.components.components.copy()
override_component_attrs = Dict(
{k: v.copy() for k, v in pypsa.components.component_attrs.items()}
)
for k, v in custom_components.items():
override_components.loc[k] = v["component"]
override_component_attrs[k] = pd.DataFrame(
columns=["type", "unit", "default", "description", "status"]
)
for attr, val in v["attributes"].items():
override_component_attrs[k].loc[attr] = val
return pypsa.Network(
import_name=import_name,
override_components=override_components,
override_component_attrs=override_component_attrs,
)
def load_network_for_plots(fn, tech_costs, config, combine_hydro_ps=True):
import pypsa
from add_electricity import load_costs, update_transmission_costs
n = pypsa.Network(fn)
n.loads["carrier"] = n.loads.bus.map(n.buses.carrier) + " load"
n.stores["carrier"] = n.stores.bus.map(n.buses.carrier)
n.links["carrier"] = (
n.links.bus0.map(n.buses.carrier) + "-" + n.links.bus1.map(n.buses.carrier)
)
n.lines["carrier"] = "AC line"
n.transformers["carrier"] = "AC transformer"
n.lines["s_nom"] = n.lines["s_nom_min"]
n.links["p_nom"] = n.links["p_nom_min"]
if combine_hydro_ps:
n.storage_units.loc[
n.storage_units.carrier.isin({"PHS", "hydro"}), "carrier"
] = "hydro+PHS"
# if the carrier was not set on the heat storage units
# bus_carrier = n.storage_units.bus.map(n.buses.carrier)
# n.storage_units.loc[bus_carrier == "heat","carrier"] = "water tanks"
Nyears = n.snapshot_weightings.objective.sum() / 8760.0
costs = load_costs(tech_costs, config["costs"], config["electricity"], Nyears)
update_transmission_costs(n, costs)
return n
def update_p_nom_max(n): def update_p_nom_max(n):
# if extendable carriers (solar/onwind/...) have capacity >= 0, # if extendable carriers (solar/onwind/...) have capacity >= 0,
# e.g. existing assets from the OPSD project are included to the network, # e.g. existing assets from the OPSD project are included to the network,
@ -277,23 +191,6 @@ def progress_retrieve(url, file, disable=False):
urllib.request.urlretrieve(url, file, reporthook=update_to) urllib.request.urlretrieve(url, file, reporthook=update_to)
def get_aggregation_strategies(aggregation_strategies):
# default aggregation strategies that cannot be defined in .yaml format must be specified within
# the function, otherwise (when defaults are passed in the function's definition) they get lost
# when custom values are specified in the config.
import numpy as np
from pypsa.networkclustering import _make_consense
bus_strategies = dict(country=_make_consense("Bus", "country"))
bus_strategies.update(aggregation_strategies.get("buses", {}))
generator_strategies = {"build_year": lambda x: 0, "lifetime": lambda x: np.inf}
generator_strategies.update(aggregation_strategies.get("generators", {}))
return bus_strategies, generator_strategies
def mock_snakemake(rulename, configfiles=[], **wildcards): def mock_snakemake(rulename, configfiles=[], **wildcards):
""" """
This function is expected to be executed from the 'scripts'-directory of ' This function is expected to be executed from the 'scripts'-directory of '
@ -384,33 +281,6 @@ def mock_snakemake(rulename, configfiles=[], **wildcards):
return snakemake return snakemake
def override_component_attrs(directory):
"""Tell PyPSA that links can have multiple outputs by
overriding the component_attrs. This can be done for
as many buses as you need with format busi for i = 2,3,4,5,....
See https://pypsa.org/doc/components.html#link-with-multiple-outputs-or-inputs
Parameters
----------
directory : string
Folder where component attributes to override are stored
analogous to ``pypsa/component_attrs``, e.g. `links.csv`.
Returns
-------
Dictionary of overridden component attributes.
"""
attrs = Dict({k: v.copy() for k, v in component_attrs.items()})
for component, list_name in components.list_name.items():
fn = f"{directory}/{list_name}.csv"
if os.path.isfile(fn):
overrides = pd.read_csv(fn, index_col=0, na_values="n/a")
attrs[component] = overrides.combine_first(attrs[component])
return attrs
def generate_periodic_profiles(dt_index, nodes, weekly_profile, localize=None): def generate_periodic_profiles(dt_index, nodes, weekly_profile, localize=None):
""" """
Give a 24*7 long list of weekly hourly profiles, generate this for each Give a 24*7 long list of weekly hourly profiles, generate this for each

View File

@ -16,7 +16,7 @@ idx = pd.IndexSlice
import numpy as np import numpy as np
import pypsa import pypsa
from _helpers import override_component_attrs, update_config_with_sector_opts from _helpers import update_config_with_sector_opts
from add_existing_baseyear import add_build_year_to_new_assets from add_existing_baseyear import add_build_year_to_new_assets
@ -49,7 +49,7 @@ def add_brownfield(n, n_p, year):
) )
] ]
threshold = snakemake.config["existing_capacities"]["threshold_capacity"] threshold = snakemake.params.threshold_capacity
if not chp_heat.empty: if not chp_heat.empty:
threshold_chp_heat = ( threshold_chp_heat = (
@ -87,7 +87,7 @@ def add_brownfield(n, n_p, year):
# deal with gas network # deal with gas network
pipe_carrier = ["gas pipeline"] pipe_carrier = ["gas pipeline"]
if snakemake.config["sector"]["H2_retrofit"]: if snakemake.params.H2_retrofit:
# drop capacities of previous year to avoid duplicating # drop capacities of previous year to avoid duplicating
to_drop = n.links.carrier.isin(pipe_carrier) & (n.links.build_year != year) to_drop = n.links.carrier.isin(pipe_carrier) & (n.links.build_year != year)
n.mremove("Link", n.links.loc[to_drop].index) n.mremove("Link", n.links.loc[to_drop].index)
@ -98,7 +98,7 @@ def add_brownfield(n, n_p, year):
& (n.links.build_year != year) & (n.links.build_year != year)
].index ].index
gas_pipes_i = n.links[n.links.carrier.isin(pipe_carrier)].index gas_pipes_i = n.links[n.links.carrier.isin(pipe_carrier)].index
CH4_per_H2 = 1 / snakemake.config["sector"]["H2_retrofit_capacity_per_CH4"] CH4_per_H2 = 1 / snakemake.params.H2_retrofit_capacity_per_CH4
fr = "H2 pipeline retrofitted" fr = "H2 pipeline retrofitted"
to = "gas pipeline" to = "gas pipeline"
# today's pipe capacity # today's pipe capacity
@ -148,12 +148,11 @@ if __name__ == "__main__":
year = int(snakemake.wildcards.planning_horizons) year = int(snakemake.wildcards.planning_horizons)
overrides = override_component_attrs(snakemake.input.overrides) n = pypsa.Network(snakemake.input.network)
n = pypsa.Network(snakemake.input.network, override_component_attrs=overrides)
add_build_year_to_new_assets(n, year) add_build_year_to_new_assets(n, year)
n_p = pypsa.Network(snakemake.input.network_p, override_component_attrs=overrides) n_p = pypsa.Network(snakemake.input.network_p)
add_brownfield(n, n_p, year) add_brownfield(n, n_p, year)

View File

@ -85,16 +85,18 @@ It further adds extendable ``generators`` with **zero** capacity for
""" """
import logging import logging
from itertools import product
import geopandas as gpd import geopandas as gpd
import numpy as np import numpy as np
import pandas as pd import pandas as pd
import powerplantmatching as pm import powerplantmatching as pm
import pypsa import pypsa
import scipy.sparse as sparse
import xarray as xr import xarray as xr
from _helpers import configure_logging, update_p_nom_max from _helpers import configure_logging, update_p_nom_max
from powerplantmatching.export import map_country_bus from powerplantmatching.export import map_country_bus
from vresutils import transfer as vtransfer from shapely.prepared import prep
idx = pd.IndexSlice idx = pd.IndexSlice
@ -121,21 +123,71 @@ def calculate_annuity(n, r):
return 1 / n return 1 / n
def _add_missing_carriers_from_costs(n, costs, carriers): def add_missing_carriers(n, carriers):
missing_carriers = pd.Index(carriers).difference(n.carriers.index) """
if missing_carriers.empty: Function to add missing carriers to the network without raising errors.
return """
missing_carriers = set(carriers) - set(n.carriers.index)
if len(missing_carriers) > 0:
n.madd("Carrier", missing_carriers)
emissions_cols = (
costs.columns.to_series().loc[lambda s: s.str.endswith("_emissions")].values def sanitize_carriers(n, config):
"""
Sanitize the carrier information in a PyPSA Network object.
The function ensures that all unique carrier names are present in the network's
carriers attribute, and adds nice names and colors for each carrier according
to the provided configuration dictionary.
Parameters
----------
n : pypsa.Network
A PyPSA Network object that represents an electrical power system.
config : dict
A dictionary containing configuration information, specifically the
"plotting" key with "nice_names" and "tech_colors" keys for carriers.
Returns
-------
None
The function modifies the 'n' PyPSA Network object in-place, updating the
carriers attribute with nice names and colors.
Warnings
--------
Raises a warning if any carrier's "tech_colors" are not defined in the config dictionary.
"""
for c in n.iterate_components():
if "carrier" in c.df:
add_missing_carriers(n, c.df.carrier)
carrier_i = n.carriers.index
nice_names = (
pd.Series(config["plotting"]["nice_names"])
.reindex(carrier_i)
.fillna(carrier_i.to_series().str.title())
) )
suptechs = missing_carriers.str.split("-").str[0] n.carriers["nice_name"] = n.carriers.nice_name.where(
emissions = costs.loc[suptechs, emissions_cols].fillna(0.0) n.carriers.nice_name != "", nice_names
emissions.index = missing_carriers )
n.import_components_from_dataframe(emissions, "Carrier") colors = pd.Series(config["plotting"]["tech_colors"]).reindex(carrier_i)
if colors.isna().any():
missing_i = list(colors.index[colors.isna()])
logger.warning(f"tech_colors for carriers {missing_i} not defined in config.")
n.carriers["color"] = n.carriers.color.where(n.carriers.color != "", colors)
def load_costs(tech_costs, config, elec_config, Nyears=1.0): def add_co2_emissions(n, costs, carriers):
"""
Add CO2 emissions to the network's carriers attribute.
"""
suptechs = n.carriers.loc[carriers].index.str.split("-").str[0]
n.carriers.loc[carriers, "co2_emissions"] = costs.co2_emissions[suptechs].values
def load_costs(tech_costs, config, max_hours, Nyears=1.0):
# set all asset costs and other parameters # set all asset costs and other parameters
costs = pd.read_csv(tech_costs, index_col=[0, 1]).sort_index() costs = pd.read_csv(tech_costs, index_col=[0, 1]).sort_index()
@ -178,7 +230,6 @@ def load_costs(tech_costs, config, elec_config, Nyears=1.0):
dict(capital_cost=capital_cost, marginal_cost=0.0, co2_emissions=0.0) dict(capital_cost=capital_cost, marginal_cost=0.0, co2_emissions=0.0)
) )
max_hours = elec_config["max_hours"]
costs.loc["battery"] = costs_for_storage( costs.loc["battery"] = costs_for_storage(
costs.loc["battery storage"], costs.loc["battery storage"],
costs.loc["battery inverter"], costs.loc["battery inverter"],
@ -216,6 +267,21 @@ def load_powerplants(ppl_fn):
) )
def shapes_to_shapes(orig, dest):
"""
Adopted from vresutils.transfer.Shapes2Shapes()
"""
orig_prepped = list(map(prep, orig))
transfer = sparse.lil_matrix((len(dest), len(orig)), dtype=float)
for i, j in product(range(len(dest)), range(len(orig))):
if orig_prepped[j].intersects(dest[i]):
area = orig[j].intersection(dest[i]).area
transfer[i, j] = area / dest[i].area
return transfer
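# A tiny usage illustration of shapes_to_shapes (the geometries are made up
# for the example and not part of the workflow): one 1x1 source square mapped
# onto two half-sized destination rectangles, each of which it covers fully.
from shapely.geometry import box

orig = [box(0, 0, 1, 1)]                       # single 1x1 source polygon
dest = [box(0, 0, 0.5, 1), box(0.5, 0, 1, 1)]  # two halves of the same square
print(shapes_to_shapes(orig, dest).toarray())
# [[1.]
#  [1.]]   -> each destination's area is fully covered by the source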
def attach_load(n, regions, load, nuts3_shapes, countries, scaling=1.0): def attach_load(n, regions, load, nuts3_shapes, countries, scaling=1.0):
substation_lv_i = n.buses.index[n.buses["substation_lv"]] substation_lv_i = n.buses.index[n.buses["substation_lv"]]
regions = gpd.read_file(regions).set_index("name").reindex(substation_lv_i) regions = gpd.read_file(regions).set_index("name").reindex(substation_lv_i)
@ -232,9 +298,7 @@ def attach_load(n, regions, load, nuts3_shapes, countries, scaling=1.0):
return pd.DataFrame({group.index[0]: l}) return pd.DataFrame({group.index[0]: l})
else: else:
nuts3_cntry = nuts3.loc[nuts3.country == cntry] nuts3_cntry = nuts3.loc[nuts3.country == cntry]
transfer = vtransfer.Shapes2Shapes( transfer = shapes_to_shapes(group, nuts3_cntry.geometry).T.tocsr()
group, nuts3_cntry.geometry, normed=False
).T.tocsr()
gdp_n = pd.Series( gdp_n = pd.Series(
transfer.dot(nuts3_cntry["gdp"].fillna(1.0).values), index=group.index transfer.dot(nuts3_cntry["gdp"].fillna(1.0).values), index=group.index
) )
@ -295,57 +359,56 @@ def update_transmission_costs(n, costs, length_factor=1.0):
def attach_wind_and_solar( def attach_wind_and_solar(
n, costs, input_profiles, technologies, extendable_carriers, line_length_factor=1 n, costs, input_profiles, carriers, extendable_carriers, line_length_factor=1
): ):
# TODO: rename tech -> carrier, technologies -> carriers add_missing_carriers(n, carriers)
_add_missing_carriers_from_costs(n, costs, technologies)
for tech in technologies: for car in carriers:
if tech == "hydro": if car == "hydro":
continue continue
with xr.open_dataset(getattr(input_profiles, "profile_" + tech)) as ds: with xr.open_dataset(getattr(input_profiles, "profile_" + car)) as ds:
if ds.indexes["bus"].empty: if ds.indexes["bus"].empty:
continue continue
suptech = tech.split("-", 2)[0] supcar = car.split("-", 2)[0]
if suptech == "offwind": if supcar == "offwind":
underwater_fraction = ds["underwater_fraction"].to_pandas() underwater_fraction = ds["underwater_fraction"].to_pandas()
connection_cost = ( connection_cost = (
line_length_factor line_length_factor
* ds["average_distance"].to_pandas() * ds["average_distance"].to_pandas()
* ( * (
underwater_fraction underwater_fraction
* costs.at[tech + "-connection-submarine", "capital_cost"] * costs.at[car + "-connection-submarine", "capital_cost"]
+ (1.0 - underwater_fraction) + (1.0 - underwater_fraction)
* costs.at[tech + "-connection-underground", "capital_cost"] * costs.at[car + "-connection-underground", "capital_cost"]
) )
) )
capital_cost = ( capital_cost = (
costs.at["offwind", "capital_cost"] costs.at["offwind", "capital_cost"]
+ costs.at[tech + "-station", "capital_cost"] + costs.at[car + "-station", "capital_cost"]
+ connection_cost + connection_cost
) )
logger.info( logger.info(
"Added connection cost of {:0.0f}-{:0.0f} Eur/MW/a to {}".format( "Added connection cost of {:0.0f}-{:0.0f} Eur/MW/a to {}".format(
connection_cost.min(), connection_cost.max(), tech connection_cost.min(), connection_cost.max(), car
) )
) )
else: else:
capital_cost = costs.at[tech, "capital_cost"] capital_cost = costs.at[car, "capital_cost"]
n.madd( n.madd(
"Generator", "Generator",
ds.indexes["bus"], ds.indexes["bus"],
" " + tech, " " + car,
bus=ds.indexes["bus"], bus=ds.indexes["bus"],
carrier=tech, carrier=car,
p_nom_extendable=tech in extendable_carriers["Generator"], p_nom_extendable=car in extendable_carriers["Generator"],
p_nom_max=ds["p_nom_max"].to_pandas(), p_nom_max=ds["p_nom_max"].to_pandas(),
weight=ds["weight"].to_pandas(), weight=ds["weight"].to_pandas(),
marginal_cost=costs.at[suptech, "marginal_cost"], marginal_cost=costs.at[supcar, "marginal_cost"],
capital_cost=capital_cost, capital_cost=capital_cost,
efficiency=costs.at[suptech, "efficiency"], efficiency=costs.at[supcar, "efficiency"],
p_max_pu=ds["profile"].transpose("time", "bus").to_pandas(), p_max_pu=ds["profile"].transpose("time", "bus").to_pandas(),
) )
@ -356,11 +419,19 @@ def attach_conventional_generators(
ppl, ppl,
conventional_carriers, conventional_carriers,
extendable_carriers, extendable_carriers,
conventional_config, conventional_params,
conventional_inputs, conventional_inputs,
): ):
carriers = set(conventional_carriers) | set(extendable_carriers["Generator"]) carriers = list(set(conventional_carriers) | set(extendable_carriers["Generator"]))
_add_missing_carriers_from_costs(n, costs, carriers) add_missing_carriers(n, carriers)
add_co2_emissions(n, costs, carriers)
# Replace carrier "natural gas" with the respective technology (OCGT or
# CCGT) to align with PyPSA names of "carriers" and avoid filtering "natural
# gas" powerplants in ppl.query("carrier in @carriers")
ppl.loc[ppl["carrier"] == "natural gas", "carrier"] = ppl.loc[
ppl["carrier"] == "natural gas", "technology"
]
ppl = ( ppl = (
ppl.query("carrier in @carriers") ppl.query("carrier in @carriers")
@ -393,17 +464,19 @@ def attach_conventional_generators(
lifetime=(ppl.dateout - ppl.datein).fillna(np.inf), lifetime=(ppl.dateout - ppl.datein).fillna(np.inf),
) )
for carrier in conventional_config: for carrier in conventional_params:
# Generators with technology affected # Generators with technology affected
idx = n.generators.query("carrier == @carrier").index idx = n.generators.query("carrier == @carrier").index
for attr in list(set(conventional_config[carrier]) & set(n.generators)): for attr in list(set(conventional_params[carrier]) & set(n.generators)):
values = conventional_config[carrier][attr] values = conventional_params[carrier][attr]
if f"conventional_{carrier}_{attr}" in conventional_inputs: if f"conventional_{carrier}_{attr}" in conventional_inputs:
# Values affecting generators of technology k country-specific # Values affecting generators of technology k country-specific
# First map generator buses to countries; then map countries to p_max_pu # First map generator buses to countries; then map countries to p_max_pu
values = pd.read_csv(values, index_col=0).iloc[:, 0] values = pd.read_csv(
snakemake.input[f"conventional_{carrier}_{attr}"], index_col=0
).iloc[:, 0]
bus_values = n.buses.country.map(values) bus_values = n.buses.country.map(values)
n.generators[attr].update( n.generators[attr].update(
n.generators.loc[idx].bus.map(bus_values).dropna() n.generators.loc[idx].bus.map(bus_values).dropna()
@ -413,8 +486,9 @@ def attach_conventional_generators(
n.generators.loc[idx, attr] = values n.generators.loc[idx, attr] = values
def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **config): def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **params):
_add_missing_carriers_from_costs(n, costs, carriers) add_missing_carriers(n, carriers)
add_co2_emissions(n, costs, carriers)
ppl = ( ppl = (
ppl.query('carrier == "hydro"') ppl.query('carrier == "hydro"')
@ -468,9 +542,9 @@ def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **con
) )
if "PHS" in carriers and not phs.empty: if "PHS" in carriers and not phs.empty:
# fill missing max hours to config value and # fill missing max hours to params value and
# assume no natural inflow due to lack of data # assume no natural inflow due to lack of data
max_hours = config.get("PHS_max_hours", 6) max_hours = params.get("PHS_max_hours", 6)
phs = phs.replace({"max_hours": {0: max_hours}}) phs = phs.replace({"max_hours": {0: max_hours}})
n.madd( n.madd(
"StorageUnit", "StorageUnit",
@ -486,7 +560,7 @@ def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **con
) )
if "hydro" in carriers and not hydro.empty: if "hydro" in carriers and not hydro.empty:
hydro_max_hours = config.get("hydro_max_hours") hydro_max_hours = params.get("hydro_max_hours")
assert hydro_max_hours is not None, "No path for hydro capacities given." assert hydro_max_hours is not None, "No path for hydro capacities given."
@ -546,7 +620,8 @@ def attach_extendable_generators(n, costs, ppl, carriers):
logger.warning( logger.warning(
"The function `attach_extendable_generators` is deprecated in v0.5.0." "The function `attach_extendable_generators` is deprecated in v0.5.0."
) )
_add_missing_carriers_from_costs(n, costs, carriers) add_missing_carriers(n, carriers)
add_co2_emissions(n, costs, carriers)
for tech in carriers: for tech in carriers:
if tech.startswith("OCGT"): if tech.startswith("OCGT"):
@ -628,7 +703,7 @@ def attach_OPSD_renewables(n, tech_map):
buses = n.buses.loc[gens.bus.unique()] buses = n.buses.loc[gens.bus.unique()]
gens_per_bus = gens.groupby("bus").p_nom.count() gens_per_bus = gens.groupby("bus").p_nom.count()
caps = map_country_bus(df.query("Fueltype == @fueltype"), buses) caps = map_country_bus(df.query("Fueltype == @fueltype and lat == lat"), buses)
caps = caps.groupby(["bus"]).Capacity.sum() caps = caps.groupby(["bus"]).Capacity.sum()
caps = caps / gens_per_bus.reindex(caps.index, fill_value=1) caps = caps / gens_per_bus.reindex(caps.index, fill_value=1)
@ -636,16 +711,7 @@ def attach_OPSD_renewables(n, tech_map):
n.generators.p_nom_min.update(gens.bus.map(caps).dropna()) n.generators.p_nom_min.update(gens.bus.map(caps).dropna())
def estimate_renewable_capacities(n, config): def estimate_renewable_capacities(n, year, tech_map, expansion_limit, countries):
year = config["electricity"]["estimate_renewable_capacities"]["year"]
tech_map = config["electricity"]["estimate_renewable_capacities"][
"technology_mapping"
]
countries = config["countries"]
expansion_limit = config["electricity"]["estimate_renewable_capacities"][
"expansion_limit"
]
if not len(countries) or not len(tech_map): if not len(countries) or not len(tech_map):
return return
@ -686,21 +752,6 @@ def estimate_renewable_capacities(n, config):
) )
def add_nice_carrier_names(n, config):
carrier_i = n.carriers.index
nice_names = (
pd.Series(config["plotting"]["nice_names"])
.reindex(carrier_i)
.fillna(carrier_i.to_series().str.title())
)
n.carriers["nice_name"] = nice_names
colors = pd.Series(config["plotting"]["tech_colors"]).reindex(carrier_i)
if colors.isna().any():
missing_i = list(colors.index[colors.isna()])
logger.warning(f"tech_colors for carriers {missing_i} not defined in config.")
n.carriers["color"] = colors
def drop_leap_day(n): def drop_leap_day(n):
if not n.snapshots.is_leap_year.any(): if not n.snapshots.is_leap_year.any():
return return
@ -717,6 +768,8 @@ if __name__ == "__main__":
snakemake = mock_snakemake("add_electricity", weather_year="") snakemake = mock_snakemake("add_electricity", weather_year="")
configure_logging(snakemake) configure_logging(snakemake)
params = snakemake.params
n = pypsa.Network(snakemake.input.base_network) n = pypsa.Network(snakemake.input.base_network)
weather_year = snakemake.wildcards.weather_year weather_year = snakemake.wildcards.weather_year
@ -732,43 +785,26 @@ if __name__ == "__main__":
costs = load_costs( costs = load_costs(
snakemake.input.tech_costs, snakemake.input.tech_costs,
snakemake.config["costs"], params.costs,
snakemake.config["electricity"], params.electricity["max_hours"],
Nyears, Nyears,
) )
ppl = load_powerplants(snakemake.input.powerplants) ppl = load_powerplants(snakemake.input.powerplants)
if "renewable_carriers" in snakemake.config["electricity"]:
renewable_carriers = set(snakemake.config["electricity"]["renewable_carriers"])
else:
logger.warning(
"Missing key `renewable_carriers` under config entry `electricity`. "
"In future versions, this will raise an error. "
"Falling back to carriers listed under `renewable`."
)
renewable_carriers = snakemake.config["renewable"]
extendable_carriers = snakemake.config["electricity"]["extendable_carriers"]
if not (set(renewable_carriers) & set(extendable_carriers["Generator"])):
logger.warning(
"No renewables found in config entry `extendable_carriers`. "
"In future versions, these have to be explicitly listed. "
"Falling back to all renewables."
)
conventional_carriers = snakemake.config["electricity"]["conventional_carriers"]
attach_load( attach_load(
n, n,
snakemake.input.regions, snakemake.input.regions,
snakemake.input.load, snakemake.input.load,
snakemake.input.nuts3_shapes, snakemake.input.nuts3_shapes,
snakemake.config["countries"], params.countries,
snakemake.config["load"]["scaling_factor"], params.scaling_factor,
) )
update_transmission_costs(n, costs, snakemake.config["lines"]["length_factor"]) update_transmission_costs(n, costs, params.length_factor)
renewable_carriers = set(params.electricity["renewable_carriers"])
extendable_carriers = params.electricity["extendable_carriers"]
conventional_carriers = params.electricity["conventional_carriers"]
conventional_inputs = { conventional_inputs = {
k: v for k, v in snakemake.input.items() if k.startswith("conventional_") k: v for k, v in snakemake.input.items() if k.startswith("conventional_")
} }
@ -778,7 +814,7 @@ if __name__ == "__main__":
ppl, ppl,
conventional_carriers, conventional_carriers,
extendable_carriers, extendable_carriers,
snakemake.config.get("conventional", {}), params.conventional,
conventional_inputs, conventional_inputs,
) )
@ -788,71 +824,36 @@ if __name__ == "__main__":
snakemake.input, snakemake.input,
renewable_carriers, renewable_carriers,
extendable_carriers, extendable_carriers,
snakemake.config["lines"]["length_factor"], params.length_factor,
) )
if "hydro" in renewable_carriers: if "hydro" in renewable_carriers:
conf = snakemake.config["renewable"]["hydro"] para = params.renewable["hydro"]
attach_hydro( attach_hydro(
n, n,
costs, costs,
ppl, ppl,
snakemake.input.profile_hydro, snakemake.input.profile_hydro,
snakemake.input.hydro_capacities, snakemake.input.hydro_capacities,
conf.pop("carriers", []), para.pop("carriers", []),
**conf, **para,
) )
if "estimate_renewable_capacities" not in snakemake.config["electricity"]: estimate_renewable_caps = params.electricity["estimate_renewable_capacities"]
logger.warning(
"Missing key `estimate_renewable_capacities` under config entry `electricity`. "
"In future versions, this will raise an error. "
"Falling back to whether ``estimate_renewable_capacities_from_capacity_stats`` is in the config."
)
if (
"estimate_renewable_capacities_from_capacity_stats"
in snakemake.config["electricity"]
):
estimate_renewable_caps = {
"enable": True,
**snakemake.config["electricity"][
"estimate_renewable_capacities_from_capacity_stats"
],
}
else:
estimate_renewable_caps = {"enable": False}
else:
estimate_renewable_caps = snakemake.config["electricity"][
"estimate_renewable_capacities"
]
if "enable" not in estimate_renewable_caps:
logger.warning(
"Missing key `enable` under config entry `estimate_renewable_capacities`. "
"In future versions, this will raise an error. Falling back to False."
)
estimate_renewable_caps = {"enable": False}
if "from_opsd" not in estimate_renewable_caps:
logger.warning(
"Missing key `from_opsd` under config entry `estimate_renewable_capacities`. "
"In future versions, this will raise an error. "
"Falling back to whether `renewable_capacities_from_opsd` is non-empty."
)
from_opsd = bool(
snakemake.config["electricity"].get("renewable_capacities_from_opsd", False)
)
estimate_renewable_caps["from_opsd"] = from_opsd
if estimate_renewable_caps["enable"]: if estimate_renewable_caps["enable"]:
tech_map = estimate_renewable_caps["technology_mapping"]
expansion_limit = estimate_renewable_caps["expansion_limit"]
year = estimate_renewable_caps["year"]
if estimate_renewable_caps["from_opsd"]: if estimate_renewable_caps["from_opsd"]:
tech_map = snakemake.config["electricity"]["estimate_renewable_capacities"][
"technology_mapping"
]
attach_OPSD_renewables(n, tech_map) attach_OPSD_renewables(n, tech_map)
estimate_renewable_capacities(n, snakemake.config) estimate_renewable_capacities(
n, year, tech_map, expansion_limit, params.countries
)
update_p_nom_max(n) update_p_nom_max(n)
add_nice_carrier_names(n, snakemake.config) sanitize_carriers(n, snakemake.config)
if snakemake.config["enable"].get("drop_leap_days", True): if snakemake.config["enable"].get("drop_leap_days", True):
drop_leap_day(n) drop_leap_day(n)
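Throughout the __main__ block above, direct snakemake.config[...] lookups are replaced by rule-level snakemake.params attributes. A runnable sketch of the access pattern, using made-up values and a stand-in namespace instead of the real object Snakemake injects:

    from types import SimpleNamespace

    # Stand-in for the snakemake object available inside a script (values are
    # illustrative, not taken from the actual configuration).
    snakemake = SimpleNamespace(
        config={"load": {"scaling_factor": 1.0}, "countries": ["DE", "FR"]},
        params=SimpleNamespace(scaling_factor=1.0, countries=["DE", "FR"]),
    )

    # Before: the script reached into the full configuration dictionary.
    scaling_factor = snakemake.config["load"]["scaling_factor"]

    # After: the rule forwards only the values the script needs as params.
    scaling_factor = snakemake.params.scaling_factor
    countries = snakemake.params.countries
    print(scaling_factor, countries)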

View File

@ -21,7 +21,8 @@ import country_converter as coco
import numpy as np import numpy as np
import pypsa import pypsa
import xarray as xr import xarray as xr
from _helpers import override_component_attrs, update_config_with_sector_opts from _helpers import update_config_with_sector_opts
from add_electricity import sanitize_carriers
from prepare_sector_network import cluster_heat_buses, define_spatial, prepare_costs from prepare_sector_network import cluster_heat_buses, define_spatial, prepare_costs
cc = coco.CountryConverter() cc = coco.CountryConverter()
@ -128,10 +129,14 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas
"Oil": "oil", "Oil": "oil",
"OCGT": "OCGT", "OCGT": "OCGT",
"CCGT": "CCGT", "CCGT": "CCGT",
"Natural Gas": "gas",
"Bioenergy": "urban central solid biomass CHP", "Bioenergy": "urban central solid biomass CHP",
} }
# Replace Fueltype "Natural Gas" with the respective technology (OCGT or CCGT)
df_agg.loc[df_agg["Fueltype"] == "Natural Gas", "Fueltype"] = df_agg.loc[
df_agg["Fueltype"] == "Natural Gas", "Technology"
]
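The inserted block above relabels "Natural Gas" entries with their specific technology so they map onto the OCGT and CCGT carriers. A toy illustration of the same pandas assignment:

    import pandas as pd

    # Toy powerplant table: gas entries take over their technology label.
    df_agg = pd.DataFrame(
        {
            "Fueltype": ["Natural Gas", "Natural Gas", "Hard Coal"],
            "Technology": ["OCGT", "CCGT", "Steam Turbine"],
        }
    )

    gas_i = df_agg["Fueltype"] == "Natural Gas"
    df_agg.loc[gas_i, "Fueltype"] = df_agg.loc[gas_i, "Technology"]
    print(df_agg["Fueltype"].tolist())  # ['OCGT', 'CCGT', 'Hard Coal']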
fueltype_to_drop = [ fueltype_to_drop = [
"Hydro", "Hydro",
"Wind", "Wind",
@ -157,7 +162,7 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas
# Fill missing DateOut # Fill missing DateOut
dateout = ( dateout = (
df_agg.loc[biomass_i, "DateIn"] df_agg.loc[biomass_i, "DateIn"]
+ snakemake.config["costs"]["fill_values"]["lifetime"] + snakemake.params.costs["fill_values"]["lifetime"]
) )
df_agg.loc[biomass_i, "DateOut"] = df_agg.loc[biomass_i, "DateOut"].fillna(dateout) df_agg.loc[biomass_i, "DateOut"] = df_agg.loc[biomass_i, "DateOut"].fillna(dateout)
@ -218,7 +223,7 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas
capacity = df.loc[grouping_year, generator] capacity = df.loc[grouping_year, generator]
capacity = capacity[~capacity.isna()] capacity = capacity[~capacity.isna()]
capacity = capacity[ capacity = capacity[
capacity > snakemake.config["existing_capacities"]["threshold_capacity"] capacity > snakemake.params.existing_capacities["threshold_capacity"]
] ]
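The threshold filtering above now reads its limit from snakemake.params.existing_capacities. A reduced sketch of the two-step filter with an illustrative threshold:

    import pandas as pd

    # Drop missing values and capacities below the configured threshold.
    capacity = pd.Series({"DE0 0": 120.0, "FR0 0": 3.0, "IT0 0": float("nan")})
    threshold_capacity = 10.0  # illustrative value

    capacity = capacity[~capacity.isna()]
    capacity = capacity[capacity > threshold_capacity]
    print(capacity.to_dict())  # {'DE0 0': 120.0}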
suffix = "-ac" if generator == "offwind" else "" suffix = "-ac" if generator == "offwind" else ""
name_suffix = f" {generator}{suffix}-{grouping_year}" name_suffix = f" {generator}{suffix}-{grouping_year}"
@ -582,7 +587,7 @@ def add_heating_capacities_installed_before_baseyear(
) )
# delete links with capacities below threshold # delete links with capacities below threshold
threshold = snakemake.config["existing_capacities"]["threshold_capacity"] threshold = snakemake.params.existing_capacities["threshold_capacity"]
n.mremove( n.mremove(
"Link", "Link",
[ [
@ -601,25 +606,26 @@ if __name__ == "__main__":
snakemake = mock_snakemake( snakemake = mock_snakemake(
"add_existing_baseyear", "add_existing_baseyear",
weather_year="", weather_year="",
configfiles="config/test/config.myopic.yaml",
simpl="", simpl="",
clusters="45", clusters="5",
ll="v1.0", ll="v1.5",
opts="", opts="",
sector_opts="8760H-T-H-B-I-A-solar+p3-dist1", sector_opts="24H-T-H-B-I-A-solar+p3-dist1",
planning_horizons=2020, planning_horizons=2030,
) )
logging.basicConfig(level=snakemake.config["logging"]["level"]) logging.basicConfig(level=snakemake.config["logging"]["level"])
update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts) update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts)
options = snakemake.config["sector"] options = snakemake.params.sector
opts = snakemake.wildcards.sector_opts.split("-") opts = snakemake.wildcards.sector_opts.split("-")
baseyear = snakemake.config["scenario"]["planning_horizons"][0] baseyear = snakemake.params.baseyear
n = pypsa.Network(snakemake.input.network)
overrides = override_component_attrs(snakemake.input.overrides)
n = pypsa.Network(snakemake.input.network, override_component_attrs=overrides)
# define spatial resolution of carriers # define spatial resolution of carriers
spatial = define_spatial(n.buses[n.buses.carrier == "AC"].index, options) spatial = define_spatial(n.buses[n.buses.carrier == "AC"].index, options)
add_build_year_to_new_assets(n, baseyear) add_build_year_to_new_assets(n, baseyear)
@ -627,14 +633,12 @@ if __name__ == "__main__":
Nyears = n.snapshot_weightings.generators.sum() / 8760.0 Nyears = n.snapshot_weightings.generators.sum() / 8760.0
costs = prepare_costs( costs = prepare_costs(
snakemake.input.costs, snakemake.input.costs,
snakemake.config["costs"], snakemake.params.costs,
Nyears, Nyears,
) )
grouping_years_power = snakemake.config["existing_capacities"][ grouping_years_power = snakemake.params.existing_capacities["grouping_years_power"]
"grouping_years_power" grouping_years_heat = snakemake.params.existing_capacities["grouping_years_heat"]
]
grouping_years_heat = snakemake.config["existing_capacities"]["grouping_years_heat"]
add_power_capacities_installed_before_baseyear( add_power_capacities_installed_before_baseyear(
n, grouping_years_power, costs, baseyear n, grouping_years_power, costs, baseyear
) )
@ -651,7 +655,7 @@ if __name__ == "__main__":
.to_pandas() .to_pandas()
.reindex(index=n.snapshots) .reindex(index=n.snapshots)
) )
default_lifetime = snakemake.config["costs"]["fill_values"]["lifetime"] default_lifetime = snakemake.params.costs["fill_values"]["lifetime"]
add_heating_capacities_installed_before_baseyear( add_heating_capacities_installed_before_baseyear(
n, n,
baseyear, baseyear,
@ -668,4 +672,6 @@ if __name__ == "__main__":
n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards))) n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
sanitize_carriers(n, snakemake.config)
n.export_to_netcdf(snakemake.output[0]) n.export_to_netcdf(snakemake.output[0])

View File

@ -56,22 +56,17 @@ import numpy as np
import pandas as pd import pandas as pd
import pypsa import pypsa
from _helpers import configure_logging from _helpers import configure_logging
from add_electricity import ( from add_electricity import load_costs, sanitize_carriers
_add_missing_carriers_from_costs,
add_nice_carrier_names,
load_costs,
)
idx = pd.IndexSlice idx = pd.IndexSlice
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
def attach_storageunits(n, costs, elec_opts): def attach_storageunits(n, costs, extendable_carriers, max_hours):
carriers = elec_opts["extendable_carriers"]["StorageUnit"] carriers = extendable_carriers["StorageUnit"]
max_hours = elec_opts["max_hours"]
_add_missing_carriers_from_costs(n, costs, carriers) n.madd("Carrier", carriers)
buses_i = n.buses.index buses_i = n.buses.index
@ -99,10 +94,10 @@ def attach_storageunits(n, costs, elec_opts):
) )
def attach_stores(n, costs, elec_opts): def attach_stores(n, costs, extendable_carriers):
carriers = elec_opts["extendable_carriers"]["Store"] carriers = extendable_carriers["Store"]
_add_missing_carriers_from_costs(n, costs, carriers) n.madd("Carrier", carriers)
buses_i = n.buses.index buses_i = n.buses.index
bus_sub_dict = {k: n.buses[k].values for k in ["x", "y", "country"]} bus_sub_dict = {k: n.buses[k].values for k in ["x", "y", "country"]}
@ -162,6 +157,8 @@ def attach_stores(n, costs, elec_opts):
marginal_cost=costs.at["battery", "marginal_cost"], marginal_cost=costs.at["battery", "marginal_cost"],
) )
n.madd("Carrier", ["battery charger", "battery discharger"])
n.madd( n.madd(
"Link", "Link",
b_buses_i + " charger", b_buses_i + " charger",
@ -187,11 +184,10 @@ def attach_stores(n, costs, elec_opts):
) )
def attach_hydrogen_pipelines(n, costs, elec_opts): def attach_hydrogen_pipelines(n, costs, extendable_carriers):
ext_carriers = elec_opts["extendable_carriers"] as_stores = extendable_carriers.get("Store", [])
as_stores = ext_carriers.get("Store", [])
if "H2 pipeline" not in ext_carriers.get("Link", []): if "H2 pipeline" not in extendable_carriers.get("Link", []):
return return
assert "H2" in as_stores, ( assert "H2" in as_stores, (
@ -213,6 +209,8 @@ def attach_hydrogen_pipelines(n, costs, elec_opts):
h2_links.index = h2_links.apply(lambda c: f"H2 pipeline {c.bus0}-{c.bus1}", axis=1) h2_links.index = h2_links.apply(lambda c: f"H2 pipeline {c.bus0}-{c.bus1}", axis=1)
# add pipelines # add pipelines
n.add("Carrier", "H2 pipeline")
n.madd( n.madd(
"Link", "Link",
h2_links.index, h2_links.index,
@ -237,18 +235,19 @@ if __name__ == "__main__":
configure_logging(snakemake) configure_logging(snakemake)
n = pypsa.Network(snakemake.input.network) n = pypsa.Network(snakemake.input.network)
elec_config = snakemake.config["electricity"] extendable_carriers = snakemake.params.extendable_carriers
max_hours = snakemake.params.max_hours
Nyears = n.snapshot_weightings.objective.sum() / 8760.0 Nyears = n.snapshot_weightings.objective.sum() / 8760.0
costs = load_costs( costs = load_costs(
snakemake.input.tech_costs, snakemake.config["costs"], elec_config, Nyears snakemake.input.tech_costs, snakemake.params.costs, max_hours, Nyears
) )
attach_storageunits(n, costs, elec_config) attach_storageunits(n, costs, extendable_carriers, max_hours)
attach_stores(n, costs, elec_config) attach_stores(n, costs, extendable_carriers)
attach_hydrogen_pipelines(n, costs, elec_config) attach_hydrogen_pipelines(n, costs, extendable_carriers)
add_nice_carrier_names(n, snakemake.config) sanitize_carriers(n, snakemake.config)
n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards))) n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
n.export_to_netcdf(snakemake.output[0]) n.export_to_netcdf(snakemake.output[0])
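In attach_storageunits, attach_stores and attach_hydrogen_pipelines above, the old _add_missing_carriers_from_costs helper is replaced by explicit carrier registration on the network. A minimal PyPSA sketch of the pattern (carrier names are illustrative):

    import pypsa

    n = pypsa.Network()
    n.madd("Carrier", ["battery", "H2"])   # register several carriers at once
    n.add("Carrier", "H2 pipeline")        # register a single carrier
    print(n.carriers.index.tolist())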

View File

@ -712,6 +712,7 @@ def base_network(
n.name = "PyPSA-Eur" n.name = "PyPSA-Eur"
n.set_snapshots(pd.date_range(freq="h", **config["snapshots"])) n.set_snapshots(pd.date_range(freq="h", **config["snapshots"]))
n.madd("Carrier", ["AC", "DC"])
n.import_components_from_dataframe(buses, "Bus") n.import_components_from_dataframe(buses, "Bus")
n.import_components_from_dataframe(lines, "Line") n.import_components_from_dataframe(lines, "Line")

View File

@ -30,7 +30,7 @@ if __name__ == "__main__":
ammonia.index = cc.convert(ammonia.index, to="iso2") ammonia.index = cc.convert(ammonia.index, to="iso2")
years = [str(i) for i in range(2013, 2018)] years = [str(i) for i in range(2013, 2018)]
countries = ammonia.index.intersection(snakemake.config["countries"]) countries = ammonia.index.intersection(snakemake.params.countries)
ammonia = ammonia.loc[countries, years].astype(float) ammonia = ammonia.loc[countries, years].astype(float)
# convert from ktonN to ktonNH3 # convert from ktonN to ktonNH3

View File

@ -212,9 +212,9 @@ if __name__ == "__main__":
"build_biomass_potentials", weather_year="", simpl="", clusters="5" "build_biomass_potentials", weather_year="", simpl="", clusters="5"
) )
config = snakemake.config["biomass"] params = snakemake.params.biomass
year = config["year"] year = params["year"]
scenario = config["scenario"] scenario = params["scenario"]
enspreso = enspreso_biomass_potentials(year, scenario) enspreso = enspreso_biomass_potentials(year, scenario)
@ -230,7 +230,7 @@ if __name__ == "__main__":
df.to_csv(snakemake.output.biomass_potentials_all) df.to_csv(snakemake.output.biomass_potentials_all)
grouper = {v: k for k, vv in config["classes"].items() for v in vv} grouper = {v: k for k, vv in params["classes"].items() for v in vv}
df = df.groupby(grouper, axis=1).sum() df = df.groupby(grouper, axis=1).sum()
df *= 1e6 # TWh/a to MWh/a df *= 1e6 # TWh/a to MWh/a

View File

@ -116,7 +116,7 @@ if __name__ == "__main__":
snakemake = mock_snakemake("build_bus_regions") snakemake = mock_snakemake("build_bus_regions")
configure_logging(snakemake) configure_logging(snakemake)
countries = snakemake.config["countries"] countries = snakemake.params.countries
n = pypsa.Network(snakemake.input.base_network) n = pypsa.Network(snakemake.input.base_network)

View File

@ -40,7 +40,7 @@ if __name__ == "__main__":
for source in ["air", "soil"]: for source in ["air", "soil"]:
source_T = xr.open_dataarray(snakemake.input[f"temp_{source}_{area}"]) source_T = xr.open_dataarray(snakemake.input[f"temp_{source}_{area}"])
delta_T = snakemake.config["sector"]["heat_pump_sink_T"] - source_T delta_T = snakemake.params.heat_pump_sink_T - source_T
cop = coefficient_of_performance(delta_T, source) cop = coefficient_of_performance(delta_T, source)

View File

@ -106,14 +106,14 @@ if __name__ == "__main__":
snakemake = mock_snakemake("build_cutout", cutout="europe-2013-era5") snakemake = mock_snakemake("build_cutout", cutout="europe-2013-era5")
configure_logging(snakemake) configure_logging(snakemake)
cutout_params = snakemake.config["atlite"]["cutouts"][snakemake.wildcards.cutout] cutout_params = snakemake.params.cutouts[snakemake.wildcards.cutout]
if hasattr(snakemake.wildcards, "weather_year"): if hasattr(snakemake.wildcards, "weather_year"):
time = snakemake.wildcards.weather_year time = snakemake.wildcards.weather_year
cutout_params["time"] = [time, time] cutout_params["time"] = [time, time]
if "time" not in cutout_params: if "time" not in cutout_params:
snapshots = pd.date_range(freq="h", **snakemake.config["snapshots"]) snapshots = pd.date_range(freq="h", **snakemake.params.snapshots)
cutout_params["time"] = [snapshots[0], snapshots[-1]] cutout_params["time"] = [snapshots[0], snapshots[-1]]
cutout_params["time"] = slice(*cutout_params["time"]) cutout_params["time"] = slice(*cutout_params["time"])

View File

@ -285,7 +285,7 @@ if __name__ == "__main__":
start=weather_year, end=str(int(weather_year) + 1), inclusive="left" start=weather_year, end=str(int(weather_year) + 1), inclusive="left"
) )
else: else:
snapshots = snakemake.config["snapshots"] snapshots = snakemake.params.snapshots
snapshots = pd.date_range(freq="h", **snapshots) snapshots = pd.date_range(freq="h", **snapshots)
fixed_year = snakemake.config["load"].get("fixed_year", False) fixed_year = snakemake.config["load"].get("fixed_year", False)
@ -295,16 +295,17 @@ if __name__ == "__main__":
else slice(snapshots[0], snapshots[-1]) else slice(snapshots[0], snapshots[-1])
) )
powerstatistics = snakemake.config["load"]["power_statistics"] powerstatistics = snakemake.params.load["power_statistics"]
interpolate_limit = snakemake.config["load"]["interpolate_limit"] interpolate_limit = snakemake.params.load["interpolate_limit"]
countries = snakemake.config["countries"] countries = snakemake.params.countries
snapshots = pd.date_range(freq="h", **snakemake.config["snapshots"]) snapshots = pd.date_range(freq="h", **snakemake.params.snapshots)
years = slice(snapshots[0], snapshots[-1]) years = slice(snapshots[0], snapshots[-1])
time_shift = snakemake.config["load"]["time_shift_for_large_gaps"] time_shift = snakemake.params.load["time_shift_for_large_gaps"]
load = load_timeseries(snakemake.input[0], years, countries, powerstatistics) load = load_timeseries(snakemake.input[0], years, countries, powerstatistics)
if snakemake.config["load"]["manual_adjustments"]: if snakemake.params.load["manual_adjustments"]:
load = manual_adjustment(load, snakemake.input[0], powerstatistics) load = manual_adjustment(load, snakemake.input[0], powerstatistics)
logger.info(f"Linearly interpolate gaps of size {interpolate_limit} and less.") logger.info(f"Linearly interpolate gaps of size {interpolate_limit} and less.")

View File

@ -736,23 +736,26 @@ if __name__ == "__main__":
logging.basicConfig(level=snakemake.config["logging"]["level"]) logging.basicConfig(level=snakemake.config["logging"]["level"])
config = snakemake.config["energy"] params = snakemake.params.energy
data_year = int(config["energy_totals_year"])
nuts3 = gpd.read_file(snakemake.input.nuts3_shapes).set_index("index") nuts3 = gpd.read_file(snakemake.input.nuts3_shapes).set_index("index")
population = nuts3["pop"].groupby(nuts3.country).sum() population = nuts3["pop"].groupby(nuts3.country).sum()
countries = snakemake.config["countries"] countries = snakemake.params.countries
idees_countries = pd.Index(countries).intersection(eu28) idees_countries = pd.Index(countries).intersection(eu28)
eurostat = build_eurostat(countries.difference(['CH'])) data_year = params["energy_totals_year"]
swiss = build_swiss() report_year = snakemake.params.energy["eurostat_report_year"]
idees = build_idees(idees_countries) input_eurostat = snakemake.input.eurostat
eurostat = build_eurostat(input_eurostat, countries, report_year, data_year)
swiss = build_swiss(data_year)
idees = build_idees(idees_countries, data_year)
energy = build_energy_totals(countries, eurostat, swiss, idees) energy = build_energy_totals(countries, eurostat, swiss, idees)
energy.to_csv(snakemake.output.energy_name) energy.to_csv(snakemake.output.energy_name)
base_year_emissions = config["base_emissions_year"] base_year_emissions = params["base_emissions_year"]
emissions_scope = snakemake.params.energy["emissions"]
eea_co2 = build_eea_co2(snakemake.input.co2, base_year_emissions, emissions_scope) eea_co2 = build_eea_co2(snakemake.input.co2, base_year_emissions, emissions_scope)
eurostat_co2 = build_eurostat_co2(countries, eurostat, base_year_emissions) eurostat_co2 = build_eurostat_co2(countries, eurostat, base_year_emissions)

View File

@ -35,7 +35,7 @@ if __name__ == "__main__":
snapshots = dict(start=year, end=str(int(year) + 1), inclusive="left") snapshots = dict(start=year, end=str(int(year) + 1), inclusive="left")
cutout_name = cutout_name.format(weather_year=year) cutout_name = cutout_name.format(weather_year=year)
else: else:
snapshots = snakemake.config["snapshots"] snapshots = snakemake.params.snapshots
drop_leap_day = snakemake.config["atlite"].get("drop_leap_day", False) drop_leap_day = snakemake.config["atlite"].get("drop_leap_day", False)
time = pd.date_range(freq="h", **snapshots) time = pd.date_range(freq="h", **snapshots)

View File

@ -169,10 +169,10 @@ if __name__ == "__main__":
snakemake = mock_snakemake("build_hydro_profile", weather_year="") snakemake = mock_snakemake("build_hydro_profile", weather_year="")
configure_logging(snakemake) configure_logging(snakemake)
config_hydro = snakemake.config["renewable"]["hydro"] params_hydro = snakemake.params.hydro
cutout = atlite.Cutout(snakemake.input.cutout) cutout = atlite.Cutout(snakemake.input.cutout)
countries = snakemake.config["countries"] countries = snakemake.params.countries
country_shapes = ( country_shapes = (
gpd.read_file(snakemake.input.country_shapes) gpd.read_file(snakemake.input.country_shapes)
.set_index("name")["geometry"] .set_index("name")["geometry"]
@ -207,7 +207,7 @@ if __name__ == "__main__":
normalize_using_yearly=eia_stats, normalize_using_yearly=eia_stats,
) )
if "clip_min_inflow" in config_hydro: if "clip_min_inflow" in params_hydro:
inflow = inflow.where(inflow > config_hydro["clip_min_inflow"], 0) inflow = inflow.where(inflow > params_hydro["clip_min_inflow"], 0)
inflow.to_netcdf(snakemake.output.profile) inflow.to_netcdf(snakemake.output.profile)

View File

@ -73,7 +73,7 @@ def prepare_hotmaps_database(regions):
df[["srid", "coordinates"]] = df.geom.str.split(";", expand=True) df[["srid", "coordinates"]] = df.geom.str.split(";", expand=True)
if snakemake.config["industry"].get("hotmaps_locate_missing", False): if snakemake.params.hotmaps_locate_missing:
df = locate_missing_industrial_sites(df) df = locate_missing_industrial_sites(df)
# remove those sites without valid locations # remove those sites without valid locations
@ -144,7 +144,7 @@ if __name__ == "__main__":
logging.basicConfig(level=snakemake.config["logging"]["level"]) logging.basicConfig(level=snakemake.config["logging"]["level"])
countries = snakemake.config["countries"] countries = snakemake.params.countries
regions = gpd.read_file(snakemake.input.regions_onshore).set_index("name") regions = gpd.read_file(snakemake.input.regions_onshore).set_index("name")

View File

@ -101,8 +101,8 @@ def add_ammonia_energy_demand(demand):
def get_ammonia_by_fuel(x): def get_ammonia_by_fuel(x):
fuels = { fuels = {
"gas": config["MWh_CH4_per_tNH3_SMR"], "gas": params["MWh_CH4_per_tNH3_SMR"],
"electricity": config["MWh_elec_per_tNH3_SMR"], "electricity": params["MWh_elec_per_tNH3_SMR"],
} }
return pd.Series({k: x * v for k, v in fuels.items()}) return pd.Series({k: x * v for k, v in fuels.items()})
@ -112,7 +112,7 @@ def add_ammonia_energy_demand(demand):
index=demand.index, fill_value=0.0 index=demand.index, fill_value=0.0
) )
ammonia = pd.DataFrame({"ammonia": ammonia * config["MWh_NH3_per_tNH3"]}).T ammonia = pd.DataFrame({"ammonia": ammonia * params["MWh_NH3_per_tNH3"]}).T
demand["Ammonia"] = ammonia.unstack().reindex(index=demand.index, fill_value=0.0) demand["Ammonia"] = ammonia.unstack().reindex(index=demand.index, fill_value=0.0)
@ -178,9 +178,9 @@ if __name__ == "__main__":
snakemake = mock_snakemake("build_industrial_energy_demand_per_country_today") snakemake = mock_snakemake("build_industrial_energy_demand_per_country_today")
config = snakemake.config["industry"] params = snakemake.params.industry
year = config.get("reference_year", 2015) year = params.get("reference_year", 2015)
countries = pd.Index(snakemake.config["countries"]) countries = pd.Index(snakemake.params.countries)
demand = industrial_energy_demand(countries.intersection(eu28), year) demand = industrial_energy_demand(countries.intersection(eu28), year)

View File

@ -264,9 +264,9 @@ def separate_basic_chemicals(demand, year):
# assume HVC, methanol, chlorine production proportional to non-ammonia basic chemicals # assume HVC, methanol, chlorine production proportional to non-ammonia basic chemicals
distribution_key = demand["Basic chemicals"] / demand["Basic chemicals"].sum() distribution_key = demand["Basic chemicals"] / demand["Basic chemicals"].sum()
demand["HVC"] = config["HVC_production_today"] * 1e3 * distribution_key demand["HVC"] = params["HVC_production_today"] * 1e3 * distribution_key
demand["Chlorine"] = config["chlorine_production_today"] * 1e3 * distribution_key demand["Chlorine"] = params["chlorine_production_today"] * 1e3 * distribution_key
demand["Methanol"] = config["methanol_production_today"] * 1e3 * distribution_key demand["Methanol"] = params["methanol_production_today"] * 1e3 * distribution_key
demand.drop(columns=["Basic chemicals"], inplace=True) demand.drop(columns=["Basic chemicals"], inplace=True)
@ -279,11 +279,11 @@ if __name__ == "__main__":
logging.basicConfig(level=snakemake.config["logging"]["level"]) logging.basicConfig(level=snakemake.config["logging"]["level"])
countries = snakemake.config["countries"] countries = snakemake.params.countries
year = snakemake.config["industry"]["reference_year"] year = snakemake.params.industry["reference_year"]
config = snakemake.config["industry"] params = snakemake.params.industry
jrc_dir = snakemake.input.jrc jrc_dir = snakemake.input.jrc
eurostat_dir = snakemake.input.eurostat eurostat_dir = snakemake.input.eurostat

View File

@ -15,7 +15,7 @@ if __name__ == "__main__":
snakemake = mock_snakemake("build_industrial_production_per_country_tomorrow") snakemake = mock_snakemake("build_industrial_production_per_country_tomorrow")
config = snakemake.config["industry"] params = snakemake.params.industry
investment_year = int(snakemake.wildcards.planning_horizons) investment_year = int(snakemake.wildcards.planning_horizons)
@ -25,8 +25,8 @@ if __name__ == "__main__":
keys = ["Integrated steelworks", "Electric arc"] keys = ["Integrated steelworks", "Electric arc"]
total_steel = production[keys].sum(axis=1) total_steel = production[keys].sum(axis=1)
st_primary_fraction = get(config["St_primary_fraction"], investment_year) st_primary_fraction = get(params["St_primary_fraction"], investment_year)
dri_fraction = get(config["DRI_fraction"], investment_year) dri_fraction = get(params["DRI_fraction"], investment_year)
int_steel = production["Integrated steelworks"].sum() int_steel = production["Integrated steelworks"].sum()
fraction_persistent_primary = st_primary_fraction * total_steel.sum() / int_steel fraction_persistent_primary = st_primary_fraction * total_steel.sum() / int_steel
@ -51,7 +51,7 @@ if __name__ == "__main__":
key_pri = "Aluminium - primary production" key_pri = "Aluminium - primary production"
key_sec = "Aluminium - secondary production" key_sec = "Aluminium - secondary production"
al_primary_fraction = get(config["Al_primary_fraction"], investment_year) al_primary_fraction = get(params["Al_primary_fraction"], investment_year)
fraction_persistent_primary = ( fraction_persistent_primary = (
al_primary_fraction * total_aluminium.sum() / production[key_pri].sum() al_primary_fraction * total_aluminium.sum() / production[key_pri].sum()
) )
@ -60,15 +60,15 @@ if __name__ == "__main__":
production[key_sec] = total_aluminium - production[key_pri] production[key_sec] = total_aluminium - production[key_pri]
production["HVC (mechanical recycling)"] = ( production["HVC (mechanical recycling)"] = (
get(config["HVC_mechanical_recycling_fraction"], investment_year) get(params["HVC_mechanical_recycling_fraction"], investment_year)
* production["HVC"] * production["HVC"]
) )
production["HVC (chemical recycling)"] = ( production["HVC (chemical recycling)"] = (
get(config["HVC_chemical_recycling_fraction"], investment_year) get(params["HVC_chemical_recycling_fraction"], investment_year)
* production["HVC"] * production["HVC"]
) )
production["HVC"] *= get(config["HVC_primary_fraction"], investment_year) production["HVC"] *= get(params["HVC_primary_fraction"], investment_year)
fn = snakemake.output.industrial_production_per_country_tomorrow fn = snakemake.output.industrial_production_per_country_tomorrow
production.to_csv(fn, float_format="%.2f") production.to_csv(fn, float_format="%.2f")

View File

@ -185,10 +185,10 @@ def iron_and_steel():
df[sector] = df["Electric arc"] df[sector] = df["Electric arc"]
# add H2 consumption for DRI at 1.7 MWh H2 /ton steel # add H2 consumption for DRI at 1.7 MWh H2 /ton steel
df.at["hydrogen", sector] = config["H2_DRI"] df.at["hydrogen", sector] = params["H2_DRI"]
# add electricity consumption in DRI shaft (0.322 MWh/tSl) # add electricity consumption in DRI shaft (0.322 MWh/tSl)
df.at["elec", sector] += config["elec_DRI"] df.at["elec", sector] += params["elec_DRI"]
## Integrated steelworks ## Integrated steelworks
# could be used in combination with CCS) # could be used in combination with CCS)
@ -383,19 +383,19 @@ def chemicals_industry():
assert s_emi.index[0] == sector assert s_emi.index[0] == sector
# convert from MtHVC/a to ktHVC/a # convert from MtHVC/a to ktHVC/a
s_out = config["HVC_production_today"] * 1e3 s_out = params["HVC_production_today"] * 1e3
# tCO2/t material # tCO2/t material
df.loc["process emission", sector] += ( df.loc["process emission", sector] += (
s_emi["Process emissions"] s_emi["Process emissions"]
- config["petrochemical_process_emissions"] * 1e3 - params["petrochemical_process_emissions"] * 1e3
- config["NH3_process_emissions"] * 1e3 - params["NH3_process_emissions"] * 1e3
) / s_out ) / s_out
# emissions originating from feedstock, could be non-fossil origin # emissions originating from feedstock, could be non-fossil origin
# tCO2/t material # tCO2/t material
df.loc["process emission from feedstock", sector] += ( df.loc["process emission from feedstock", sector] += (
config["petrochemical_process_emissions"] * 1e3 params["petrochemical_process_emissions"] * 1e3
) / s_out ) / s_out
# convert from ktoe/a to GWh/a # convert from ktoe/a to GWh/a
@ -405,18 +405,18 @@ def chemicals_industry():
# subtract ammonia energy demand (in ktNH3/a) # subtract ammonia energy demand (in ktNH3/a)
ammonia = pd.read_csv(snakemake.input.ammonia_production, index_col=0) ammonia = pd.read_csv(snakemake.input.ammonia_production, index_col=0)
ammonia_total = ammonia.loc[ammonia.index.intersection(eu28), str(year)].sum() ammonia_total = ammonia.loc[ammonia.index.intersection(eu28), str(year)].sum()
df.loc["methane", sector] -= ammonia_total * config["MWh_CH4_per_tNH3_SMR"] df.loc["methane", sector] -= ammonia_total * params["MWh_CH4_per_tNH3_SMR"]
df.loc["elec", sector] -= ammonia_total * config["MWh_elec_per_tNH3_SMR"] df.loc["elec", sector] -= ammonia_total * params["MWh_elec_per_tNH3_SMR"]
# subtract chlorine demand # subtract chlorine demand
chlorine_total = config["chlorine_production_today"] chlorine_total = params["chlorine_production_today"]
df.loc["hydrogen", sector] -= chlorine_total * config["MWh_H2_per_tCl"] df.loc["hydrogen", sector] -= chlorine_total * params["MWh_H2_per_tCl"]
df.loc["elec", sector] -= chlorine_total * config["MWh_elec_per_tCl"] df.loc["elec", sector] -= chlorine_total * params["MWh_elec_per_tCl"]
# subtract methanol demand # subtract methanol demand
methanol_total = config["methanol_production_today"] methanol_total = params["methanol_production_today"]
df.loc["methane", sector] -= methanol_total * config["MWh_CH4_per_tMeOH"] df.loc["methane", sector] -= methanol_total * params["MWh_CH4_per_tMeOH"]
df.loc["elec", sector] -= methanol_total * config["MWh_elec_per_tMeOH"] df.loc["elec", sector] -= methanol_total * params["MWh_elec_per_tMeOH"]
# MWh/t material # MWh/t material
df.loc[sources, sector] = df.loc[sources, sector] / s_out df.loc[sources, sector] = df.loc[sources, sector] / s_out
@ -427,37 +427,37 @@ def chemicals_industry():
sector = "HVC (mechanical recycling)" sector = "HVC (mechanical recycling)"
df[sector] = 0.0 df[sector] = 0.0
df.loc["elec", sector] = config["MWh_elec_per_tHVC_mechanical_recycling"] df.loc["elec", sector] = params["MWh_elec_per_tHVC_mechanical_recycling"]
# HVC chemical recycling # HVC chemical recycling
sector = "HVC (chemical recycling)" sector = "HVC (chemical recycling)"
df[sector] = 0.0 df[sector] = 0.0
df.loc["elec", sector] = config["MWh_elec_per_tHVC_chemical_recycling"] df.loc["elec", sector] = params["MWh_elec_per_tHVC_chemical_recycling"]
# Ammonia # Ammonia
sector = "Ammonia" sector = "Ammonia"
df[sector] = 0.0 df[sector] = 0.0
if snakemake.config["sector"].get("ammonia", False): if snakemake.params.ammonia:
df.loc["ammonia", sector] = config["MWh_NH3_per_tNH3"] df.loc["ammonia", sector] = params["MWh_NH3_per_tNH3"]
else: else:
df.loc["hydrogen", sector] = config["MWh_H2_per_tNH3_electrolysis"] df.loc["hydrogen", sector] = params["MWh_H2_per_tNH3_electrolysis"]
df.loc["elec", sector] = config["MWh_elec_per_tNH3_electrolysis"] df.loc["elec", sector] = params["MWh_elec_per_tNH3_electrolysis"]
# Chlorine # Chlorine
sector = "Chlorine" sector = "Chlorine"
df[sector] = 0.0 df[sector] = 0.0
df.loc["hydrogen", sector] = config["MWh_H2_per_tCl"] df.loc["hydrogen", sector] = params["MWh_H2_per_tCl"]
df.loc["elec", sector] = config["MWh_elec_per_tCl"] df.loc["elec", sector] = params["MWh_elec_per_tCl"]
# Methanol # Methanol
sector = "Methanol" sector = "Methanol"
df[sector] = 0.0 df[sector] = 0.0
df.loc["methane", sector] = config["MWh_CH4_per_tMeOH"] df.loc["methane", sector] = params["MWh_CH4_per_tMeOH"]
df.loc["elec", sector] = config["MWh_elec_per_tMeOH"] df.loc["elec", sector] = params["MWh_elec_per_tMeOH"]
# Other chemicals # Other chemicals
@ -1465,10 +1465,10 @@ if __name__ == "__main__":
snakemake = mock_snakemake("build_industry_sector_ratios") snakemake = mock_snakemake("build_industry_sector_ratios")
# TODO make config option # TODO make params option
year = 2015 year = 2015
config = snakemake.config["industry"] params = snakemake.params.industry
df = pd.concat( df = pd.concat(
[ [

View File

@ -98,13 +98,15 @@ def add_custom_powerplants(ppl, custom_powerplants, custom_ppl_query=False):
def replace_natural_gas_technology(df): def replace_natural_gas_technology(df):
mapping = {"Steam Turbine": "OCGT", "Combustion Engine": "OCGT"} mapping = {"Steam Turbine": "CCGT", "Combustion Engine": "OCGT"}
tech = df.Technology.replace(mapping).fillna("OCGT") tech = df.Technology.replace(mapping).fillna("CCGT")
return df.Technology.where(df.Fueltype != "Natural Gas", tech) return df.Technology.mask(df.Fueltype == "Natural Gas", tech)
def replace_natural_gas_fueltype(df): def replace_natural_gas_fueltype(df):
return df.Fueltype.where(df.Fueltype != "Natural Gas", df.Technology) return df.Fueltype.mask(
(df.Technology == "OCGT") | (df.Technology == "CCGT"), "Natural Gas"
)
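The reworked helpers above default natural-gas plants without a specific technology to CCGT (previously OCGT) and use .mask so the relabelling applies only to natural-gas rows. A toy example of the technology step:

    import pandas as pd

    df = pd.DataFrame(
        {
            "Fueltype": ["Natural Gas", "Natural Gas", "Hard Coal"],
            "Technology": ["Steam Turbine", None, "Steam Turbine"],
        }
    )

    mapping = {"Steam Turbine": "CCGT", "Combustion Engine": "OCGT"}
    tech = df.Technology.replace(mapping).fillna("CCGT")
    # Only rows whose Fueltype is "Natural Gas" are overwritten.
    df["Technology"] = df.Technology.mask(df.Fueltype == "Natural Gas", tech)
    print(df.Technology.tolist())  # ['CCGT', 'CCGT', 'Steam Turbine']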
if __name__ == "__main__": if __name__ == "__main__":
@ -115,7 +117,7 @@ if __name__ == "__main__":
configure_logging(snakemake) configure_logging(snakemake)
n = pypsa.Network(snakemake.input.base_network) n = pypsa.Network(snakemake.input.base_network)
countries = snakemake.config["countries"] countries = snakemake.params.countries
ppl = ( ppl = (
pm.powerplants(from_url=True) pm.powerplants(from_url=True)
@ -134,12 +136,12 @@ if __name__ == "__main__":
ppl = ppl.query('not (Country in @available_countries and Fueltype == "Bioenergy")') ppl = ppl.query('not (Country in @available_countries and Fueltype == "Bioenergy")')
ppl = pd.concat([ppl, opsd]) ppl = pd.concat([ppl, opsd])
ppl_query = snakemake.config["electricity"]["powerplants_filter"] ppl_query = snakemake.params.powerplants_filter
if isinstance(ppl_query, str): if isinstance(ppl_query, str):
ppl.query(ppl_query, inplace=True) ppl.query(ppl_query, inplace=True)
# add carriers from own powerplant files: # add carriers from own powerplant files:
custom_ppl_query = snakemake.config["electricity"]["custom_powerplants"] custom_ppl_query = snakemake.params.custom_powerplants
ppl = add_custom_powerplants( ppl = add_custom_powerplants(
ppl, snakemake.input.custom_powerplants, custom_ppl_query ppl, snakemake.input.custom_powerplants, custom_ppl_query
) )
@ -149,6 +151,7 @@ if __name__ == "__main__":
logging.warning(f"No powerplants known in: {', '.join(countries_wo_ppl)}") logging.warning(f"No powerplants known in: {', '.join(countries_wo_ppl)}")
substations = n.buses.query("substation_lv") substations = n.buses.query("substation_lv")
ppl = ppl.dropna(subset=["lat", "lon"])
ppl = map_country_bus(ppl, substations) ppl = map_country_bus(ppl, substations)
bus_null_b = ppl["bus"].isnull() bus_null_b = ppl["bus"].isnull()

View File

@ -64,7 +64,7 @@ Inputs
- ``resources/offshore_shapes.geojson``: confer :ref:`shapes` - ``resources/offshore_shapes.geojson``: confer :ref:`shapes`
- ``resources/regions_onshore.geojson``: (if not offshore wind), confer :ref:`busregions` - ``resources/regions_onshore.geojson``: (if not offshore wind), confer :ref:`busregions`
- ``resources/regions_offshore.geojson``: (if offshore wind), :ref:`busregions` - ``resources/regions_offshore.geojson``: (if offshore wind), :ref:`busregions`
- ``"cutouts/" + config["renewable"][{technology}]['cutout']``: :ref:`cutout` - ``"cutouts/" + params["renewable"][{technology}]['cutout']``: :ref:`cutout`
- ``networks/base.nc``: :ref:`base` - ``networks/base.nc``: :ref:`base`
Outputs Outputs
@ -188,7 +188,7 @@ import geopandas as gpd
import numpy as np import numpy as np
import xarray as xr import xarray as xr
from _helpers import configure_logging from _helpers import configure_logging
from dask.distributed import Client, LocalCluster from dask.distributed import Client
from pypsa.geo import haversine from pypsa.geo import haversine
from shapely.geometry import LineString from shapely.geometry import LineString
@ -206,20 +206,23 @@ if __name__ == "__main__":
nprocesses = int(snakemake.threads) nprocesses = int(snakemake.threads)
noprogress = snakemake.config["run"].get("disable_progressbar", True) noprogress = snakemake.config["run"].get("disable_progressbar", True)
config = snakemake.config["renewable"][snakemake.wildcards.technology] noprogress = noprogress or not snakemake.config["atlite"]["show_progress"]
resource = config["resource"] # pv panel config / wind turbine config params = snakemake.params.renewable[snakemake.wildcards.technology]
correction_factor = config.get("correction_factor", 1.0) resource = params["resource"] # pv panel params / wind turbine params
capacity_per_sqkm = config["capacity_per_sqkm"] correction_factor = params.get("correction_factor", 1.0)
p_nom_max_meth = config.get("potential", "conservative") capacity_per_sqkm = params["capacity_per_sqkm"]
p_nom_max_meth = params.get("potential", "conservative")
if isinstance(config.get("corine", {}), list): if isinstance(params.get("corine", {}), list):
config["corine"] = {"grid_codes": config["corine"]} params["corine"] = {"grid_codes": params["corine"]}
if correction_factor != 1.0: if correction_factor != 1.0:
logger.info(f"correction_factor is set as {correction_factor}") logger.info(f"correction_factor is set as {correction_factor}")
cluster = LocalCluster(n_workers=nprocesses, threads_per_worker=1) if nprocesses > 1:
client = Client(cluster, asynchronous=True) client = Client(n_workers=nprocesses, threads_per_worker=1)
else:
client = None
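The profile computation above now only starts a dask distributed client when more than one process is requested, and later passes it to atlite via dask_kwargs only in that case. A reduced sketch under an illustrative worker count:

    from dask.distributed import Client

    nprocesses = 4  # illustrative; the script takes this from snakemake.threads

    if nprocesses > 1:
        # Client() accepts LocalCluster keyword arguments directly.
        client = Client(n_workers=nprocesses, threads_per_worker=1)
    else:
        client = None

    resource = {}
    if client is not None:
        resource["dask_kwargs"] = {"scheduler": client}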
cutout = atlite.Cutout(snakemake.input.cutout) cutout = atlite.Cutout(snakemake.input.cutout)
regions = gpd.read_file(snakemake.input.regions) regions = gpd.read_file(snakemake.input.regions)
@ -231,13 +234,13 @@ if __name__ == "__main__":
regions = regions.set_index("name").rename_axis("bus") regions = regions.set_index("name").rename_axis("bus")
buses = regions.index buses = regions.index
res = config.get("excluder_resolution", 100) res = params.get("excluder_resolution", 100)
excluder = atlite.ExclusionContainer(crs=3035, res=res) excluder = atlite.ExclusionContainer(crs=3035, res=res)
if config["natura"]: if params["natura"]:
excluder.add_raster(snakemake.input.natura, nodata=0, allow_no_overlap=True) excluder.add_raster(snakemake.input.natura, nodata=0, allow_no_overlap=True)
corine = config.get("corine", {}) corine = params.get("corine", {})
if "grid_codes" in corine: if "grid_codes" in corine:
codes = corine["grid_codes"] codes = corine["grid_codes"]
excluder.add_raster(snakemake.input.corine, codes=codes, invert=True, crs=3035) excluder.add_raster(snakemake.input.corine, codes=codes, invert=True, crs=3035)
@ -248,28 +251,28 @@ if __name__ == "__main__":
snakemake.input.corine, codes=codes, buffer=buffer, crs=3035 snakemake.input.corine, codes=codes, buffer=buffer, crs=3035
) )
if "ship_threshold" in config: if "ship_threshold" in params:
shipping_threshold = ( shipping_threshold = (
config["ship_threshold"] * 8760 * 6 params["ship_threshold"] * 8760 * 6
) # approximation based on 6 years of hourly collected data ) # approximation based on 6 years of hourly collected data
func = functools.partial(np.less, shipping_threshold) func = functools.partial(np.less, shipping_threshold)
excluder.add_raster( excluder.add_raster(
snakemake.input.ship_density, codes=func, crs=4326, allow_no_overlap=True snakemake.input.ship_density, codes=func, crs=4326, allow_no_overlap=True
) )
if config.get("max_depth"): if params.get("max_depth"):
# lambda not supported for atlite + multiprocessing # lambda not supported for atlite + multiprocessing
# use named function np.greater with partially frozen argument instead # use named function np.greater with partially frozen argument instead
# and exclude areas where: -max_depth > grid cell depth # and exclude areas where: -max_depth > grid cell depth
func = functools.partial(np.greater, -config["max_depth"]) func = functools.partial(np.greater, -params["max_depth"])
excluder.add_raster(snakemake.input.gebco, codes=func, crs=4326, nodata=-1000) excluder.add_raster(snakemake.input.gebco, codes=func, crs=4326, nodata=-1000)
if "min_shore_distance" in config: if "min_shore_distance" in params:
buffer = config["min_shore_distance"] buffer = params["min_shore_distance"]
excluder.add_geometry(snakemake.input.country_shapes, buffer=buffer) excluder.add_geometry(snakemake.input.country_shapes, buffer=buffer)
if "max_shore_distance" in config: if "max_shore_distance" in params:
buffer = config["max_shore_distance"] buffer = params["max_shore_distance"]
excluder.add_geometry( excluder.add_geometry(
snakemake.input.country_shapes, buffer=buffer, invert=True snakemake.input.country_shapes, buffer=buffer, invert=True
) )
@ -291,7 +294,8 @@ if __name__ == "__main__":
potential = capacity_per_sqkm * availability.sum("bus") * area potential = capacity_per_sqkm * availability.sum("bus") * area
func = getattr(cutout, resource.pop("method")) func = getattr(cutout, resource.pop("method"))
resource["dask_kwargs"] = {"scheduler": client} if client is not None:
resource["dask_kwargs"] = {"scheduler": client}
capacity_factor = correction_factor * func(capacity_factor=True, **resource) capacity_factor = correction_factor * func(capacity_factor=True, **resource)
layout = capacity_factor * area * capacity_per_sqkm layout = capacity_factor * area * capacity_per_sqkm
profile, capacities = func( profile, capacities = func(
@ -360,13 +364,13 @@ if __name__ == "__main__":
# select only buses with some capacity and minimal capacity factor # select only buses with some capacity and minimal capacity factor
ds = ds.sel( ds = ds.sel(
bus=( bus=(
(ds["profile"].mean("time") > config.get("min_p_max_pu", 0.0)) (ds["profile"].mean("time") > params.get("min_p_max_pu", 0.0))
& (ds["p_nom_max"] > config.get("min_p_nom_max", 0.0)) & (ds["p_nom_max"] > params.get("min_p_nom_max", 0.0))
) )
) )
if "clip_p_max_pu" in config: if "clip_p_max_pu" in params:
min_p_max_pu = config["clip_p_max_pu"] min_p_max_pu = params["clip_p_max_pu"]
ds["profile"] = ds["profile"].where(ds["profile"] >= min_p_max_pu, 0) ds["profile"] = ds["profile"].where(ds["profile"] >= min_p_max_pu, 0)
ds.to_netcdf(snakemake.output.profile) ds.to_netcdf(snakemake.output.profile)

View File

@ -305,7 +305,7 @@ def prepare_building_stock_data():
u_values.set_index(["country_code", "subsector", "bage", "type"], inplace=True) u_values.set_index(["country_code", "subsector", "bage", "type"], inplace=True)
# only take in config.yaml specified countries into account # only take in config.yaml specified countries into account
countries = snakemake.config["countries"] countries = snakemake.params.countries
area_tot = area_tot.loc[countries] area_tot = area_tot.loc[countries]
return u_values, country_iso_dic, countries, area_tot, area return u_values, country_iso_dic, countries, area_tot, area
@ -513,7 +513,7 @@ def prepare_cost_retro(country_iso_dic):
def prepare_temperature_data(): def prepare_temperature_data():
""" """
returns the temperature dependent data for each country: Returns the temperature dependent data for each country:
d_heat : length of heating season pd.Series(index=countries) [days/year] d_heat : length of heating season pd.Series(index=countries) [days/year]
on those days, daily average temperature is below on those days, daily average temperature is below
@ -621,7 +621,7 @@ def calculate_costs(u_values, l, cost_retro, window_assumptions):
def calculate_new_u(u_values, l, l_weight, window_assumptions, k=0.035): def calculate_new_u(u_values, l, l_weight, window_assumptions, k=0.035):
""" """
calculate U-values after building retrofitting, depending on the old Calculate U-values after building retrofitting, depending on the old
U-values (u_values). This is for simple insulation measures, adding an U-values (u_values). This is for simple insulation measures, adding an
additional layer of insulation. additional layer of insulation.
@ -682,7 +682,7 @@ def map_tabula_to_hotmaps(df_tabula, df_hotmaps, column_prefix):
def get_solar_gains_per_year(window_area): def get_solar_gains_per_year(window_area):
""" """
returns solar heat gains during heating season in [kWh/a] depending on the Returns solar heat gains during heating season in [kWh/a] depending on the
window area [m^2] of the building, assuming an equally distributed window window area [m^2] of the building, assuming an equally distributed window
orientation (east, south, north, west) orientation (east, south, north, west)
""" """
@ -698,8 +698,8 @@ def get_solar_gains_per_year(window_area):
def map_to_lstrength(l_strength, df): def map_to_lstrength(l_strength, df):
""" """
renames column names from a pandas dataframe to map tabula retrofitting Renames column names from a pandas dataframe to map tabula retrofitting
strengths [2 = moderate, 3 = ambitious] to l_strength strengths [2 = moderate, 3 = ambitious] to l_strength.
""" """
middle = len(l_strength) // 2 middle = len(l_strength) // 2
map_to_l = pd.MultiIndex.from_arrays( map_to_l = pd.MultiIndex.from_arrays(
@ -718,7 +718,7 @@ def map_to_lstrength(l_strength, df):
def calculate_heat_losses(u_values, data_tabula, l_strength, temperature_factor): def calculate_heat_losses(u_values, data_tabula, l_strength, temperature_factor):
""" """
calculates total annual heat losses Q_ht for different insulation Calculates total annual heat losses Q_ht for different insulation
thicknesses (l_strength), depending on current insulation state (u_values), thicknesses (l_strength), depending on current insulation state (u_values),
standard building topologies and air ventilation from TABULA (data_tabula) standard building topologies and air ventilation from TABULA (data_tabula)
and the accumulated difference between internal and external temperature and the accumulated difference between internal and external temperature
@ -840,7 +840,7 @@ def calculate_heat_losses(u_values, data_tabula, l_strength, temperature_factor)
def calculate_heat_gains(data_tabula, heat_transfer_perm2, d_heat): def calculate_heat_gains(data_tabula, heat_transfer_perm2, d_heat):
""" """
calculates heat gains Q_gain [W/m^2], which consist of gains from: Calculates heat gains Q_gain [W/m^2], which consist of gains from:
(1) solar radiation (2) internal heat gains (1) solar radiation (2) internal heat gains
""" """
@ -885,7 +885,7 @@ def calculate_space_heat_savings(
u_values, data_tabula, l_strength, temperature_factor, d_heat u_values, data_tabula, l_strength, temperature_factor, d_heat
): ):
""" """
calculates space heat savings (dE_space [per unit of unrefurbished state]) Calculates space heat savings (dE_space [per unit of unrefurbished state])
through retrofitting of the thermal envelope by additional insulation through retrofitting of the thermal envelope by additional insulation
material (l_strength[m]) material (l_strength[m])
""" """
@ -1040,7 +1040,7 @@ if __name__ == "__main__":
# ******** config ********************************************************* # ******** config *********************************************************
retro_opts = snakemake.config["sector"]["retrofitting"] retro_opts = snakemake.params.retrofitting
interest_rate = retro_opts["interest_rate"] interest_rate = retro_opts["interest_rate"]
annualise_cost = retro_opts["annualise_cost"] # annualise the investment costs annualise_cost = retro_opts["annualise_cost"] # annualise the investment costs
tax_weighting = retro_opts[ tax_weighting = retro_opts[

View File

@ -41,7 +41,7 @@ if __name__ == "__main__":
"build_sequestration_potentials", simpl="", clusters="181" "build_sequestration_potentials", simpl="", clusters="181"
) )
cf = snakemake.config["sector"]["regional_co2_sequestration_potential"] cf = snakemake.params.sequestration_potential
gdf = gpd.read_file(snakemake.input.sequestration_potential[0]) gdf = gpd.read_file(snakemake.input.sequestration_potential[0])

View File

@ -234,6 +234,7 @@ def nuts3(country_shapes, nuts3, nuts3pop, nuts3gdp, ch_cantons, ch_popgdp):
manual = gpd.GeoDataFrame( manual = gpd.GeoDataFrame(
[["BA1", "BA", 3871.0], ["RS1", "RS", 7210.0], ["AL1", "AL", 2893.0]], [["BA1", "BA", 3871.0], ["RS1", "RS", 7210.0], ["AL1", "AL", 2893.0]],
columns=["NUTS_ID", "country", "pop"], columns=["NUTS_ID", "country", "pop"],
geometry=gpd.GeoSeries(),
) )
manual["geometry"] = manual["country"].map(country_shapes) manual["geometry"] = manual["country"].map(country_shapes)
manual = manual.dropna() manual = manual.dropna()
@ -254,13 +255,11 @@ if __name__ == "__main__":
snakemake = mock_snakemake("build_shapes") snakemake = mock_snakemake("build_shapes")
configure_logging(snakemake) configure_logging(snakemake)
country_shapes = countries( country_shapes = countries(snakemake.input.naturalearth, snakemake.params.countries)
snakemake.input.naturalearth, snakemake.config["countries"]
)
country_shapes.reset_index().to_file(snakemake.output.country_shapes) country_shapes.reset_index().to_file(snakemake.output.country_shapes)
offshore_shapes = eez( offshore_shapes = eez(
country_shapes, snakemake.input.eez, snakemake.config["countries"] country_shapes, snakemake.input.eez, snakemake.params.countries
) )
offshore_shapes.reset_index().to_file(snakemake.output.offshore_shapes) offshore_shapes.reset_index().to_file(snakemake.output.offshore_shapes)

View File

@ -28,7 +28,7 @@ if __name__ == "__main__":
cluster = LocalCluster(n_workers=nprocesses, threads_per_worker=1) cluster = LocalCluster(n_workers=nprocesses, threads_per_worker=1)
client = Client(cluster, asynchronous=True) client = Client(cluster, asynchronous=True)
config = snakemake.config["solar_thermal"] config = snakemake.params.solar_thermal
cutout_name = snakemake.input.cutout cutout_name = snakemake.input.cutout
year = snakemake.wildcards.weather_year year = snakemake.wildcards.weather_year
@ -37,7 +37,7 @@ if __name__ == "__main__":
snapshots = dict(start=year, end=str(int(year) + 1), inclusive="left") snapshots = dict(start=year, end=str(int(year) + 1), inclusive="left")
cutout_name = cutout_name.format(weather_year=year) cutout_name = cutout_name.format(weather_year=year)
else: else:
snapshots = snakemake.config["snapshots"] snapshots = snakemake.params.snapshots
time = pd.date_range(freq="h", **snapshots) time = pd.date_range(freq="h", **snapshots)
if snakemake.config["atlite"].get("drop_leap_day", False): if snakemake.config["atlite"].get("drop_leap_day", False):

View File

@ -34,7 +34,7 @@ if __name__ == "__main__":
snapshots = dict(start=year, end=str(int(year) + 1), inclusive="left") snapshots = dict(start=year, end=str(int(year) + 1), inclusive="left")
cutout_name = cutout_name.format(weather_year=year) cutout_name = cutout_name.format(weather_year=year)
else: else:
snapshots = snakemake.config["snapshots"] snapshots = snakemake.params.snapshots
time = pd.date_range(freq="h", **snapshots) time = pd.date_range(freq="h", **snapshots)
if snakemake.config["atlite"].get("drop_leap_day", False): if snakemake.config["atlite"].get("drop_leap_day", False):

View File

@ -176,13 +176,13 @@ if __name__ == "__main__":
snakemake.input.pop_weighted_energy_totals, index_col=0 snakemake.input.pop_weighted_energy_totals, index_col=0
) )
options = snakemake.config["sector"] options = snakemake.params.sector
year = snakemake.wildcards.weather_year year = snakemake.wildcards.weather_year
snapshots = ( snapshots = (
dict(start=year, end=str(int(year) + 1), inclusive="left") dict(start=year, end=str(int(year) + 1), inclusive="left")
if year if year
else snakemake.config["snapshots"] else snakemake.params.snapshots
) )
snapshots = pd.date_range(freq="h", **snapshots, tz="UTC") snapshots = pd.date_range(freq="h", **snapshots, tz="UTC")
if snakemake.config["atlite"].get("drop_leap_day", False): if snakemake.config["atlite"].get("drop_leap_day", False):
View File
@ -89,7 +89,7 @@ Description
**Is it possible to run the model without the** ``simplify_network`` **rule?** **Is it possible to run the model without the** ``simplify_network`` **rule?**
No, the network clustering methods in the PyPSA module No, the network clustering methods in the PyPSA module
`pypsa.networkclustering <https://github.com/PyPSA/PyPSA/blob/master/pypsa/networkclustering.py>`_ `pypsa.clustering.spatial <https://github.com/PyPSA/PyPSA/blob/master/pypsa/clustering/spatial.py>`_
do not work reliably with multiple voltage levels and transformers. do not work reliably with multiple voltage levels and transformers.
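As a point of reference for the renamed module, a minimal sketch of the busmap-based clustering it provides, applied to a network that has already been reduced to a single voltage level (the file name, weighting and cluster count are illustrative, and the exact keyword arguments may differ between PyPSA versions):

import pandas as pd
import pypsa
from pypsa.clustering.spatial import busmap_by_kmeans, get_clustering_from_busmap

n = pypsa.Network("elec_s.nc")                 # already simplified to 380 kV
weights = pd.Series(1.0, index=n.buses.index)  # uniform bus weighting
busmap = busmap_by_kmeans(n, bus_weightings=weights, n_clusters=50)

clustering = get_clustering_from_busmap(
    n,
    busmap,
    aggregate_generators_weighted=True,
    aggregate_one_ports=["Load", "StorageUnit"],
)
nc = clustering.network                        # the clustered network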
.. tip:: .. tip::
@ -133,8 +133,8 @@ import pandas as pd
import pyomo.environ as po import pyomo.environ as po
import pypsa import pypsa
import seaborn as sns import seaborn as sns
from _helpers import configure_logging, get_aggregation_strategies, update_p_nom_max from _helpers import configure_logging, update_p_nom_max
from pypsa.networkclustering import ( from pypsa.clustering.spatial import (
busmap_by_greedy_modularity, busmap_by_greedy_modularity,
busmap_by_hac, busmap_by_hac,
busmap_by_kmeans, busmap_by_kmeans,
@ -186,7 +186,7 @@ def get_feature_for_hac(n, buses_i=None, feature=None):
if "offwind" in carriers: if "offwind" in carriers:
carriers.remove("offwind") carriers.remove("offwind")
carriers = np.append( carriers = np.append(
carriers, network.generators.carrier.filter(like="offwind").unique() carriers, n.generators.carrier.filter(like="offwind").unique()
) )
if feature.split("-")[1] == "cap": if feature.split("-")[1] == "cap":
@ -395,10 +395,6 @@ def clustering_for_n_clusters(
extended_link_costs=0, extended_link_costs=0,
focus_weights=None, focus_weights=None,
): ):
bus_strategies, generator_strategies = get_aggregation_strategies(
aggregation_strategies
)
if not isinstance(custom_busmap, pd.Series): if not isinstance(custom_busmap, pd.Series):
busmap = busmap_for_n_clusters( busmap = busmap_for_n_clusters(
n, n_clusters, solver_name, focus_weights, algorithm, feature n, n_clusters, solver_name, focus_weights, algorithm, feature
@ -406,15 +402,20 @@ def clustering_for_n_clusters(
else: else:
busmap = custom_busmap busmap = custom_busmap
line_strategies = aggregation_strategies.get("lines", dict())
generator_strategies = aggregation_strategies.get("generators", dict())
one_port_strategies = aggregation_strategies.get("one_ports", dict())
clustering = get_clustering_from_busmap( clustering = get_clustering_from_busmap(
n, n,
busmap, busmap,
bus_strategies=bus_strategies,
aggregate_generators_weighted=True, aggregate_generators_weighted=True,
aggregate_generators_carriers=aggregate_carriers, aggregate_generators_carriers=aggregate_carriers,
aggregate_one_ports=["Load", "StorageUnit"], aggregate_one_ports=["Load", "StorageUnit"],
line_length_factor=line_length_factor, line_length_factor=line_length_factor,
line_strategies=line_strategies,
generator_strategies=generator_strategies, generator_strategies=generator_strategies,
one_port_strategies=one_port_strategies,
scale_link_capital_costs=False, scale_link_capital_costs=False,
) )
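The aggregation_strategies mapping consumed above is assumed to be a nested dictionary keyed by the component groups used here ("lines", "generators", "one_ports"), each inner dictionary naming a column and a pandas aggregation (string or callable). A sketch with hypothetical entries:

aggregation_strategies = {
    "generators": {
        "p_nom_max": "sum",      # merged generators add up their capacity limits
        "committable": "any",
        "ramp_limit_up": "max",
    },
    "lines": {},                 # defaults apply where a group is left empty
    "one_ports": {},
}

Which columns actually need a non-default strategy is taken from the clustering section of the configuration; only the shape of the mapping is sketched here.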
@ -424,7 +425,10 @@ def clustering_for_n_clusters(
n.links.eval("underwater_fraction * length").div(nc.links.length).dropna() n.links.eval("underwater_fraction * length").div(nc.links.length).dropna()
) )
nc.links["capital_cost"] = nc.links["capital_cost"].add( nc.links["capital_cost"] = nc.links["capital_cost"].add(
(nc.links.length - n.links.length).clip(lower=0).mul(extended_link_costs), (nc.links.length - n.links.length)
.clip(lower=0)
.mul(extended_link_costs)
.dropna(),
fill_value=0, fill_value=0,
) )
@ -462,28 +466,20 @@ if __name__ == "__main__":
) )
configure_logging(snakemake) configure_logging(snakemake)
params = snakemake.params
solver_name = snakemake.config["solving"]["solver"]["name"]
n = pypsa.Network(snakemake.input.network) n = pypsa.Network(snakemake.input.network)
focus_weights = snakemake.config.get("focus_weights", None) exclude_carriers = params.cluster_network["exclude_carriers"]
renewable_carriers = pd.Index(
[
tech
for tech in n.generators.carrier.unique()
if tech in snakemake.config["renewable"]
]
)
exclude_carriers = snakemake.config["clustering"]["cluster_network"].get(
"exclude_carriers", []
)
aggregate_carriers = set(n.generators.carrier) - set(exclude_carriers) aggregate_carriers = set(n.generators.carrier) - set(exclude_carriers)
conventional_carriers = set(params.conventional_carriers)
if snakemake.wildcards.clusters.endswith("m"): if snakemake.wildcards.clusters.endswith("m"):
n_clusters = int(snakemake.wildcards.clusters[:-1]) n_clusters = int(snakemake.wildcards.clusters[:-1])
conventional = set( aggregate_carriers = params.conventional_carriers & aggregate_carriers
snakemake.config["electricity"].get("conventional_carriers", []) elif snakemake.wildcards.clusters.endswith("c"):
) n_clusters = int(snakemake.wildcards.clusters[:-1])
aggregate_carriers = conventional.intersection(aggregate_carriers) aggregate_carriers = aggregate_carriers - conventional_carriers
elif snakemake.wildcards.clusters == "all": elif snakemake.wildcards.clusters == "all":
n_clusters = len(n.buses) n_clusters = len(n.buses)
else: else:
@ -493,37 +489,20 @@ if __name__ == "__main__":
# Fast-path if no clustering is necessary # Fast-path if no clustering is necessary
busmap = n.buses.index.to_series() busmap = n.buses.index.to_series()
linemap = n.lines.index.to_series() linemap = n.lines.index.to_series()
clustering = pypsa.networkclustering.Clustering( clustering = pypsa.clustering.spatial.Clustering(
n, busmap, linemap, linemap, pd.Series(dtype="O") n, busmap, linemap, linemap, pd.Series(dtype="O")
) )
else: else:
line_length_factor = snakemake.config["lines"]["length_factor"]
Nyears = n.snapshot_weightings.objective.sum() / 8760 Nyears = n.snapshot_weightings.objective.sum() / 8760
hvac_overhead_cost = load_costs( hvac_overhead_cost = load_costs(
snakemake.input.tech_costs, snakemake.input.tech_costs,
snakemake.config["costs"], params.costs,
snakemake.config["electricity"], params.max_hours,
Nyears, Nyears,
).at["HVAC overhead", "capital_cost"] ).at["HVAC overhead", "capital_cost"]
def consense(x): custom_busmap = params.custom_busmap
v = x.iat[0]
assert (
x == v
).all() or x.isnull().all(), "The `potential` configuration option must agree for all renewable carriers, for now!"
return v
aggregation_strategies = snakemake.config["clustering"].get(
"aggregation_strategies", {}
)
# translate str entries of aggregation_strategies to pd.Series functions:
aggregation_strategies = {
p: {k: getattr(pd.Series, v) for k, v in aggregation_strategies[p].items()}
for p in aggregation_strategies.keys()
}
custom_busmap = snakemake.config["enable"].get("custom_busmap", False)
if custom_busmap: if custom_busmap:
custom_busmap = pd.read_csv( custom_busmap = pd.read_csv(
snakemake.input.custom_busmap, index_col=0, squeeze=True snakemake.input.custom_busmap, index_col=0, squeeze=True
@ -531,21 +510,18 @@ if __name__ == "__main__":
custom_busmap.index = custom_busmap.index.astype(str) custom_busmap.index = custom_busmap.index.astype(str)
logger.info(f"Imported custom busmap from {snakemake.input.custom_busmap}") logger.info(f"Imported custom busmap from {snakemake.input.custom_busmap}")
cluster_config = snakemake.config.get("clustering", {}).get(
"cluster_network", {}
)
clustering = clustering_for_n_clusters( clustering = clustering_for_n_clusters(
n, n,
n_clusters, n_clusters,
custom_busmap, custom_busmap,
aggregate_carriers, aggregate_carriers,
line_length_factor, params.length_factor,
aggregation_strategies, params.aggregation_strategies,
snakemake.config["solving"]["solver"]["name"], solver_name,
cluster_config.get("algorithm", "hac"), params.cluster_network["algorithm"],
cluster_config.get("feature", "solar+onwind-time"), params.cluster_network["feature"],
hvac_overhead_cost, hvac_overhead_cost,
focus_weights, params.focus_weights,
) )
update_p_nom_max(clustering.network) update_p_nom_max(clustering.network)
View File
@ -16,7 +16,6 @@ import sys
import numpy as np import numpy as np
import pandas as pd import pandas as pd
import pypsa import pypsa
from _helpers import override_component_attrs
from prepare_sector_network import prepare_costs from prepare_sector_network import prepare_costs
idx = pd.IndexSlice idx = pd.IndexSlice
@ -198,7 +197,7 @@ def calculate_costs(n, label, costs):
def calculate_cumulative_cost(): def calculate_cumulative_cost():
planning_horizons = snakemake.config["scenario"]["planning_horizons"] planning_horizons = snakemake.params.scenario["planning_horizons"]
cumulative_cost = pd.DataFrame( cumulative_cost = pd.DataFrame(
index=df["costs"].sum().index, index=df["costs"].sum().index,
@ -300,9 +299,9 @@ def calculate_energy(n, label, energy):
) )
# remove values where bus is missing (bug in nomopyomo) # remove values where bus is missing (bug in nomopyomo)
no_bus = c.df.index[c.df["bus" + port] == ""] no_bus = c.df.index[c.df["bus" + port] == ""]
totals.loc[no_bus] = n.component_attrs[c.name].loc[ totals.loc[no_bus] = float(
"p" + port, "default" n.component_attrs[c.name].loc["p" + port, "default"]
] )
c_energies -= totals.groupby(c.df.carrier).sum() c_energies -= totals.groupby(c.df.carrier).sum()
c_energies = pd.concat([c_energies], keys=[c.list_name]) c_energies = pd.concat([c_energies], keys=[c.list_name])
@ -660,8 +659,7 @@ def make_summaries(networks_dict):
for label, filename in networks_dict.items(): for label, filename in networks_dict.items():
logger.info(f"Make summary for scenario {label}, using {filename}") logger.info(f"Make summary for scenario {label}, using {filename}")
overrides = override_component_attrs(snakemake.input.overrides) n = pypsa.Network(filename)
n = pypsa.Network(filename, override_component_attrs=overrides)
assign_carriers(n) assign_carriers(n)
assign_locations(n) assign_locations(n)
@ -689,20 +687,20 @@ if __name__ == "__main__":
(weather_year, cluster, ll, opt + sector_opt, planning_horizon): "results/" (weather_year, cluster, ll, opt + sector_opt, planning_horizon): "results/"
+ snakemake.params.RDIR + snakemake.params.RDIR
+ f"/postnetworks/elec_s{simpl}_{cluster}_l{ll}_{opt}_{sector_opt}_{planning_horizon}.nc" + f"/postnetworks/elec_s{simpl}_{cluster}_l{ll}_{opt}_{sector_opt}_{planning_horizon}.nc"
for weather_year in snakemake.config["scenario"]["weather_year"] for weather_year in snakemake.params.scenario["weather_year"]
for simpl in snakemake.config["scenario"]["simpl"] for simpl in snakemake.params.scenario["simpl"]
for cluster in snakemake.config["scenario"]["clusters"] for cluster in snakemake.params.scenario["clusters"]
for opt in snakemake.config["scenario"]["opts"] for opt in snakemake.params.scenario["opts"]
for sector_opt in snakemake.config["scenario"]["sector_opts"] for sector_opt in snakemake.params.scenario["sector_opts"]
for ll in snakemake.config["scenario"]["ll"] for ll in snakemake.params.scenario["ll"]
for planning_horizon in snakemake.config["scenario"]["planning_horizons"] for planning_horizon in snakemake.params.scenario["planning_horizons"]
} }
Nyears = len(pd.date_range(freq="h", **snakemake.config["snapshots"])) / 8760 Nyears = len(pd.date_range(freq="h", **snakemake.params.snapshots)) / 8760
costs_db = prepare_costs( costs_db = prepare_costs(
snakemake.input.costs, snakemake.input.costs,
snakemake.config["costs"], snakemake.params.costs,
Nyears, Nyears,
) )
@ -712,7 +710,7 @@ if __name__ == "__main__":
to_csv(df) to_csv(df)
if snakemake.config["foresight"] == "myopic": if snakemake.params.foresight == "myopic":
cumulative_cost = calculate_cumulative_cost() cumulative_cost = calculate_cumulative_cost()
cumulative_cost.to_csv( cumulative_cost.to_csv(
"results/" + snakemake.params.RDIR + "/csvs/cumulative_cost.csv" "results/" + snakemake.params.RDIR + "/csvs/cumulative_cost.csv"
View File
@ -20,7 +20,6 @@ import geopandas as gpd
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
import pandas as pd import pandas as pd
import pypsa import pypsa
from _helpers import override_component_attrs
from make_summary import assign_carriers from make_summary import assign_carriers
from plot_summary import preferred_order, rename_techs from plot_summary import preferred_order, rename_techs
from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches
@ -70,7 +69,7 @@ def plot_map(
transmission=False, transmission=False,
with_legend=True, with_legend=True,
): ):
tech_colors = snakemake.config["plotting"]["tech_colors"] tech_colors = snakemake.params.plotting["tech_colors"]
n = network.copy() n = network.copy()
assign_location(n) assign_location(n)
@ -116,9 +115,7 @@ def plot_map(
costs = costs.stack() # .sort_index() costs = costs.stack() # .sort_index()
# hack because impossible to drop buses... # hack because impossible to drop buses...
eu_location = snakemake.config["plotting"].get( eu_location = snakemake.params.plotting.get("eu_node_location", dict(x=-5.5, y=46))
"eu_node_location", dict(x=-5.5, y=46)
)
n.buses.loc["EU gas", "x"] = eu_location["x"] n.buses.loc["EU gas", "x"] = eu_location["x"]
n.buses.loc["EU gas", "y"] = eu_location["y"] n.buses.loc["EU gas", "y"] = eu_location["y"]
@ -315,7 +312,7 @@ def plot_h2_map(network, regions):
h2_new = n.links[n.links.carrier == "H2 pipeline"] h2_new = n.links[n.links.carrier == "H2 pipeline"]
h2_retro = n.links[n.links.carrier == "H2 pipeline retrofitted"] h2_retro = n.links[n.links.carrier == "H2 pipeline retrofitted"]
if snakemake.config["foresight"] == "myopic": if snakemake.params.foresight == "myopic":
# sum capacity for pipelines from different investment periods # sum capacity for pipelines from different investment periods
h2_new = group_pipes(h2_new) h2_new = group_pipes(h2_new)
@ -558,7 +555,7 @@ def plot_ch4_map(network):
link_widths_used = max_usage / linewidth_factor link_widths_used = max_usage / linewidth_factor
link_widths_used[max_usage < line_lower_threshold] = 0.0 link_widths_used[max_usage < line_lower_threshold] = 0.0
tech_colors = snakemake.config["plotting"]["tech_colors"] tech_colors = snakemake.params.plotting["tech_colors"]
pipe_colors = { pipe_colors = {
"gas pipeline": "#f08080", "gas pipeline": "#f08080",
@ -700,7 +697,7 @@ def plot_map_without(network):
# hack because impossible to drop buses... # hack because impossible to drop buses...
if "EU gas" in n.buses.index: if "EU gas" in n.buses.index:
eu_location = snakemake.config["plotting"].get( eu_location = snakemake.params.plotting.get(
"eu_node_location", dict(x=-5.5, y=46) "eu_node_location", dict(x=-5.5, y=46)
) )
n.buses.loc["EU gas", "x"] = eu_location["x"] n.buses.loc["EU gas", "x"] = eu_location["x"]
@ -876,7 +873,7 @@ def plot_series(network, carrier="AC", name="test"):
stacked=True, stacked=True,
linewidth=0.0, linewidth=0.0,
color=[ color=[
snakemake.config["plotting"]["tech_colors"][i.replace(suffix, "")] snakemake.params.plotting["tech_colors"][i.replace(suffix, "")]
for i in new_columns for i in new_columns
], ],
) )
@ -933,12 +930,11 @@ if __name__ == "__main__":
logging.basicConfig(level=snakemake.config["logging"]["level"]) logging.basicConfig(level=snakemake.config["logging"]["level"])
overrides = override_component_attrs(snakemake.input.overrides) n = pypsa.Network(snakemake.input.network)
n = pypsa.Network(snakemake.input.network, override_component_attrs=overrides)
regions = gpd.read_file(snakemake.input.regions).set_index("name") regions = gpd.read_file(snakemake.input.regions).set_index("name")
map_opts = snakemake.config["plotting"]["map"] map_opts = snakemake.params.plotting["map"]
if map_opts["boundaries"] is None: if map_opts["boundaries"] is None:
map_opts["boundaries"] = regions.total_bounds[[0, 2, 1, 3]] + [-1, 1, -1, 1] map_opts["boundaries"] = regions.total_bounds[[0, 2, 1, 3]] + [-1, 1, -1, 1]
View File
@ -142,10 +142,10 @@ def plot_costs():
df = df.groupby(df.index.map(rename_techs)).sum() df = df.groupby(df.index.map(rename_techs)).sum()
to_drop = df.index[df.max(axis=1) < snakemake.config["plotting"]["costs_threshold"]] to_drop = df.index[df.max(axis=1) < snakemake.params.plotting["costs_threshold"]]
logger.info( logger.info(
f"Dropping technology with costs below {snakemake.config['plotting']['costs_threshold']} EUR billion per year" f"Dropping technology with costs below {snakemake.params['plotting']['costs_threshold']} EUR billion per year"
) )
logger.debug(df.loc[to_drop]) logger.debug(df.loc[to_drop])
@ -165,7 +165,7 @@ def plot_costs():
kind="bar", kind="bar",
ax=ax, ax=ax,
stacked=True, stacked=True,
color=[snakemake.config["plotting"]["tech_colors"][i] for i in new_index], color=[snakemake.params.plotting["tech_colors"][i] for i in new_index],
) )
handles, labels = ax.get_legend_handles_labels() handles, labels = ax.get_legend_handles_labels()
@ -173,7 +173,7 @@ def plot_costs():
handles.reverse() handles.reverse()
labels.reverse() labels.reverse()
ax.set_ylim([0, snakemake.config["plotting"]["costs_max"]]) ax.set_ylim([0, snakemake.params.plotting["costs_max"]])
ax.set_ylabel("System Cost [EUR billion per year]") ax.set_ylabel("System Cost [EUR billion per year]")
@ -201,11 +201,11 @@ def plot_energy():
df = df.groupby(df.index.map(rename_techs)).sum() df = df.groupby(df.index.map(rename_techs)).sum()
to_drop = df.index[ to_drop = df.index[
df.abs().max(axis=1) < snakemake.config["plotting"]["energy_threshold"] df.abs().max(axis=1) < snakemake.params.plotting["energy_threshold"]
] ]
logger.info( logger.info(
f"Dropping all technology with energy consumption or production below {snakemake.config['plotting']['energy_threshold']} TWh/a" f"Dropping all technology with energy consumption or production below {snakemake.params['plotting']['energy_threshold']} TWh/a"
) )
logger.debug(df.loc[to_drop]) logger.debug(df.loc[to_drop])
@ -227,7 +227,7 @@ def plot_energy():
kind="bar", kind="bar",
ax=ax, ax=ax,
stacked=True, stacked=True,
color=[snakemake.config["plotting"]["tech_colors"][i] for i in new_index], color=[snakemake.params.plotting["tech_colors"][i] for i in new_index],
) )
handles, labels = ax.get_legend_handles_labels() handles, labels = ax.get_legend_handles_labels()
@ -237,8 +237,8 @@ def plot_energy():
ax.set_ylim( ax.set_ylim(
[ [
snakemake.config["plotting"]["energy_min"], snakemake.params.plotting["energy_min"],
snakemake.config["plotting"]["energy_max"], snakemake.params.plotting["energy_max"],
] ]
) )
@ -287,7 +287,7 @@ def plot_balances():
df = df.groupby(df.index.map(rename_techs)).sum() df = df.groupby(df.index.map(rename_techs)).sum()
to_drop = df.index[ to_drop = df.index[
df.abs().max(axis=1) < snakemake.config["plotting"]["energy_threshold"] / 10 df.abs().max(axis=1) < snakemake.params.plotting["energy_threshold"] / 10
] ]
if v[0] in co2_carriers: if v[0] in co2_carriers:
@ -296,7 +296,7 @@ def plot_balances():
units = "TWh/a" units = "TWh/a"
logger.debug( logger.debug(
f"Dropping technology energy balance smaller than {snakemake.config['plotting']['energy_threshold']/10} {units}" f"Dropping technology energy balance smaller than {snakemake.params['plotting']['energy_threshold']/10} {units}"
) )
logger.debug(df.loc[to_drop]) logger.debug(df.loc[to_drop])
@ -317,7 +317,7 @@ def plot_balances():
kind="bar", kind="bar",
ax=ax, ax=ax,
stacked=True, stacked=True,
color=[snakemake.config["plotting"]["tech_colors"][i] for i in new_index], color=[snakemake.params.plotting["tech_colors"][i] for i in new_index],
) )
handles, labels = ax.get_legend_handles_labels() handles, labels = ax.get_legend_handles_labels()
@ -455,10 +455,10 @@ def plot_carbon_budget_distribution(input_eurostat):
ax1 = plt.subplot(gs1[0, 0]) ax1 = plt.subplot(gs1[0, 0])
ax1.set_ylabel("CO$_2$ emissions (Gt per year)", fontsize=22) ax1.set_ylabel("CO$_2$ emissions (Gt per year)", fontsize=22)
ax1.set_ylim([0, 5]) ax1.set_ylim([0, 5])
ax1.set_xlim([1990, snakemake.config["scenario"]["planning_horizons"][-1] + 1]) ax1.set_xlim([1990, snakemake.params.planning_horizons[-1] + 1])
path_cb = "results/" + snakemake.params.RDIR + "/csvs/" path_cb = "results/" + snakemake.params.RDIR + "/csvs/"
countries = snakemake.config["countries"] countries = snakemake.params.countries
e_1990 = co2_emissions_year(countries, input_eurostat, opts, year=1990) e_1990 = co2_emissions_year(countries, input_eurostat, opts, year=1990)
CO2_CAP = pd.read_csv(path_cb + "carbon_budget_distribution.csv", index_col=0) CO2_CAP = pd.read_csv(path_cb + "carbon_budget_distribution.csv", index_col=0)
@ -555,7 +555,7 @@ if __name__ == "__main__":
plot_balances() plot_balances()
for sector_opts in snakemake.config["scenario"]["sector_opts"]: for sector_opts in snakemake.params.sector_opts:
opts = sector_opts.split("-") opts = sector_opts.split("-")
for o in opts: for o in opts:
if "cb" in o: if "cb" in o:
View File
@ -233,7 +233,22 @@ def enforce_autarky(n, only_crossborder=False):
n.mremove("Link", links_rm) n.mremove("Link", links_rm)
def set_line_nom_max(n, s_nom_max_set=np.inf, p_nom_max_set=np.inf): def set_line_nom_max(
n,
s_nom_max_set=np.inf,
p_nom_max_set=np.inf,
s_nom_max_ext=np.inf,
p_nom_max_ext=np.inf,
):
if np.isfinite(s_nom_max_ext) and s_nom_max_ext > 0:
logger.info(f"Limiting line extensions to {s_nom_max_ext} MW")
n.lines["s_nom_max"] = n.lines["s_nom"] + s_nom_max_ext
if np.isfinite(p_nom_max_ext) and p_nom_max_ext > 0:
logger.info(f"Limiting line extensions to {p_nom_max_ext} MW")
hvdc = n.links.index[n.links.carrier == "DC"]
n.links.loc[hvdc, "p_nom_max"] = n.links.loc[hvdc, "p_nom"] + p_nom_max_ext
n.lines.s_nom_max.clip(upper=s_nom_max_set, inplace=True) n.lines.s_nom_max.clip(upper=s_nom_max_set, inplace=True)
n.links.p_nom_max.clip(upper=p_nom_max_set, inplace=True) n.links.p_nom_max.clip(upper=p_nom_max_set, inplace=True)
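A brief numerical illustration of the extended function above, with hypothetical values: given lines: max_extension: 10000 and lines: s_nom_max: 15000, a line with s_nom = 8000 MW first receives s_nom_max = 8000 + 10000 = 18000 MW and is then clipped to 15000 MW; DC links are treated analogously through p_nom_max.

set_line_nom_max(
    n,
    s_nom_max_set=15000,    # lines: s_nom_max
    p_nom_max_set=np.inf,   # links: p_nom_max
    s_nom_max_ext=10000,    # lines: max_extension
    p_nom_max_ext=10000,    # links: max_extension
)
# n.lines.s_nom_max == min(n.lines.s_nom + 10000, 15000)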
@ -258,12 +273,12 @@ if __name__ == "__main__":
Nyears = n.snapshot_weightings.objective.sum() / 8760.0 Nyears = n.snapshot_weightings.objective.sum() / 8760.0
costs = load_costs( costs = load_costs(
snakemake.input.tech_costs, snakemake.input.tech_costs,
snakemake.config["costs"], snakemake.params.costs,
snakemake.config["electricity"], snakemake.params.max_hours,
Nyears, Nyears,
) )
set_line_s_max_pu(n, snakemake.config["lines"]["s_max_pu"]) set_line_s_max_pu(n, snakemake.params.lines["s_max_pu"])
for o in opts: for o in opts:
m = re.match(r"^\d+h$", o, re.IGNORECASE) m = re.match(r"^\d+h$", o, re.IGNORECASE)
@ -282,11 +297,11 @@ if __name__ == "__main__":
if "Co2L" in o: if "Co2L" in o:
m = re.findall("[0-9]*\.?[0-9]+$", o) m = re.findall("[0-9]*\.?[0-9]+$", o)
if len(m) > 0: if len(m) > 0:
co2limit = float(m[0]) * snakemake.config["electricity"]["co2base"] co2limit = float(m[0]) * snakemake.params.co2base
add_co2limit(n, co2limit, Nyears) add_co2limit(n, co2limit, Nyears)
logger.info("Setting CO2 limit according to wildcard value.") logger.info("Setting CO2 limit according to wildcard value.")
else: else:
add_co2limit(n, snakemake.config["electricity"]["co2limit"], Nyears) add_co2limit(n, snakemake.params.co2limit, Nyears)
logger.info("Setting CO2 limit according to config value.") logger.info("Setting CO2 limit according to config value.")
break break
@ -298,11 +313,13 @@ if __name__ == "__main__":
add_gaslimit(n, limit, Nyears) add_gaslimit(n, limit, Nyears)
logger.info("Setting gas usage limit according to wildcard value.") logger.info("Setting gas usage limit according to wildcard value.")
else: else:
add_gaslimit(n, snakemake.config["electricity"].get("gaslimit"), Nyears) add_gaslimit(n, snakemake.params.gaslimit, Nyears)
logger.info("Setting gas usage limit according to config value.") logger.info("Setting gas usage limit according to config value.")
break break
for o in opts: for o in opts:
if "+" not in o:
continue
oo = o.split("+") oo = o.split("+")
suptechs = map(lambda c: c.split("-", 2)[0], n.carriers.index) suptechs = map(lambda c: c.split("-", 2)[0], n.carriers.index)
if oo[0].startswith(tuple(suptechs)): if oo[0].startswith(tuple(suptechs)):
@ -327,7 +344,7 @@ if __name__ == "__main__":
add_emission_prices(n, dict(co2=float(m[0]))) add_emission_prices(n, dict(co2=float(m[0])))
else: else:
logger.info("Setting emission prices according to config value.") logger.info("Setting emission prices according to config value.")
add_emission_prices(n, snakemake.config["costs"]["emission_prices"]) add_emission_prices(n, snakemake.params.costs["emission_prices"])
break break
ll_type, factor = snakemake.wildcards.ll[0], snakemake.wildcards.ll[1:] ll_type, factor = snakemake.wildcards.ll[0], snakemake.wildcards.ll[1:]
@ -335,8 +352,10 @@ if __name__ == "__main__":
set_line_nom_max( set_line_nom_max(
n, n,
s_nom_max_set=snakemake.config["lines"].get("s_nom_max,", np.inf), s_nom_max_set=snakemake.params.lines.get("s_nom_max", np.inf),
p_nom_max_set=snakemake.config["links"].get("p_nom_max,", np.inf), p_nom_max_set=snakemake.params.links.get("p_nom_max", np.inf),
s_nom_max_ext=snakemake.params.lines.get("max_extension", np.inf),
p_nom_max_ext=snakemake.params.links.get("max_extension", np.inf),
) )
if "ATK" in opts: if "ATK" in opts:
View File
@ -17,18 +17,14 @@ import numpy as np
import pandas as pd import pandas as pd
import pypsa import pypsa
import xarray as xr import xarray as xr
from _helpers import ( from _helpers import generate_periodic_profiles, update_config_with_sector_opts
generate_periodic_profiles, from add_electricity import calculate_annuity, sanitize_carriers
override_component_attrs,
update_config_with_sector_opts,
)
from build_energy_totals import build_co2_totals, build_eea_co2, build_eurostat_co2 from build_energy_totals import build_co2_totals, build_eea_co2, build_eurostat_co2
from networkx.algorithms import complement from networkx.algorithms import complement
from networkx.algorithms.connectivity.edge_augmentation import k_edge_augmentation from networkx.algorithms.connectivity.edge_augmentation import k_edge_augmentation
from pypsa.geo import haversine_pts from pypsa.geo import haversine_pts
from pypsa.io import import_components_from_dataframe from pypsa.io import import_components_from_dataframe
from scipy.stats import beta from scipy.stats import beta
from vresutils.costdata import annuity
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -200,12 +196,12 @@ def co2_emissions_year(
""" """
Calculate CO2 emissions in one specific year (e.g. 1990 or 2018). Calculate CO2 emissions in one specific year (e.g. 1990 or 2018).
""" """
emissions_scope = snakemake.config["energy"]["emissions"] emissions_scope = snakemake.params.energy["emissions"]
eea_co2 = build_eea_co2(snakemake.input.co2, year, emissions_scope) eea_co2 = build_eea_co2(snakemake.input.co2, year, emissions_scope)
# TODO: read Eurostat data from year > 2014 # TODO: read Eurostat data from year > 2014
# this only affects the estimation of CO2 emissions for BA, RS, AL, ME, MK # this only affects the estimation of CO2 emissions for BA, RS, AL, ME, MK
report_year = snakemake.config["energy"]["eurostat_report_year"] report_year = snakemake.params.energy["eurostat_report_year"]
if year > 2014: if year > 2014:
eurostat_co2 = build_eurostat_co2( eurostat_co2 = build_eurostat_co2(
input_eurostat, countries, report_year, year=2014 input_eurostat, countries, report_year, year=2014
@ -241,7 +237,7 @@ def build_carbon_budget(o, input_eurostat, fn, emissions_scope, report_year):
carbon_budget = float(o[o.find("cb") + 2 : o.find("ex")]) carbon_budget = float(o[o.find("cb") + 2 : o.find("ex")])
r = float(o[o.find("ex") + 2 :]) r = float(o[o.find("ex") + 2 :])
countries = snakemake.config["countries"] countries = snakemake.params.countries
e_1990 = co2_emissions_year( e_1990 = co2_emissions_year(
countries, input_eurostat, opts, emissions_scope, report_year, year=1990 countries, input_eurostat, opts, emissions_scope, report_year, year=1990
@ -252,7 +248,7 @@ def build_carbon_budget(o, input_eurostat, fn, emissions_scope, report_year):
countries, input_eurostat, opts, emissions_scope, report_year, year=2018 countries, input_eurostat, opts, emissions_scope, report_year, year=2018
) )
planning_horizons = snakemake.config["scenario"]["planning_horizons"] planning_horizons = snakemake.params.planning_horizons
t_0 = planning_horizons[0] t_0 = planning_horizons[0]
if "be" in o: if "be" in o:
@ -391,7 +387,7 @@ def update_wind_solar_costs(n, costs):
with xr.open_dataset(profile) as ds: with xr.open_dataset(profile) as ds:
underwater_fraction = ds["underwater_fraction"].to_pandas() underwater_fraction = ds["underwater_fraction"].to_pandas()
connection_cost = ( connection_cost = (
snakemake.config["lines"]["length_factor"] snakemake.params.length_factor
* ds["average_distance"].to_pandas() * ds["average_distance"].to_pandas()
* ( * (
underwater_fraction underwater_fraction
@ -483,8 +479,8 @@ def remove_elec_base_techs(n):
batteries and H2) from base electricity-only network, since they're added batteries and H2) from base electricity-only network, since they're added
here differently using links. here differently using links.
""" """
for c in n.iterate_components(snakemake.config["pypsa_eur"]): for c in n.iterate_components(snakemake.params.pypsa_eur):
to_keep = snakemake.config["pypsa_eur"][c.name] to_keep = snakemake.params.pypsa_eur[c.name]
to_remove = pd.Index(c.df.carrier.unique()).symmetric_difference(to_keep) to_remove = pd.Index(c.df.carrier.unique()).symmetric_difference(to_keep)
if to_remove.empty: if to_remove.empty:
continue continue
@ -674,7 +670,7 @@ def add_dac(n, costs):
def add_co2limit(n, nyears=1.0, limit=0.0): def add_co2limit(n, nyears=1.0, limit=0.0):
logger.info(f"Adding CO2 budget limit as per unit of 1990 levels of {limit}") logger.info(f"Adding CO2 budget limit as per unit of 1990 levels of {limit}")
countries = snakemake.config["countries"] countries = snakemake.params.countries
sectors = emission_sectors_from_opts(opts) sectors = emission_sectors_from_opts(opts)
@ -731,7 +727,7 @@ def cycling_shift(df, steps=1):
return df return df
def prepare_costs(cost_file, config, nyears): def prepare_costs(cost_file, params, nyears):
# set all asset costs and other parameters # set all asset costs and other parameters
costs = pd.read_csv(cost_file, index_col=[0, 1]).sort_index() costs = pd.read_csv(cost_file, index_col=[0, 1]).sort_index()
@ -743,10 +739,10 @@ def prepare_costs(cost_file, config, nyears):
costs.loc[:, "value"].unstack(level=1).groupby("technology").sum(min_count=1) costs.loc[:, "value"].unstack(level=1).groupby("technology").sum(min_count=1)
) )
costs = costs.fillna(config["fill_values"]) costs = costs.fillna(params["fill_values"])
def annuity_factor(v): def annuity_factor(v):
return annuity(v["lifetime"], v["discount rate"]) + v["FOM"] / 100 return calculate_annuity(v["lifetime"], v["discount rate"]) + v["FOM"] / 100
costs["fixed"] = [ costs["fixed"] = [
annuity_factor(v) * v["investment"] * nyears for i, v in costs.iterrows() annuity_factor(v) * v["investment"] * nyears for i, v in costs.iterrows()
@ -791,7 +787,7 @@ def add_ammonia(n, costs):
nodes = pop_layout.index nodes = pop_layout.index
cf_industry = snakemake.config["industry"] cf_industry = snakemake.params.industry
n.add("Carrier", "NH3") n.add("Carrier", "NH3")
@ -855,7 +851,7 @@ def add_wave(n, wave_cost_factor):
capacity = pd.Series({"Attenuator": 750, "F2HB": 1000, "MultiPA": 600}) capacity = pd.Series({"Attenuator": 750, "F2HB": 1000, "MultiPA": 600})
# in EUR/MW # in EUR/MW
annuity_factor = annuity(25, 0.07) + 0.03 annuity_factor = calculate_annuity(25, 0.07) + 0.03
costs = ( costs = (
1e6 1e6
* wave_cost_factor * wave_cost_factor
@ -1106,10 +1102,14 @@ def add_storage_and_grids(n, costs):
lifetime=costs.at["OCGT", "lifetime"], lifetime=costs.at["OCGT", "lifetime"],
) )
cavern_types = snakemake.config["sector"]["hydrogen_underground_storage_locations"] cavern_types = snakemake.params.sector["hydrogen_underground_storage_locations"]
h2_caverns = pd.read_csv(snakemake.input.h2_cavern, index_col=0) h2_caverns = pd.read_csv(snakemake.input.h2_cavern, index_col=0)
if not h2_caverns.empty and options["hydrogen_underground_storage"]: if (
not h2_caverns.empty
and options["hydrogen_underground_storage"]
and set(cavern_types).intersection(h2_caverns.columns)
):
h2_caverns = h2_caverns[cavern_types].sum(axis=1) h2_caverns = h2_caverns[cavern_types].sum(axis=1)
# only use sites with at least 2 TWh potential # only use sites with at least 2 TWh potential
@ -3056,7 +3056,6 @@ def maybe_adjust_costs_and_potentials(n, opts):
logger.info(f"changing {attr} for {carrier} by factor {factor}") logger.info(f"changing {attr} for {carrier} by factor {factor}")
# TODO this should rather be a config no wildcard
def limit_individual_line_extension(n, maxext): def limit_individual_line_extension(n, maxext):
logger.info(f"Limiting new HVAC and HVDC extensions to {maxext} MW") logger.info(f"Limiting new HVAC and HVDC extensions to {maxext} MW")
n.lines["s_nom_max"] = n.lines["s_nom"] + maxext n.lines["s_nom_max"] = n.lines["s_nom"] + maxext
@ -3275,14 +3274,13 @@ if __name__ == "__main__":
update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts) update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts)
options = snakemake.config["sector"] options = snakemake.params.sector
opts = snakemake.wildcards.sector_opts.split("-") opts = snakemake.wildcards.sector_opts.split("-")
investment_year = int(snakemake.wildcards.planning_horizons[-4:]) investment_year = int(snakemake.wildcards.planning_horizons[-4:])
overrides = override_component_attrs(snakemake.input.overrides) n = pypsa.Network(snakemake.input.network)
n = pypsa.Network(snakemake.input.network, override_component_attrs=overrides)
pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0)
nhours = n.snapshot_weightings.generators.sum() nhours = n.snapshot_weightings.generators.sum()
@ -3290,7 +3288,7 @@ if __name__ == "__main__":
costs = prepare_costs( costs = prepare_costs(
snakemake.input.costs, snakemake.input.costs,
snakemake.config["costs"], snakemake.params.costs,
nyears, nyears,
) )
@ -3306,10 +3304,10 @@ if __name__ == "__main__":
spatial = define_spatial(pop_layout.index, options) spatial = define_spatial(pop_layout.index, options)
if snakemake.config["foresight"] == "myopic": if snakemake.params.foresight == "myopic":
add_lifetime_wind_solar(n, costs) add_lifetime_wind_solar(n, costs)
conventional = snakemake.config["existing_capacities"]["conventional_carriers"] conventional = snakemake.params.conventional_carriers
for carrier in conventional: for carrier in conventional:
add_carrier_buses(n, carrier) add_carrier_buses(n, carrier)
@ -3379,15 +3377,15 @@ if __name__ == "__main__":
n = set_temporal_aggregation(n, opts, solver_name, drop_leap_day) n = set_temporal_aggregation(n, opts, solver_name, drop_leap_day)
limit_type = "config" limit_type = "config"
limit = get(snakemake.config["co2_budget"], investment_year) limit = get(snakemake.params.co2_budget, investment_year)
for o in opts: for o in opts:
if "cb" not in o: if "cb" not in o:
continue continue
limit_type = "carbon budget" limit_type = "carbon budget"
fn = "results/" + snakemake.params.RDIR + "/csvs/carbon_budget_distribution.csv" fn = "results/" + snakemake.params.RDIR + "/csvs/carbon_budget_distribution.csv"
if not os.path.exists(fn): if not os.path.exists(fn):
emissions_scope = snakemake.config["energy"]["emissions"] emissions_scope = snakemake.params.emissions_scope
report_year = snakemake.config["energy"]["eurostat_report_year"] report_year = snakemake.params.eurostat_report_year
build_carbon_budget( build_carbon_budget(
o, snakemake.input.eurostat, fn, emissions_scope, report_year o, snakemake.input.eurostat, fn, emissions_scope, report_year
) )
@ -3422,8 +3420,8 @@ if __name__ == "__main__":
if options["electricity_grid_connection"]: if options["electricity_grid_connection"]:
add_electricity_grid_connection(n, costs) add_electricity_grid_connection(n, costs)
first_year_myopic = (snakemake.config["foresight"] == "myopic") and ( first_year_myopic = (snakemake.params.foresight == "myopic") and (
snakemake.config["scenario"]["planning_horizons"][0] == investment_year snakemake.params.planning_horizons[0] == investment_year
) )
if options.get("cluster_heat_buses", False) and not first_year_myopic: if options.get("cluster_heat_buses", False) and not first_year_myopic:
@ -3431,4 +3429,6 @@ if __name__ == "__main__":
n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards))) n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
sanitize_carriers(n, snakemake.config)
n.export_to_netcdf(snakemake.output[0]) n.export_to_netcdf(snakemake.output[0])
View File
@ -58,9 +58,8 @@ if __name__ == "__main__":
else: else:
url = "https://zenodo.org/record/3517935/files/pypsa-eur-data-bundle.tar.xz" url = "https://zenodo.org/record/3517935/files/pypsa-eur-data-bundle.tar.xz"
# Save locations
tarball_fn = Path(f"{rootpath}/bundle.tar.xz") tarball_fn = Path(f"{rootpath}/bundle.tar.xz")
to_fn = Path(f"{rootpath}/data") to_fn = Path(rootpath) / Path(snakemake.output[0]).parent.parent
logger.info(f"Downloading databundle from '{url}'.") logger.info(f"Downloading databundle from '{url}'.")
disable_progress = snakemake.config["run"].get("disable_progressbar", False) disable_progress = snakemake.config["run"].get("disable_progressbar", False)
View File
@ -29,7 +29,7 @@ if __name__ == "__main__":
# Save locations # Save locations
zip_fn = Path(f"{rootpath}/IGGIELGN.zip") zip_fn = Path(f"{rootpath}/IGGIELGN.zip")
to_fn = Path(f"{rootpath}/data/gas_network/scigrid-gas") to_fn = Path(rootpath) / Path(snakemake.output[0]).parent.parent
logger.info(f"Downloading databundle from '{url}'.") logger.info(f"Downloading databundle from '{url}'.")
disable_progress = snakemake.config["run"].get("disable_progressbar", False) disable_progress = snakemake.config["run"].get("disable_progressbar", False)
View File
@ -10,23 +10,25 @@ import logging
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
import os
import sys
import tarfile import tarfile
from pathlib import Path from pathlib import Path
# Add pypsa-eur scripts to path for import of _helpers
sys.path.insert(0, os.getcwd() + "/../pypsa-eur/scripts")
from _helpers import configure_logging, progress_retrieve from _helpers import configure_logging, progress_retrieve
if __name__ == "__main__": if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake("retrieve_databundle")
rootpath = ".."
else:
rootpath = "."
configure_logging(snakemake) configure_logging(snakemake)
url = "https://zenodo.org/record/5824485/files/pypsa-eur-sec-data-bundle.tar.gz" url = "https://zenodo.org/record/5824485/files/pypsa-eur-sec-data-bundle.tar.gz"
tarball_fn = Path("sector-bundle.tar.gz") tarball_fn = Path(f"{rootpath}/sector-bundle.tar.gz")
to_fn = Path("data") to_fn = Path(rootpath) / Path(snakemake.output[0]).parent.parent
logger.info(f"Downloading databundle from '{url}'.") logger.info(f"Downloading databundle from '{url}'.")
disable_progress = snakemake.config["run"].get("disable_progressbar", False) disable_progress = snakemake.config["run"].get("disable_progressbar", False)
View File
@ -86,22 +86,21 @@ The rule :mod:`simplify_network` does up to four things:
""" """
import logging import logging
from functools import reduce from functools import partial, reduce
import numpy as np import numpy as np
import pandas as pd import pandas as pd
import pypsa import pypsa
import scipy as sp import scipy as sp
from _helpers import configure_logging, get_aggregation_strategies, update_p_nom_max from _helpers import configure_logging, update_p_nom_max
from add_electricity import load_costs from add_electricity import load_costs
from cluster_network import cluster_regions, clustering_for_n_clusters from cluster_network import cluster_regions, clustering_for_n_clusters
from pypsa.io import import_components_from_dataframe, import_series_from_dataframe from pypsa.clustering.spatial import (
from pypsa.networkclustering import (
aggregategenerators,
aggregateoneport, aggregateoneport,
busmap_by_stubs, busmap_by_stubs,
get_clustering_from_busmap, get_clustering_from_busmap,
) )
from pypsa.io import import_components_from_dataframe, import_series_from_dataframe
from scipy.sparse.csgraph import connected_components, dijkstra from scipy.sparse.csgraph import connected_components, dijkstra
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -149,17 +148,17 @@ def simplify_network_to_380(n):
return n, trafo_map return n, trafo_map
def _prepare_connection_costs_per_link(n, costs, config): def _prepare_connection_costs_per_link(n, costs, renewable_carriers, length_factor):
if n.links.empty: if n.links.empty:
return {} return {}
connection_costs_per_link = {} connection_costs_per_link = {}
for tech in config["renewable"]: for tech in renewable_carriers:
if tech.startswith("offwind"): if tech.startswith("offwind"):
connection_costs_per_link[tech] = ( connection_costs_per_link[tech] = (
n.links.length n.links.length
* config["lines"]["length_factor"] * length_factor
* ( * (
n.links.underwater_fraction n.links.underwater_fraction
* costs.at[tech + "-connection-submarine", "capital_cost"] * costs.at[tech + "-connection-submarine", "capital_cost"]
@ -172,10 +171,18 @@ def _prepare_connection_costs_per_link(n, costs, config):
def _compute_connection_costs_to_bus( def _compute_connection_costs_to_bus(
n, busmap, costs, config, connection_costs_per_link=None, buses=None n,
busmap,
costs,
renewable_carriers,
length_factor,
connection_costs_per_link=None,
buses=None,
): ):
if connection_costs_per_link is None: if connection_costs_per_link is None:
connection_costs_per_link = _prepare_connection_costs_per_link(n, costs, config) connection_costs_per_link = _prepare_connection_costs_per_link(
n, costs, renewable_carriers, length_factor
)
if buses is None: if buses is None:
buses = busmap.index[busmap.index != busmap.values] buses = busmap.index[busmap.index != busmap.values]
@ -245,11 +252,15 @@ def _aggregate_and_move_components(
_adjust_capital_costs_using_connection_costs(n, connection_costs_to_bus, output) _adjust_capital_costs_using_connection_costs(n, connection_costs_to_bus, output)
_, generator_strategies = get_aggregation_strategies(aggregation_strategies) generator_strategies = aggregation_strategies["generators"]
carriers = set(n.generators.carrier) - set(exclude_carriers) carriers = set(n.generators.carrier) - set(exclude_carriers)
generators, generators_pnl = aggregategenerators( generators, generators_pnl = aggregateoneport(
n, busmap, carriers=carriers, custom_strategies=generator_strategies n,
busmap,
"Generator",
carriers=carriers,
custom_strategies=generator_strategies,
) )
replace_components(n, "Generator", generators, generators_pnl) replace_components(n, "Generator", generators, generators_pnl)
@ -265,7 +276,16 @@ def _aggregate_and_move_components(
n.mremove(c, df.index[df.bus0.isin(buses_to_del) | df.bus1.isin(buses_to_del)]) n.mremove(c, df.index[df.bus0.isin(buses_to_del) | df.bus1.isin(buses_to_del)])
def simplify_links(n, costs, config, output, aggregation_strategies=dict()): def simplify_links(
n,
costs,
renewables,
length_factor,
p_max_pu,
exclude_carriers,
output,
aggregation_strategies=dict(),
):
## Complex multi-node links are folded into end-points ## Complex multi-node links are folded into end-points
logger.info("Simplifying connected link components") logger.info("Simplifying connected link components")
@ -315,7 +335,9 @@ def simplify_links(n, costs, config, output, aggregation_strategies=dict()):
busmap = n.buses.index.to_series() busmap = n.buses.index.to_series()
connection_costs_per_link = _prepare_connection_costs_per_link(n, costs, config) connection_costs_per_link = _prepare_connection_costs_per_link(
n, costs, renewables, length_factor
)
connection_costs_to_bus = pd.DataFrame( connection_costs_to_bus = pd.DataFrame(
0.0, index=n.buses.index, columns=list(connection_costs_per_link) 0.0, index=n.buses.index, columns=list(connection_costs_per_link)
) )
@ -333,12 +355,17 @@ def simplify_links(n, costs, config, output, aggregation_strategies=dict()):
) )
busmap.loc[buses] = b[np.r_[0, m.argmin(axis=0), 1]] busmap.loc[buses] = b[np.r_[0, m.argmin(axis=0), 1]]
connection_costs_to_bus.loc[buses] += _compute_connection_costs_to_bus( connection_costs_to_bus.loc[buses] += _compute_connection_costs_to_bus(
n, busmap, costs, config, connection_costs_per_link, buses n,
busmap,
costs,
renewables,
length_factor,
connection_costs_per_link,
buses,
) )
all_links = [i for _, i in sum(links, [])] all_links = [i for _, i in sum(links, [])]
p_max_pu = config["links"].get("p_max_pu", 1.0)
lengths = n.links.loc[all_links, "length"] lengths = n.links.loc[all_links, "length"]
name = lengths.idxmax() + "+{}".format(len(links) - 1) name = lengths.idxmax() + "+{}".format(len(links) - 1)
params = dict( params = dict(
@ -377,10 +404,6 @@ def simplify_links(n, costs, config, output, aggregation_strategies=dict()):
logger.debug("Collecting all components using the busmap") logger.debug("Collecting all components using the busmap")
exclude_carriers = config["clustering"]["simplify_network"].get(
"exclude_carriers", []
)
_aggregate_and_move_components( _aggregate_and_move_components(
n, n,
busmap, busmap,
@ -392,19 +415,23 @@ def simplify_links(n, costs, config, output, aggregation_strategies=dict()):
return n, busmap return n, busmap
def remove_stubs(n, costs, config, output, aggregation_strategies=dict()): def remove_stubs(
n,
costs,
renewable_carriers,
length_factor,
simplify_network,
output,
aggregation_strategies=dict(),
):
logger.info("Removing stubs") logger.info("Removing stubs")
across_borders = config["clustering"]["simplify_network"].get( across_borders = simplify_network["remove_stubs_across_borders"]
"remove_stubs_across_borders", True
)
matching_attrs = [] if across_borders else ["country"] matching_attrs = [] if across_borders else ["country"]
busmap = busmap_by_stubs(n, matching_attrs) busmap = busmap_by_stubs(n, matching_attrs)
connection_costs_to_bus = _compute_connection_costs_to_bus(n, busmap, costs, config) connection_costs_to_bus = _compute_connection_costs_to_bus(
n, busmap, costs, renewable_carriers, length_factor
exclude_carriers = config["clustering"]["simplify_network"].get(
"exclude_carriers", []
) )
_aggregate_and_move_components( _aggregate_and_move_components(
@ -413,7 +440,7 @@ def remove_stubs(n, costs, config, output, aggregation_strategies=dict()):
connection_costs_to_bus, connection_costs_to_bus,
output, output,
aggregation_strategies=aggregation_strategies, aggregation_strategies=aggregation_strategies,
exclude_carriers=exclude_carriers, exclude_carriers=simplify_network["exclude_carriers"],
) )
return n, busmap return n, busmap
@ -454,45 +481,42 @@ def aggregate_to_substations(n, aggregation_strategies=dict(), buses_i=None):
busmap = n.buses.index.to_series() busmap = n.buses.index.to_series()
busmap.loc[buses_i] = dist.idxmin(1) busmap.loc[buses_i] = dist.idxmin(1)
bus_strategies, generator_strategies = get_aggregation_strategies( line_strategies = aggregation_strategies.get("lines", dict())
aggregation_strategies generator_strategies = aggregation_strategies.get("generators", dict())
) one_port_strategies = aggregation_strategies.get("one_ports", dict())
clustering = get_clustering_from_busmap( clustering = get_clustering_from_busmap(
n, n,
busmap, busmap,
bus_strategies=bus_strategies,
aggregate_generators_weighted=True, aggregate_generators_weighted=True,
aggregate_generators_carriers=None, aggregate_generators_carriers=None,
aggregate_one_ports=["Load", "StorageUnit"], aggregate_one_ports=["Load", "StorageUnit"],
line_length_factor=1.0, line_length_factor=1.0,
line_strategies=line_strategies,
generator_strategies=generator_strategies, generator_strategies=generator_strategies,
one_port_strategies=one_port_strategies,
scale_link_capital_costs=False, scale_link_capital_costs=False,
) )
return clustering.network, busmap return clustering.network, busmap
def cluster( def cluster(
n, n_clusters, config, algorithm="hac", feature=None, aggregation_strategies=dict() n,
n_clusters,
focus_weights,
solver_name,
algorithm="hac",
feature=None,
aggregation_strategies=dict(),
): ):
logger.info(f"Clustering to {n_clusters} buses") logger.info(f"Clustering to {n_clusters} buses")
focus_weights = config.get("focus_weights", None)
renewable_carriers = pd.Index(
[
tech
for tech in n.generators.carrier.unique()
if tech.split("-", 2)[0] in config["renewable"]
]
)
clustering = clustering_for_n_clusters( clustering = clustering_for_n_clusters(
n, n,
n_clusters, n_clusters,
custom_busmap=False, custom_busmap=False,
aggregation_strategies=aggregation_strategies, aggregation_strategies=aggregation_strategies,
solver_name=config["solving"]["solver"]["name"], solver_name=solver_name,
algorithm=algorithm, algorithm=algorithm,
feature=feature, feature=feature,
focus_weights=focus_weights, focus_weights=focus_weights,
@ -508,92 +532,90 @@ if __name__ == "__main__":
snakemake = mock_snakemake("simplify_network", weather_year="", simpl="") snakemake = mock_snakemake("simplify_network", weather_year="", simpl="")
configure_logging(snakemake) configure_logging(snakemake)
n = pypsa.Network(snakemake.input.network) params = snakemake.params
solver_name = snakemake.config["solving"]["solver"]["name"]
aggregation_strategies = snakemake.config["clustering"].get( n = pypsa.Network(snakemake.input.network)
"aggregation_strategies", {} Nyears = n.snapshot_weightings.objective.sum() / 8760
)
# translate str entries of aggregation_strategies to pd.Series functions:
aggregation_strategies = {
p: {k: getattr(pd.Series, v) for k, v in aggregation_strategies[p].items()}
for p in aggregation_strategies.keys()
}
n, trafo_map = simplify_network_to_380(n) n, trafo_map = simplify_network_to_380(n)
Nyears = n.snapshot_weightings.objective.sum() / 8760
technology_costs = load_costs( technology_costs = load_costs(
snakemake.input.tech_costs, snakemake.input.tech_costs,
snakemake.config["costs"], params.costs,
snakemake.config["electricity"], params.max_hours,
Nyears, Nyears,
) )
n, simplify_links_map = simplify_links( n, simplify_links_map = simplify_links(
n, technology_costs, snakemake.config, snakemake.output, aggregation_strategies n,
technology_costs,
params.renewable_carriers,
params.length_factor,
params.p_max_pu,
params.simplify_network["exclude_carriers"],
snakemake.output,
params.aggregation_strategies,
) )
busmaps = [trafo_map, simplify_links_map] busmaps = [trafo_map, simplify_links_map]
cluster_config = snakemake.config["clustering"]["simplify_network"] if params.simplify_network["remove_stubs"]:
if cluster_config.get("remove_stubs", True):
n, stub_map = remove_stubs( n, stub_map = remove_stubs(
n, n,
technology_costs, technology_costs,
snakemake.config, params.renewable_carriers,
params.length_factor,
params.simplify_network,
snakemake.output, snakemake.output,
aggregation_strategies=aggregation_strategies, aggregation_strategies=params.aggregation_strategies,
) )
busmaps.append(stub_map) busmaps.append(stub_map)
if cluster_config.get("to_substations", False): if params.simplify_network["to_substations"]:
n, substation_map = aggregate_to_substations(n, aggregation_strategies) n, substation_map = aggregate_to_substations(n, params.aggregation_strategies)
busmaps.append(substation_map) busmaps.append(substation_map)
# treatment of outliers (nodes without a profile for considered carrier): # treatment of outliers (nodes without a profile for considered carrier):
# all nodes that have no profile of the given carrier are being aggregated to closest neighbor # all nodes that have no profile of the given carrier are being aggregated to closest neighbor
if ( if params.simplify_network["algorithm"] == "hac":
snakemake.config.get("clustering", {}) carriers = params.simplify_network["feature"].split("-")[0].split("+")
.get("cluster_network", {})
.get("algorithm", "hac")
== "hac"
or cluster_config.get("algorithm", "hac") == "hac"
):
carriers = (
cluster_config.get("feature", "solar+onwind-time").split("-")[0].split("+")
)
for carrier in carriers: for carrier in carriers:
buses_i = list( buses_i = list(
set(n.buses.index) - set(n.generators.query("carrier == @carrier").bus) set(n.buses.index) - set(n.generators.query("carrier == @carrier").bus)
) )
logger.info( logger.info(
f"clustering preparaton (hac): aggregating {len(buses_i)} buses of type {carrier}." f"clustering preparation (hac): aggregating {len(buses_i)} buses of type {carrier}."
)
n, busmap_hac = aggregate_to_substations(
n, params.aggregation_strategies, buses_i
) )
n, busmap_hac = aggregate_to_substations(n, aggregation_strategies, buses_i)
busmaps.append(busmap_hac) busmaps.append(busmap_hac)
if snakemake.wildcards.simpl: if snakemake.wildcards.simpl:
n, cluster_map = cluster( n, cluster_map = cluster(
n, n,
int(snakemake.wildcards.simpl), int(snakemake.wildcards.simpl),
snakemake.config, params.focus_weights,
cluster_config.get("algorithm", "hac"), solver_name,
cluster_config.get("feature", None), params.simplify_network["algorithm"],
aggregation_strategies, params.simplify_network["feature"],
params.aggregation_strategies,
) )
busmaps.append(cluster_map) busmaps.append(cluster_map)
# some entries in n.buses are not updated in previous functions, therefore can be wrong. As they are not needed # some entries in n.buses are not updated in previous functions, therefore can be wrong. As they are not needed
# and are lost when clustering (for example with the simpl wildcard), we remove them for consistency: # and are lost when clustering (for example with the simpl wildcard), we remove them for consistency:
buses_c = { remove = [
"symbol", "symbol",
"tags", "tags",
"under_construction", "under_construction",
"substation_lv", "substation_lv",
"substation_off", "substation_off",
}.intersection(n.buses.columns) "geometry",
n.buses = n.buses.drop(buses_c, axis=1) ]
n.buses.drop(remove, axis=1, inplace=True, errors="ignore")
n.lines.drop(remove, axis=1, errors="ignore", inplace=True)
update_p_nom_max(n) update_p_nom_max(n)
View File
@ -33,26 +33,21 @@ import numpy as np
import pandas as pd import pandas as pd
import pypsa import pypsa
import xarray as xr import xarray as xr
from _helpers import ( from _helpers import configure_logging, update_config_with_sector_opts
configure_logging,
override_component_attrs,
update_config_with_sector_opts,
)
from vresutils.benchmark import memory_logger
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
pypsa.pf.logger.setLevel(logging.WARNING) pypsa.pf.logger.setLevel(logging.WARNING)
from pypsa.descriptors import get_switchable_as_dense as get_as_dense from pypsa.descriptors import get_switchable_as_dense as get_as_dense
def add_land_use_constraint(n, config): def add_land_use_constraint(n, planning_horizons, config):
if "m" in snakemake.wildcards.clusters: if "m" in snakemake.wildcards.clusters:
_add_land_use_constraint_m(n, config) _add_land_use_constraint_m(n, planning_horizons, config)
else: else:
_add_land_use_constraint(n, config) _add_land_use_constraint(n)
def _add_land_use_constraint(n, config): def _add_land_use_constraint(n):
# warning: this will miss existing offwind which is not classed AC-DC and has carrier 'offwind' # warning: this will miss existing offwind which is not classed AC-DC and has carrier 'offwind'
for carrier in ["solar", "onwind", "offwind-ac", "offwind-dc"]: for carrier in ["solar", "onwind", "offwind-ac", "offwind-dc"]:
@ -81,10 +76,10 @@ def _add_land_use_constraint(n, config):
n.generators.p_nom_max.clip(lower=0, inplace=True) n.generators.p_nom_max.clip(lower=0, inplace=True)
def _add_land_use_constraint_m(n, config): def _add_land_use_constraint_m(n, planning_horizons, config):
# if generators clustering is lower than network clustering, land_use accounting is at generators clusters # if generators clustering is lower than network clustering, land_use accounting is at generators clusters
planning_horizons = config["scenario"]["planning_horizons"] planning_horizons = param["planning_horizons"]
grouping_years = config["existing_capacities"]["grouping_years"] grouping_years = config["existing_capacities"]["grouping_years"]
current_horizon = snakemake.wildcards.planning_horizons current_horizon = snakemake.wildcards.planning_horizons
@@ -142,11 +137,18 @@ def add_co2_sequestration_limit(n, limit=200):
     )


-def prepare_network(n, solve_opts=None, config=None):
+def prepare_network(
+    n,
+    solve_opts=None,
+    config=None,
+    foresight=None,
+    planning_horizons=None,
+    co2_sequestration_potential=None,
+):
     if "clip_p_max_pu" in solve_opts:
         for df in (
             n.generators_t.p_max_pu,
-            n.generators_t.p_min_pu,  # TODO: check if this can be removed
+            n.generators_t.p_min_pu,
             n.storage_units_t.inflow,
         ):
             df.where(df > solve_opts["clip_p_max_pu"], other=0.0, inplace=True)
@@ -192,11 +194,11 @@ def prepare_network(n, solve_opts=None, config=None):
         n.set_snapshots(n.snapshots[:nhours])
         n.snapshot_weightings[:] = 8760.0 / nhours

-    if config["foresight"] == "myopic":
-        add_land_use_constraint(n, config)
+    if foresight == "myopic":
+        add_land_use_constraint(n, planning_horizons, config)

     if n.stores.carrier.eq("co2 stored").any():
-        limit = config["sector"].get("co2_sequestration_potential", 200)
+        limit = co2_sequestration_potential
         add_co2_sequestration_limit(n, limit=limit)

     return n
@@ -229,8 +231,7 @@ def add_CCL_constraints(n, config):
     p_nom = n.model["Generator-p_nom"]

     gens = n.generators.query("p_nom_extendable").rename_axis(index="Generator-ext")
-    grouper = [gens.bus.map(n.buses.country), gens.carrier]
-    grouper = xr.DataArray(pd.MultiIndex.from_arrays(grouper), dims=["Generator-ext"])
+    grouper = pd.concat([gens.bus.map(n.buses.country), gens.carrier])
     lhs = p_nom.groupby(grouper).sum().rename(bus="country")

     minimum = xr.DataArray(agg_p_nom_minmax["min"].dropna()).rename(dim_0="group")
@@ -275,13 +276,13 @@ def add_EQ_constraints(n, o, scaling=1e-1):
     float_regex = "[0-9]*\.?[0-9]+"
     level = float(re.findall(float_regex, o)[0])
     if o[-1] == "c":
-        ggrouper = n.generators.bus.map(n.buses.country).to_xarray()
-        lgrouper = n.loads.bus.map(n.buses.country).to_xarray()
-        sgrouper = n.storage_units.bus.map(n.buses.country).to_xarray()
+        ggrouper = n.generators.bus.map(n.buses.country)
+        lgrouper = n.loads.bus.map(n.buses.country)
+        sgrouper = n.storage_units.bus.map(n.buses.country)
     else:
-        ggrouper = n.generators.bus.to_xarray()
-        lgrouper = n.loads.bus.to_xarray()
-        sgrouper = n.storage_units.bus.to_xarray()
+        ggrouper = n.generators.bus
+        lgrouper = n.loads.bus
+        sgrouper = n.storage_units.bus
     load = (
         n.snapshot_weightings.generators
         @ n.loads_t.p_set.groupby(lgrouper, axis=1).sum()
@@ -295,7 +296,7 @@ def add_EQ_constraints(n, o, scaling=1e-1):
     p = n.model["Generator-p"]
     lhs_gen = (
         (p * (n.snapshot_weightings.generators * scaling))
-        .groupby(ggrouper)
+        .groupby(ggrouper.to_xarray())
         .sum()
         .sum("snapshot")
     )
@@ -304,7 +305,7 @@ def add_EQ_constraints(n, o, scaling=1e-1):
         spillage = n.model["StorageUnit-spill"]
         lhs_spill = (
             (spillage * (-n.snapshot_weightings.stores * scaling))
-            .groupby(sgrouper)
+            .groupby(sgrouper.to_xarray())
             .sum()
             .sum("snapshot")
         )
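
The groupers now stay plain pandas Series so the same mapping can drive both the pandas groupby(..., axis=1) on the load time series and, converted with .to_xarray(), the grouping of linopy variables. A small self-contained sketch of the two uses (bus and country names are made up):

import pandas as pd

# Hypothetical generator-to-bus and bus-to-country mappings.
gen_bus = pd.Series({"gen1": "DE0 0", "gen2": "FR0 0"}, name="bus")
bus_country = pd.Series({"DE0 0": "DE", "FR0 0": "FR"})

ggrouper = gen_bus.map(bus_country)   # pandas Series, usable with DataFrame.groupby
ggrouper_xr = ggrouper.to_xarray()    # xarray DataArray, suitable for linopy .groupby()
print(ggrouper_xr.dims)               # ('index',)
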
@@ -373,13 +374,14 @@ def add_SAFE_constraints(n, config):
     peakdemand = n.loads_t.p_set.sum(axis=1).max()
     margin = 1.0 + config["electricity"]["SAFE_reservemargin"]
     reserve_margin = peakdemand * margin
-    # TODO: do not take this from the plotting config!
-    conv_techs = config["plotting"]["conv_techs"]
-    ext_gens_i = n.generators.query("carrier in @conv_techs & p_nom_extendable").index
+    conventional_carriers = config["electricity"]["conventional_carriers"]
+    ext_gens_i = n.generators.query(
+        "carrier in @conventional_carriers & p_nom_extendable"
+    ).index
     p_nom = n.model["Generator-p_nom"].loc[ext_gens_i]
     lhs = p_nom.sum()
     exist_conv_caps = n.generators.query(
-        "~p_nom_extendable & carrier in @conv_techs"
+        "~p_nom_extendable & carrier in @conventional_carriers"
     ).p_nom.sum()
     rhs = reserve_margin - exist_conv_caps
     n.model.add_constraints(lhs >= rhs, name="safe_mintotalcap")
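
The reserve-margin constraint now draws the conventional technologies from the electricity section of the config rather than the plotting palette. A hedged sketch of the inputs it reads and the resulting right-hand side (carrier names and numbers are illustrative, not taken from this commit):

# Illustrative config excerpt and arithmetic; carriers and values are placeholders.
config = {
    "electricity": {
        "SAFE_reservemargin": 0.1,
        "conventional_carriers": ["nuclear", "OCGT", "CCGT", "coal", "lignite", "oil"],
    }
}

peakdemand = 80_000.0  # MW, made-up peak load
margin = 1.0 + config["electricity"]["SAFE_reservemargin"]
reserve_margin = peakdemand * margin   # 88,000 MW must be firmly available
exist_conv_caps = 60_000.0             # MW of existing conventional capacity
rhs = reserve_margin - exist_conv_caps # extendable conventional capacity >= 28,000 MW
print(rhs)
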
@@ -591,16 +593,15 @@ def extra_functionality(n, snapshots):
     add_pipe_retrofit_constraint(n)


-def solve_network(n, config, opts="", **kwargs):
-    set_of_options = config["solving"]["solver"]["options"]
-    solver_options = (
-        config["solving"]["solver_options"][set_of_options] if set_of_options else {}
-    )
-    solver_name = config["solving"]["solver"]["name"]
-    cf_solving = config["solving"]["options"]
+def solve_network(n, config, solving, opts="", **kwargs):
+    set_of_options = solving["solver"]["options"]
+    solver_options = solving["solver_options"][set_of_options] if set_of_options else {}
+    solver_name = solving["solver"]["name"]
+    cf_solving = solving["options"]
     track_iterations = cf_solving.get("track_iterations", False)
     min_iterations = cf_solving.get("min_iterations", 4)
     max_iterations = cf_solving.get("max_iterations", 6)
+    transmission_losses = cf_solving.get("transmission_losses", 0)

     # add to network for extra_functionality
     n.config = config
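
solve_network now receives the solving sub-config as an explicit argument instead of digging it out of the full config. A hedged sketch of the structure it expects, with the solver choice and option-set name as placeholders:

# Illustrative structure only; keys beyond those read above are placeholders.
solving = {
    "solver": {"name": "glpk", "options": "glpk-default"},
    "solver_options": {"glpk-default": {}},
    "options": {
        "skip_iterations": True,
        "transmission_losses": 2,  # 0 disables the loss approximation
    },
}

n = solve_network(n, config=config, solving=solving, opts=opts, log_fn="solver.log")
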
@@ -614,6 +615,7 @@ def solve_network(n, config, opts="", **kwargs):
     if skip_iterations:
         status, condition = n.optimize(
             solver_name=solver_name,
+            transmission_losses=transmission_losses,
             extra_functionality=extra_functionality,
             **solver_options,
             **kwargs,
@@ -624,6 +626,7 @@ def solve_network(n, config, opts="", **kwargs):
             track_iterations=track_iterations,
             min_iterations=min_iterations,
             max_iterations=max_iterations,
+            transmission_losses=transmission_losses,
             extra_functionality=extra_functionality,
             **solver_options,
             **kwargs,
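
The new transmission_losses setting is forwarded straight to PyPSA's optimize() in both the single-shot and the iterative path; to my understanding it sets the number of piecewise-linear segments used to approximate quadratic line losses, with 0 keeping the lossless formulation. A minimal sketch, assuming a PyPSA version that supports the argument:

import pypsa

n = pypsa.examples.ac_dc_meshed()  # small built-in example network
# 2 tangent segments for the loss approximation; 0 would keep lossless lines.
n.optimize(solver_name="glpk", transmission_losses=2)
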
@@ -664,27 +667,28 @@ if __name__ == "__main__":
     if "sector_opts" in snakemake.wildcards.keys():
         opts += "-" + snakemake.wildcards.sector_opts
     opts = [o for o in opts.split("-") if o != ""]
-    solve_opts = snakemake.config["solving"]["options"]
+    solve_opts = snakemake.params.solving["options"]

     np.random.seed(solve_opts.get("seed", 123))

-    fn = getattr(snakemake.log, "memory", None)
-    with memory_logger(filename=fn, interval=30.0) as mem:
-        if "overrides" in snakemake.input.keys():
-            overrides = override_component_attrs(snakemake.input.overrides)
-            n = pypsa.Network(
-                snakemake.input.network, override_component_attrs=overrides
-            )
-        else:
-            n = pypsa.Network(snakemake.input.network)
+    n = pypsa.Network(snakemake.input.network)

-        n = prepare_network(n, solve_opts, config=snakemake.config)
+    n = prepare_network(
+        n,
+        solve_opts,
+        config=snakemake.config,
+        foresight=snakemake.params.foresight,
+        planning_horizons=snakemake.params.planning_horizons,
+        co2_sequestration_potential=snakemake.params["co2_sequestration_potential"],
+    )

-        n = solve_network(
-            n, config=snakemake.config, opts=opts, log_fn=snakemake.log.solver
-        )
+    n = solve_network(
+        n,
+        config=snakemake.config,
+        solving=snakemake.params.solving,
+        opts=opts,
+        log_fn=snakemake.log.solver,
+    )

     n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
     n.export_to_netcdf(snakemake.output[0])
-    logger.info("Maximum memory usage: {}".format(mem.mem_usage))

View File

@@ -11,13 +11,8 @@ import logging

 import numpy as np
 import pypsa
-from _helpers import (
-    configure_logging,
-    override_component_attrs,
-    update_config_with_sector_opts,
-)
+from _helpers import configure_logging, update_config_with_sector_opts
 from solve_network import prepare_network, solve_network
-from vresutils.benchmark import memory_logger

 logger = logging.getLogger(__name__)
@@ -43,27 +38,17 @@ if __name__ == "__main__":
     opts = (snakemake.wildcards.opts + "-" + snakemake.wildcards.sector_opts).split("-")
     opts = [o for o in opts if o != ""]

-    solve_opts = snakemake.config["solving"]["options"]
+    solve_opts = snakemake.params.options

     np.random.seed(solve_opts.get("seed", 123))

-    fn = getattr(snakemake.log, "memory", None)
-    with memory_logger(filename=fn, interval=30.0) as mem:
-        if "overrides" in snakemake.input:
-            overrides = override_component_attrs(snakemake.input.overrides)
-            n = pypsa.Network(
-                snakemake.input.network, override_component_attrs=overrides
-            )
-        else:
-            n = pypsa.Network(snakemake.input.network)
+    n = pypsa.Network(snakemake.input.network)

     n.optimize.fix_optimal_capacities()
     n = prepare_network(n, solve_opts, config=snakemake.config)
     n = solve_network(
         n, config=snakemake.config, opts=opts, log_fn=snakemake.log.solver
     )

     n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
     n.export_to_netcdf(snakemake.output[0])
-    logger.info("Maximum memory usage: {}".format(mem.mem_usage))