Commit 1ebe53cdac ("merge master")
Fabian Neumann, 2023-01-06 18:45:36 +01:00
95 changed files with 5959 additions and 3111 deletions

.git-blame-ignore-revs (new file)

@@ -0,0 +1,7 @@
# SPDX-FileCopyrightText: : 2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: CC0-1.0
# Exclude pre-commit applications
5d1ef8a64055a039aa4a0834d2d26fe7752fe9a0
92080b1cd2ca5f123158571481722767b99c2b27

.gitattributes

@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors
+# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
 #
 # SPDX-License-Identifier: CC0-1.0


@@ -1,5 +1,5 @@
 blank_issues_enabled: false
 contact_links:
 - name: PyPSA Mailing List
   url: https://groups.google.com/forum/#!forum/pypsa
   about: Please ask and answer general usage questions here.


@@ -16,7 +16,7 @@ on:
 branches:
 - master
 schedule:
 - cron: "0 5 * * TUE"
 env:
 CACHE_NUMBER: 1 # Change this value to manually reset the environment cache
@@ -28,17 +28,17 @@ jobs:
 matrix:
 include:
 # Matrix required to handle caching with Mambaforge
 - os: ubuntu-latest
   label: ubuntu-latest
   prefix: /usr/share/miniconda3/envs/pypsa-eur
 - os: macos-latest
   label: macos-latest
   prefix: /Users/runner/miniconda3/envs/pypsa-eur
 - os: windows-latest
   label: windows-latest
   prefix: C:\Miniconda3\envs\pypsa-eur
 name: ${{ matrix.label }}
@@ -49,42 +49,52 @@ jobs:
 shell: bash -l {0}
 steps:
 - uses: actions/checkout@v2
 - name: Setup secrets
   run: |
     echo -ne "url: ${CDSAPI_URL}\nkey: ${CDSAPI_TOKEN}\n" > ~/.cdsapirc
 - name: Add solver to environment
   run: |
echo -e " - glpk\n - ipopt<3.13.3" >> envs/environment.yaml echo -e "- glpk\n- ipopt" >> envs/environment.yaml
- name: Setup Mambaforge - name: Add solver to environment
uses: conda-incubator/setup-miniconda@v2 run: |
with: echo -e "- glpk\n- ipopt<3.13.3" >> envs/environment.yaml
miniforge-variant: Mambaforge if: ${{ matrix.label }} == 'windows-latest'
miniforge-version: latest
activate-environment: pypsa-eur
use-mamba: true
- name: Set cache date - name: Add solver to environment
run: echo "DATE=$(date +'%Y%m%d')" >> $GITHUB_ENV run: |
echo -e "- glpk\n- ipopt" >> envs/environment.yaml
if: ${{ matrix.label }} != 'windows-latest'
- name: Create environment cache - name: Setup Mambaforge
uses: actions/cache@v2 uses: conda-incubator/setup-miniconda@v2
id: cache with:
with: miniforge-variant: Mambaforge
path: ${{ matrix.prefix }} miniforge-version: latest
key: ${{ matrix.label }}-conda-${{ hashFiles('envs/environment.yaml') }}-${{ env.DATE }}-${{ env.CACHE_NUMBER }} activate-environment: pypsa-eur
use-mamba: true
- name: Update environment due to outdated or unavailable cache - name: Set cache date
run: mamba env update -n pypsa-eur -f envs/environment.yaml run: echo "DATE=$(date +'%Y%m%d')" >> $GITHUB_ENV
if: steps.cache.outputs.cache-hit != 'true'
- name: Test snakemake workflow - name: Create environment cache
run: | uses: actions/cache@v2
conda activate pypsa-eur id: cache
conda list with:
cp test/config.test1.yaml config.yaml path: ${{ matrix.prefix }}
snakemake --cores all solve_all_networks key: ${{ matrix.label }}-conda-${{ hashFiles('envs/environment.yaml') }}-${{ env.DATE }}-${{ env.CACHE_NUMBER }}
rm -rf resources/*.nc resources/*.geojson resources/*.h5 networks results
- name: Update environment due to outdated or unavailable cache
run: mamba env update -n pypsa-eur -f envs/environment.yaml
if: steps.cache.outputs.cache-hit != 'true'
- name: Test snakemake workflow
run: |
conda activate pypsa-eur
conda list
cp test/config.test1.yaml config.yaml
snakemake --cores all solve_all_networks
rm -rf resources/*.nc resources/*.geojson resources/*.h5 networks results

.gitignore

@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors
+# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
 #
 # SPDX-License-Identifier: CC0-1.0
@@ -19,6 +19,7 @@ gurobi.log
 /data
 /data/links_p_nom.csv
 /cutouts
+/dask-worker-space
 doc/_build

.pre-commit-config.yaml (new file)

@@ -0,0 +1,92 @@
# SPDX-FileCopyrightText: : 2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: CC0-1.0
exclude: "^LICENSES"
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
hooks:
- id: check-merge-conflict
- id: end-of-file-fixer
- id: fix-encoding-pragma
- id: mixed-line-ending
- id: trailing-whitespace
- id: check-added-large-files
args: ["--maxkb=2000"]
# Sort package imports alphabetically
- repo: https://github.com/PyCQA/isort
rev: 5.11.4
hooks:
- id: isort
args: ["--profile", "black", "--filter-files"]
# Convert relative imports to absolute imports
- repo: https://github.com/MarcoGorelli/absolufy-imports
rev: v0.3.1
hooks:
- id: absolufy-imports
# Find common spelling mistakes in comments and docstrings
- repo: https://github.com/codespell-project/codespell
rev: v2.2.2
hooks:
- id: codespell
args: ['--ignore-regex="(\b[A-Z]+\b)"', '--ignore-words-list=fom'] # Ignore capital case words, e.g. country codes
types_or: [python, rst, markdown]
files: ^(scripts|doc)/
# Make docstrings PEP 257 compliant
- repo: https://github.com/PyCQA/docformatter
rev: v1.5.1
hooks:
- id: docformatter
args: ["--in-place", "--make-summary-multi-line", "--pre-summary-newline"]
- repo: https://github.com/keewis/blackdoc
rev: v0.3.8
hooks:
- id: blackdoc
# Formatting with "black" coding style
- repo: https://github.com/psf/black
rev: 22.12.0
hooks:
# Format Python files
- id: black
# Format Jupyter Python notebooks
- id: black-jupyter
# Remove output from Jupyter notebooks
- repo: https://github.com/aflc/pre-commit-jupyter
rev: v1.2.1
hooks:
- id: jupyter-notebook-cleanup
args: ["--remove-kernel-metadata"]
# Do YAML formatting (before the linter checks it for misses)
- repo: https://github.com/macisamuele/language-formatters-pre-commit-hooks
rev: v2.5.0
hooks:
- id: pretty-format-yaml
args: [--autofix, --indent, "2", --preserve-quotes]
# Format Snakemake rule / workflow files
- repo: https://github.com/snakemake/snakefmt
rev: v0.8.0
hooks:
- id: snakefmt
# For cleaning jupyter notebooks
- repo: https://github.com/aflc/pre-commit-jupyter
rev: v1.2.1
hooks:
- id: jupyter-notebook-cleanup
exclude: examples/solve-on-remote.ipynb
# Check for FSFE REUSE compliance (licensing)
- repo: https://github.com/fsfe/reuse-tool
rev: v1.1.0
hooks:
- id: reuse


@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors
+# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
 #
 # SPDX-License-Identifier: CC0-1.0
@@ -7,5 +7,5 @@ version: 2
 python:
   version: 3.8
 install:
 - requirements: doc/requirements.txt
 system_packages: true


@@ -4,15 +4,15 @@ Upstream-Contact: Tom Brown <tom.brown@kit.edu>
 Source: https://github.com/pypsa/pypsa-eur
 Files: doc/img/*
-Copyright: 2019 Fabian Neumann (KIT)
+Copyright: 2019 Fabian Neumann (TUB, KIT)
 License: CC-BY-4.0
 Files: doc/configtables/*
-Copyright: 2019 Fabian Neumann (KIT)
+Copyright: 2019 Fabian Neumann (TUB, KIT)
 License: CC-BY-4.0
 Files: data/*
-Copyright: 2017-2020 The PyPSA-Eur Authors
+Copyright: 2017-2022 The PyPSA-Eur Authors
 License: CC-BY-4.0
 Files: .github/*
@@ -20,9 +20,9 @@ Copyright: 2019 The PyPSA-Eur Authors
 License: CC0-1.0
 Files: matplotlibrc
-Copyright: : 2017-2020 The PyPSA-Eur Authors
+Copyright: : 2017-2022 The PyPSA-Eur Authors
 License: CC0-1.0
 Files: borg-it
-Copyright: : 2017-2020 The PyPSA-Eur Authors
+Copyright: : 2017-2022 The PyPSA-Eur Authors
 License: CC0-1.0


@@ -6,7 +6,7 @@ cff-version: 1.1.0
 message: "If you use this package, please cite the corresponding manuscript in Energy Strategy Reviews."
 title: "PyPSA-Eur: An open optimisation model of the European transmission system"
 repository: https://github.com/pypsa/pypsa-eur
-version: 0.4.0
+version: 0.6.1
 license: MIT
 journal: Energy Strategy Reviews
 doi: 10.1016/j.esr.2018.08.012


@@ -1,6 +1,6 @@
 MIT License
-Copyright 2017-2021 The PyPSA-Eur Authors
+Copyright 2017-2022 The PyPSA-Eur Authors
 Permission is hereby granted, free of charge, to any person obtaining a copy of
 this software and associated documentation files (the "Software"), to deal in


@@ -1,5 +1,5 @@
 <!--
-SPDX-FileCopyrightText: 2017-2020 The PyPSA-Eur Authors
+SPDX-FileCopyrightText: 2017-2022 The PyPSA-Eur Authors
 SPDX-License-Identifier: CC-BY-4.0
 -->
@@ -8,7 +8,6 @@ SPDX-License-Identifier: CC-BY-4.0
 [![Documentation](https://readthedocs.org/projects/pypsa-eur/badge/?version=latest)](https://pypsa-eur.readthedocs.io/en/latest/?badge=latest)
 ![Size](https://img.shields.io/github/repo-size/pypsa/pypsa-eur)
 [![Zenodo](https://zenodo.org/badge/DOI/10.5281/zenodo.3520874.svg)](https://doi.org/10.5281/zenodo.3520874)
-[![Gitter](https://badges.gitter.im/PyPSA/community.svg)](https://gitter.im/PyPSA/community?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
 [![Snakemake](https://img.shields.io/badge/snakemake-≥5.0.0-brightgreen.svg?style=flat)](https://snakemake.readthedocs.io)
 [![REUSE status](https://api.reuse.software/badge/github.com/pypsa/pypsa-eur)](https://api.reuse.software/info/github.com/pypsa/pypsa-eur)
@@ -38,9 +37,7 @@ curtailment. We recommend to cluster the network to a couple of
 hundred nodes to remove these local inconsistencies. See the
 discussion in Section 3.4 "Model validation" of the paper.
-![PyPSA-Eur Grid Model](doc/img/base.png)
-![PyPSA-Eur Grid Model Simplified](doc/img/elec_s_X.png)
+![PyPSA-Eur Grid Model](doc/img/elec.png)
 The model building routines are defined through a snakemake workflow. The model is designed to be imported into the open toolbox
 [PyPSA](https://github.com/PyPSA/PyPSA) for operational studies as

Snakefile

@ -1,4 +1,4 @@
# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors # SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
# #
# SPDX-License-Identifier: MIT # SPDX-License-Identifier: MIT
@ -6,65 +6,101 @@ from os.path import normpath, exists
from shutil import copyfile, move from shutil import copyfile, move
from snakemake.remote.HTTP import RemoteProvider as HTTPRemoteProvider from snakemake.remote.HTTP import RemoteProvider as HTTPRemoteProvider
HTTP = HTTPRemoteProvider() HTTP = HTTPRemoteProvider()
if not exists("config.yaml"): if not exists("config.yaml"):
copyfile("config.default.yaml", "config.yaml") copyfile("config.default.yaml", "config.yaml")
configfile: "config.yaml" configfile: "config.yaml"
COSTS="data/costs.csv"
ATLITE_NPROCESSES = config['atlite'].get('nprocesses', 4) run = config.get("run", {})
RDIR = run["name"] + "/" if run.get("name") else ""
CDIR = RDIR if not run.get("shared_cutouts") else ""
COSTS = "resources/" + RDIR + "costs.csv"
ATLITE_NPROCESSES = config["atlite"].get("nprocesses", 4)
wildcard_constraints: wildcard_constraints:
simpl="[a-zA-Z0-9]*|all", simpl="[a-zA-Z0-9]*|all",
clusters="[0-9]+m?|all", clusters="[0-9]+m?|all",
ll="(v|c)([0-9\.]+|opt|all)|all", ll="(v|c)([0-9\.]+|opt|all)|all",
opts="[-+a-zA-Z0-9\.]*" opts="[-+a-zA-Z0-9\.]*",
rule cluster_all_networks: rule cluster_all_networks:
input: expand("networks/elec_s{simpl}_{clusters}.nc", **config['scenario']) input:
expand("networks/" + RDIR + "elec_s{simpl}_{clusters}.nc", **config["scenario"]),
rule extra_components_all_networks: rule extra_components_all_networks:
input: expand("networks/elec_s{simpl}_{clusters}_ec.nc", **config['scenario']) input:
expand(
"networks/" + RDIR + "elec_s{simpl}_{clusters}_ec.nc", **config["scenario"]
),
rule prepare_all_networks: rule prepare_all_networks:
input: expand("networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", **config['scenario']) input:
expand(
"networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
**config["scenario"]
),
rule solve_all_networks: rule solve_all_networks:
input: expand("results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", **config['scenario']) input:
expand(
"results/networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
**config["scenario"]
),
if config['enable'].get('prepare_links_p_nom', False): if config["enable"].get("prepare_links_p_nom", False):
rule prepare_links_p_nom: rule prepare_links_p_nom:
output: 'data/links_p_nom.csv' output:
log: 'logs/prepare_links_p_nom.log' "data/links_p_nom.csv",
log:
"logs/" + RDIR + "prepare_links_p_nom.log",
threads: 1 threads: 1
resources: mem_mb=500 resources:
script: 'scripts/prepare_links_p_nom.py' mem_mb=1500,
script:
"scripts/prepare_links_p_nom.py"
datafiles = ['ch_cantons.csv', 'je-e-21.03.02.xls', datafiles = [
'eez/World_EEZ_v8_2014.shp', 'EIA_hydro_generation_2000_2014.csv', "ch_cantons.csv",
'hydro_capacities.csv', 'naturalearth/ne_10m_admin_0_countries.shp', "je-e-21.03.02.xls",
'NUTS_2013_60M_SH/data/NUTS_RG_60M_2013.shp', 'nama_10r_3popgdp.tsv.gz', "eez/World_EEZ_v8_2014.shp",
'nama_10r_3gdp.tsv.gz', 'corine/g250_clc06_V18_5.tif'] "hydro_capacities.csv",
"naturalearth/ne_10m_admin_0_countries.shp",
"NUTS_2013_60M_SH/data/NUTS_RG_60M_2013.shp",
"nama_10r_3popgdp.tsv.gz",
"nama_10r_3gdp.tsv.gz",
"corine/g250_clc06_V18_5.tif",
]
if not config.get('tutorial', False): if not config.get("tutorial", False):
datafiles.extend(["natura/Natura2000_end2015.shp", "GEBCO_2014_2D.nc"]) datafiles.extend(["natura/Natura2000_end2015.shp", "GEBCO_2014_2D.nc"])
if config['enable'].get('retrieve_databundle', True): if config["enable"].get("retrieve_databundle", True):
rule retrieve_databundle: rule retrieve_databundle:
output: expand('data/bundle/{file}', file=datafiles) output:
log: "logs/retrieve_databundle.log" expand("data/bundle/{file}", file=datafiles),
script: 'scripts/retrieve_databundle.py' log:
"logs/" + RDIR + "retrieve_databundle.log",
resources:
mem_mb=1000,
script:
"scripts/retrieve_databundle.py"
# Downloading Copernicus Global Land Cover for land cover and land use: # Downloading Copernicus Global Land Cover for land cover and land use:
# Website: https://land.copernicus.eu/global/products/lc # Website: https://land.copernicus.eu/global/products/lc
@ -104,118 +140,232 @@ rule determine_availability_matrix_MD_UA:
"scripts/determine_availability_matrix_MD_UA.py.ipynb" "scripts/determine_availability_matrix_MD_UA.py.ipynb"
rule retrieve_load_data: rule retrieve_load_data:
input: HTTP.remote("data.open-power-system-data.org/time_series/2019-06-05/time_series_60min_singleindex.csv", keep_local=True, static=True) input:
output: "data/load_raw.csv" HTTP.remote(
run: move(input[0], output[0]) "data.open-power-system-data.org/time_series/2019-06-05/time_series_60min_singleindex.csv",
keep_local=True,
static=True,
),
output:
"data/load_raw.csv",
resources:
mem_mb=5000,
run:
move(input[0], output[0])
rule build_load_data: rule build_load_data:
input: "data/load_raw.csv" input:
output: "resources/load.csv" "data/load_raw.csv",
log: "logs/build_load_data.log" output:
script: 'scripts/build_load_data.py' "resources/" + RDIR + "load.csv",
log:
"logs/" + RDIR + "build_load_data.log",
resources:
mem_mb=5000,
script:
"scripts/build_load_data.py"
rule build_powerplants: rule build_powerplants:
input: input:
base_network="networks/base.nc", base_network="networks/" + RDIR + "base.nc",
custom_powerplants="data/custom_powerplants.csv" custom_powerplants="data/custom_powerplants.csv",
output: "resources/powerplants.csv" output:
log: "logs/build_powerplants.log" "resources/" + RDIR + "powerplants.csv",
log:
"logs/" + RDIR + "build_powerplants.log",
threads: 1 threads: 1
resources: mem_mb=500 resources:
script: "scripts/build_powerplants.py" mem_mb=5000,
script:
"scripts/build_powerplants.py"
rule base_network: rule base_network:
input: input:
eg_buses='data/entsoegridkit/buses.csv', eg_buses="data/entsoegridkit/buses.csv",
eg_lines='data/entsoegridkit/lines.csv', eg_lines="data/entsoegridkit/lines.csv",
eg_links='data/entsoegridkit/links.csv', eg_links="data/entsoegridkit/links.csv",
eg_converters='data/entsoegridkit/converters.csv', eg_converters="data/entsoegridkit/converters.csv",
eg_transformers='data/entsoegridkit/transformers.csv', eg_transformers="data/entsoegridkit/transformers.csv",
parameter_corrections='data/parameter_corrections.yaml', parameter_corrections="data/parameter_corrections.yaml",
links_p_nom='data/links_p_nom.csv', links_p_nom="data/links_p_nom.csv",
links_tyndp='data/links_tyndp.csv', links_tyndp="data/links_tyndp.csv",
country_shapes='resources/country_shapes.geojson', country_shapes="resources/" + RDIR + "country_shapes.geojson",
offshore_shapes='resources/offshore_shapes.geojson', offshore_shapes="resources/" + RDIR + "offshore_shapes.geojson",
europe_shape='resources/europe_shape.geojson' europe_shape="resources/" + RDIR + "europe_shape.geojson",
output: "networks/base.nc" output:
log: "logs/base_network.log" "networks/" + RDIR + "base.nc",
benchmark: "benchmarks/base_network" log:
"logs/" + RDIR + "base_network.log",
benchmark:
"benchmarks/" + RDIR + "base_network"
threads: 1 threads: 1
resources: mem_mb=500 resources:
script: "scripts/base_network.py" mem_mb=1500,
script:
"scripts/base_network.py"
rule build_shapes: rule build_shapes:
input: input:
naturalearth='data/bundle/naturalearth/ne_10m_admin_0_countries.shp', naturalearth="data/bundle/naturalearth/ne_10m_admin_0_countries.shp",
eez='data/bundle/eez/World_EEZ_v8_2014.shp', eez="data/bundle/eez/World_EEZ_v8_2014.shp",
nuts3='data/bundle/NUTS_2013_60M_SH/data/NUTS_RG_60M_2013.shp', nuts3="data/bundle/NUTS_2013_60M_SH/data/NUTS_RG_60M_2013.shp",
nuts3pop='data/bundle/nama_10r_3popgdp.tsv.gz', nuts3pop="data/bundle/nama_10r_3popgdp.tsv.gz",
nuts3gdp='data/bundle/nama_10r_3gdp.tsv.gz', nuts3gdp="data/bundle/nama_10r_3gdp.tsv.gz",
ch_cantons='data/bundle/ch_cantons.csv', ch_cantons="data/bundle/ch_cantons.csv",
ch_popgdp='data/bundle/je-e-21.03.02.xls' ch_popgdp="data/bundle/je-e-21.03.02.xls",
output: output:
country_shapes='resources/country_shapes.geojson', country_shapes="resources/" + RDIR + "country_shapes.geojson",
offshore_shapes='resources/offshore_shapes.geojson', offshore_shapes="resources/" + RDIR + "offshore_shapes.geojson",
europe_shape='resources/europe_shape.geojson', europe_shape="resources/" + RDIR + "europe_shape.geojson",
nuts3_shapes='resources/nuts3_shapes.geojson' nuts3_shapes="resources/" + RDIR + "nuts3_shapes.geojson",
log: "logs/build_shapes.log" log:
"logs/" + RDIR + "build_shapes.log",
threads: 1 threads: 1
resources: mem_mb=500 resources:
script: "scripts/build_shapes.py" mem_mb=1500,
script:
"scripts/build_shapes.py"
rule build_bus_regions: rule build_bus_regions:
input: input:
country_shapes='resources/country_shapes.geojson', country_shapes="resources/" + RDIR + "country_shapes.geojson",
offshore_shapes='resources/offshore_shapes.geojson', offshore_shapes="resources/" + RDIR + "offshore_shapes.geojson",
base_network="networks/base.nc" base_network="networks/" + RDIR + "base.nc",
output: output:
regions_onshore="resources/regions_onshore.geojson", regions_onshore="resources/" + RDIR + "regions_onshore.geojson",
regions_offshore="resources/regions_offshore.geojson" regions_offshore="resources/" + RDIR + "regions_offshore.geojson",
log: "logs/build_bus_regions.log" log:
"logs/" + RDIR + "build_bus_regions.log",
threads: 1 threads: 1
resources: mem_mb=1000 resources:
script: "scripts/build_bus_regions.py" mem_mb=1000,
script:
"scripts/build_bus_regions.py"
if config["enable"].get("build_cutout", False):
if config['enable'].get('build_cutout', False):
rule build_cutout: rule build_cutout:
input: input:
regions_onshore="resources/regions_onshore.geojson", regions_onshore="resources/" + RDIR + "regions_onshore.geojson",
regions_offshore="resources/regions_offshore.geojson" regions_offshore="resources/" + RDIR + "regions_offshore.geojson",
output: "cutouts/{cutout}.nc" output:
log: "logs/build_cutout/{cutout}.log" "cutouts/" + CDIR + "{cutout}.nc",
benchmark: "benchmarks/build_cutout_{cutout}" log:
"logs/" + CDIR + "build_cutout/{cutout}.log",
benchmark:
"benchmarks/" + CDIR + "build_cutout_{cutout}"
threads: ATLITE_NPROCESSES threads: ATLITE_NPROCESSES
resources: mem_mb=ATLITE_NPROCESSES * 1000 resources:
script: "scripts/build_cutout.py" mem_mb=ATLITE_NPROCESSES * 1000,
script:
"scripts/build_cutout.py"
if config['enable'].get('retrieve_cutout', True): if config["enable"].get("retrieve_cutout", True):
rule retrieve_cutout: rule retrieve_cutout:
input: HTTP.remote("zenodo.org/record/6350001/files/{cutout}.nc", keep_local=True, static=True) input:
output: "cutouts/{cutout}.nc" HTTP.remote(
run: move(input[0], output[0]) "zenodo.org/record/6350001/files/{cutout}.nc",
keep_local=True,
static=True,
),
output:
"cutouts/" + CDIR + "{cutout}.nc",
log:
"logs/" + CDIR + "retrieve_cutout_{cutout}.log",
resources:
mem_mb=5000,
run:
move(input[0], output[0])
if config['enable'].get('build_natura_raster', False): if config["enable"].get("retrieve_cost_data", True):
rule retrieve_cost_data:
input:
HTTP.remote(
f"raw.githubusercontent.com/PyPSA/technology-data/{config['costs']['version']}/outputs/costs_{config['costs']['year']}.csv",
keep_local=True,
),
output:
COSTS,
log:
"logs/" + RDIR + "retrieve_cost_data.log",
resources:
mem_mb=5000,
run:
move(input[0], output[0])
if config["enable"].get("build_natura_raster", False):
rule build_natura_raster: rule build_natura_raster:
input: input:
natura="data/bundle/natura/Natura2000_end2015.shp", natura="data/bundle/natura/Natura2000_end2015.shp",
cutouts=expand("cutouts/{cutouts}.nc", **config['atlite']) cutouts=expand("cutouts/" + CDIR + "{cutouts}.nc", **config["atlite"]),
output: "resources/natura.tiff" output:
log: "logs/build_natura_raster.log" "resources/" + RDIR + "natura.tiff",
script: "scripts/build_natura_raster.py" resources:
mem_mb=5000,
log:
"logs/" + RDIR + "build_natura_raster.log",
script:
"scripts/build_natura_raster.py"
if config['enable'].get('retrieve_natura_raster', True): if config["enable"].get("retrieve_natura_raster", True):
rule retrieve_natura_raster: rule retrieve_natura_raster:
input: HTTP.remote("zenodo.org/record/4706686/files/natura.tiff", keep_local=True, static=True) input:
output: "resources/natura.tiff" HTTP.remote(
run: move(input[0], output[0]) "zenodo.org/record/4706686/files/natura.tiff",
keep_local=True,
static=True,
),
output:
"resources/" + RDIR + "natura.tiff",
resources:
mem_mb=5000,
run:
move(input[0], output[0])
rule retrieve_ship_raster:
input:
HTTP.remote(
"https://zenodo.org/record/6953563/files/shipdensity_global.zip",
keep_local=True,
static=True,
),
output:
"data/shipdensity_global.zip",
resources:
mem_mb=5000,
run:
move(input[0], output[0])
rule build_ship_raster:
input:
ship_density="data/shipdensity_global.zip",
cutouts=expand("cutouts/" + CDIR + "{cutouts}.nc", **config["atlite"]),
output:
"resources/" + RDIR + "shipdensity_raster.nc",
log:
"logs/" + RDIR + "build_ship_raster.log",
resources:
mem_mb=5000,
benchmark:
"benchmarks/" + RDIR + "build_ship_raster"
script:
"scripts/build_ship_raster.py"
# Optional input when having Ukraine (UA) or Moldova (MD) in the countries list # Optional input when having Ukraine (UA) or Moldova (MD) in the countries list
@ -228,136 +378,207 @@ else:
rule build_renewable_profiles: rule build_renewable_profiles:
input: input:
base_network="networks/base.nc", base_network="networks/" + RDIR + "base.nc",
corine="data/bundle/corine/g250_clc06_V18_5.tif", corine="data/bundle/corine/g250_clc06_V18_5.tif",
natura="resources/natura.tiff", natura=lambda w: (
gebco=lambda w: ("data/bundle/GEBCO_2014_2D.nc" "resources/" + RDIR + "natura.tiff"
if "max_depth" in config["renewable"][w.technology].keys() if config["renewable"][w.technology]["natura"]
else []), else []
country_shapes='resources/country_shapes.geojson', ),
offshore_shapes='resources/offshore_shapes.geojson', gebco=lambda w: (
regions=lambda w: ("resources/regions_onshore.geojson" "data/bundle/GEBCO_2014_2D.nc"
if w.technology in ('onwind', 'solar') if "max_depth" in config["renewable"][w.technology].keys()
else "resources/regions_offshore.geojson"), else []
cutout=lambda w: "cutouts/" + config["renewable"][w.technology]['cutout'] + ".nc", ),
**opt, ship_density=lambda w: (
output: profile="resources/profile_{technology}.nc", "resources/" + RDIR + "shipdensity_raster.nc"
log: "logs/build_renewable_profile_{technology}.log" if "ship_threshold" in config["renewable"][w.technology].keys()
benchmark: "benchmarks/build_renewable_profiles_{technology}" else []
),
country_shapes="resources/" + RDIR + "country_shapes.geojson",
offshore_shapes="resources/" + RDIR + "offshore_shapes.geojson",
regions=lambda w: (
"resources/" + RDIR + "regions_onshore.geojson"
if w.technology in ("onwind", "solar")
else "resources/" + RDIR + "regions_offshore.geojson"
),
cutout=lambda w: "cutouts/"
+ CDIR
+ config["renewable"][w.technology]["cutout"]
+ ".nc",
output:
profile="resources/" + RDIR + "profile_{technology}.nc",
log:
"logs/" + RDIR + "build_renewable_profile_{technology}.log",
benchmark:
"benchmarks/" + RDIR + "build_renewable_profiles_{technology}"
threads: ATLITE_NPROCESSES threads: ATLITE_NPROCESSES
resources: mem_mb=ATLITE_NPROCESSES * 5000 resources:
script: "scripts/build_renewable_profiles.py" mem_mb=ATLITE_NPROCESSES * 5000,
wildcard_constraints:
technology="(?!hydro).*", # Any technology other than hydro
script:
"scripts/build_renewable_profiles.py"
if 'hydro' in config['renewable'].keys(): rule build_hydro_profile:
rule build_hydro_profile: input:
input: country_shapes="resources/" + RDIR + "country_shapes.geojson",
country_shapes='resources/country_shapes.geojson', eia_hydro_generation="data/eia_hydro_annual_generation.csv",
eia_hydro_generation='data/EIA_hydro_generation_2000_2014.csv', cutout=f"cutouts/" + CDIR + config["renewable"]["hydro"]["cutout"] + ".nc"
cutout="cutouts/" + config["renewable"]['hydro']['cutout'] + ".nc" if "hydro" in config["renewable"]
output: 'resources/profile_hydro.nc' else [],
log: "logs/build_hydro_profile.log" output:
resources: mem_mb=5000 "resources/" + RDIR + "profile_hydro.nc",
script: 'scripts/build_hydro_profile.py' log:
"logs/" + RDIR + "build_hydro_profile.log",
resources:
mem_mb=5000,
script:
"scripts/build_hydro_profile.py"
rule add_electricity: rule add_electricity:
input: input:
base_network='networks/base.nc', **{
f"profile_{tech}": "resources/" + RDIR + f"profile_{tech}.nc"
for tech in config["renewable"]
},
**{
f"conventional_{carrier}_{attr}": fn
for carrier, d in config.get("conventional", {None: {}}).items()
for attr, fn in d.items()
if str(fn).startswith("data/")
},
base_network="networks/" + RDIR + "base.nc",
tech_costs=COSTS, tech_costs=COSTS,
regions="resources/regions_onshore.geojson", regions="resources/" + RDIR + "regions_onshore.geojson",
powerplants='resources/powerplants.csv', powerplants="resources/" + RDIR + "powerplants.csv",
hydro_capacities='data/bundle/hydro_capacities.csv', hydro_capacities="data/bundle/hydro_capacities.csv",
geth_hydro_capacities='data/geth2015_hydro_capacities.csv', geth_hydro_capacities="data/geth2015_hydro_capacities.csv",
load='resources/load.csv', load="resources/" + RDIR + "load.csv",
nuts3_shapes='resources/nuts3_shapes.geojson', nuts3_shapes="resources/" + RDIR + "nuts3_shapes.geojson",
ua_md_gdp='data/GDP_PPP_30arcsec_v3_mapped_default.csv', ua_md_gdp='data/GDP_PPP_30arcsec_v3_mapped_default.csv',
**{f"profile_{tech}": f"resources/profile_{tech}.nc" output:
for tech in config['renewable']} "networks/" + RDIR + "elec.nc",
output: "networks/elec.nc" log:
log: "logs/add_electricity.log" "logs/" + RDIR + "add_electricity.log",
benchmark: "benchmarks/add_electricity" benchmark:
"benchmarks/" + RDIR + "add_electricity"
threads: 1 threads: 1
resources: mem_mb=5000 resources:
script: "scripts/add_electricity.py" mem_mb=5000,
script:
"scripts/add_electricity.py"
rule simplify_network: rule simplify_network:
input: input:
network='networks/elec.nc', network="networks/" + RDIR + "elec.nc",
tech_costs=COSTS, tech_costs=COSTS,
regions_onshore="resources/regions_onshore.geojson", regions_onshore="resources/" + RDIR + "regions_onshore.geojson",
regions_offshore="resources/regions_offshore.geojson" regions_offshore="resources/" + RDIR + "regions_offshore.geojson",
output: output:
network='networks/elec_s{simpl}.nc', network="networks/" + RDIR + "elec_s{simpl}.nc",
regions_onshore="resources/regions_onshore_elec_s{simpl}.geojson", regions_onshore="resources/" + RDIR + "regions_onshore_elec_s{simpl}.geojson",
regions_offshore="resources/regions_offshore_elec_s{simpl}.geojson", regions_offshore="resources/" + RDIR + "regions_offshore_elec_s{simpl}.geojson",
busmap='resources/busmap_elec_s{simpl}.csv', busmap="resources/" + RDIR + "busmap_elec_s{simpl}.csv",
connection_costs='resources/connection_costs_s{simpl}.csv' connection_costs="resources/" + RDIR + "connection_costs_s{simpl}.csv",
log: "logs/simplify_network/elec_s{simpl}.log" log:
benchmark: "benchmarks/simplify_network/elec_s{simpl}" "logs/" + RDIR + "simplify_network/elec_s{simpl}.log",
benchmark:
"benchmarks/" + RDIR + "simplify_network/elec_s{simpl}"
threads: 1 threads: 1
resources: mem_mb=4000 resources:
script: "scripts/simplify_network.py" mem_mb=4000,
script:
"scripts/simplify_network.py"
rule cluster_network: rule cluster_network:
input: input:
network='networks/elec_s{simpl}.nc', network="networks/" + RDIR + "elec_s{simpl}.nc",
regions_onshore="resources/regions_onshore_elec_s{simpl}.geojson", regions_onshore="resources/" + RDIR + "regions_onshore_elec_s{simpl}.geojson",
regions_offshore="resources/regions_offshore_elec_s{simpl}.geojson", regions_offshore="resources/" + RDIR + "regions_offshore_elec_s{simpl}.geojson",
busmap=ancient('resources/busmap_elec_s{simpl}.csv'), busmap=ancient("resources/" + RDIR + "busmap_elec_s{simpl}.csv"),
custom_busmap=("data/custom_busmap_elec_s{simpl}_{clusters}.csv" custom_busmap=(
if config["enable"].get("custom_busmap", False) else []), "data/custom_busmap_elec_s{simpl}_{clusters}.csv"
tech_costs=COSTS if config["enable"].get("custom_busmap", False)
else []
),
tech_costs=COSTS,
output: output:
network='networks/elec_s{simpl}_{clusters}.nc', network="networks/" + RDIR + "elec_s{simpl}_{clusters}.nc",
regions_onshore="resources/regions_onshore_elec_s{simpl}_{clusters}.geojson", regions_onshore="resources/"
regions_offshore="resources/regions_offshore_elec_s{simpl}_{clusters}.geojson", + RDIR
busmap="resources/busmap_elec_s{simpl}_{clusters}.csv", + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
linemap="resources/linemap_elec_s{simpl}_{clusters}.csv" regions_offshore="resources/"
log: "logs/cluster_network/elec_s{simpl}_{clusters}.log" + RDIR
benchmark: "benchmarks/cluster_network/elec_s{simpl}_{clusters}" + "regions_offshore_elec_s{simpl}_{clusters}.geojson",
busmap="resources/" + RDIR + "busmap_elec_s{simpl}_{clusters}.csv",
linemap="resources/" + RDIR + "linemap_elec_s{simpl}_{clusters}.csv",
log:
"logs/" + RDIR + "cluster_network/elec_s{simpl}_{clusters}.log",
benchmark:
"benchmarks/" + RDIR + "cluster_network/elec_s{simpl}_{clusters}"
threads: 1 threads: 1
resources: mem_mb=6000 resources:
script: "scripts/cluster_network.py" mem_mb=6000,
script:
"scripts/cluster_network.py"
rule add_extra_components: rule add_extra_components:
input: input:
network='networks/elec_s{simpl}_{clusters}.nc', network="networks/" + RDIR + "elec_s{simpl}_{clusters}.nc",
tech_costs=COSTS, tech_costs=COSTS,
output: 'networks/elec_s{simpl}_{clusters}_ec.nc' output:
log: "logs/add_extra_components/elec_s{simpl}_{clusters}.log" "networks/" + RDIR + "elec_s{simpl}_{clusters}_ec.nc",
benchmark: "benchmarks/add_extra_components/elec_s{simpl}_{clusters}_ec" log:
"logs/" + RDIR + "add_extra_components/elec_s{simpl}_{clusters}.log",
benchmark:
"benchmarks/" + RDIR + "add_extra_components/elec_s{simpl}_{clusters}_ec"
threads: 1 threads: 1
resources: mem_mb=3000 resources:
script: "scripts/add_extra_components.py" mem_mb=3000,
script:
"scripts/add_extra_components.py"
rule prepare_network: rule prepare_network:
input: 'networks/elec_s{simpl}_{clusters}_ec.nc', tech_costs=COSTS input:
output: 'networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc' "networks/" + RDIR + "elec_s{simpl}_{clusters}_ec.nc",
log: "logs/prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.log" tech_costs=COSTS,
benchmark: "benchmarks/prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}" output:
"networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
log:
"logs/" + RDIR + "prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.log",
benchmark:
(
"benchmarks/"
+ RDIR
+ "prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}"
)
threads: 1 threads: 1
resources: mem_mb=4000 resources:
script: "scripts/prepare_network.py" mem_mb=4000,
script:
"scripts/prepare_network.py"
def memory(w): def memory(w):
factor = 3. factor = 3.0
for o in w.opts.split('-'): for o in w.opts.split("-"):
m = re.match(r'^(\d+)h$', o, re.IGNORECASE) m = re.match(r"^(\d+)h$", o, re.IGNORECASE)
if m is not None: if m is not None:
factor /= int(m.group(1)) factor /= int(m.group(1))
break break
for o in w.opts.split('-'): for o in w.opts.split("-"):
m = re.match(r'^(\d+)seg$', o, re.IGNORECASE) m = re.match(r"^(\d+)seg$", o, re.IGNORECASE)
if m is not None: if m is not None:
factor *= int(m.group(1)) / 8760 factor *= int(m.group(1)) / 8760
break break
if w.clusters.endswith('m'): if w.clusters.endswith("m"):
return int(factor * (18000 + 180 * int(w.clusters[:-1]))) return int(factor * (18000 + 180 * int(w.clusters[:-1])))
elif w.clusters == "all": elif w.clusters == "all":
return int(factor * (18000 + 180 * 4000)) return int(factor * (18000 + 180 * 4000))
@ -366,44 +587,87 @@ def memory(w):
rule solve_network: rule solve_network:
input: "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc" input:
output: "results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc" "networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
output:
"results/networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
log: log:
solver=normpath("logs/solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_solver.log"), solver=normpath(
python="logs/solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_python.log", "logs/"
memory="logs/solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_memory.log" + RDIR
benchmark: "benchmarks/solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}" + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_solver.log"
),
python="logs/"
+ RDIR
+ "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_python.log",
memory="logs/"
+ RDIR
+ "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_memory.log",
benchmark:
"benchmarks/" + RDIR + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}"
threads: 4 threads: 4
resources: mem_mb=memory resources:
shadow: "minimal" mem_mb=memory,
script: "scripts/solve_network.py" shadow:
"minimal"
script:
"scripts/solve_network.py"
rule solve_operations_network: rule solve_operations_network:
input: input:
unprepared="networks/elec_s{simpl}_{clusters}_ec.nc", unprepared="networks/" + RDIR + "elec_s{simpl}_{clusters}_ec.nc",
optimized="results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc" optimized="results/networks/"
output: "results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op.nc" + RDIR
+ "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
output:
"results/networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op.nc",
log: log:
solver=normpath("logs/solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_solver.log"), solver=normpath(
python="logs/solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_python.log", "logs/"
memory="logs/solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_memory.log" + RDIR
benchmark: "benchmarks/solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}" + "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_solver.log"
),
python="logs/"
+ RDIR
+ "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_python.log",
memory="logs/"
+ RDIR
+ "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_memory.log",
benchmark:
(
"benchmarks/"
+ RDIR
+ "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}"
)
threads: 4 threads: 4
resources: mem_mb=(lambda w: 5000 + 372 * int(w.clusters)) resources:
shadow: "minimal" mem_mb=(lambda w: 5000 + 372 * int(w.clusters)),
script: "scripts/solve_operations_network.py" shadow:
"minimal"
script:
"scripts/solve_operations_network.py"
rule plot_network: rule plot_network:
input: input:
network="results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", network="results/networks/"
tech_costs=COSTS + RDIR
+ "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
tech_costs=COSTS,
output: output:
only_map="results/plots/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{attr}.{ext}", only_map="results/plots/"
ext="results/plots/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{attr}_ext.{ext}" + RDIR
log: "logs/plot_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{attr}_{ext}.log" + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{attr}.{ext}",
script: "scripts/plot_network.py" ext="results/plots/"
+ RDIR
+ "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{attr}_ext.{ext}",
log:
"logs/"
+ RDIR
+ "plot_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{attr}_{ext}.log",
script:
"scripts/plot_network.py"
def input_make_summary(w): def input_make_summary(w):
@ -414,36 +678,79 @@ def input_make_summary(w):
ll = [l for l in ll if l[0] == w.ll[0]] ll = [l for l in ll if l[0] == w.ll[0]]
else: else:
ll = w.ll ll = w.ll
return ([COSTS] + return [COSTS] + expand(
expand("results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", "results/networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
ll=ll, ll=ll,
**{k: config["scenario"][k] if getattr(w, k) == "all" else getattr(w, k) **{
for k in ["simpl", "clusters", "opts"]})) k: config["scenario"][k] if getattr(w, k) == "all" else getattr(w, k)
for k in ["simpl", "clusters", "opts"]
}
)
rule make_summary: rule make_summary:
input: input_make_summary input:
output: directory("results/summaries/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}") input_make_summary,
log: "logs/make_summary/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}.log", output:
script: "scripts/make_summary.py" directory(
"results/summaries/"
+ RDIR
+ "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}"
),
log:
"logs/"
+ RDIR
+ "make_summary/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}.log",
resources:
mem_mb=1500,
script:
"scripts/make_summary.py"
rule plot_summary: rule plot_summary:
input: "results/summaries/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}" input:
output: "results/plots/summary_{summary}_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}.{ext}" "results/summaries/"
log: "logs/plot_summary/{summary}_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}_{ext}.log" + RDIR
script: "scripts/plot_summary.py" + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}",
output:
"results/plots/"
+ RDIR
+ "summary_{summary}_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}.{ext}",
log:
"logs/"
+ RDIR
+ "plot_summary/{summary}_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}_{ext}.log",
resources:
mem_mb=1500,
script:
"scripts/plot_summary.py"
def input_plot_p_nom_max(w): def input_plot_p_nom_max(w):
return [("networks/elec_s{simpl}{maybe_cluster}.nc" return [
.format(maybe_cluster=('' if c == 'full' else ('_' + c)), **w)) (
for c in w.clusts.split(",")] "results/networks/"
+ RDIR
+ "elec_s{simpl}{maybe_cluster}.nc".format(
maybe_cluster=("" if c == "full" else ("_" + c)), **w
)
)
for c in w.clusts.split(",")
]
rule plot_p_nom_max: rule plot_p_nom_max:
input: input_plot_p_nom_max input:
output: "results/plots/elec_s{simpl}_cum_p_nom_max_{clusts}_{techs}_{country}.{ext}" input_plot_p_nom_max,
log: "logs/plot_p_nom_max/elec_s{simpl}_{clusts}_{techs}_{country}_{ext}.log" output:
script: "scripts/plot_p_nom_max.py" "results/plots/"
+ RDIR
+ "elec_s{simpl}_cum_p_nom_max_{clusts}_{techs}_{country}.{ext}",
log:
"logs/"
+ RDIR
+ "plot_p_nom_max/elec_s{simpl}_{clusts}_{techs}_{country}_{ext}.log",
resources:
mem_mb=1500,
script:
"scripts/plot_p_nom_max.py"


@@ -1,15 +1,18 @@
-# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors
+# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
 #
 # SPDX-License-Identifier: CC0-1.0
-version: 0.4.0
+version: 0.6.1
 tutorial: false
 logging:
   level: INFO
   format: '%(levelname)s:%(name)s:%(message)s'
-summary_dir: results
+run:
+  name: "" # use this to keep track of runs with different settings
+  shared_cutouts: false # set to true to share the default cutout(s) across runs
 scenario:
   simpl: ['']
@@ -19,10 +22,6 @@ scenario:
 countries: ['AL', 'AT', 'BA', 'BE', 'BG', 'CH', 'CZ', 'DE', 'DK', 'EE', 'ES', 'FI', 'FR', 'GB', 'GR', 'HR', 'HU', 'IE', 'IT', 'LT', 'LU', 'LV', 'ME', 'MD', 'MK', 'NL', 'NO', 'PL', 'PT', 'RO', 'RS', 'SE', 'SI', 'SK', 'UA']
-clustering:
-  simplify:
-    to_substations: false # network is simplified to nodes with positive or negative power injection (i.e. substations or offwind connections)
 snapshots:
   start: "2013-01-01"
   end: "2014-01-01"
@@ -31,6 +30,7 @@ snapshots:
 enable:
   prepare_links_p_nom: false
   retrieve_databundle: true
+  retrieve_cost_data: true
   build_cutout: false
   retrieve_cutout: true
   build_natura_raster: false
@ -39,32 +39,54 @@ enable:
electricity: electricity:
voltages: [220., 300., 380., 750.] voltages: [220., 300., 380., 750.]
gaslimit: false # global gas usage limit of X MWh_th
co2limit: 9.59e+7 # 0.05 * co2base co2limit: 9.59e+7 # 0.05 * co2base
co2base: 1.918e+9 co2base: 1.918e+9
agg_p_nom_limits: data/agg_p_nom_minmax.csv agg_p_nom_limits: data/agg_p_nom_minmax.csv
extendable_carriers: operational_reserve: # like https://genxproject.github.io/GenX/dev/core/#Reserves
Generator: [] activate: false
StorageUnit: [] # battery, H2 epsilon_load: 0.02 # share of total load
Store: [battery, H2] epsilon_vres: 0.02 # share of total renewable supply
Link: [] contingency: 4000 # fixed capacity in MW
max_hours: max_hours:
battery: 6 battery: 6
H2: 168 H2: 168
powerplants_filter: false # use pandas query strings here, e.g. Country not in ['Germany'] extendable_carriers:
custom_powerplants: true # use pandas query strings here, e.g. Country in ['Germany'] Generator: [solar, onwind, offwind-ac, offwind-dc, OCGT]
conventional_carriers: [nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass] StorageUnit: [] # battery, H2
renewable_capacities_from_OPSD: [] # onwind, offwind, solar Store: [battery, H2]
Link: [] # H2 pipeline
# estimate_renewable_capacities_from_capacity_stats: # use pandas query strings here, e.g. Country not in ['Germany']
# # Wind is the Fueltype in ppm.data.Capacity_stats, onwind, offwind-{ac,dc} the carrier in PyPSA-Eur powerplants_filter: (DateOut >= 2022 or DateOut != DateOut)
# Wind: [onwind, offwind-ac, offwind-dc] # use pandas query strings here, e.g. Country in ['Germany']
# Solar: [solar] custom_powerplants: false
conventional_carriers: [nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass]
renewable_carriers: [solar, onwind, offwind-ac, offwind-dc, hydro]
estimate_renewable_capacities:
enable: true
# Add capacities from OPSD data
from_opsd: true
# Renewable capacities are based on existing capacities reported by IRENA
year: 2020
# Artificially limit maximum capacities to factor * (IRENA capacities),
# i.e. 110% of <years>'s capacities => expansion_limit: 1.1
# false: Use estimated renewable potentials determine by the workflow
expansion_limit: false
technology_mapping:
# Wind is the Fueltype in powerplantmatching, onwind, offwind-{ac,dc} the carrier in PyPSA-Eur
Offshore: [offwind-ac, offwind-dc]
Onshore: [onwind]
PV: [solar]
atlite: atlite:
nprocesses: 4 nprocesses: 4
show_progress: false # false saves time
cutouts: cutouts:
# use 'base' to determine geographical bounds and time span from config # use 'base' to determine geographical bounds and time span from config
# base: # base:
@ -88,16 +110,18 @@ renewable:
resource: resource:
method: wind method: wind
turbine: Vestas_V112_3MW turbine: Vestas_V112_3MW
capacity_per_sqkm: 3 # ScholzPhd Tab 4.3.1: 10MW/km^2 capacity_per_sqkm: 3 # ScholzPhd Tab 4.3.1: 10MW/km^2 and assuming 30% fraction of the already restricted
# area is available for installation of wind generators due to competing land use and likely public
# acceptance issues.
# correction_factor: 0.93 # correction_factor: 0.93
corine: corine:
# Scholz, Y. (2012). Renewable energy based electricity supply at low costs: # Scholz, Y. (2012). Renewable energy based electricity supply at low costs:
# development of the REMix model and application for Europe. ( p.42 / p.28) # development of the REMix model and application for Europe. ( p.42 / p.28)
grid_codes: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, grid_codes: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 32]
24, 25, 26, 27, 28, 29, 31, 32]
distance: 1000 distance: 1000
distance_grid_codes: [1, 2, 3, 4, 5, 6] distance_grid_codes: [1, 2, 3, 4, 5, 6]
natura: true natura: true
excluder_resolution: 100
potential: simple # or conservative potential: simple # or conservative
clip_p_max_pu: 1.e-2 clip_p_max_pu: 1.e-2
offwind-ac: offwind-ac:
@ -105,15 +129,19 @@ renewable:
resource: resource:
method: wind method: wind
turbine: NREL_ReferenceTurbine_5MW_offshore turbine: NREL_ReferenceTurbine_5MW_offshore
capacity_per_sqkm: 2 capacity_per_sqkm: 2 # ScholzPhd Tab 4.3.1: 10MW/km^2 and assuming 20% fraction of the already restricted
# area is available for installation of wind generators due to competing land use and likely public
# acceptance issues.
correction_factor: 0.8855 correction_factor: 0.8855
# proxy for wake losses # proxy for wake losses
# from 10.1016/j.energy.2018.08.153 # from 10.1016/j.energy.2018.08.153
# until done more rigorously in #153 # until done more rigorously in #153
corine: [44, 255] corine: [44, 255]
natura: true natura: true
ship_threshold: 400
max_depth: 50 max_depth: 50
max_shore_distance: 30000 max_shore_distance: 30000
excluder_resolution: 200
potential: simple # or conservative potential: simple # or conservative
clip_p_max_pu: 1.e-2 clip_p_max_pu: 1.e-2
offwind-dc: offwind-dc:
@ -121,16 +149,19 @@ renewable:
resource: resource:
method: wind method: wind
turbine: NREL_ReferenceTurbine_5MW_offshore turbine: NREL_ReferenceTurbine_5MW_offshore
# ScholzPhd Tab 4.3.1: 10MW/km^2 capacity_per_sqkm: 2 # ScholzPhd Tab 4.3.1: 10MW/km^2 and assuming 20% fraction of the already restricted
capacity_per_sqkm: 2 # area is available for installation of wind generators due to competing land use and likely public
# acceptance issues.
correction_factor: 0.8855 correction_factor: 0.8855
# proxy for wake losses # proxy for wake losses
# from 10.1016/j.energy.2018.08.153 # from 10.1016/j.energy.2018.08.153
# until done more rigorously in #153 # until done more rigorously in #153
corine: [44, 255] corine: [44, 255]
natura: true natura: true
ship_threshold: 400
max_depth: 50 max_depth: 50
min_shore_distance: 30000 min_shore_distance: 30000
excluder_resolution: 200
potential: simple # or conservative potential: simple # or conservative
clip_p_max_pu: 1.e-2 clip_p_max_pu: 1.e-2
solar: solar:
@ -141,7 +172,7 @@ renewable:
orientation: orientation:
slope: 35. slope: 35.
azimuth: 180. azimuth: 180.
capacity_per_sqkm: 1.7 # ScholzPhd Tab 4.3.1: 170 MW/km^2 capacity_per_sqkm: 1.7 # ScholzPhd Tab 4.3.1: 170 MW/km^2 and assuming 1% of the area can be used for solar PV panels
# Correction factor determined by comparing uncorrected area-weighted full-load hours to those # Correction factor determined by comparing uncorrected area-weighted full-load hours to those
# published in Supplementary Data to # published in Supplementary Data to
# Pietzcker, Robert Carl, et al. "Using the sun to decarbonize the power # Pietzcker, Robert Carl, et al. "Using the sun to decarbonize the power
@ -150,9 +181,9 @@ renewable:
# This correction factor of 0.854337 may be in order if using reanalysis data. # This correction factor of 0.854337 may be in order if using reanalysis data.
# for discussion refer to https://github.com/PyPSA/pypsa-eur/pull/304 # for discussion refer to https://github.com/PyPSA/pypsa-eur/pull/304
# correction_factor: 0.854337 # correction_factor: 0.854337
corine: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, corine: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 26, 31, 32]
14, 15, 16, 17, 18, 19, 20, 26, 31, 32]
natura: true natura: true
excluder_resolution: 100
potential: simple # or conservative potential: simple # or conservative
clip_p_max_pu: 1.e-2 clip_p_max_pu: 1.e-2
hydro: hydro:
@ -162,6 +193,10 @@ renewable:
hydro_max_hours: "energy_capacity_totals_by_country" # one of energy_capacity_totals_by_country, estimate_by_large_installations or a float hydro_max_hours: "energy_capacity_totals_by_country" # one of energy_capacity_totals_by_country, estimate_by_large_installations or a float
clip_min_inflow: 1.0 clip_min_inflow: 1.0
conventional:
nuclear:
p_max_pu: "data/nuclear_p_max_pu.csv" # float of file name
lines: lines:
types: types:
220.: "Al/St 240/40 2-bundle 220.0" 220.: "Al/St 240/40 2-bundle 220.0"
@ -185,7 +220,7 @@ transformers:
type: '' type: ''
load: load:
power_statistics: True # only for files from <2019; set false in order to get ENTSOE transparency data power_statistics: true # only for files from <2019; set false in order to get ENTSOE transparency data
interpolate_limit: 3 # data gaps up until this size are interpolated linearly interpolate_limit: 3 # data gaps up until this size are interpolated linearly
time_shift_for_large_gaps: 1w # data gaps up until this size are copied by copying from time_shift_for_large_gaps: 1w # data gaps up until this size are copied by copying from
manual_adjustments: true # false manual_adjustments: true # false
@ -193,9 +228,18 @@ load:
costs: costs:
year: 2030 year: 2030
discountrate: 0.07 # From a Lion Hirth paper, also reflects average of Noothout et al 2016 version: v0.4.0
USD2013_to_EUR2013: 0.7532 # [EUR/USD] ECB: https://www.ecb.europa.eu/stats/exchange/eurofxref/html/eurofxref-graph-usd.en.html rooftop_share: 0.14 # based on the potentials, assuming (0.1 kW/m2 and 10 m2/person)
marginal_cost: # EUR/MWh fill_values:
FOM: 0
VOM: 0
efficiency: 1
fuel: 0
investment: 0
lifetime: 25
"CO2 intensity": 0
"discount rate": 0.07
marginal_cost:
solar: 0.01 solar: 0.01
onwind: 0.015 onwind: 0.015
offwind: 0.015 offwind: 0.015
@ -208,6 +252,29 @@ costs:
emission_prices: # in currency per tonne emission, only used with the option Ep emission_prices: # in currency per tonne emission, only used with the option Ep
co2: 0. co2: 0.
clustering:
simplify_network:
to_substations: false # network is simplified to nodes with positive or negative power injection (i.e. substations or offwind connections)
algorithm: kmeans # choose from: [hac, kmeans]
feature: solar+onwind-time # only for hac. choose from: [solar+onwind-time, solar+onwind-cap, solar-time, solar-cap, solar+offwind-cap] etc.
exclude_carriers: []
remove_stubs: true
remove_stubs_across_borders: true
cluster_network:
algorithm: kmeans
feature: solar+onwind-time
exclude_carriers: []
aggregation_strategies:
generators:
p_nom_max: sum # use "min" for more conservative assumptions
p_nom_min: sum
p_min_pu: mean
marginal_cost: mean
committable: any
ramp_limit_up: max
ramp_limit_down: max
efficiency: mean
solving: solving:
options: options:
formulation: kirchhoff formulation: kirchhoff
@ -240,7 +307,7 @@ solving:
plotting: plotting:
map: map:
figsize: [7, 7] figsize: [7, 7]
boundaries: [-10.2, 29, 35, 72] boundaries: [-10.2, 29, 35, 72]
p_nom: p_nom:
bus_size_factor: 5.e+4 bus_size_factor: 5.e+4
linewidth_factor: 3.e+3 linewidth_factor: 3.e+3
@ -259,50 +326,50 @@ plotting:
AC_carriers: ["AC line", "AC transformer"] AC_carriers: ["AC line", "AC transformer"]
link_carriers: ["DC line", "Converter AC-DC"] link_carriers: ["DC line", "Converter AC-DC"]
tech_colors: tech_colors:
"onwind" : "#235ebc" "onwind": "#235ebc"
"onshore wind" : "#235ebc" "onshore wind": "#235ebc"
'offwind' : "#6895dd" 'offwind': "#6895dd"
'offwind-ac' : "#6895dd" 'offwind-ac': "#6895dd"
'offshore wind' : "#6895dd" 'offshore wind': "#6895dd"
'offshore wind ac' : "#6895dd" 'offshore wind ac': "#6895dd"
'offwind-dc' : "#74c6f2" 'offwind-dc': "#74c6f2"
'offshore wind dc' : "#74c6f2" 'offshore wind dc': "#74c6f2"
"hydro" : "#08ad97" "hydro": "#08ad97"
"hydro+PHS" : "#08ad97" "hydro+PHS": "#08ad97"
"PHS" : "#08ad97" "PHS": "#08ad97"
"hydro reservoir" : "#08ad97" "hydro reservoir": "#08ad97"
'hydroelectricity' : '#08ad97' 'hydroelectricity': '#08ad97'
"ror" : "#4adbc8" "ror": "#4adbc8"
"run of river" : "#4adbc8" "run of river": "#4adbc8"
'solar' : "#f9d002" 'solar': "#f9d002"
'solar PV' : "#f9d002" 'solar PV': "#f9d002"
'solar thermal' : '#ffef60' 'solar thermal': '#ffef60'
'biomass' : '#0c6013' 'biomass': '#0c6013'
'solid biomass' : '#06540d' 'solid biomass': '#06540d'
'biogas' : '#23932d' 'biogas': '#23932d'
'waste' : '#68896b' 'waste': '#68896b'
'geothermal' : '#ba91b1' 'geothermal': '#ba91b1'
"OCGT" : "#d35050" "OCGT": "#d35050"
"gas" : "#d35050" "gas": "#d35050"
"natural gas" : "#d35050" "natural gas": "#d35050"
"CCGT" : "#b20101" "CCGT": "#b20101"
"nuclear" : "#ff9000" "nuclear": "#ff9000"
"coal" : "#707070" "coal": "#707070"
"lignite" : "#9e5a01" "lignite": "#9e5a01"
"oil" : "#262626" "oil": "#262626"
"H2" : "#ea048a" "H2": "#ea048a"
"hydrogen storage" : "#ea048a" "hydrogen storage": "#ea048a"
"battery" : "#b8ea04" "battery": "#b8ea04"
"Electric load" : "#f9d002" "Electric load": "#f9d002"
"electricity" : "#f9d002" "electricity": "#f9d002"
"lines" : "#70af1d" "lines": "#70af1d"
"transmission lines" : "#70af1d" "transmission lines": "#70af1d"
"AC-AC" : "#70af1d" "AC-AC": "#70af1d"
"AC line" : "#70af1d" "AC line": "#70af1d"
"links" : "#8a1caf" "links": "#8a1caf"
"HVDC links" : "#8a1caf" "HVDC links": "#8a1caf"
"DC-DC" : "#8a1caf" "DC-DC": "#8a1caf"
"DC link" : "#8a1caf" "DC link": "#8a1caf"
"load": "#dd2e23" "load": "#dd2e23"
nice_names: nice_names:
OCGT: "Open-Cycle Gas" OCGT: "Open-Cycle Gas"

View File

@ -1,15 +1,17 @@
# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors # SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
# #
# SPDX-License-Identifier: CC0-1.0 # SPDX-License-Identifier: CC0-1.0
version: 0.4.0 version: 0.6.1
tutorial: true tutorial: true
logging: logging:
level: INFO level: INFO
format: '%(levelname)s:%(name)s:%(message)s' format: '%(levelname)s:%(name)s:%(message)s'
summary_dir: results run:
name: ""
shared_cutouts: false
scenario: scenario:
simpl: [''] simpl: ['']
@ -17,11 +19,7 @@ scenario:
clusters: [5] clusters: [5]
opts: [Co2L-24H] opts: [Co2L-24H]
countries: ['DE'] countries: ['BE']
clustering:
simplify:
to_substations: false # network is simplified to nodes with positive or negative power injection (i.e. substations or offwind connections)
snapshots: snapshots:
start: "2013-03-01" start: "2013-03-01"
@ -31,6 +29,7 @@ snapshots:
enable: enable:
prepare_links_p_nom: false prepare_links_p_nom: false
retrieve_databundle: true retrieve_databundle: true
retrieve_cost_data: true
build_cutout: false build_cutout: false
retrieve_cutout: true retrieve_cutout: true
build_natura_raster: false build_natura_raster: false
@ -45,7 +44,7 @@ electricity:
Generator: [OCGT] Generator: [OCGT]
StorageUnit: [] #battery, H2 StorageUnit: [] #battery, H2
Store: [battery, H2] Store: [battery, H2]
Link: [] Link: [] # H2 pipeline
max_hours: max_hours:
battery: 6 battery: 6
@ -57,8 +56,9 @@ electricity:
atlite: atlite:
nprocesses: 4 nprocesses: 4
show_progress: false # false saves time
cutouts: cutouts:
europe-2013-era5-tutorial: be-03-2013-era5:
module: era5 module: era5
x: [4., 15.] x: [4., 15.]
y: [46., 56.] y: [46., 56.]
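A minimal sketch of how the ``be-03-2013-era5`` cutout above could be created directly with atlite; the time range is an assumption based on the cutout name and the output path is hypothetical (normally the ``build_cutout`` rule handles this):

.. code:: python

    import atlite

    cutout = atlite.Cutout(
        path="cutouts/be-03-2013-era5.nc",  # assumed output path
        module="era5",
        x=slice(4.0, 15.0),
        y=slice(46.0, 56.0),
        time="2013-03",  # assumption: March 2013, as the cutout name suggests
    )
    cutout.prepare()  # retrieves ERA5 data via the CDS API and writes the netCDF file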
@ -66,7 +66,7 @@ atlite:
renewable: renewable:
onwind: onwind:
cutout: europe-2013-era5-tutorial cutout: be-03-2013-era5
resource: resource:
method: wind method: wind
turbine: Vestas_V112_3MW turbine: Vestas_V112_3MW
@ -75,15 +75,15 @@ renewable:
corine: corine:
# Scholz, Y. (2012). Renewable energy based electricity supply at low costs: # Scholz, Y. (2012). Renewable energy based electricity supply at low costs:
# development of the REMix model and application for Europe. ( p.42 / p.28) # development of the REMix model and application for Europe. ( p.42 / p.28)
grid_codes: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, grid_codes: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 32]
24, 25, 26, 27, 28, 29, 31, 32]
distance: 1000 distance: 1000
distance_grid_codes: [1, 2, 3, 4, 5, 6] distance_grid_codes: [1, 2, 3, 4, 5, 6]
natura: true natura: true
excluder_resolution: 200
potential: simple # or conservative potential: simple # or conservative
clip_p_max_pu: 1.e-2 clip_p_max_pu: 1.e-2
offwind-ac: offwind-ac:
cutout: europe-2013-era5-tutorial cutout: be-03-2013-era5
resource: resource:
method: wind method: wind
turbine: NREL_ReferenceTurbine_5MW_offshore turbine: NREL_ReferenceTurbine_5MW_offshore
@ -91,11 +91,13 @@ renewable:
# correction_factor: 0.93 # correction_factor: 0.93
corine: [44, 255] corine: [44, 255]
natura: true natura: true
ship_threshold: 400
max_shore_distance: 30000 max_shore_distance: 30000
excluder_resolution: 200
potential: simple # or conservative potential: simple # or conservative
clip_p_max_pu: 1.e-2 clip_p_max_pu: 1.e-2
offwind-dc: offwind-dc:
cutout: europe-2013-era5-tutorial cutout: be-03-2013-era5
resource: resource:
method: wind method: wind
turbine: NREL_ReferenceTurbine_5MW_offshore turbine: NREL_ReferenceTurbine_5MW_offshore
@ -104,11 +106,13 @@ renewable:
# correction_factor: 0.93 # correction_factor: 0.93
corine: [44, 255] corine: [44, 255]
natura: true natura: true
ship_threshold: 400
min_shore_distance: 30000 min_shore_distance: 30000
excluder_resolution: 200
potential: simple # or conservative potential: simple # or conservative
clip_p_max_pu: 1.e-2 clip_p_max_pu: 1.e-2
solar: solar:
cutout: europe-2013-era5-tutorial cutout: be-03-2013-era5
resource: resource:
method: pv method: pv
panel: CSi panel: CSi
@ -123,9 +127,9 @@ renewable:
# power." Applied Energy 135 (2014): 704-720. # power." Applied Energy 135 (2014): 704-720.
# This correction factor of 0.854337 may be in order if using reanalysis data. # This correction factor of 0.854337 may be in order if using reanalysis data.
# correction_factor: 0.854337 # correction_factor: 0.854337
corine: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, corine: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 26, 31, 32]
14, 15, 16, 17, 18, 19, 20, 26, 31, 32]
natura: true natura: true
excluder_resolution: 200
potential: simple # or conservative potential: simple # or conservative
clip_p_max_pu: 1.e-2 clip_p_max_pu: 1.e-2
@ -151,7 +155,7 @@ transformers:
type: '' type: ''
load: load:
power_statistics: True # only for files from <2019; set false in order to get ENTSOE transparency data power_statistics: true # only for files from <2019; set false in order to get ENTSOE transparency data
interpolate_limit: 3 # data gaps up until this size are interpolated linearly interpolate_limit: 3 # data gaps up until this size are interpolated linearly
time_shift_for_large_gaps: 1w # data gaps up until this size are copied by copying from time_shift_for_large_gaps: 1w # data gaps up until this size are copied by copying from
manual_adjustments: true # false manual_adjustments: true # false
@ -159,8 +163,17 @@ load:
costs: costs:
year: 2030 year: 2030
discountrate: 0.07 # From a Lion Hirth paper, also reflects average of Noothout et al 2016 version: v0.4.0
USD2013_to_EUR2013: 0.7532 # [EUR/USD] ECB: https://www.ecb.europa.eu/stats/exchange/eurofxref/html/eurofxref-graph-usd.en.html rooftop_share: 0.14
fill_values:
FOM: 0
VOM: 0
efficiency: 1
fuel: 0
investment: 0
lifetime: 25
"CO2 intensity": 0
"discount rate": 0.07
marginal_cost: marginal_cost:
solar: 0.01 solar: 0.01
onwind: 0.015 onwind: 0.015
@ -170,6 +183,27 @@ costs:
emission_prices: # in currency per tonne emission, only used with the option Ep emission_prices: # in currency per tonne emission, only used with the option Ep
co2: 0. co2: 0.
clustering:
simplify_network:
to_substations: false # network is simplified to nodes with positive or negative power injection (i.e. substations or offwind connections)
algorithm: kmeans # choose from: [hac, kmeans]
feature: solar+onwind-time # only for hac. choose from: [solar+onwind-time, solar+onwind-cap, solar-time, solar-cap, solar+offwind-cap] etc.
exclude_carriers: []
cluster_network:
algorithm: kmeans
feature: solar+onwind-time
exclude_carriers: []
aggregation_strategies:
generators:
p_nom_max: sum # use "min" for more conservative assumptions
p_nom_min: sum
p_min_pu: mean
marginal_cost: mean
committable: any
ramp_limit_up: max
ramp_limit_down: max
efficiency: mean
solving: solving:
options: options:
formulation: kirchhoff formulation: kirchhoff
@ -186,7 +220,7 @@ solving:
plotting: plotting:
map: map:
figsize: [7, 7] figsize: [7, 7]
boundaries: [-10.2, 29, 35, 72] boundaries: [-10.2, 29, 35, 72]
p_nom: p_nom:
bus_size_factor: 5.e+4 bus_size_factor: 5.e+4
linewidth_factor: 3.e+3 linewidth_factor: 3.e+3
@ -205,50 +239,50 @@ plotting:
AC_carriers: ["AC line", "AC transformer"] AC_carriers: ["AC line", "AC transformer"]
link_carriers: ["DC line", "Converter AC-DC"] link_carriers: ["DC line", "Converter AC-DC"]
tech_colors: tech_colors:
"onwind" : "#235ebc" "onwind": "#235ebc"
"onshore wind" : "#235ebc" "onshore wind": "#235ebc"
'offwind' : "#6895dd" 'offwind': "#6895dd"
'offwind-ac' : "#6895dd" 'offwind-ac': "#6895dd"
'offshore wind' : "#6895dd" 'offshore wind': "#6895dd"
'offshore wind ac' : "#6895dd" 'offshore wind ac': "#6895dd"
'offwind-dc' : "#74c6f2" 'offwind-dc': "#74c6f2"
'offshore wind dc' : "#74c6f2" 'offshore wind dc': "#74c6f2"
"hydro" : "#08ad97" "hydro": "#08ad97"
"hydro+PHS" : "#08ad97" "hydro+PHS": "#08ad97"
"PHS" : "#08ad97" "PHS": "#08ad97"
"hydro reservoir" : "#08ad97" "hydro reservoir": "#08ad97"
'hydroelectricity' : '#08ad97' 'hydroelectricity': '#08ad97'
"ror" : "#4adbc8" "ror": "#4adbc8"
"run of river" : "#4adbc8" "run of river": "#4adbc8"
'solar' : "#f9d002" 'solar': "#f9d002"
'solar PV' : "#f9d002" 'solar PV': "#f9d002"
'solar thermal' : '#ffef60' 'solar thermal': '#ffef60'
'biomass' : '#0c6013' 'biomass': '#0c6013'
'solid biomass' : '#06540d' 'solid biomass': '#06540d'
'biogas' : '#23932d' 'biogas': '#23932d'
'waste' : '#68896b' 'waste': '#68896b'
'geothermal' : '#ba91b1' 'geothermal': '#ba91b1'
"OCGT" : "#d35050" "OCGT": "#d35050"
"gas" : "#d35050" "gas": "#d35050"
"natural gas" : "#d35050" "natural gas": "#d35050"
"CCGT" : "#b20101" "CCGT": "#b20101"
"nuclear" : "#ff9000" "nuclear": "#ff9000"
"coal" : "#707070" "coal": "#707070"
"lignite" : "#9e5a01" "lignite": "#9e5a01"
"oil" : "#262626" "oil": "#262626"
"H2" : "#ea048a" "H2": "#ea048a"
"hydrogen storage" : "#ea048a" "hydrogen storage": "#ea048a"
"battery" : "#b8ea04" "battery": "#b8ea04"
"Electric load" : "#f9d002" "Electric load": "#f9d002"
"electricity" : "#f9d002" "electricity": "#f9d002"
"lines" : "#70af1d" "lines": "#70af1d"
"transmission lines" : "#70af1d" "transmission lines": "#70af1d"
"AC-AC" : "#70af1d" "AC-AC": "#70af1d"
"AC line" : "#70af1d" "AC line": "#70af1d"
"links" : "#8a1caf" "links": "#8a1caf"
"HVDC links" : "#8a1caf" "HVDC links": "#8a1caf"
"DC-DC" : "#8a1caf" "DC-DC": "#8a1caf"
"DC link" : "#8a1caf" "DC link": "#8a1caf"
nice_names: nice_names:
OCGT: "Open-Cycle Gas" OCGT: "Open-Cycle Gas"
CCGT: "Combined-Cycle Gas" CCGT: "Combined-Cycle Gas"

View File

@ -0,0 +1,50 @@
https://www.eia.gov/international/data/world/electricity/electricity-generation?pd=2&p=000000000000000000000000000000g&u=1&f=A&v=mapbubble&a=-&i=none&vo=value&t=R&g=000000000000002&l=73-1028i008017kg6368g80a4k000e0ag00gg0004g8g0ho00g000400008&s=315532800000&e=1577836800000&ev=false&
Report generated on: 03-28-2022 11:20:48
"API","","1980","1981","1982","1983","1984","1985","1986","1987","1988","1989","1990","1991","1992","1993","1994","1995","1996","1997","1998","1999","2000","2001","2002","2003","2004","2005","2006","2007","2008","2009","2010","2011","2012","2013","2014","2015","2016","2017","2018","2019","2020"
"","hydroelectricity net generation (billion kWh)","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""
"INTL.33-12-EURO-BKWH.A"," Europe","458.018","464.155","459.881","473.685","481.241","476.739","459.535","491.085","534.517","465.365","474.466","475.47","509.041","526.448","531.815","543.743","529.114164","543.845616","562.441501","569.308453","591.206662","587.371195","541.542535","506.19703","544.536443","545.176179","537.335934","540.934407","567.557921","564.244482","619.96477","543.05273","600.46622","631.86431","619.59229","615.53013","629.98906","562.59258","619.31106","610.62616","670.925"
"INTL.33-12-ALB-BKWH.A"," Albania","2.919","3.018","3.093","3.167","3.241","3.315","3.365","3.979","3.713","3.846","2.82","3.483","3.187","3.281","3.733","4.162","5.669","4.978","4.872","5.231","4.548","3.519","3.477","5.117","5.411","5.319","4.951","2.76","3.759","5.201","7.49133","4.09068","4.67775","6.88941","4.67676","5.83605","7.70418","4.47975","8.46648","5.15394","5.281"
"INTL.33-12-AUT-BKWH.A"," Austria","28.501","30.008","29.893","29.577","28.384","30.288","30.496","25.401","35.151","34.641","31.179","31.112","34.483","36.336","35.349","36.696","33.874","35.744","36.792","40.292","41.418","40.05","39.825","32.883","36.394","36.31","35.48","36.732","37.969","40.487","36.466","32.511","41.862","40.138","39.001","35.255","37.954","36.462","35.73","40.43655","45.344"
"INTL.33-12-BEL-BKWH.A"," Belgium","0.274","0.377","0.325","0.331","0.348","0.282","0.339","0.425","0.354","0.3","0.263","0.226","0.338","0.252","0.342","0.335","0.237","0.30195","0.38511","0.338","0.455","0.437","0.356","0.245","0.314","0.285","0.355","0.385","0.406","0.325","0.298","0.193","0.353","0.376","0.289","0.314","0.367","0.268","0.311","0.108","1.29"
"INTL.33-12-BIH-BKWH.A"," Bosnia and Herzegovina","--","--","--","--","--","--","--","--","--","--","--","--","3.374","2.343","3.424","3.607","5.104","4.608","4.511","5.477","5.043","5.129","5.215","4.456","5.919","5.938","5.798","3.961","4.818","6.177","7.946","4.343","4.173","7.164","5.876","5.495","5.585","3.7521","6.35382","6.02019","6.1"
"INTL.33-12-BGR-BKWH.A"," Bulgaria","3.674","3.58","3.018","3.318","3.226","2.214","2.302","2.512","2.569","2.662","1.859","2.417","2.042","1.923","1.453","2.291","2.89","2.726","3.066","2.725","2.646","1.72","2.172","2.999","3.136","4.294","4.196","2.845","2.796","3.435","4.98168","2.84328","3.14622","3.99564","4.55598","5.59845","3.8412","2.79972","5.09553","3.34917","3.37"
"INTL.33-12-HRV-BKWH.A"," Croatia","--","--","--","--","--","--","--","--","--","--","--","--","4.298","4.302","4.881","5.212","7.156","5.234","5.403","6.524","5.794","6.482","5.311","4.827","6.888","6.27","5.94","4.194","5.164","6.663","9.035","4.983","4.789","8.536","8.917","6.327","6.784","5.255","7.62399","5.87268","3.4"
"INTL.33-12-CYP-BKWH.A"," Cyprus","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0"
"INTL.33-12-CZE-BKWH.A"," Czech Republic","--","--","--","--","--","--","--","--","--","--","--","--","--","1.355","1.445","1.982","1.949","1.68201","1.382","1.664","1.7404","2.033","2.467","1.369","1.999","2.356","2.525","2.068","2.004","2.405","2.775","1.95","2.107","2.704","1.909","1.779","1.983","1.852","1.615","1.98792","3.4"
"INTL.33-12-DNK-BKWH.A"," Denmark","0.03","0.031","0.028","0.036","0.028","0.027","0.029","0.029","0.032","0.027","0.027","0.026","0.028","0.027","0.033","0.03","0.019","0.019","0.02673","0.031","0.03","0.028","0.032","0.021","0.027","0.023","0.023","0.028","0.026","0.019","0.021","0.017","0.017","0.013","0.015","0.018","0.019","0.018","0.015","0.01584","0.02"
"INTL.33-12-EST-BKWH.A"," Estonia","--","--","--","--","--","--","--","--","--","--","--","--","0.001","0.001","0.003","0.002","0.002","0.003","0.004","0.004","0.005","0.007","0.006","0.013","0.022","0.022","0.014","0.021","0.028","0.032","0.027","0.03","0.042","0.026","0.027","0.027","0.035","0.026","0.015","0.01881","0.04"
"INTL.33-12-FRO-BKWH.A"," Faroe Islands","0.049","0.049","0.049","0.049","0.049","0.049","0.049","0.049","0.062","0.071","0.074","0.074","0.083","0.073","0.075","0.075","0.069564","0.075066","0.076501","0.069453","0.075262","0.075195","0.095535","0.08483","0.093443","0.097986","0.099934","0.103407","0.094921","0.091482","0.06676","0.092","0.099","0.091","0.121","0.132","0.105","0.11","0.107","0.102","0.11"
"INTL.33-12-FIN-BKWH.A"," Finland","10.115","13.518","12.958","13.445","13.115","12.211","12.266","13.658","13.229","12.9","10.75","13.065","14.956","13.341","11.669","12.796","11.742","12.11958","14.9","12.652","14.513","13.073","10.668","9.495","14.919","13.646","11.379","14.035","16.941","12.559","12.743","12.278","16.667","12.672","13.24","16.584","15.634","14.61","13.137","12.31461","15.56"
"INTL.33-12-CSK-BKWH.A"," Former Czechoslovakia","4.8","4.2","3.7","3.9","3.2","4.3","4","4.853","4.355","4.229","3.919","3.119","3.602","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--"
"INTL.33-12-SCG-BKWH.A"," Former Serbia and Montenegro","--","--","--","--","--","--","--","--","--","--","--","--","11.23","10.395","11.016","12.071","14.266","12.636","12.763","13.243","11.88","12.326","11.633","9.752","11.01","11.912","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--"
"INTL.33-12-YUG-BKWH.A"," Former Yugoslavia","27.868","25.044","23.295","21.623","25.645","24.363","27.474","25.98","25.612","23.256","19.601","18.929","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--"
"INTL.33-12-FRA-BKWH.A"," France","68.253","70.358","68.6","67.515","64.01","60.248","60.953","68.623","73.952","45.744","52.796","56.277","68.313","64.3","78.057","72.196","64.43","63.151","61.479","71.832","66.466","73.888","59.992","58.567","59.276","50.965","55.741","57.029","63.017","56.428","61.945","45.184","59.099","71.042","62.993","54.876","60.094","49.389","64.485","56.98242","64.84"
"INTL.33-12-DEU-BKWH.A"," Germany","--","--","--","--","--","--","--","--","--","--","--","14.742","17.223","17.699","19.731","21.562","21.737","17.18343","17.044","19.451","21.515","22.506","22.893","19.071","20.866","19.442","19.808","20.957","20.239","18.841","20.678","17.323","21.331","22.66","19.31","18.664","20.214","19.985","17.815","19.86039","24.75"
"INTL.33-12-DDR-BKWH.A"," Germany, East","1.658","1.718","1.748","1.683","1.748","1.758","1.767","1.726","1.719","1.551","1.389","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--"
"INTL.33-12-DEUW-BKWH.A"," Germany, West","17.125","17.889","17.694","16.713","16.434","15.354","16.526","18.36","18.128","16.482","15.769","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--"
"INTL.33-12-GIB-BKWH.A"," Gibraltar","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0"
"INTL.33-12-GRC-BKWH.A"," Greece","3.396","3.398","3.551","2.331","2.852","2.792","3.222","2.768","2.354","1.888","1.751","3.068","2.181","2.26","2.573","3.494","4.305","3.84318","3.68","4.546","3.656","2.076","2.772","4.718","4.625","4.967","5.806","2.565","3.279","5.32","7.431","3.998","4.387","6.337","4.464","5.782","5.543","3.962","5.035","3.9798","3.43"
"INTL.33-12-HUN-BKWH.A"," Hungary","0.111","0.166","0.158","0.153","0.179","0.153","0.152","0.167","0.167","0.156","0.176","0.192","0.156","0.164","0.159","0.161","0.205","0.21384","0.15345","0.179","0.176","0.184","0.192","0.169","0.203","0.2","0.184","0.208","0.211","0.226","0.184","0.216","0.206","0.208","0.294","0.227","0.253","0.214","0.216","0.21681","0.24"
"INTL.33-12-ISL-BKWH.A"," Iceland","3.053","3.085","3.407","3.588","3.738","3.667","3.846","3.918","4.169","4.217","4.162","4.162","4.267","4.421","4.47","4.635","4.724","5.15493","5.565","5.987","6.292","6.512","6.907","7.017","7.063","6.949","7.22","8.31","12.303","12.156","12.51","12.382","12.214","12.747","12.554","13.541","13.092","13.892","13.679","13.32441","12.46"
"INTL.33-12-IRL-BKWH.A"," Ireland","0.833","0.855","0.792","0.776","0.68","0.824","0.91","0.673","0.862","0.684","0.69","0.738","0.809","0.757","0.911","0.706","0.715","0.67122","0.907","0.838","0.838","0.59","0.903","0.592","0.624","0.625","0.717","0.66","0.959","0.893","0.593","0.699","0.795","0.593","0.701","0.798","0.674","0.685","0.687","0.87813","1.21"
"INTL.33-12-ITA-BKWH.A"," Italy","44.997","42.782","41.216","40.96","41.923","40.616","40.626","39.05","40.205","33.647","31.31","41.817","41.778","41.011","44.212","37.404","41.617","41.18697","40.808","44.911","43.763","46.343","39.125","33.303","41.915","35.706","36.624","32.488","41.207","48.647","50.506","45.36477","41.45625","52.24626","57.95955","45.08163","42.00768","35.83701","48.29913","45.31824","47.72"
"INTL.33-12-XKS-BKWH.A"," Kosovo","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","0.075","0.119","0.154","0.104","0.095","0.142","0.149","0.139","0.243","0.177","0.27027","0.2079","0.26"
"INTL.33-12-LVA-BKWH.A"," Latvia","--","--","--","--","--","--","--","--","--","--","--","--","2.498","2.846","3.272","2.908","1.841","2.922","2.99","2.729","2.791","2.805","2.438","2.243","3.078","3.293","2.671","2.706","3.078","3.422","3.488","2.857","3.677","2.838","1.953","1.841","2.523","4.356","2.417","2.08692","2.59"
"INTL.33-12-LTU-BKWH.A"," Lithuania","--","--","--","--","--","--","--","--","--","--","--","--","0.308","0.389","0.447","0.369","0.323","0.291","0.413","0.409","0.336","0.322","0.35","0.323","0.417","0.446193","0.393","0.417","0.398","0.42","0.535","0.475","0.419","0.516","0.395","0.346","0.45","0.597","0.427","0.34254","1.06"
"INTL.33-12-LUX-BKWH.A"," Luxembourg","0.086","0.095","0.084","0.083","0.088","0.071","0.084","0.101","0.097","0.072","0.07","0.083","0.069","0.066","0.117","0.087","0.059","0.082","0.114","0.084","0.119","0.117","0.098","0.078","0.103","0.093","0.11","0.116","0.131","0.105","0.104","0.061","0.095","0.114","0.104","0.095","0.111","0.082","0.089","0.10593","1.09"
"INTL.33-12-MLT-BKWH.A"," Malta","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0"
"INTL.33-12-MNE-BKWH.A"," Montenegro","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","1.733","1.271","1.524","2.05","2.723","1.192","1.462","2.479","1.734","1.476","1.825","1.014","2.09187","1.78","1.8"
"INTL.33-12-NLD-BKWH.A"," Netherlands","0","0","0","0","0","0.003","0.003","0.001","0.002","0.037","0.119","0.079","0.119","0.091","0.1","0.087","0.079","0.09108","0.111","0.089","0.141","0.116","0.109","0.071","0.094","0.087","0.105","0.106","0.101","0.097","0.105","0.057","0.104","0.114","0.112","0.093","0.1","0.061","0.072","0.07326","0.05"
"INTL.33-12-MKD-BKWH.A"," North Macedonia","--","--","--","--","--","--","--","--","--","--","--","--","0.817","0.517","0.696","0.793","0.842","0.891","1.072","1.375","1.158","0.62","0.749","1.36","1.467","1.477","1.634","1","0.832","1.257","2.407","1.419","1.031","1.568","1.195","1.846","1.878","1.099","1.773","1.15236","1.24"
"INTL.33-12-NOR-BKWH.A"," Norway","82.717","91.876","91.507","104.704","104.895","101.464","95.321","102.341","107.919","117.369","119.933","109.032","115.505","118.024","110.398","120.315","102.823","108.677","114.546","120.237","140.4","119.258","128.078","104.425","107.693","134.331","118.175","132.319","137.654","124.03","116.257","119.78","141.189","127.551","134.844","136.662","142.244","141.651","138.202","123.66288","141.69"
"INTL.33-12-POL-BKWH.A"," Poland","2.326","2.116","1.528","1.658","1.394","1.833","1.534","1.644","1.775","1.593","1.403","1.411","1.492","1.473","1.716","1.868","1.912","1.941","2.286","2.133","2.085","2.302","2.256","1.654","2.06","2.179","2.022","2.328","2.13","2.351","2.9","2.313","2.02","2.421","2.165","1.814","2.117","2.552","1.949","1.93842","2.93"
"INTL.33-12-PRT-BKWH.A"," Portugal","7.873","4.934","6.82","7.897","9.609","10.512","8.364","9.005","12.037","5.72","9.065","8.952","4.599","8.453","10.551","8.26","14.613","12.97395","12.853","7.213","11.21","13.894","7.722","15.566","9.77","4.684","10.892","9.991","6.73","8.201","15.954","11.423","5.589","13.652","15.471","8.615","15.608","5.79","12.316","8.6526","13.96"
"INTL.33-12-ROU-BKWH.A"," Romania","12.506","12.605","11.731","9.934","11.208","11.772","10.688","11.084","13.479","12.497","10.87","14.107","11.583","12.64","12.916","16.526","15.597","17.334","18.69","18.107","14.63","14.774","15.886","13.126","16.348","20.005","18.172","15.806","17.023","15.379","19.684","14.581","11.945","14.807","18.618","16.467","17.848","14.349","17.48736","15.65289","15.53"
"INTL.33-12-SRB-BKWH.A"," Serbia","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","10.855","9.937","9.468","10.436","11.772","8.58","9.193","10.101","10.893","9.979","10.684","9.061","10.53261","10.07028","9.66"
"INTL.33-12-SVK-BKWH.A"," Slovakia","--","--","--","--","--","--","--","--","--","--","--","--","--","3.432","4.311","4.831","4.185","4.023","4.224","4.429","4.569","4.878","5.215","3.4452","4.059","4.592","4.355","4.406","4","4.324","5.184","3.211","3.687","4.329","3.762","3.701","4.302","4.321","3.506","4.27383","4.67"
"INTL.33-12-SVN-BKWH.A"," Slovenia","--","--","--","--","--","--","--","--","--","--","--","--","3.379","2.974","3.348","3.187","3.616","3.046","3.4","3.684","3.771","3.741","3.265","2.916","4.033","3.426","3.555","3.233","3.978","4.666","4.452","3.506","3.841","4.562","6.011","3.75","4.443","3.814","4.643","4.43421","5.24"
"INTL.33-12-ESP-BKWH.A"," Spain","29.16","21.64","25.99","26.696","31.088","30.895","26.105","27.016","34.76","19.046","25.16","27.01","18.731","24.133","27.898","22.881","39.404","34.43","33.665","22.634","29.274","40.617","22.691","40.643","31.359","18.209","25.699","27.036","23.13","26.147","41.576","30.07","20.192","36.45","38.815","27.656","35.77","18.007","33.743","24.23025","33.34"
"INTL.33-12-SWE-BKWH.A"," Sweden","58.133","59.006","54.369","62.801","67.106","70.095","60.134","70.95","69.016","70.911","71.778","62.603","73.588","73.905","58.508","67.421","51.2226","68.365","74.25","70.974","77.798","78.269","65.696","53.005","59.522","72.075","61.106","65.497","68.378","65.193","66.279","66.047","78.333","60.81","63.227","74.734","61.645","64.651","61.79","64.46583","71.6"
"INTL.33-12-CHE-BKWH.A"," Switzerland","32.481","35.13","35.974","35.069","29.871","31.731","32.576","34.328","35.437","29.477","29.497","31.756","32.373","35.416","38.678","34.817","28.458","33.70257","33.136","39.604","36.466","40.895","34.862","34.471","33.411","30.914","30.649","34.898","35.676","35.366","35.704","32.069","38.218","38.08","37.659","37.879","34.281","33.754","34.637","37.6596","40.62"
"INTL.33-12-TUR-BKWH.A"," Turkey","11.159","12.308","13.81","11.13","13.19","11.822","11.637","18.314","28.447","17.61","22.917","22.456","26.302","33.611","30.28","35.186","40.07","39.41784","41.80671","34.33","30.57","23.77","33.346","34.977","45.623","39.165","43.802","35.492","32.937","35.598","51.423","51.155","56.669","58.225","39.75","65.856","66.686","57.824","59.49","87.99714","77.39"
"INTL.33-12-GBR-BKWH.A"," United Kingdom","3.921","4.369","4.543","4.548","3.992","4.08","4.767","4.13","4.915","4.732","5.119","4.534","5.329","4.237","5.043","4.79","3.359","4.127","5.067","5.283","5.035","4.015","4.74","3.195","4.795","4.873","4.547","5.026","5.094","5.178","3.566","5.655","5.286","4.667","5.832","6.246","5.342","5.836","5.189","5.89941","7.64"

View File

@ -24,3 +24,5 @@ Gridlink,Kingsnorth (UK),Warande (FR),160,,1400,in permitting,,https://tyndp.ent
NeuConnect,Grain (UK),Fedderwarden (DE),680,,1400,in permitting,,https://tyndp.entsoe.eu/tyndp2018/projects/projects/309,0.716666666666667,51.44,8.046524,53.562763 NeuConnect,Grain (UK),Fedderwarden (DE),680,,1400,in permitting,,https://tyndp.entsoe.eu/tyndp2018/projects/projects/309,0.716666666666667,51.44,8.046524,53.562763
NordBalt,Klaipeda (LT),Nybro (SE),450,,700,built,,https://en.wikipedia.org/wiki/NordBalt,21.256667,55.681667,15.854167,56.767778 NordBalt,Klaipeda (LT),Nybro (SE),450,,700,built,,https://en.wikipedia.org/wiki/NordBalt,21.256667,55.681667,15.854167,56.767778
Estlink 1,Harku (EE),Espoo (FI),105,,350,built,,https://en.wikipedia.org/wiki/Estlink,24.560278,59.384722,24.551667,60.203889 Estlink 1,Harku (EE),Espoo (FI),105,,350,built,,https://en.wikipedia.org/wiki/Estlink,24.560278,59.384722,24.551667,60.203889
Greenlink,Waterford (IE),Pembroke (UK),,180,500,under construction,,https://tyndp2022-project-platform.azurewebsites.net/projectsheets/transmission/286,-6.987,52.260,-4.986,51.686
Celtic Interconnector,Aghada (IE),La Martyre (FR),,572,700,under consideration,,https://tyndp2022-project-platform.azurewebsites.net/projectsheets/transmission/107,-8.16642,51.91413,-4.184,48.459
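The length column of this file (labelled "Length (distance*1.2) (km)") appears to be the great-circle distance between the two converter stations scaled by 1.2; a quick sketch checking this for the Greenlink row added above, using its coordinates:

.. code:: python

    from math import radians, sin, cos, asin, sqrt

    def haversine(lon1, lat1, lon2, lat2, r=6371.0):
        """Great-circle distance in km between two lon/lat points."""
        dlon, dlat = radians(lon2 - lon1), radians(lat2 - lat1)
        a = sin(dlat / 2) ** 2 + cos(radians(lat1)) * cos(radians(lat2)) * sin(dlon / 2) ** 2
        return 2 * r * asin(sqrt(a))

    # Greenlink: Waterford (IE) -> Pembroke (UK)
    print(round(1.2 * haversine(-6.987, 52.260, -4.986, 51.686)))  # ~181, vs. 180 in the table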


16
data/nuclear_p_max_pu.csv Normal file
View File

@ -0,0 +1,16 @@
country,factor
BE,0.65
BG,0.89
CZ,0.82
FI,0.92
FR,0.70
DE,0.88
HU,0.90
NL,0.86
RO,0.92
SK,0.89
SI,0.94
ES,0.89
SE,0.82
CH,0.86
GB,0.67
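The new ``data/nuclear_p_max_pu.csv`` file holds per-country availability factors for nuclear. A hedged sketch of reading it and looking up a factor (how the factors are attached to generators in the workflow is not shown here):

.. code:: python

    import pandas as pd

    factors = pd.read_csv("data/nuclear_p_max_pu.csv", index_col="country")["factor"]
    print(factors["FR"])  # 0.70, i.e. French nuclear limited to 70% of nominal capacity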

View File

@ -36,12 +36,20 @@ Link:
"5583": "7428" # bus0 == bus1 to remove link in remove_unconnected_components (Sardinia) "5583": "7428" # bus0 == bus1 to remove link in remove_unconnected_components (Sardinia)
"13588": "7428" # bus0 == bus1 to remove link in remove_unconnected_components (Sardinia) "13588": "7428" # bus0 == bus1 to remove link in remove_unconnected_components (Sardinia)
"T23": "6355" # bus0 == bus1 to remove link in remove_unconnected_components (NordBalt) "T23": "6355" # bus0 == bus1 to remove link in remove_unconnected_components (NordBalt)
"14815": "5939" # Kainachtal
"8706": "6448"
bus1: bus1:
index: index:
"12931": "8152" # BorWin3 "12931": "8152" # BorWin3
"5582": "2382" # combine link 5583 + 5582 in 5582 (Sardinia) "5582": "2382" # combine link 5583 + 5582 in 5582 (Sardinia)
"13589": "1349" # combine link 13589 + 13588 in 13589 (Sardinia) "13589": "1349" # combine link 13589 + 13588 in 13589 (Sardinia)
"14820": "6354" # NordBalt "14820": "6354" # NordBalt
"14810": "6365" # Skagerrak
"8708": "6448"
"8394": "6695"
"14813": "7052"
"8009": "5939"
"5601": "7052" # Link Sweden - Lübeck
length: length:
index: index:
"5582": 26.39 # new length of combined links (sum) "5582": 26.39 # new length of combined links (sum)
@ -53,6 +61,7 @@ Line:
bus0: bus0:
index: index:
"14573": "7179" #fix bus-id substation in PT (220/380kV issue) "14573": "7179" #fix bus-id substation in PT (220/380kV issue)
"14756": "8577" # Deeside connection
v_nom: v_nom:
index: index:
"14573": 220 # 220/380kV issue of substation in PT "14573": 220 # 220/380kV issue of substation in PT

View File

@ -1,4 +1,4 @@
# SPDX-FileCopyrightText: 2017-2020 The PyPSA-Eur Authors # SPDX-FileCopyrightText: 2017-2022 The PyPSA-Eur Authors
# #
# SPDX-License-Identifier: MIT # SPDX-License-Identifier: MIT

View File

@ -1,4 +1,4 @@
/* SPDX-FileCopyrightText: 2017-2020 The PyPSA-Eur Authors /* SPDX-FileCopyrightText: 2017-2022 The PyPSA-Eur Authors
SPDX-License-Identifier: MIT SPDX-License-Identifier: MIT
*/ */

View File

@ -72,7 +72,7 @@ Step 3 - Installation of Cloud SDK
- Download the Google Cloud `SDK <https://cloud.google.com/sdk>`_. Check that you are logged in to your Google account. The link should lead you to the Windows installation of Google Cloud SDK. - Download the Google Cloud `SDK <https://cloud.google.com/sdk>`_. Check that you are logged in to your Google account. The link should lead you to the Windows installation of Google Cloud SDK.
- Follow the "Quickstart for Windows - Before you begin" steps. - Follow the "Quickstart for Windows - Before you begin" steps.
- After the successfull installation and initialization, close the Google Cloud SDK reopen it again. Type the following command into the "Google Cloud SDK Shell": - After the successful installation and initialization, close the Google Cloud SDK and reopen it. Type the following command into the "Google Cloud SDK Shell":
.. code:: bash .. code:: bash
@ -107,7 +107,7 @@ Make sure that your instance is operating for the next steps.
- Click on the advanced setting. SSH -> Authentication. - Click on the advanced setting. SSH -> Authentication.
- Option 1. Click on the Tools button and "Install Public Key into Server..". A public key must already exist somewhere in your folder structure; on a local Windows computer it is typically found under :\Users\...\.ssh (there should be a PKK file). - Option 1. Click on the Tools button and "Install Public Key into Server..". A public key must already exist somewhere in your folder structure; on a local Windows computer it is typically found under :\Users\...\.ssh (there should be a PKK file).
- Option 2. Click on the Tools button and "Generate new key pair...". Save the private key in a folder you remember and add it to the "private key file" field in WinSCP. Upload the public key to the metadata of your instance. - Option 2. Click on the Tools button and "Generate new key pair...". Save the private key in a folder you remember and add it to the "private key file" field in WinSCP. Upload the public key to the metadata of your instance.
- Click ok and save. Then click Login. If successfull WinSCP will open on the left side your local computer folder structure and on the right side the folder strucutre of your VM. (If you followed Option 2 and its not initially working. Stop your instance, refresh the website, reopen the WinSCP field. Afterwards your your Login should be successfull) - Click OK and save, then click Login. If successful, WinSCP will show your local folder structure on the left and the folder structure of your VM on the right. (If you followed Option 2 and it is not working initially, stop your instance, refresh the website and reopen WinSCP. Afterwards your login should be successful.)
If you struggled with the above steps, you could also try `this video <https://www.youtube.com/watch?v=lYx1oQkEF0E>`_. If you struggled with the above steps, you could also try `this video <https://www.youtube.com/watch?v=lYx1oQkEF0E>`_.

View File

@ -1,3 +1,4 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2017-2020 The PyPSA-Eur Authors # SPDX-FileCopyrightText: 2017-2020 The PyPSA-Eur Authors
# #
# SPDX-License-Identifier: MIT # SPDX-License-Identifier: MIT
@ -16,19 +17,19 @@
# All configuration values have a default; values that are commented out # All configuration values have a default; values that are commented out
# serve to show the default. # serve to show the default.
import sys
import os import os
import shlex import shlex
import sys
# If extensions (or modules to document with autodoc) are in another directory, # If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the # add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here. # documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../scripts')) sys.path.insert(0, os.path.abspath("../scripts"))
# -- General configuration ------------------------------------------------ # -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here. # If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0' # needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be # Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
@ -36,47 +37,47 @@ sys.path.insert(0, os.path.abspath('../scripts'))
extensions = [ extensions = [
#'sphinx.ext.autodoc', #'sphinx.ext.autodoc',
#'sphinx.ext.autosummary', #'sphinx.ext.autosummary',
'sphinx.ext.intersphinx', "sphinx.ext.intersphinx",
'sphinx.ext.todo', "sphinx.ext.todo",
'sphinx.ext.mathjax', "sphinx.ext.mathjax",
'sphinx.ext.napoleon', "sphinx.ext.napoleon",
'sphinx.ext.graphviz', "sphinx.ext.graphviz",
#'sphinx.ext.pngmath', #'sphinx.ext.pngmath',
#'sphinxcontrib.tikz', #'sphinxcontrib.tikz',
#'rinoh.frontend.sphinx', #'rinoh.frontend.sphinx',
'sphinx.ext.imgconverter', # for SVG conversion "sphinx.ext.imgconverter", # for SVG conversion
] ]
autodoc_default_flags = ['members'] autodoc_default_flags = ["members"]
autosummary_generate = True autosummary_generate = True
# Add any paths that contain templates here, relative to this directory. # Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates'] templates_path = ["_templates"]
# The suffix(es) of source filenames. # The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string: # You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md'] # source_suffix = ['.rst', '.md']
source_suffix = '.rst' source_suffix = ".rst"
# The encoding of source files. # The encoding of source files.
#source_encoding = 'utf-8-sig' # source_encoding = 'utf-8-sig'
# The master toctree document. # The master toctree document.
master_doc = 'index' master_doc = "index"
# General information about the project. # General information about the project.
project = u'PyPSA-Eur' project = "PyPSA-Eur"
copyright = u'2017-2020 Jonas Hoersch (KIT, FIAS), Fabian Hofmann (FIAS), David Schlachtberger (FIAS), Tom Brown (KIT, FIAS); 2019-2020 Fabian Neumann (KIT)' copyright = "2017-2022 Jonas Hoersch (KIT, FIAS), Fabian Hofmann (TUB, FIAS), David Schlachtberger (FIAS), Tom Brown (TUB, KIT, FIAS); 2019-2022 Fabian Neumann (TUB, KIT)"
author = u'Jonas Hoersch (KIT, FIAS), Fabian Hofmann (FIAS), David Schlachtberger (FIAS), Tom Brown (KIT, FIAS), Fabian Neumann (KIT)' author = "Jonas Hoersch (KIT, FIAS), Fabian Hofmann (TUB, FIAS), David Schlachtberger (FIAS), Tom Brown (TUB, KIT, FIAS), Fabian Neumann (TUB, KIT)"
# The version info for the project you're documenting, acts as replacement for # The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the # |version| and |release|, also used in various other places throughout the
# built documents. # built documents.
# #
# The short X.Y version. # The short X.Y version.
version = u'0.3' version = "0.6"
# The full version, including alpha/beta/rc tags. # The full version, including alpha/beta/rc tags.
release = u'0.4.0' release = "0.6.1"
# The language for content autogenerated by Sphinx. Refer to documentation # The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages. # for a list of supported languages.
@ -87,37 +88,37 @@ language = None
# There are two options for replacing |today|: either, you set today to some # There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used: # non-false value, then it is used:
#today = '' # today = ''
# Else, today_fmt is used as the format for a strftime call. # Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y' # today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and # List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files. # directories to ignore when looking for source files.
exclude_patterns = ['_build'] exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all # The reST default role (used for this markup: `text`) to use for all
# documents. # documents.
#default_role = None # default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text. # If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True # add_function_parentheses = True
# If true, the current module name will be prepended to all description # If true, the current module name will be prepended to all description
# unit titles (such as .. function::). # unit titles (such as .. function::).
#add_module_names = True # add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the # If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default. # output. They are ignored by default.
#show_authors = False # show_authors = False
# The name of the Pygments (syntax highlighting) style to use. # The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx' pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting. # A list of ignored prefixes for module index sorting.
#modindex_common_prefix = [] # modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents. # If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False # keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing. # If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True todo_include_todos = True
@ -127,35 +128,35 @@ todo_include_todos = True
# The theme to use for HTML and HTML Help pages. See the documentation for # The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. # a list of builtin themes.
html_theme = 'sphinx_rtd_theme' html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme # Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the # further. For a list of options available for each theme, see the
# documentation. # documentation.
html_theme_options = { html_theme_options = {
'display_version': True, "display_version": True,
'sticky_navigation': True, "sticky_navigation": True,
} }
# Add any paths that contain custom themes here, relative to this directory. # Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = [] # html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to # The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation". # "<project> v<release> documentation".
#html_title = None # html_title = None
# A shorter title for the navigation bar. Default is the same as html_title. # A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None # html_short_title = None
# The name of an image file (relative to this directory) to place at the top # The name of an image file (relative to this directory) to place at the top
# of the sidebar. # of the sidebar.
#html_logo = None # html_logo = None
# The name of an image file (within the static path) to use as favicon of the # The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large. # pixels large.
#html_favicon = None # html_favicon = None
# These folders are copied to the documentation's HTML output # These folders are copied to the documentation's HTML output
html_static_path = ["_static"] html_static_path = ["_static"]
@ -167,130 +168,127 @@ html_css_files = ["theme_overrides.css"]
# Add any extra paths that contain custom files (such as robots.txt or # Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied # .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation. # directly to the root of the documentation.
#html_extra_path = [] # html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format. # using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y' # html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to # If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities. # typographically correct entities.
#html_use_smartypants = True # html_use_smartypants = True
# Custom sidebar templates, maps document names to template names. # Custom sidebar templates, maps document names to template names.
#html_sidebars = {} # html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to # Additional templates that should be rendered to pages, maps page names to
# template names. # template names.
#html_additional_pages = {} # html_additional_pages = {}
# If false, no module index is generated. # If false, no module index is generated.
#html_domain_indices = True # html_domain_indices = True
# If false, no index is generated. # If false, no index is generated.
#html_use_index = True # html_use_index = True
# If true, the index is split into individual pages for each letter. # If true, the index is split into individual pages for each letter.
#html_split_index = False # html_split_index = False
# If true, links to the reST sources are added to the pages. # If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True # html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True # html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True # html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will # If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the # contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served. # base URL from which the finished HTML is served.
#html_use_opensearch = '' # html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml"). # This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None # html_file_suffix = None
# Language to be used for generating the HTML full-text search index. # Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages: # Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en' # html_search_language = 'en'
# A dictionary with options for the search language support, empty by default. # A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value # Now only 'ja' uses this config value
#html_search_options = {'type': 'default'} # html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that # The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used. # implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js' # html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder. # Output file base name for HTML help builder.
htmlhelp_basename = 'PyPSAEurdoc' htmlhelp_basename = "PyPSAEurdoc"
# -- Options for LaTeX output --------------------------------------------- # -- Options for LaTeX output ---------------------------------------------
latex_elements = { latex_elements = {
# The paper size ('letterpaper' or 'a4paper'). # The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper', #'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt',
#'pointsize': '10pt', # Additional stuff for the LaTeX preamble.
#'preamble': '',
# Additional stuff for the LaTeX preamble. # Latex figure (float) alignment
#'preamble': '', #'figure_align': 'htbp',
# Latex figure (float) alignment
#'figure_align': 'htbp',
} }
# Grouping the document tree into LaTeX files. List of tuples # Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, # (source start file, target name, title,
# author, documentclass [howto, manual, or own class]). # author, documentclass [howto, manual, or own class]).
latex_documents = [ latex_documents = [
(master_doc, 'PyPSA-Eur.tex', u'PyPSA-Eur Documentation', (master_doc, "PyPSA-Eur.tex", "PyPSA-Eur Documentation", "author", "manual"),
u'author', 'manual'),
] ]
#Added for rinoh http://www.mos6581.org/rinohtype/quickstart.html # Added for rinoh http://www.mos6581.org/rinohtype/quickstart.html
rinoh_documents = [(master_doc, # top-level file (index.rst) rinoh_documents = [
'PyPSA-Eur', # output (target.pdf) (
'PyPSA-Eur Documentation', # document title master_doc, # top-level file (index.rst)
'author')] # document author "PyPSA-Eur", # output (target.pdf)
"PyPSA-Eur Documentation", # document title
"author",
)
] # document author
# The name of an image file (relative to this directory) to place at the top of # The name of an image file (relative to this directory) to place at the top of
# the title page. # the title page.
#latex_logo = None # latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts, # For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters. # not chapters.
#latex_use_parts = False # latex_use_parts = False
# If true, show page references after internal links. # If true, show page references after internal links.
#latex_show_pagerefs = False # latex_show_pagerefs = False
# If true, show URL addresses after external links. # If true, show URL addresses after external links.
#latex_show_urls = False # latex_show_urls = False
# Documents to append as an appendix to all manuals. # Documents to append as an appendix to all manuals.
#latex_appendices = [] # latex_appendices = []
# If false, no module index is generated. # If false, no module index is generated.
#latex_domain_indices = True # latex_domain_indices = True
# -- Options for manual page output --------------------------------------- # -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples # One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section). # (source start file, name, description, authors, manual section).
man_pages = [ man_pages = [(master_doc, "pypsa-eur", "PyPSA-Eur Documentation", [author], 1)]
(master_doc, 'pypsa-eur', u'PyPSA-Eur Documentation',
[author], 1)
]
# If true, show URL addresses after external links. # If true, show URL addresses after external links.
#man_show_urls = False # man_show_urls = False
# -- Options for Texinfo output ------------------------------------------- # -- Options for Texinfo output -------------------------------------------
@ -299,23 +297,29 @@ man_pages = [
# (source start file, target name, title, author, # (source start file, target name, title, author,
# dir menu entry, description, category) # dir menu entry, description, category)
texinfo_documents = [ texinfo_documents = [
(master_doc, 'PyPSA-Eur', u'PyPSA-Eur Documentation', (
author, 'PyPSA-Eur', 'One line description of project.', master_doc,
'Miscellaneous'), "PyPSA-Eur",
"PyPSA-Eur Documentation",
author,
"PyPSA-Eur",
"One line description of project.",
"Miscellaneous",
),
] ]
# Documents to append as an appendix to all manuals. # Documents to append as an appendix to all manuals.
#texinfo_appendices = [] # texinfo_appendices = []
# If false, no module index is generated. # If false, no module index is generated.
#texinfo_domain_indices = True # texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'. # How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote' # texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu. # If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False # texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library. # Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None} intersphinx_mapping = {"https://docs.python.org/": None}

View File

@ -1,3 +1,15 @@
,Unit,Values,Description ,Unit,Values,Description
simplify,,, simplify_network,,,
-- to_substations,bool,"{'true','false'}","Aggregates all nodes without power injection (positive or negative, i.e. demand or generation) to electrically closest ones" -- to_substations,bool,"{'true','false'}","Aggregates all nodes without power injection (positive or negative, i.e. demand or generation) to electrically closest ones"
-- algorithm,str,"One of {kmeans, hac, modularity}",
-- feature,str,"Str in the format carrier1+carrier2+...+carrierN-X, where CarrierI can be from {solar, onwind, offwind, ror} and X is one of {cap, time}.",
-- exclude_carriers,list,"List of Str like [ 'solar', 'onwind'] or empty list []","List of carriers which will not be aggregated. If empty, all carriers will be aggregated."
cluster_network,,,
-- algorithm,str,"One of {kmeans, hac}",
-- feature,str,"Str in the format carrier1+carrier2+...+carrierN-X, where CarrierI can be from {solar, onwind, offwind, ror} and X is one of {cap, time}.",
-- exclude_carriers,list,"List of Str like [ 'solar', 'onwind'] or empty list []","List of carriers which will not be aggregated. If empty, all carriers will be aggregated."
aggregation_strategies,,,
-- generators,,,
-- -- {key},str,"{key} can be any attribute of the generator component (str). Its value can be any aggregation strategy that can be applied to a pandas.Series via getattr(), for example one of {min, max, sum}.","Aggregates the component according to the given strategy. For example, if sum, then all values within each cluster are summed to represent the new generator."
-- buses,,,
-- -- {key},str,"{key} can be any attribute of the bus component (str). Its value can be any aggregation strategy that can be applied to a pandas.Series via getattr(), for example one of {min, max, sum}.","Aggregates the component according to the given strategy. For example, if sum, then all values within each cluster are summed to represent the new bus."
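The "applied to a pandas.Series via getattr()" wording means the strategy string is looked up as a Series method and called per cluster; a minimal sketch:

.. code:: python

    import pandas as pd

    values_in_cluster = pd.Series([1.0, 2.0, 4.0])
    strategy = "sum"  # e.g. the p_nom_max strategy from the config
    print(getattr(values_in_cluster, strategy)())  # 7.0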


View File

@ -1,8 +1,9 @@
,Unit,Values,Description ,Unit,Values,Description
year,--,"YYYY; e.g. '2030'","Year for which to retrieve cost assumptions of ``data/costs.csv``." year,--,"YYYY; e.g. '2030'","Year for which to retrieve cost assumptions of ``resources/costs.csv``."
discountrate,--,float,"Default discount rate if not specified for a technology in ``data/costs.csv``." version,--,"vX.X.X; e.g. 'v0.1.0'","Version of ``technology-data`` repository to use."
USD2013_to_EUR2013,--,float,"Exchange rate from USD :math:`_{2013}` to EUR :math:`_{2013}` from `ECB <https://www.ecb.europa.eu/stats/exchange/eurofxref/html/eurofxref-graph-usd.en.html>`_" rooftop_share,--,float,"Share of rooftop PV when calculating capital cost of solar (joint rooftop and utility-scale PV)."
capital_cost,EUR/MW,"Keys should be in the 'technology' column of ``data/costs.csv``. Values can be any float.","For the given technologies, assumptions about their capital investment costs are set to the corresponding value. Optional; overwrites cost assumptions from ``data/costs.csv``." fill_values,--,float,"Default values if not specified for a technology in ``resources/costs.csv``."
marginal_cost,EUR/MWh,"Keys should be in the 'technology' column of ``data/costs.csv``. Values can be any float.","For the given technologies, assumptions about their marginal operating costs are set to the corresponding value. Optional; overwrites cost assumptions from ``data/costs.csv``." capital_cost,EUR/MW,"Keys should be in the 'technology' column of ``resources/costs.csv``. Values can be any float.","For the given technologies, assumptions about their capital investment costs are set to the corresponding value. Optional; overwrites cost assumptions from ``resources/costs.csv``."
marginal_cost,EUR/MWh,"Keys should be in the 'technology' column of ``resources/costs.csv``. Values can be any float.","For the given technologies, assumptions about their marginal operating costs are set to the corresponding value. Optional; overwrites cost assumptions from ``resources/costs.csv``."
emission_prices,,,"Specify exogenous prices for emission types listed in ``network.carriers`` to marginal costs." emission_prices,,,"Specify exogenous prices for emission types listed in ``network.carriers`` to marginal costs."
-- co2,EUR/t,float,"Exogenous price of carbon-dioxide added to the marginal costs of fossil-fuelled generators according to their carbon intensity. Added through the keyword ``Ep`` in the ``{opts}`` wildcard only in the rule :mod:`prepare_network``." -- co2,EUR/t,float,"Exogenous price of carbon-dioxide added to the marginal costs of fossil-fuelled generators according to their carbon intensity. Added through the keyword ``Ep`` in the ``{opts}`` wildcard only in the rule :mod:`prepare_network``."
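A hedged example of a matching ``costs`` section (key names follow the table above; all numbers are placeholders rather than recommended values, and the nested structure of ``fill_values`` is an assumption)::

    costs:
      year: 2030
      version: v0.4.0              # version tag of the technology-data repository (placeholder)
      rooftop_share: 0.14          # share of rooftop PV in the joint solar capital cost (placeholder)
      fill_values:
        discount rate: 0.07        # default used where resources/costs.csv has no entry (assumed structure)
      capital_cost:
        onwind: 1100000            # EUR/MW, overwrites resources/costs.csv (placeholder)
      marginal_cost:
        solar: 0.01                # EUR/MWh (placeholder)
      emission_prices:
        co2: 25.0                  # EUR/t CO2 added to fossil generators' marginal costs (placeholder)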


View File

@ -1,19 +1,29 @@
,Unit,Values,Description ,Unit,Values,Description
voltages,kV,"Any subset of {220., 300., 380.}",Voltage levels to consider when voltages,kV,"Any subset of {220., 300., 380.}",Voltage levels to consider
gaslimit,MWhth,"float or false",Global gas usage limit
co2limit,:math:`t_{CO_2-eq}/a`,float,Cap on total annual system carbon dioxide emissions co2limit,:math:`t_{CO_2-eq}/a`,float,Cap on total annual system carbon dioxide emissions
co2base,:math:`t_{CO_2-eq}/a`,float,Reference value of total annual system carbon dioxide emissions if relative emission reduction target is specified in ``{opts}`` wildcard. co2base,:math:`t_{CO_2-eq}/a`,float,Reference value of total annual system carbon dioxide emissions if relative emission reduction target is specified in ``{opts}`` wildcard.
agg_p_nom_limits,file,path,Reference to ``.csv`` file specifying per carrier generator nominal capacity constraints for individual countries if ``'CCL'`` is in ``{opts}`` wildcard. Defaults to ``data/agg_p_nom_minmax.csv``. agg_p_nom_limits,file,path,Reference to ``.csv`` file specifying per carrier generator nominal capacity constraints for individual countries if ``'CCL'`` is in ``{opts}`` wildcard. Defaults to ``data/agg_p_nom_minmax.csv``.
extendable_carriers,,, operational_reserve,,,"Settings for reserve requirements analogous to the `GenX <https://genxproject.github.io/GenX/dev/core/#Reserves>`_ implementation"
-- Generator,--,"Any subset of {'OCGT','CCGT'}",Places extendable conventional power plants (OCGT and/or CCGT) where gas power plants are located today without capacity limits. -- activate,bool,"true or false","Whether to take operational reserve requirements into account during optimisation"
-- StorageUnit,--,"Any subset of {'battery','H2'}",Adds extendable storage units (battery and/or hydrogen) at every node/bus after clustering without capacity limits and with zero initial capacity. -- epsilon_load,--,float,share of total load
-- Store,--,"Any subset of {'battery','H2'}",Adds extendable storage units (battery and/or hydrogen) at every node/bus after clustering without capacity limits and with zero initial capacity. -- epsilon_vres,--,float,share of total renewable supply
-- Link,--,Any subset of {'H2 pipeline'},Adds extendable links (H2 pipelines only) at every connection where there are lines or HVDC links without capacity limits and with zero initial capacity. Hydrogen pipelines require hydrogen storage to be modelled as ``Store``. -- contingency,MW,float,fixed reserve capacity
max_hours,,, max_hours,,,
-- battery,h,float,Maximum state of charge capacity of the battery in terms of hours at full output capacity ``p_nom``. Cf. `PyPSA documentation <https://pypsa.readthedocs.io/en/latest/components.html#storage-unit>`_. -- battery,h,float,Maximum state of charge capacity of the battery in terms of hours at full output capacity ``p_nom``. Cf. `PyPSA documentation <https://pypsa.readthedocs.io/en/latest/components.html#storage-unit>`_.
-- H2,h,float,Maximum state of charge capacity of the hydrogen storage in terms of hours at full output capacity ``p_nom``. Cf. `PyPSA documentation <https://pypsa.readthedocs.io/en/latest/components.html#storage-unit>`_. -- H2,h,float,Maximum state of charge capacity of the hydrogen storage in terms of hours at full output capacity ``p_nom``. Cf. `PyPSA documentation <https://pypsa.readthedocs.io/en/latest/components.html#storage-unit>`_.
extendable_carriers,,,
-- Generator,--,"Any extendable carrier","Defines existing or non-existing conventional and renewable power plants to be extendable during the optimization. Conventional generators can only be built/expanded where already existent today. If a listed conventional carrier is not included in the ``conventional_carriers`` list, the lower limit of the capacity expansion is set to 0."
-- StorageUnit,--,"Any subset of {'battery','H2'}",Adds extendable storage units (battery and/or hydrogen) at every node/bus after clustering without capacity limits and with zero initial capacity.
-- Store,--,"Any subset of {'battery','H2'}",Adds extendable storage units (battery and/or hydrogen) at every node/bus after clustering without capacity limits and with zero initial capacity.
-- Link,--,Any subset of {'H2 pipeline'},Adds extendable links (H2 pipelines only) at every connection where there are lines or HVDC links without capacity limits and with zero initial capacity. Hydrogen pipelines require hydrogen storage to be modelled as ``Store``.
powerplants_filter,--,"use `pandas.query <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.query.html>`_ strings here, e.g. Country not in ['Germany']",Filter query for the default powerplant database. powerplants_filter,--,"use `pandas.query <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.query.html>`_ strings here, e.g. Country not in ['Germany']",Filter query for the default powerplant database.
custom_powerplants,--,"use `pandas.query <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.query.html>`_ strings here, e.g. Country in ['Germany']",Filter query for the custom powerplant database. custom_powerplants,--,"use `pandas.query <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.query.html>`_ strings here, e.g. Country in ['Germany']",Filter query for the custom powerplant database.
conventional_carriers,--,"Any subset of {nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass}",List of conventional power plants to include in the model from ``resources/powerplants.csv``. conventional_carriers,--,"Any subset of {nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass}","List of conventional power plants to include in the model from ``resources/powerplants.csv``. If an included carrier is also listed in `extendable_carriers`, the capacity is taken as a lower bound."
renewable_capacities_from_OPSD,,"[solar, onwind, offwind]",List of carriers (offwind-ac and offwind-dc are included in offwind) whose capacities 'p_nom' are aligned to the `OPSD renewable power plant list <https://data.open-power-system-data.org/renewable_power_plants/>`_ renewable_carriers,--,"Any subset of {solar, onwind, offwind-ac, offwind-dc, hydro}",List of renewable generators to include in the model.
estimate_renewable_capacities_from_capacitiy_stats,,, estimate_renewable_capacities,,,
"-- Fueltype [ppm], e.g. Wind",,"list of fueltypes strings in PyPSA-Eur, e.g. [onwind, offwind-ac, offwind-dc]",converts ppm Fueltype to PyPSA-EUR Fueltype -- enable,,bool,"Activate routine to estimate renewable capacities"
-- from_opsd,--,bool,"Add capacities from OPSD data"
-- year,--,bool,"Renewable capacities are based on existing capacities reported by IRENA for the specified year"
-- expansion_limit,--,float or false,"Artificially limit maximum capacities to factor * (IRENA capacities), e.g. ``expansion_limit: 1.1`` allows 110% of the reference year's capacities; ``false``: use the estimated renewable potentials determined by the workflow"
-- technology_mapping,,,"Mapping between powerplantmatching and PyPSA-Eur technology names"
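A hedged sketch of how several of these ``electricity`` options could be set together (the nesting mirrors the table above; the values and the technology mapping are illustrative assumptions, not defaults)::

    electricity:
      gaslimit: false                    # or a float in MWh thermal
      operational_reserve:
        activate: false
        epsilon_load: 0.02               # share of total load (placeholder)
        epsilon_vres: 0.02               # share of total renewable supply (placeholder)
        contingency: 4000                # MW of fixed reserve capacity (placeholder)
      extendable_carriers:
        Generator: [OCGT]
        StorageUnit: [battery, H2]
        Store: []
        Link: []
      renewable_carriers: [solar, onwind, offwind-ac, offwind-dc, hydro]
      estimate_renewable_capacities:
        enable: true
        from_opsd: true
        year: 2020
        expansion_limit: false           # or e.g. 1.1 for 110% of the reported capacities
        technology_mapping:
          Offshore: [offwind-ac, offwind-dc]
          Onshore: [onwind]
          PV: [solar]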


View File

@ -6,8 +6,8 @@ resource,,,
capacity_per_sqkm,:math:`MW/km^2`,float,"Allowable density of wind turbine placement." capacity_per_sqkm,:math:`MW/km^2`,float,"Allowable density of wind turbine placement."
corine,--,"Any *realistic* subset of the `CORINE Land Cover code list <http://www.eea.europa.eu/data-and-maps/data/corine-land-cover-2006-raster-1/corine-land-cover-classes-and/clc_legend.csv/at_download/file>`_","Specifies areas according to CORINE Land Cover codes which are generally eligible for AC-connected offshore wind turbine placement." corine,--,"Any *realistic* subset of the `CORINE Land Cover code list <http://www.eea.europa.eu/data-and-maps/data/corine-land-cover-2006-raster-1/corine-land-cover-classes-and/clc_legend.csv/at_download/file>`_","Specifies areas according to CORINE Land Cover codes which are generally eligible for AC-connected offshore wind turbine placement."
natura,bool,"{true, false}","Switch to exclude `Natura 2000 <https://en.wikipedia.org/wiki/Natura_2000>`_ natural protection areas. Area is excluded if ``true``." natura,bool,"{true, false}","Switch to exclude `Natura 2000 <https://en.wikipedia.org/wiki/Natura_2000>`_ natural protection areas. Area is excluded if ``true``."
ship_threshold,--,float,"Ship density threshold from which areas are excluded."
max_depth,m,float,"Maximum sea water depth at which wind turbines can be built. Maritime areas with deeper waters are excluded in the process of calculating the AC-connected offshore wind potential." max_depth,m,float,"Maximum sea water depth at which wind turbines can be built. Maritime areas with deeper waters are excluded in the process of calculating the AC-connected offshore wind potential."
min_shore_distance,m,float,"Minimum distance to the shore below which wind turbines cannot be built. Such areas close to the shore are excluded in the process of calculating the AC-connected offshore wind potential." min_shore_distance,m,float,"Minimum distance to the shore below which wind turbines cannot be built. Such areas close to the shore are excluded in the process of calculating the AC-connected offshore wind potential."
potential,--,"One of {'simple', 'conservative'}","Method to compute the maximal installable potential for a node; confer :ref:`renewableprofiles`" potential,--,"One of {'simple', 'conservative'}","Method to compute the maximal installable potential for a node; confer :ref:`renewableprofiles`"
clip_p_max_pu,p.u.,float,"To avoid too small values in the renewables` per-unit availability time series values below this threshold are set to zero." clip_p_max_pu,p.u.,float,"To avoid too small values in the renewables` per-unit availability time series values below this threshold are set to zero."
keep_all_available_areas,bool,"{'true', 'false'}","Use all availabe weather cells for renewable profile and potential generation. The default ignores weather cells where only less than 1 MW can be installed."
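For illustration, an offshore wind carrier under ``renewable`` could combine these options as follows (the carrier name, CORINE codes and all numbers are placeholders for the sketch)::

    renewable:
      offwind-ac:
        capacity_per_sqkm: 2             # MW/km2 (placeholder)
        corine: [44, 255]                # eligible CORINE Land Cover codes (placeholder)
        natura: true                     # exclude Natura 2000 protection areas
        ship_threshold: 400              # exclude areas above this ship density (placeholder)
        max_depth: 50                    # m, exclude deeper waters (placeholder)
        min_shore_distance: 30000        # m, exclude areas closer to shore (placeholder)
        potential: simple                # or conservative
        clip_p_max_pu: 0.01              # availability values below this threshold are set to zero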


View File

@ -6,8 +6,8 @@ resource,,,
capacity_per_sqkm,:math:`MW/km^2`,float,"Allowable density of wind turbine placement." capacity_per_sqkm,:math:`MW/km^2`,float,"Allowable density of wind turbine placement."
corine,--,"Any *realistic* subset of the `CORINE Land Cover code list <http://www.eea.europa.eu/data-and-maps/data/corine-land-cover-2006-raster-1/corine-land-cover-classes-and/clc_legend.csv/at_download/file>`_","Specifies areas according to CORINE Land Cover codes which are generally eligible for AC-connected offshore wind turbine placement." corine,--,"Any *realistic* subset of the `CORINE Land Cover code list <http://www.eea.europa.eu/data-and-maps/data/corine-land-cover-2006-raster-1/corine-land-cover-classes-and/clc_legend.csv/at_download/file>`_","Specifies areas according to CORINE Land Cover codes which are generally eligible for AC-connected offshore wind turbine placement."
natura,bool,"{true, false}","Switch to exclude `Natura 2000 <https://en.wikipedia.org/wiki/Natura_2000>`_ natural protection areas. Area is excluded if ``true``." natura,bool,"{true, false}","Switch to exclude `Natura 2000 <https://en.wikipedia.org/wiki/Natura_2000>`_ natural protection areas. Area is excluded if ``true``."
ship_threshold,--,float,"Ship density threshold from which areas are excluded."
max_depth,m,float,"Maximum sea water depth at which wind turbines can be built. Maritime areas with deeper waters are excluded in the process of calculating the AC-connected offshore wind potential." max_depth,m,float,"Maximum sea water depth at which wind turbines can be built. Maritime areas with deeper waters are excluded in the process of calculating the AC-connected offshore wind potential."
min_shore_distance,m,float,"Minimum distance to the shore below which wind turbines cannot be built. Such areas close to the shore are excluded in the process of calculating the AC-connected offshore wind potential." min_shore_distance,m,float,"Minimum distance to the shore below which wind turbines cannot be built. Such areas close to the shore are excluded in the process of calculating the AC-connected offshore wind potential."
potential,--,"One of {'simple', 'conservative'}","Method to compute the maximal installable potential for a node; confer :ref:`renewableprofiles`" potential,--,"One of {'simple', 'conservative'}","Method to compute the maximal installable potential for a node; confer :ref:`renewableprofiles`"
clip_p_max_pu,p.u.,float,"To avoid too small values in the renewables` per-unit availability time series values below this threshold are set to zero." clip_p_max_pu,p.u.,float,"To avoid too small values in the renewables` per-unit availability time series values below this threshold are set to zero."
keep_all_available_areas,bool,"{'true', 'false'}","Use all availabe weather cells for renewable profile and potential generation. The default ignores weather cells where only less than 1 MW can be installed."


View File

@ -11,4 +11,3 @@ corine,,,
natura,bool,"{true, false}","Switch to exclude `Natura 2000 <https://en.wikipedia.org/wiki/Natura_2000>`_ natural protection areas. Area is excluded if ``true``." natura,bool,"{true, false}","Switch to exclude `Natura 2000 <https://en.wikipedia.org/wiki/Natura_2000>`_ natural protection areas. Area is excluded if ``true``."
potential,--,"One of {'simple', 'conservative'}","Method to compute the maximal installable potential for a node; confer :ref:`renewableprofiles`" potential,--,"One of {'simple', 'conservative'}","Method to compute the maximal installable potential for a node; confer :ref:`renewableprofiles`"
clip_p_max_pu,p.u.,float,"To avoid too small values in the renewables` per-unit availability time series values below this threshold are set to zero." clip_p_max_pu,p.u.,float,"To avoid too small values in the renewables` per-unit availability time series values below this threshold are set to zero."
keep_all_available_areas,bool,"{'true', 'false'}","Use all availabe weather cells for renewable profile and potential generation. The default ignores weather cells where only less than 1 MW can be installed."


View File

@ -8,4 +8,5 @@ Trigger, Description, Definition, Status
``ATK``, "Require each node to be autarkic. Example: ``ATK`` removes all lines and links. ``ATKc`` removes all cross-border lines and links.", ``prepare_network``, In active use ``ATK``, "Require each node to be autarkic. Example: ``ATK`` removes all lines and links. ``ATKc`` removes all cross-border lines and links.", ``prepare_network``, In active use
``BAU``, Add a per-``carrier`` minimal overall capacity; i.e. at least ``40GW`` of ``OCGT`` in Europe; configured in ``electricity: BAU_mincapacities``, ``solve_network``: `add_opts_constraints() <https://github.com/PyPSA/pypsa-eur/blob/6b964540ed39d44079cdabddee8333f486d0cd63/scripts/solve_network.py#L66>`__, Untested ``BAU``, Add a per-``carrier`` minimal overall capacity; i.e. at least ``40GW`` of ``OCGT`` in Europe; configured in ``electricity: BAU_mincapacities``, ``solve_network``: `add_opts_constraints() <https://github.com/PyPSA/pypsa-eur/blob/6b964540ed39d44079cdabddee8333f486d0cd63/scripts/solve_network.py#L66>`__, Untested
``SAFE``, Add a capacity reserve margin of a certain fraction above the peak demand to which renewable generators and storage do *not* contribute. Ignores network., ``solve_network`` `add_opts_constraints() <https://github.com/PyPSA/pypsa-eur/blob/6b964540ed39d44079cdabddee8333f486d0cd63/scripts/solve_network.py#L73>`__, Untested ``SAFE``, Add a capacity reserve margin of a certain fraction above the peak demand to which renewable generators and storage do *not* contribute. Ignores network., ``solve_network`` `add_opts_constraints() <https://github.com/PyPSA/pypsa-eur/blob/6b964540ed39d44079cdabddee8333f486d0cd63/scripts/solve_network.py#L73>`__, Untested
``carrier+{c|p}factor``, "Alter the capital cost (``c``) or installable potential (``p``) of a carrier by a factor. Example: ``solar+c0.5`` reduces the capital cost of solar to 50\% of original values.", ``prepare_network``, In active use ``carrier+{c|p|m}factor``,"Alter the capital cost (``c``), installable potential (``p``) or marginal costs (``m``) of a carrier by a factor. Example: ``solar+c0.5`` reduces the capital cost of solar to 50\% of original values.", ``prepare_network``, In active use
``CH4L``,"Add an overall absolute gas limit. If configured in ``electricity: gaslimit`` it is given in MWh thermal; if a float is appended, the overall gas limit is assumed to be given in TWh thermal (e.g. ``CH4L200`` limits gas dispatch to 200 TWh thermal)", ``prepare_network``: ``add_gaslimit()``, In active use


View File

@ -1,4 +1,4 @@
,Unit,Values,Description ,Unit,Values,Description
start,--,"str or datetime-like; e.g. YYYY-MM-DD","Left bound of date range" start,--,"str or datetime-like; e.g. YYYY-MM-DD","Left bound of date range"
end,--,"str or datetime-like; e.g. YYYY-MM-DD","Right bound of date range" end,--,"str or datetime-like; e.g. YYYY-MM-DD","Right bound of date range"
closed,--,"One of {None, left, right}","Make the time interval closed to the ``left``, ``right``, or both sides ``None``." closed,--,"One of {None, left, right}","Make the time interval closed on the ``left``, on the ``right``, or on both sides if ``None``."
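A small hedged example of the ``snapshots`` section (the dates are placeholders)::

    snapshots:
      start: "2013-01-01"
      end: "2014-01-01"
      closed: left                       # include the left bound, exclude the right bound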


View File

@ -12,4 +12,3 @@ corine,--,"Any subset of the `CORINE Land Cover code list <http://www.eea.europa
natura,bool,"{true, false}","Switch to exclude `Natura 2000 <https://en.wikipedia.org/wiki/Natura_2000>`_ natural protection areas. Area is excluded if ``true``." natura,bool,"{true, false}","Switch to exclude `Natura 2000 <https://en.wikipedia.org/wiki/Natura_2000>`_ natural protection areas. Area is excluded if ``true``."
potential,--,"One of {'simple', 'conservative'}","Method to compute the maximal installable potential for a node; confer :ref:`renewableprofiles`" potential,--,"One of {'simple', 'conservative'}","Method to compute the maximal installable potential for a node; confer :ref:`renewableprofiles`"
clip_p_max_pu,p.u.,float,"To avoid too small values in the renewables` per-unit availability time series values below this threshold are set to zero." clip_p_max_pu,p.u.,float,"To avoid too small values in the renewables` per-unit availability time series values below this threshold are set to zero."
keep_all_available_areas,bool,"{'true', 'false'}","Use all availabe weather cells for renewable profile and potential generation. The default ignores weather cells where only less than 1 MW can be installed."


View File

@ -1,5 +1,5 @@
.. ..
SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors
SPDX-License-Identifier: CC-BY-4.0 SPDX-License-Identifier: CC-BY-4.0
@ -28,13 +28,24 @@ Top-level configuration
.. _scenario: .. _scenario:
``scenario`` ``run``
============ =======
It is common practice to analyse energy system optimisation models for **multiple scenarios** for a variety of reasons, It is common practice to analyse energy system optimisation models for **multiple scenarios** for a variety of reasons,
e.g. assessing their sensitivity towards changing the temporal and/or geographical resolution or investigating how e.g. assessing their sensitivity towards changing the temporal and/or geographical resolution or investigating how
investment changes as more ambitious greenhouse-gas emission reduction targets are applied. investment changes as more ambitious greenhouse-gas emission reduction targets are applied.
The ``run`` section is used for running and storing scenarios with different configurations which are not covered by :ref:`wildcards`. It determines the path at which resources, networks and results are stored. Therefore, the user can run different configurations within the same directory. If a run with a non-empty name should use cutouts shared across runs, set ``shared_cutouts`` to ``true``.
.. literalinclude:: ../config.default.yaml
:language: yaml
:start-at: run:
:end-before: scenario:
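Since the contents of ``config.default.yaml`` are not reproduced in this excerpt, a minimal hedged sketch of such a ``run`` section could look as follows (the keys ``name`` and ``shared_cutouts`` are taken from the description above and the release notes; the values are placeholders)::

    run:
      name: "my-run"           # placeholder; resources, networks and results are stored under this name
      shared_cutouts: true     # reuse cutouts from the default directory instead of run-specific cutouts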
``scenario``
============
The ``scenario`` section is an extraordinary section of the config file The ``scenario`` section is an extraordinary section of the config file
that is strongly connected to the :ref:`wildcards` and is designed to that is strongly connected to the :ref:`wildcards` and is designed to
facilitate running multiple scenarios through a single command facilitate running multiple scenarios through a single command
@ -91,9 +102,6 @@ Specifies the temporal range to build an energy system model for as arguments to
:widths: 25,7,22,30 :widths: 25,7,22,30
:file: configtables/electricity.csv :file: configtables/electricity.csv
.. warning::
Carriers in ``conventional_carriers`` must not also be in ``extendable_carriers``.
.. _atlite_cf: .. _atlite_cf:
``atlite`` ``atlite``
@ -174,7 +182,7 @@ Define and specify the ``atlite.Cutout`` used for calculating renewable potentia
.. literalinclude:: ../config.default.yaml .. literalinclude:: ../config.default.yaml
:language: yaml :language: yaml
:start-at: hydro: :start-at: hydro:
:end-before: lines: :end-before: conventional:
.. csv-table:: .. csv-table::
:header-rows: 1 :header-rows: 1
@ -183,6 +191,17 @@ Define and specify the ``atlite.Cutout`` used for calculating renewable potentia
.. _lines_cf: .. _lines_cf:
``conventional``
================
Define additional generator attributes for conventional carrier types. If a scalar value is given, it is applied to all generators of that carrier. However, if a string starting with ``data/`` is given, the value is interpreted as a path to a csv file with country-specific values. The values are then read in and applied to all generators of the given carrier in the given country. Note that the value(s) overwrite the existing values in the corresponding section of the ``generators`` dataframe.
.. literalinclude:: ../config.default.yaml
:language: yaml
:start-at: conventional:
:end-before: lines:
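As a hedged illustration of the described behaviour (the ``nuclear`` entry mirrors the ``data/nuclear_p_max_pu.csv`` example mentioned in the release notes; the ``OCGT`` entry and its value are purely illustrative assumptions)::

    conventional:
      nuclear:
        p_max_pu: "data/nuclear_p_max_pu.csv"   # per-country values read from a csv file
      OCGT:
        p_min_pu: 0.0                           # scalar value applied to all OCGT generators (illustrative)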
``lines`` ``lines``
============= =============
@ -233,8 +252,7 @@ Define and specify the ``atlite.Cutout`` used for calculating renewable potentia
.. literalinclude:: ../config.default.yaml .. literalinclude:: ../config.default.yaml
:language: yaml :language: yaml
:start-at: load: :lines: 212-217
:end-before: costs:
.. csv-table:: .. csv-table::
:header-rows: 1 :header-rows: 1
@ -249,7 +267,7 @@ Define and specify the ``atlite.Cutout`` used for calculating renewable potentia
.. literalinclude:: ../config.default.yaml .. literalinclude:: ../config.default.yaml
:language: yaml :language: yaml
:start-after: scaling_factor: :start-after: scaling_factor:
:end-before: solving: :end-before: clustering:
.. csv-table:: .. csv-table::
:header-rows: 1 :header-rows: 1
@ -257,8 +275,25 @@ Define and specify the ``atlite.Cutout`` used for calculating renewable potentia
:file: configtables/costs.csv :file: configtables/costs.csv
.. note:: .. note::
To change cost assumptions in more detail (i.e. other than ``marginal_cost`` and ``capital_cost``), consider modifying cost assumptions directly in ``data/costs.csv`` as this is not yet supported through the config file. To change cost assumptions in more detail (i.e. other than ``marginal_cost`` and ``capital_cost``), consider modifying cost assumptions directly in ``resources/costs.csv`` as this is not yet supported through the config file.
You can also build multiple different cost databases. Make a renamed copy of ``data/costs.csv`` (e.g. ``data/costs-optimistic.csv``) and set the variable ``COSTS=data/costs-optimistic.csv`` in the ``Snakefile``. You can also build multiple different cost databases. Make a renamed copy of ``resources/costs.csv`` (e.g. ``data/costs-optimistic.csv``) and set the variable ``COSTS=data/costs-optimistic.csv`` in the ``Snakefile``.
.. _clustering_cf:
``clustering``
==============
.. literalinclude:: ../config.default.yaml
:language: yaml
:start-after: co2:
:end-before: solving:
.. csv-table::
:header-rows: 1
:widths: 25,7,22,30
:file: configtables/clustering.csv
.. _solving_cf: .. _solving_cf:

View File

@ -1,5 +1,5 @@
.. ..
SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors
SPDX-License-Identifier: CC-BY-4.0 SPDX-License-Identifier: CC-BY-4.0
@ -16,10 +16,20 @@ to our `GitHub repository <https://github.com/PyPSA/PyPSA-Eur>`_.
* If you start working on a feature in the code, let us know by opening an issue or a draft pull request. * If you start working on a feature in the code, let us know by opening an issue or a draft pull request.
This helps all of us to keep an overview on what is being done and helps to avoid a situation where we This helps all of us to keep an overview on what is being done and helps to avoid a situation where we
are doing the same work twice in parallel. are doing the same work twice in parallel.
* We encourage you to use the `PEP 8 coding style <https://www.python.org/dev/peps/pep-0008/>`_.
For linting, formatting and checking your code contributions
against our guidelines (e.g. we use `Black <https://github.com/psf/black>`_ as code style),
use `pre-commit <https://pre-commit.com/index.html>`_:
1. Installation: ``conda install -c conda-forge pre-commit`` or ``pip install pre-commit``
2. Usage:
* To automatically activate ``pre-commit`` on every ``git commit``: Run ``pre-commit install``
* To manually run it: ``pre-commit run --all``
Note that installing ``pre-commit`` locally is not strictly necessary. If you create a Pull Request, the pre-commit CI will be triggered automatically and take care of the checks.
For all code contributions we follow the four eyes principle (two person principle), i.e. all suggested code For all code contributions we follow the four eyes principle (two person principle), i.e. all suggested code
including our own are reviewed by a second person before they are incoporated into our repository. including our own are reviewed by a second person before they are incorporated into our repository.
If you are unfamiliar with pull requests, the GitHub help pages have a nice `guide <https://help.github.com/en/articles/about-pull-requests>`_. If you are unfamiliar with pull requests, the GitHub help pages have a nice `guide <https://help.github.com/en/articles/about-pull-requests>`_.

View File

@ -1,5 +1,5 @@
.. ..
SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors
SPDX-License-Identifier: CC-BY-4.0 SPDX-License-Identifier: CC-BY-4.0
@ -7,7 +7,13 @@
Cost Assumptions Cost Assumptions
################## ##################
The database of cost assumptions is stored in ``data/costs.csv``. The database of cost assumptions is retrieved from the repository
`PyPSA/technology-data <https://github.com/pypsa/technology-data>`_ and then
saved to ``resources/costs.csv``. Cost assumptions of previous PyPSA-Eur
versions can be restored by setting in the ``Snakefile``:
``COSTS="data/costs.csv"``.
The ``config.yaml`` provides options to choose a reference year (``costs: year:``) and to use a specific version of the repository (``costs: version:``).
It includes cost assumptions for all included technologies for specific It includes cost assumptions for all included technologies for specific
years from various sources, namely for years from various sources, namely for
@ -30,24 +36,12 @@ with a discount rate of :math:`r` over the economic lifetime :math:`n` using the
Based on the parameters above the ``marginal_cost`` and ``capital_cost`` of the system components are calculated. Based on the parameters above the ``marginal_cost`` and ``capital_cost`` of the system components are calculated.
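The truncated hunk above refers to the annualisation of investment costs, which is not shown in this excerpt. As a sketch, the standard annuity factor presumably applied here, with discount rate :math:`r` and economic lifetime :math:`n`, is

.. math::

    a = \frac{r}{1 - \frac{1}{(1+r)^n}}

so that, under this assumption, ``capital_cost`` corresponds to the annualised investment cost (plus fixed operation and maintenance costs).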
.. note::
Another great resource for cost assumptions is the `cost database from the Danish Energy Agency <https://ens.dk/en/our-services/projections-and-models/technology-data>`_.
Modifying Cost Assumptions Modifying Cost Assumptions
========================== ==========================
Some cost assumptions (e.g. marginal cost and capital cost) can be directly overwritten in the ``config.yaml`` (cf. Section :ref:`costs_cf` in :ref:`config`). Some cost assumptions (e.g. marginal cost and capital cost) can be directly overwritten in the ``config.yaml`` (cf. Section :ref:`costs_cf` in :ref:`config`).
To change cost assumptions in more detail, modify cost assumptions directly in ``data/costs.csv`` as this is not yet supported through the config file. To change cost assumptions in more detail, modify cost assumptions directly in ``resources/costs.csv`` as this is not yet supported through the config file.
You can also build multiple different cost databases. Make a renamed copy of ``data/costs.csv`` (e.g. ``data/costs-optimistic.csv``) and set the variable ``COSTS=data/costs-optimistic.csv`` in the ``Snakefile``. You can also build multiple different cost databases. Make a renamed copy of ``resources/costs.csv`` (e.g. ``data/costs-optimistic.csv``) and set the variable ``COSTS=data/costs-optimistic.csv`` in the ``Snakefile``.
Default Cost Assumptions
========================
.. csv-table::
:header-rows: 1
:widths: 10,3,5,4,6,8
:file: ../data/costs.csv

Binary file not shown.


View File

@ -1,5 +1,5 @@
.. ..
SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors
SPDX-License-Identifier: CC-BY-4.0 SPDX-License-Identifier: CC-BY-4.0
@ -22,10 +22,6 @@ PyPSA-Eur: An Open Optimisation Model of the European Transmission System
.. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.3520874.svg .. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.3520874.svg
:target: https://doi.org/10.5281/zenodo.3520874 :target: https://doi.org/10.5281/zenodo.3520874
.. image:: https://badges.gitter.im/PyPSA/community.svg
:target: https://gitter.im/PyPSA/community?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge
:alt: Chat on Gitter
.. image:: https://img.shields.io/badge/snakemake-≥5.0.0-brightgreen.svg?style=flat .. image:: https://img.shields.io/badge/snakemake-≥5.0.0-brightgreen.svg?style=flat
:target: https://snakemake.readthedocs.io :target: https://snakemake.readthedocs.io
:alt: Snakemake :alt: Snakemake
@ -41,7 +37,7 @@ It contains alternating current lines at and above 220 kV voltage level and all
The model is suitable both for operational studies and generation and transmission expansion planning studies. The continental scope and highly resolved spatial scale enable a proper description of the long-range smoothing effects for renewable power generation and their varying resource availability. The model is suitable both for operational studies and generation and transmission expansion planning studies. The continental scope and highly resolved spatial scale enable a proper description of the long-range smoothing effects for renewable power generation and their varying resource availability.
.. image:: img/base.png .. image:: img/elec.png
:width: 50% :width: 50%
:align: center :align: center
@ -199,7 +195,7 @@ The included ``.nc`` files are PyPSA network files which can be imported with Py
import pypsa import pypsa
filename = "elec_s_1024_ec.nc" # example filename = "elec_s_1024_ec.nc" # example
n = pypsa.Network(filename) n = pypsa.Network(filename)
Licence Licence

View File

@ -1,5 +1,5 @@
.. ..
SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors
SPDX-License-Identifier: CC-BY-4.0 SPDX-License-Identifier: CC-BY-4.0
@ -71,6 +71,7 @@ PyPSA is known to work with the free software
- `Ipopt <https://coin-or.github.io/Ipopt/INSTALL.html>`_ - `Ipopt <https://coin-or.github.io/Ipopt/INSTALL.html>`_
- `Cbc <https://projects.coin-or.org/Cbc#DownloadandInstall>`_ - `Cbc <https://projects.coin-or.org/Cbc#DownloadandInstall>`_
- `GLPK <https://www.gnu.org/software/glpk/>`_ (`WinGLKP <http://winglpk.sourceforge.net/>`_) - `GLPK <https://www.gnu.org/software/glpk/>`_ (`WinGLKP <http://winglpk.sourceforge.net/>`_)
- `HiGHS <https://highs.dev/>`_
and the non-free, commercial software (for some of which free academic licenses are available) and the non-free, commercial software (for some of which free academic licenses are available)
@ -102,6 +103,8 @@ It might be the case that you can only retrieve solutions by using a commercial
conda activate pypsa-eur conda activate pypsa-eur
conda install -c conda-forge ipopt glpk conda install -c conda-forge ipopt glpk
.. warning::
On Windows, new versions of ``ipopt`` have caused problems. Consider downgrading to version 3.11.1.
.. _defaultconfig: .. _defaultconfig:

View File

@ -1,5 +1,5 @@
.. ..
SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors
SPDX-License-Identifier: CC-BY-4.0 SPDX-License-Identifier: CC-BY-4.0

View File

@ -1,5 +1,5 @@
.. ..
SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors
SPDX-License-Identifier: CC-BY-4.0 SPDX-License-Identifier: CC-BY-4.0

View File

@ -1,4 +1,4 @@
REM SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors REM SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors
REM SPDX-License-Identifier: MIT REM SPDX-License-Identifier: MIT
@ECHO OFF @ECHO OFF

View File

@ -1,5 +1,5 @@
.. ..
SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors
SPDX-License-Identifier: CC-BY-4.0 SPDX-License-Identifier: CC-BY-4.0

View File

@ -1,5 +1,5 @@
.. ..
SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors
SPDX-License-Identifier: CC-BY-4.0 SPDX-License-Identifier: CC-BY-4.0

View File

@ -1,5 +1,5 @@
.. ..
SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors
SPDX-License-Identifier: CC-BY-4.0 SPDX-License-Identifier: CC-BY-4.0

View File

@ -1,5 +1,5 @@
.. ..
SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors
SPDX-License-Identifier: CC-BY-4.0 SPDX-License-Identifier: CC-BY-4.0

View File

@ -1,5 +1,5 @@
.. ..
SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors
SPDX-License-Identifier: CC-BY-4.0 SPDX-License-Identifier: CC-BY-4.0

View File

@ -1,5 +1,5 @@
.. ..
SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors
SPDX-License-Identifier: CC-BY-4.0 SPDX-License-Identifier: CC-BY-4.0

View File

@ -1,5 +1,5 @@
.. ..
SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors
SPDX-License-Identifier: CC-BY-4.0 SPDX-License-Identifier: CC-BY-4.0

View File

@ -1,5 +1,5 @@
.. ..
SPDX-FileCopyrightText: 2020-2021 The PyPSA-Eur Authors SPDX-FileCopyrightText: 2020-2022 The PyPSA-Eur Authors
SPDX-License-Identifier: CC-BY-4.0 SPDX-License-Identifier: CC-BY-4.0

View File

@ -1,5 +1,5 @@
.. ..
SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors
SPDX-License-Identifier: CC-BY-4.0 SPDX-License-Identifier: CC-BY-4.0

View File

@ -1,5 +1,5 @@
.. ..
SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors
SPDX-License-Identifier: CC-BY-4.0 SPDX-License-Identifier: CC-BY-4.0

View File

@ -1,5 +1,5 @@
.. ..
SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors
SPDX-License-Identifier: CC-BY-4.0 SPDX-License-Identifier: CC-BY-4.0

View File

@ -1,5 +1,5 @@
.. ..
SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors
SPDX-License-Identifier: CC-BY-4.0 SPDX-License-Identifier: CC-BY-4.0

View File

@ -1,5 +1,5 @@
.. ..
SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors
SPDX-License-Identifier: CC-BY-4.0 SPDX-License-Identifier: CC-BY-4.0

View File

@ -1,5 +1,5 @@
.. ..
SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors
SPDX-License-Identifier: CC-BY-4.0 SPDX-License-Identifier: CC-BY-4.0

View File

@ -1,5 +1,5 @@
.. ..
SPDX-FileCopyrightText: 2019-2021 The PyPSA-Eur Authors SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors
SPDX-License-Identifier: CC-BY-4.0 SPDX-License-Identifier: CC-BY-4.0
@ -7,6 +7,222 @@
Release Notes Release Notes
########################################## ##########################################
Upcoming Release
================
* Carriers of generators can now be excluded from aggregation in the clustering and simplification of the network.
* A bugfix in the reserve constraint increases demand-related reserve requirements.
PyPSA-Eur 0.6.1 (20th September 2022)
=====================================
* Individual commits are now tested against pre-commit hooks. This includes
black style formatting, sorting of package imports, Snakefile formatting and
others. Installation instructions for pre-commit can be found `here
<https://pre-commit.com/>`_.
* Pre-commit CI is now part of the repository's CI.
* The software now supports running the workflow with different settings within
the same directory. A new config section ``run`` was created that specifies
under which scenario ``name`` the created resources, networks and results
should be stored. If ``name`` is not specified, the workflow uses the default
paths. The entry ``shared_cutouts`` specifies whether the run should use
cutouts from the default root directory or use run-specific cutouts.
* The heuristic distribution of today's renewable capacity installations is now
enabled by default.
* The marginal costs of conventional generators now take the plant-specific
efficiency into account where available.
PyPSA-Eur 0.6.0 (10th September 2022)
=====================================
* Functionality to consider shipping routes when calculating the available area
for offshore technologies was added. Data for the shipping density comes from
the `Global Shipping Traffic Density dataset
<https://datacatalog.worldbank.org/search/dataset/0037580/Global-Shipping-Traffic-Density>`_.
* When transforming all transmission lines to a unified voltage level of 380kV,
the workflow now preserves the transmission capacity rather than electrical
impedance and reactance.
* Memory resources are now specified for all rules.
* Filtering of power plant data was adjusted to new versions of
``powerplantmatching``.
* The resolution of land exclusion calculation is now a configurable option. See
setting ``excluder_resolution``.
PyPSA-Eur 0.5.0 (27th July 2022)
=====================================
**New Features**
* New network topology extracted from the ENTSO-E interactive map.
* Added existing renewable capacities for all countries based on IRENA
statistics (IRENASTAT) using new ``powerplantmatching`` version:
* The corresponding ``config`` entries changed, cf. ``config.default.yaml``:
* old: ``estimate_renewable_capacities_from_capacity_stats``
* new: ``estimate_renewable_capacities``
* The estimation is endabled by setting the subkey ``enable`` to ``True``.
* The reference year for capacities can be configured (default:
``2020``)
* The list of renewables provided by the OPSD database can be used as a basis,
using the tag ``from_opsd: True``. This adds the renewables from the
database and fills up the missing capacities with the heuristic
distribution.
* Uniform expansion limit of renewable build-up based on existing capacities
can be configured using the ``expansion_limit`` option (default: ``false``;
limited to determined renewable potentials)
* Distribution of country-level capacities proportional to maximum annual
energy yield for each bus region
* The config key ``renewable_capacities_from_OPSD`` is deprecated and was moved
under the section ``estimate_renewable_capacities``. To enable it, set
``from_opsd`` to ``True``.
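Taken together, the capacity estimation settings described above could be configured roughly as sketched below; the exact nesting and defaults should be checked against ``config.default.yaml``:

.. code:: yaml

    electricity:
      estimate_renewable_capacities:
        enable: true
        from_opsd: true          # replaces the deprecated renewable_capacities_from_OPSD
        year: 2020               # reference year of the IRENASTAT statistics
        expansion_limit: false   # or e.g. 1.3 to cap expansion at 130% of existing capacity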
* Add operational reserve margin constraint analogous to `GenX implementation
<https://genxproject.github.io/GenX/dev/core/#Reserves>`_. Can be activated
with config setting ``electricity: operational_reserve:``.
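A sketch of this config section, assuming GenX-style parameters (shares of load and of variable renewable feed-in plus a fixed contingency); the sub-keys and values are illustrative:

.. code:: yaml

    electricity:
      operational_reserve:
        activate: true
        epsilon_load: 0.02   # reserve as share of total load
        epsilon_vres: 0.02   # reserve as share of variable renewable feed-in
        contingency: 4000    # additional fixed reserve requirement in MW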
* Implement country-specific Energy Availability Factors (EAFs) for nuclear
power plants based on IAEA 2018-2020 reported country averages. These are
specified in ``data/nuclear_p_max_pu.csv`` and translate to static ``p_max_pu``
values.
* Add function to add global constraint on use of gas in :mod:`prepare_network`.
This can be activated by including the keyword ``CH4L`` in the ``{opts}``
wildcard which enforces the limit set in ``electricity: gaslimit:`` given in
MWh thermal. Alternatively, it is possible to append a number in the ``{opts}``
wildcard, e.g. ``CH4L200`` which limits the gas use to 200 TWh thermal.
* Add option to alter marginal costs of a carrier through ``{opts}`` wildcard:
``<carrier>+m<factor>``, e.g. ``gas+m2.5``, will multiply the default marginal
cost for gas by factor 2.5.
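Both wildcard options can be combined in the ``scenario`` section; the composed string below is only an example:

.. code:: yaml

    scenario:
      opts: [Co2L-CH4L200-gas+m2.5-24H]

    electricity:
      gaslimit: 2.0e+8   # in MWh thermal (here 200 TWh), used when a plain "CH4L" is given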
* Hierarchical clustering was introduced. Distance metric is calculated from
renewable potentials on hourly (feature entry ends with ``-time``) or annual
(feature entry in config ends with ``-cap``) values.
* Greedy modularity clustering was introduced. The distance metric is based on electrical distance, taking into account the impedance of all transmission lines of the network.
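A hedged sketch of how the clustering algorithm and distance feature might be selected; the key names follow the description above and should be verified against the default config:

.. code:: yaml

    clustering:
      cluster_network:
        algorithm: hac               # hierarchical agglomerative clustering instead of k-means
        feature: solar+onwind-time   # hourly renewable potentials as distance metric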
* Techno-economic parameters of technologies (e.g. costs and efficiencies) will
now be retrieved from a separate repository `PyPSA/technology-data
<https://github.com/pypsa/technology-data>`_ that collects assumptions from a
variety of sources. It is activated by default with ``enable:
retrieve_cost_data: true`` and controlled with ``costs: year:`` and ``costs:
version:``. The location of this data changed from ``data/costs.csv`` to
``resources/costs.csv`` [`#184
<https://github.com/PyPSA/pypsa-eur/pull/184>`_].
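The cost data retrieval is steered in two places of the configuration; the values below are placeholders:

.. code:: yaml

    enable:
      retrieve_cost_data: true

    costs:
      year: 2030        # planning horizon for which cost assumptions are taken
      version: v0.4.0   # release tag of the PyPSA/technology-data repository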
* A new section ``conventional`` was added to the config file. This section
contains configurations for conventional carriers.
* Add configuration option to implement arbitrary generator attributes for
conventional generation technologies.
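An illustrative entry in the new ``conventional`` section, here assigning the nuclear availability factors mentioned above (shown as an assumed example):

.. code:: yaml

    conventional:
      nuclear:
        p_max_pu: "data/nuclear_p_max_pu.csv"   # static value or path to per-plant values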
* Add option to set CO2 emission prices through ``{opts}`` wildcard: ``Ep<number>``,
e.g. ``Ep180``, will set the EUR/tCO2 price.
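For example, a CO2 price of 180 EUR/tCO2 in a 24-hourly resolved run would be requested with:

.. code:: yaml

    scenario:
      opts: [Ep180-24H]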
**Changes**
* Add an efficiency factor of 88.55% to offshore wind capacity factors as a
proxy for wake losses. More rigorous modelling is `planned
<https://github.com/PyPSA/pypsa-eur/issues/153>`_ [`#277
<https://github.com/PyPSA/pypsa-eur/pull/277>`_].
* Following discussion in `#285
<https://github.com/PyPSA/pypsa-eur/issues/285>`_ we have disabled the
correction factor for solar PV capacity factors by default while satellite
data is used. A correction factor of 0.854337 is recommended if reanalysis
data like ERA5 is used.
* The default deployment density of AC- and DC-connected offshore wind capacity
is reduced from 3 MW/sqkm to a more conservative estimate of 2 MW/sqkm [`#280
<https://github.com/PyPSA/pypsa-eur/pull/280>`_].
* The inclusion of renewable carriers is now specified in the config entry
``renewable_carriers``. Before this was done by commenting/uncommenting
sub-sections in the ``renewable`` config section.
* Now, all carriers that should be extendable have to be listed in the config
entry ``extendable_carriers``. Before, renewable carriers were always set to
be extendable. For backwards compatibility, the workflow still considers the
carriers listed under the ``renewable`` key. In the future, all of them will
have to be listed under ``extendable_carriers``.
* It is now possible to set conventional power plants as extendable by adding
them to the list of extendable ``Generator`` carriers in the config.
* Listing conventional carriers in ``extendable_carriers`` but not in
``conventional_carriers`` sets the corresponding conventional power plants as
extendable without a lower capacity bound at today's capacities.
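Putting these carrier-related settings together, an illustrative configuration could read (carrier lists are exemplary):

.. code:: yaml

    electricity:
      renewable_carriers: [solar, onwind, offwind-ac, offwind-dc, hydro]
      conventional_carriers: [nuclear, coal, CCGT, OCGT]
      extendable_carriers:
        Generator: [solar, onwind, OCGT]   # conventional carriers listed here are also expandable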
* Now, conventional carriers have an assigned capital cost by default.
* The ``build_year`` and ``lifetime`` columns are now defined for conventional
power plants.
* Use updated SARAH-2 and ERA5 cutouts with slightly wider scope to the east and
additional variables.
* Resource definitions for memory usage now follow `Snakemake standard resource
definition
<https://snakemake.readthedocs.io/en/stable/snakefiles/rules.html#standard-resources>`_
``mem_mb`` rather than ``mem``.
* The powerplants that have been shut down by 2021 are filtered out.
* Updated historical `EIA hydro generation data <https://www.eia.gov/international/data/world>`_.
* Network building is made deterministic by supplying a fixed random state to
network clustering routines.
* Clustering strategies for generator and bus attributes can now be specified directly in the ``config.yaml``.
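A hedged sketch of such per-attribute strategies; the nesting under ``clustering: aggregation_strategies`` is an assumption:

.. code:: yaml

    clustering:
      aggregation_strategies:
        generators:
          p_nom_max: sum     # aggregate capacity limits by summing
          efficiency: mean   # average efficiencies across aggregated plants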
* Iterative solving with impedance updates is skipped if there are no expandable
lines.
* The unused argument ``simple_hvdc_costs`` in :mod:`add_electricity` was
removed.
* Switch from Germany to Belgium for continuous integration and tutorial to save
resources.
* It is now possible to skip the progressbar for land eligibility calculations for additional speedup.
**Bugs and Compatibility**
* Fix crs bug. Change crs 4236 to 4326.
* ``powerplantmatching>=0.5.1`` is now required for ``IRENASTATS``.
* Update rasterio version to correctly calculate exclusion raster.
* It is now possible to run the workflow with only landlocked countries.
* Bugfixes for manual load adjustments across years.
* Enable parallel computing with new dask version.
* Restore compatibility of ``mock_snakemake`` with latest Snakemake versions.
* Script ``build_bus_regions``: move voronoi partition from vresutils to script.
* Script ``add_electricity``: remove ``vresutils.costdata.annuity`` dependency.
* Fix the plot_network snakemake rule.
* Compatibility with pandas 1.4. Address deprecations.
* Restore Windows compatibility by using ``shutil.move`` rather than ``mv``.
Synchronisation Release - Ukraine and Moldova (17th March 2022) Synchronisation Release - Ukraine and Moldova (17th March 2022)
=============================================================== ===============================================================
@ -42,59 +258,6 @@ This release is not on the ``master`` branch. It can be used with
git checkout synchronisation-release git checkout synchronisation-release
On March 16, 2022, the transmission networks of Ukraine and Moldova have
successfully been `synchronised with the continental European grid <https://www.entsoe.eu/news/2022/03/16/continental-europe-successful-synchronisation-with-ukraine-and-moldova-power-systems/>`_. We have taken
this as an opportunity to add the power systems of Ukraine and Moldova to
PyPSA-Eur. This includes:
.. image:: img/synchronisation.png
:width: 500
* the transmission network topology from the `ENTSO-E interactive map <https://www.entsoe.eu/data/map/>`_.
* existing power plants (incl. nuclear, coal, gas and hydro) from the `powerplantmatching <https://github.com/fresna/powerplantmatching>`_ tool
* country-level load time series from ENTSO-E through the `OPSD platform <https://data.open-power-system-data.org/time_series/2020-10-06>`_, which are then distributed heuristically to substations by GDP and population density.
* wind and solar profiles based on ERA5 and SARAH-2 weather data
* hydro profiles based on historical `EIA generation data <https://www.eia.gov/international/data/world>`_
* a simplified calculation of wind and solar potentials based on the `Copernicus Land Cover dataset <https://land.copernicus.eu/global/products/lc>`_.
* electrical characteristics of 750 kV transmission lines
The Crimean power system is currently disconnected from the main Ukrainian grid and, hence, not included.
This release is not on the ``master`` branch. It can be used with
.. code-block:: bash
git clone https://github.com/pypsa/pypsa-eur
git checkout synchronisation-release
Upcoming Regular Release
========================
* Add an efficiency factor of 88.55% to offshore wind capacity factors
as a proxy for wake losses. More rigorous modelling is `planned <https://github.com/PyPSA/pypsa-eur/issues/153>`_
[`#277 <https://github.com/PyPSA/pypsa-eur/pull/277>`_].
* The default deployment density of AC- and DC-connected offshore wind capacity is reduced from 3 MW/sqkm
to a more conservative estimate of 2 MW/sqkm [`#280 <https://github.com/PyPSA/pypsa-eur/pull/280>`_].
* Following discussion in `#285 <https://github.com/PyPSA/pypsa-eur/issues/285>`_ we have disabled the
correction factor for solar PV capacity factors by default while satellite data is used.
A correction factor of 0.854337 is recommended if reanalysis data like ERA5 is used.
* Resource definitions for memory usage now follow `Snakemake standard resource definition <https://snakemake.readthedocs.io/en/stable/snakefiles/rules.html#standard-resources>`_ ``mem_mb`` rather than ``mem``.
* Network building is made deterministic by supplying a fixed random state to network clustering routines.
* New network topology extracted from the ENTSO-E interactive map.
PyPSA-Eur 0.4.0 (22nd September 2021) PyPSA-Eur 0.4.0 (22nd September 2021)
===================================== =====================================
@ -136,7 +299,7 @@ PyPSA-Eur 0.4.0 (22nd September 2021)
[`#261 <https://github.com/PyPSA/pypsa-eur/pull/261>`_]. [`#261 <https://github.com/PyPSA/pypsa-eur/pull/261>`_].
* The tutorial cutout was renamed from ``cutouts/europe-2013-era5.nc`` to * The tutorial cutout was renamed from ``cutouts/europe-2013-era5.nc`` to
``cutouts/europe-2013-era5-tutorial.nc`` to accomodate tutorial and productive ``cutouts/be-03-2013-era5.nc`` to accommodate tutorial and productive
cutouts side-by-side. cutouts side-by-side.
* The flag ``keep_all_available_areas`` in the configuration for renewable * The flag ``keep_all_available_areas`` in the configuration for renewable
@ -210,7 +373,6 @@ PyPSA-Eur 0.4.0 (22th September 2021)
PyPSA network solving functions were not told about the solver logfile specified PyPSA network solving functions were not told about the solver logfile specified
in the Snakemake file [`#247 <https://github.com/PyPSA/pypsa-eur/pull/247>`_] in the Snakemake file [`#247 <https://github.com/PyPSA/pypsa-eur/pull/247>`_]
PyPSA-Eur 0.3.0 (7th December 2020) PyPSA-Eur 0.3.0 (7th December 2020)
=================================== ===================================
@ -330,7 +492,7 @@ PyPSA-Eur 0.2.0 (8th June 2020)
* Removed the ``id`` column for custom power plants in ``data/custom_powerplants.csv`` to avoid custom power plants with conflicting ids getting attached to the wrong bus [`#131 <https://github.com/PyPSA/pypsa-eur/pull/131>`_]. * Removed the ``id`` column for custom power plants in ``data/custom_powerplants.csv`` to avoid custom power plants with conflicting ids getting attached to the wrong bus [`#131 <https://github.com/PyPSA/pypsa-eur/pull/131>`_].
* Add option ``renewables: {carrier}: keep_all_available_areas:`` to use all availabe weather cells for renewable profile and potential generation. The default ignores weather cells where only less than 1 MW can be installed [`#150 <https://github.com/PyPSA/pypsa-eur/pull/150>`_]. * Add option ``renewables: {carrier}: keep_all_available_areas:`` to use all available weather cells for renewable profile and potential generation. The default ignores weather cells where only less than 1 MW can be installed [`#150 <https://github.com/PyPSA/pypsa-eur/pull/150>`_].
* Added a function ``_helpers.load_network()`` which loads a network with overridden components specified in ``snakemake.config['override_components']`` [`#128 <https://github.com/PyPSA/pypsa-eur/pull/128>`_]. * Added a function ``_helpers.load_network()`` which loads a network with overridden components specified in ``snakemake.config['override_components']`` [`#128 <https://github.com/PyPSA/pypsa-eur/pull/128>`_].
@ -386,7 +548,7 @@ Release Process
``conda env export -n pypsa-eur -f envs/environment.fixed.yaml --no-builds`` ``conda env export -n pypsa-eur -f envs/environment.fixed.yaml --no-builds``
from an up-to-date `pypsa-eur` environment. from an up-to-date `pypsa-eur` environment.
* Update version number in ``doc/conf.py`` and ``*config.*.yaml``. * Update version number in ``doc/conf.py``, ``CITATION.cff`` and ``*config.*.yaml``.
* Open, review and merge pull request for branch ``release-v0.x.x``. * Open, review and merge pull request for branch ``release-v0.x.x``.
Make sure to close issues and PRs or the release milestone with it (e.g. closes #X). Make sure to close issues and PRs or the release milestone with it (e.g. closes #X).

View File

@ -1,4 +1,4 @@
# SPDX-FileCopyrightText: : 2019-2021 The PyPSA-Eur Authors # SPDX-FileCopyrightText: : 2019-2022 The PyPSA-Eur Authors
# #
# SPDX-License-Identifier: CC0-1.0 # SPDX-License-Identifier: CC0-1.0

View File

@ -1,7 +1,7 @@
.. ..
SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors
SPDX-License-Identifier: CC-BY-4.0 SPDX-License-Identifier: CC-BY-4.0

View File

@ -1,5 +1,5 @@
.. ..
SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors
SPDX-License-Identifier: CC-BY-4.0 SPDX-License-Identifier: CC-BY-4.0

View File

@ -1,5 +1,5 @@
.. ..
SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors
SPDX-License-Identifier: CC-BY-4.0 SPDX-License-Identifier: CC-BY-4.0

View File

@ -1,5 +1,5 @@
.. ..
SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors
SPDX-License-Identifier: CC-BY-4.0 SPDX-License-Identifier: CC-BY-4.0

View File

@ -1,5 +1,5 @@
.. ..
SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors
SPDX-License-Identifier: CC-BY-4.0 SPDX-License-Identifier: CC-BY-4.0

View File

@ -1,5 +1,5 @@
.. ..
SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors
SPDX-License-Identifier: CC-BY-4.0 SPDX-License-Identifier: CC-BY-4.0

View File

@ -1,5 +1,5 @@
.. ..
SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors
SPDX-License-Identifier: CC-BY-4.0 SPDX-License-Identifier: CC-BY-4.0

View File

@ -1,5 +1,5 @@
.. ..
SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors
SPDX-License-Identifier: CC-BY-4.0 SPDX-License-Identifier: CC-BY-4.0

View File

@ -1,5 +1,5 @@
.. ..
SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors
SPDX-License-Identifier: CC-BY-4.0 SPDX-License-Identifier: CC-BY-4.0
@ -43,51 +43,63 @@ For more information on the data dependencies of PyPSA-Eur, continue reading :re
How to customise PyPSA-Eur? How to customise PyPSA-Eur?
=========================== ===========================
The model can be adapted to only include selected countries (e.g. Germany) instead of all European countries to limit the spatial scope. The model can be adapted to only include selected countries (e.g. Belgium) instead of all European countries to limit the spatial scope.
.. literalinclude:: ../config.tutorial.yaml .. literalinclude:: ../config.tutorial.yaml
:language: yaml :language: yaml
:lines: 20 :start-at: countries:
:end-before: snapshots:
Likewise, the example's temporal scope can be restricted (e.g. to a single month). Likewise, the example's temporal scope can be restricted (e.g. to a single month).
.. literalinclude:: ../config.tutorial.yaml .. literalinclude:: ../config.tutorial.yaml
:language: yaml :language: yaml
:lines: 24-27 :start-at: snapshots:
:end-before: enable:
It is also possible to allow less or more carbon-dioxide emissions. Here, we limit the emissions of Germany to 100 Megatonnes per year. It is also possible to allow less or more carbon-dioxide emissions. Here, we limit the emissions of Germany to 100 Megatonnes per year.
.. literalinclude:: ../config.tutorial.yaml .. literalinclude:: ../config.tutorial.yaml
:language: yaml :language: yaml
:lines: 38,40 :start-at: electricity:
:end-before: extendable_carriers:
PyPSA-Eur also includes a database of existing conventional powerplants. PyPSA-Eur also includes a database of existing conventional powerplants.
We can select which types of powerplants we would like to be included with fixed capacities: We can select which types of powerplants we would like to be included:
.. literalinclude:: ../config.tutorial.yaml .. literalinclude:: ../config.tutorial.yaml
:language: yaml :language: yaml
:lines: 38,54 :start-at: extendable_carriers:
:end-before: max_hours:
To accurately model the temporal and spatial availability of renewables such as wind and solar energy, we rely on historical weather data. To accurately model the temporal and spatial availability of renewables such as wind and solar energy, we rely on historical weather data.
It is advisable to adapt the required range of coordinates to the selection of countries. It is advisable to adapt the required range of coordinates to the selection of countries.
.. literalinclude:: ../config.tutorial.yaml .. literalinclude:: ../config.tutorial.yaml
:language: yaml :language: yaml
:lines: 56-63 :start-at: atlite:
:end-before: renewable:
We can also decide which weather data source should be used to calculate potentials and capacity factor time-series for each carrier. We can also decide which weather data source should be used to calculate potentials and capacity factor time-series for each carrier.
For example, we may want to use the ERA-5 dataset for solar and not the default SARAH-2 dataset. For example, we may want to use the ERA-5 dataset for solar and not the default SARAH-2 dataset.
.. literalinclude:: ../config.tutorial.yaml .. literalinclude:: ../config.tutorial.yaml
:language: yaml :language: yaml
:lines: 65,108-109 :start-at: be-03-2013-era5:
:end-at: module:
.. literalinclude:: ../config.tutorial.yaml
:language: yaml
:start-at: solar:
:end-at: cutout:
Finally, it is possible to pick a solver. For instance, this tutorial uses the open-source solvers CBC and Ipopt and does not rely Finally, it is possible to pick a solver. For instance, this tutorial uses the open-source solvers CBC and Ipopt and does not rely
on the commercial solvers Gurobi or CPLEX (for which free academic licenses are available). on the commercial solvers Gurobi or CPLEX (for which free academic licenses are available).
.. literalinclude:: ../config.tutorial.yaml .. literalinclude:: ../config.tutorial.yaml
:language: yaml :language: yaml
:lines: 171,181-182 :start-at: solver:
:end-before: plotting:
.. note:: .. note::
@ -116,21 +128,12 @@ clustered down to 6 buses and every 24 hours aggregated to one snapshot. The com
orders ``snakemake`` to run the script ``solve_network`` that produces the solved network and stores it in ``.../pypsa-eur/results/networks`` with the name ``elec_s_6_ec_lcopt_Co2L-24H.nc``: orders ``snakemake`` to run the script ``solve_network`` that produces the solved network and stores it in ``.../pypsa-eur/results/networks`` with the name ``elec_s_6_ec_lcopt_Co2L-24H.nc``:
.. code:: .. literalinclude:: ../Snakefile
:start-at: rule solve_network:
rule solve_network: :end-before: rule solve_operations_network:
input: "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"
output: "results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"
[...]
script: "scripts/solve_network.py"
.. until https://github.com/snakemake/snakemake/issues/46 closed .. until https://github.com/snakemake/snakemake/issues/46 closed
.. warning::
On Windows the previous command may currently cause a ``MissingRuleException`` due to problems with output files in subfolders.
This is an `open issue <https://github.com/snakemake/snakemake/issues/46>`_ at `snakemake <https://snakemake.readthedocs.io/>`_.
Windows users should add the option ``--keep-target-files`` to the command or instead run ``snakemake -j 1 solve_all_networks``.
This triggers a workflow of multiple preceding jobs that depend on each rule's inputs and outputs: This triggers a workflow of multiple preceding jobs that depend on each rule's inputs and outputs:
.. graphviz:: .. graphviz::
@ -218,7 +221,7 @@ A job (here ``simplify_network``) will display its attributes and normally some
[<DATETIME>] [<DATETIME>]
rule simplify_network: rule simplify_network:
input: networks/elec.nc, data/costs.csv, resources/regions_onshore.geojson, resources/regions_offshore.geojson input: networks/elec.nc, resources/costs.csv, resources/regions_onshore.geojson, resources/regions_offshore.geojson
output: networks/elec_s.nc, resources/regions_onshore_elec_s.geojson, resources/regions_offshore_elec_s.geojson, resources/clustermaps_elec_s.h5 output: networks/elec_s.nc, resources/regions_onshore_elec_s.geojson, resources/regions_offshore_elec_s.geojson, resources/clustermaps_elec_s.h5
jobid: 3 jobid: 3
benchmark: benchmarks/simplify_network/elec_s benchmark: benchmarks/simplify_network/elec_s
@ -247,7 +250,7 @@ Once the whole worktree is finished, it should show state so in the terminal:
You will notice that many intermediate stages are saved, namely the outputs of each individual ``snakemake`` rule. You will notice that many intermediate stages are saved, namely the outputs of each individual ``snakemake`` rule.
You can produce any output file occuring in the ``Snakefile`` by running You can produce any output file occurring in the ``Snakefile`` by running
.. code:: bash .. code:: bash
@ -271,9 +274,8 @@ the wildcards given in ``scenario`` in the configuration file ``config.yaml`` ar
.. literalinclude:: ../config.tutorial.yaml .. literalinclude:: ../config.tutorial.yaml
:language: yaml :language: yaml
:lines: 14-18 :start-at: scenario:
:end-before: countries:
In this example we would not only solve a 6-node model of Germany but also a 2-node model.
How to analyse solved networks? How to analyse solved networks?
=============================== ===============================
@ -286,4 +288,4 @@ The solved networks can be analysed just like any other PyPSA network (e.g. in J
network = pypsa.Network("results/networks/elec_s_6_ec_lcopt_Co2L-24H.nc") network = pypsa.Network("results/networks/elec_s_6_ec_lcopt_Co2L-24H.nc")
For inspiration, read the `examples section in the PyPSA documentation <https://pypsa.readthedocs.io/en/latest/examples.html>`_. For inspiration, read the `examples section in the PyPSA documentation <https://pypsa.readthedocs.io/en/latest/examples-basic.html>`_.

View File

@ -1,5 +1,5 @@
.. ..
SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors
SPDX-License-Identifier: CC-BY-4.0 SPDX-License-Identifier: CC-BY-4.0
@ -123,7 +123,7 @@ These cutouts will be stored in a folder specified by ``{cutout}``.
The ``{technology}`` wildcard The ``{technology}`` wildcard
============================= =============================
The ``{technology}`` wildcard specifies for which renewable energy technology to produce availablity time The ``{technology}`` wildcard specifies for which renewable energy technology to produce availability time
series and potentials using the rule :mod:`build_renewable_profiles`. series and potentials using the rule :mod:`build_renewable_profiles`.
It can take the values ``onwind``, ``offwind-ac``, ``offwind-dc``, and ``solar`` but **not** ``hydro`` It can take the values ``onwind``, ``offwind-ac``, ``offwind-dc``, and ``solar`` but **not** ``hydro``
(since hydroelectric plant profiles are created by a different rule). (since hydroelectric plant profiles are created by a different rule).
@ -155,4 +155,5 @@ formats depends on the used backend. To query the supported file types on your s
.. code:: python .. code:: python
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
plt.gcf().canvas.get_supported_filetypes() plt.gcf().canvas.get_supported_filetypes()

View File

@ -1,311 +1,430 @@
# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors # SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
# #
# SPDX-License-Identifier: CC0-1.0 # SPDX-License-Identifier: CC0-1.0
name: pypsa-eur name: pypsa-eur
channels: channels:
- bioconda - bioconda
- conda-forge - http://conda.anaconda.org/gurobi
- defaults - conda-forge
- defaults
dependencies: dependencies:
- _libgcc_mutex=0.1 - _libgcc_mutex=0.1
- _openmp_mutex=4.5 - _openmp_mutex=4.5
- affine=2.3.0 - abseil-cpp=20210324.2
- alsa-lib=1.2.3 - affine=2.3.1
- amply=0.1.4 - alsa-lib=1.2.3.2
- appdirs=1.4.4 - altair=4.2.0
- atlite=0.2.5 - ampl-mp=3.1.0
- attrs=21.2.0 - amply=0.1.5
- backcall=0.2.0 - anyio=3.6.1
- backports=1.0 - appdirs=1.4.4
- backports.functools_lru_cache=1.6.4 - argon2-cffi=21.3.0
- beautifulsoup4=4.10.0 - argon2-cffi-bindings=21.2.0
- blosc=1.21.0 - arrow-cpp=8.0.0
- bokeh=2.3.3 - asttokens=2.0.5
- boost-cpp=1.74.0 - atlite=0.2.9
- bottleneck=1.3.2 - attrs=21.4.0
- brotlipy=0.7.0 - aws-c-cal=0.5.11
- bzip2=1.0.8 - aws-c-common=0.6.2
- c-ares=1.17.2 - aws-c-event-stream=0.2.7
- ca-certificates=2021.5.30 - aws-c-io=0.10.5
- cairo=1.16.0 - aws-checksums=0.1.11
- cartopy=0.19.0.post1 - aws-sdk-cpp=1.8.186
- cdsapi=0.5.1 - babel=2.10.3
- certifi=2021.5.30 - backcall=0.2.0
- cffi=1.14.6 - backports=1.0
- cfitsio=3.470 - backports.functools_lru_cache=1.6.4
- cftime=1.5.0 - beautifulsoup4=4.11.1
- chardet=4.0.0 - bleach=5.0.1
- charset-normalizer=2.0.0 - blinker=1.4
- click=7.1.2 - blosc=1.21.1
- click-plugins=1.1.1 - bokeh=2.4.3
- cligj=0.7.2 - boost-cpp=1.74.0
- cloudpickle=2.0.0 - bottleneck=1.3.5
- coincbc=2.10.5 - branca=0.5.0
- colorama=0.4.4 - brotli=1.0.9
- conda=4.10.3 - brotli-bin=1.0.9
- conda-package-handling=1.7.3 - brotlipy=0.7.0
- configargparse=1.5.2 - bzip2=1.0.8
- connection_pool=0.0.3 - c-ares=1.18.1
- country_converter=0.7.3 - ca-certificates=2022.6.15.1
- cryptography=3.4.7 - cachetools=5.0.0
- curl=7.79.0 - cairo=1.16.0
- cycler=0.10.0 - cartopy=0.20.1
- cytoolz=0.11.0 - cdsapi=0.5.1
- dask=2021.3.1 - certifi=2022.6.15.1
- dask-core=2021.3.1 - cffi=1.15.1
- datrie=0.8.2 - cfitsio=4.0.0
- dbus=1.13.6 - cftime=1.6.1
- decorator=4.4.2 - charset-normalizer=2.1.0
- deprecation=2.1.0 - click=8.0.4
- descartes=1.1.0 - click-plugins=1.1.1
- distributed=2021.4.1 - cligj=0.7.2
- distro=1.5.0 - cloudpickle=2.1.0
- docutils=0.17.1 - coin-or-cbc=2.10.8
- entsoe-py=0.3.7 - coin-or-cgl=0.60.6
- et_xmlfile=1.0.1 - coin-or-clp=1.17.7
- expat=2.4.1 - coin-or-osi=0.108.7
- filelock=3.0.12 - coin-or-utils=2.11.6
- fiona=1.8.18 - coincbc=2.10.8
- fontconfig=2.13.1 - colorama=0.4.5
- freetype=2.10.4 - colorcet=3.0.0
- freexl=1.0.6 - commonmark=0.9.1
- fsspec=2021.8.1 - configargparse=1.5.3
- gdal=3.2.1 - connection_pool=0.0.3
- geographiclib=1.52 - country_converter=0.7.4
- geopandas=0.9.0 - cryptography=37.0.4
- geopandas-base=0.9.0 - curl=7.83.1
- geopy=2.2.0 - cycler=0.11.0
- geos=3.9.1 - cytoolz=0.12.0
- geotiff=1.6.0 - dask=2022.7.0
- gettext=0.19.8.1 - dask-core=2022.7.0
- giflib=5.2.1 - dataclasses=0.8
- gitdb=4.0.7 - datrie=0.8.2
- gitpython=3.1.23 - dbus=1.13.6
- glib=2.68.4 - debugpy=1.6.0
- glib-tools=2.68.4 - decorator=5.1.1
- graphite2=1.3.13 - defusedxml=0.7.1
- gst-plugins-base=1.18.5 - deprecation=2.1.0
- gstreamer=1.18.5 - descartes=1.1.0
- harfbuzz=2.9.1 - distributed=2022.7.0
- hdf4=4.2.15 - distro=1.6.0
- hdf5=1.10.6 - docutils=0.19
- heapdict=1.0.1 - dpath=2.0.6
- icu=68.1 - entrypoints=0.4
- idna=3.1 - entsoe-py=0.5.4
- importlib-metadata=4.8.1 - et_xmlfile=1.0.1
- iniconfig=1.1.1 - executing=0.8.3
- ipython=7.27.0 - expat=2.4.8
- ipython_genutils=0.2.0 - filelock=3.7.1
- jdcal=1.4.1 - fiona=1.8.20
- jedi=0.18.0 - flit-core=3.7.1
- jinja2=3.0.1 - folium=0.12.1.post1
- joblib=1.0.1 - font-ttf-dejavu-sans-mono=2.37
- jpeg=9d - font-ttf-inconsolata=3.000
- json-c=0.15 - font-ttf-source-code-pro=2.038
- jsonschema=3.2.0 - font-ttf-ubuntu=0.83
- jupyter_core=4.8.1 - fontconfig=2.14.0
- kealib=1.4.14 - fonts-conda-ecosystem=1
- kiwisolver=1.3.2 - fonts-conda-forge=1
- krb5=1.19.2 - fonttools=4.34.4
- lcms2=2.12 - freetype=2.10.4
- ld_impl_linux-64=2.36.1 - freexl=1.0.6
- libarchive=3.5.1 - fsspec=2022.5.0
- libblas=3.9.0 - future=0.18.2
- libcblas=3.9.0 - gdal=3.3.3
- libclang=11.1.0 - geographiclib=1.52
- libcurl=7.79.0 - geojson-rewind=1.0.2
- libdap4=3.20.6 - geopandas=0.11.0
- libedit=3.1.20191231 - geopandas-base=0.11.0
- libev=4.33 - geopy=2.2.0
- libevent=2.1.10 - geos=3.10.0
- libffi=3.4.2 - geotiff=1.7.0
- libgcc-ng=11.2.0 - gettext=0.19.8.1
- libgdal=3.2.1 - gflags=2.2.2
- libgfortran-ng=11.2.0 - giflib=5.2.1
- libgfortran5=11.2.0 - gitdb=4.0.9
- libglib=2.68.4 - gitpython=3.1.27
- libgomp=11.2.0 - glog=0.6.0
- libiconv=1.16 - gmp=6.2.1
- libkml=1.3.0 - graphite2=1.3.13
- liblapack=3.9.0 - grpc-cpp=1.45.2
- libllvm11=11.1.0 - gst-plugins-base=1.18.5
- libnetcdf=4.7.4 - gstreamer=1.18.5
- libnghttp2=1.43.0 - harfbuzz=2.9.1
- libogg=1.3.4 - hdf4=4.2.15
- libopenblas=0.3.17 - hdf5=1.12.1
- libopus=1.3.1 - heapdict=1.0.1
- libpng=1.6.37 - icu=68.2
- libpq=13.3 - idna=3.3
- librttopo=1.1.0 - importlib-metadata=4.11.4
- libsolv=0.7.19 - importlib_metadata=4.11.4
- libspatialindex=1.9.3 - importlib_resources=5.8.0
- libspatialite=5.0.1 - iniconfig=1.1.1
- libssh2=1.10.0 - ipykernel=6.15.1
- libstdcxx-ng=11.2.0 - ipython=8.4.0
- libtiff=4.2.0 - ipython_genutils=0.2.0
- libuuid=2.32.1 - ipywidgets=7.7.1
- libvorbis=1.3.7 - jedi=0.18.1
- libwebp-base=1.2.1 - jinja2=3.1.2
- libxcb=1.13 - joblib=1.1.0
- libxkbcommon=1.0.3 - jpeg=9e
- libxml2=2.9.12 - json-c=0.15
- libxslt=1.1.33 - json5=0.9.5
- locket=0.2.0 - jsonschema=4.7.2
- lxml=4.6.3 - jupyter_client=7.3.4
- lz4-c=1.9.3 - jupyter_core=4.10.0
- lzo=2.10 - jupyter_server=1.18.1
- mamba=0.15.3 - kealib=1.4.15
- mapclassify=2.4.3 - keyutils=1.6.1
- markupsafe=2.0.1 - kiwisolver=1.4.4
- matplotlib=3.4.3 - krb5=1.19.3
- matplotlib-base=3.4.3 - lcms2=2.12
- matplotlib-inline=0.1.3 - ld_impl_linux-64=2.36.1
- memory_profiler=0.58.0 - lerc=3.0
- mock=4.0.3 - libblas=3.9.0
- more-itertools=8.10.0 - libbrotlicommon=1.0.9
- msgpack-python=1.0.2 - libbrotlidec=1.0.9
- munch=2.5.0 - libbrotlienc=1.0.9
- mysql-common=8.0.25 - libcblas=3.9.0
- mysql-libs=8.0.25 - libclang=11.1.0
- nbformat=5.1.3 - libcrc32c=1.1.2
- ncurses=6.2 - libcurl=7.83.1
- netcdf4=1.5.6 - libdap4=3.20.6
- networkx=2.6.3 - libdeflate=1.12
- nspr=4.30 - libedit=3.1.20191231
- nss=3.69 - libev=4.33
- numexpr=2.7.3 - libevent=2.1.10
- numpy=1.21.2 - libffi=3.4.2
- olefile=0.46 - libgcc-ng=12.1.0
- openjdk=11.0.9.1 - libgdal=3.3.3
- openjpeg=2.4.0 - libgfortran-ng=12.1.0
- openpyxl=3.0.8 - libgfortran5=12.1.0
- openssl=1.1.1l - libglib=2.72.1
- packaging=21.0 - libgomp=12.1.0
- pandas=1.2.5 - libgoogle-cloud=1.40.2
- parso=0.8.2 - libiconv=1.16
- partd=1.2.0 - libkml=1.3.0
- patsy=0.5.1 - liblapack=3.9.0
- pcre=8.45 - liblapacke=3.9.0
- pexpect=4.8.0 - libllvm11=11.1.0
- pickleshare=0.7.5 - libnetcdf=4.8.1
- pillow=8.2.0 - libnghttp2=1.47.0
- pip=21.2.4 - libnsl=2.0.0
- pixman=0.40.0 - libogg=1.3.4
- pluggy=1.0.0 - libopenblas=0.3.20
- ply=3.11 - libopus=1.3.1
- poppler=0.89.0 - libpng=1.6.37
- poppler-data=0.4.11 - libpq=13.5
- postgresql=13.3 - libprotobuf=3.20.1
- powerplantmatching=0.4.8 - librttopo=1.1.0
- progressbar2=3.53.1 - libsodium=1.0.18
- proj=7.2.0 - libspatialindex=1.9.3
- prompt-toolkit=3.0.20 - libspatialite=5.0.1
- psutil=5.8.0 - libssh2=1.10.0
- pthread-stubs=0.4 - libstdcxx-ng=12.1.0
- ptyprocess=0.7.0 - libthrift=0.16.0
- pulp=2.5.0 - libtiff=4.4.0
- py=1.10.0 - libutf8proc=2.7.0
- pycosat=0.6.3 - libuuid=2.32.1
- pycountry=20.7.3 - libvorbis=1.3.7
- pycparser=2.20 - libwebp=1.2.2
- pygments=2.10.0 - libwebp-base=1.2.2
- pyomo=6.1.2 - libxcb=1.13
- pyopenssl=20.0.1 - libxkbcommon=1.0.3
- pyparsing=2.4.7 - libxml2=2.9.12
- pyproj=3.1.0 - libxslt=1.1.33
- pypsa=0.18.0 - libzip=1.9.2
- pyqt=5.12.3 - libzlib=1.2.12
- pyqt-impl=5.12.3 - locket=1.0.0
- pyqt5-sip=4.19.18 - lxml=4.8.0
- pyqtchart=5.12 - lz4=4.0.0
- pyqtwebengine=5.12.1 - lz4-c=1.9.3
- pyrsistent=0.17.3 - lzo=2.10
- pyshp=2.1.3 - mapclassify=2.4.3
- pysocks=1.7.1 - markdown=3.4.1
- pytables=3.6.1 - markupsafe=2.1.1
- pytest=6.2.5 - matplotlib=3.5.2
- python=3.9.7 - matplotlib-base=3.5.2
- python-dateutil=2.8.2 - matplotlib-inline=0.1.3
- python-utils=2.5.6 - memory_profiler=0.60.0
- python_abi=3.9 - metis=5.1.0
- pytz=2021.1 - mistune=0.8.4
- pyyaml=5.4.1 - msgpack-python=1.0.4
- qt=5.12.9 - mumps-include=5.2.1
- rasterio=1.2.6 - mumps-seq=5.2.1
- ratelimiter=1.2.0 - munch=2.5.0
- readline=8.1 - munkres=1.1.4
- reproc=14.2.3 - mysql-common=8.0.29
- reproc-cpp=14.2.3 - mysql-libs=8.0.29
- requests=2.26.0 - nbclassic=0.4.3
- rtree=0.9.7 - nbclient=0.6.6
- ruamel_yaml=0.15.80 - nbconvert=6.5.0
- scikit-learn=0.24.2 - nbconvert-core=6.5.0
- scipy=1.7.1 - nbconvert-pandoc=6.5.0
- seaborn=0.11.2 - nbformat=5.4.0
- seaborn-base=0.11.2 - ncurses=6.3
- setuptools=58.0.4 - nest-asyncio=1.5.5
- setuptools-scm=6.3.2 - netcdf4=1.6.0
- setuptools_scm=6.3.2 - networkx=2.8.4
- shapely=1.7.1 - nomkl=1.0
- six=1.16.0 - notebook=6.4.12
- smart_open=5.2.1 - notebook-shim=0.1.0
- smmap=3.0.5 - nspr=4.32
- snakemake-minimal=6.8.0 - nss=3.78
- snuggs=1.4.7 - numexpr=2.8.3
- sortedcontainers=2.4.0 - numpy=1.23.1
- soupsieve=2.0.1 - openjdk=11.0.9.1
- sqlite=3.36.0 - openjpeg=2.4.0
- statsmodels=0.12.2 - openpyxl=3.0.9
- stopit=1.1.2 - openssl=1.1.1q
- tabula-py=2.2.0 - orc=1.7.5
- tabulate=0.8.9 - packaging=21.3
- tblib=1.7.0 - pandas=1.4.3
- threadpoolctl=2.2.0 - pandoc=2.18
- tiledb=2.2.9 - pandocfilters=1.5.0
- tk=8.6.11 - parquet-cpp=1.5.1
- toml=0.10.2 - parso=0.8.3
- tomli=1.2.1 - partd=1.2.0
- toolz=0.11.1 - patsy=0.5.2
- toposort=1.6 - pcre=8.45
- tornado=6.1 - pexpect=4.8.0
- tqdm=4.62.3 - pickleshare=0.7.5
- traitlets=5.1.0 - pillow=9.2.0
- typing_extensions=3.10.0.2 - pip=22.1.2
- tzcode=2021a - pixman=0.40.0
- tzdata=2021a - plac=1.3.5
- urllib3=1.26.6 - pluggy=1.0.0
- wcwidth=0.2.5 - ply=3.11
- wheel=0.37.0 - poppler=21.09.0
- wrapt=1.12.1 - poppler-data=0.4.11
- xarray=0.19.0 - postgresql=13.5
- xerces-c=3.2.3 - powerplantmatching=0.5.4
- xlrd=2.0.1 - progressbar2=4.0.0
- xorg-fixesproto=5.0 - proj=8.1.1
- xorg-inputproto=2.3.2 - prometheus_client=0.14.1
- xorg-kbproto=1.0.7 - prompt-toolkit=3.0.30
- xorg-libice=1.0.10 - protobuf=3.20.1
- xorg-libsm=1.2.3 - psutil=5.9.1
- xorg-libx11=1.7.2 - pthread-stubs=0.4
- xorg-libxau=1.0.9 - ptyprocess=0.7.0
- xorg-libxdmcp=1.1.3 - pulp=2.6.0
- xorg-libxext=1.3.4 - pure_eval=0.2.2
- xorg-libxfixes=5.0.3 - py=1.11.0
- xorg-libxi=1.7.10 - pyarrow=8.0.0
- xorg-libxrender=0.9.10 - pycountry=20.7.3
- xorg-libxtst=1.2.3 - pycparser=2.21
- xorg-recordproto=1.14.2 - pyct=0.4.6
- xorg-renderproto=0.11.1 - pyct-core=0.4.6
- xorg-xextproto=7.3.0 - pydeck=0.7.1
- xorg-xproto=7.0.31 - pygments=2.12.0
- xz=5.2.5 - pympler=0.9
- yaml=0.2.5 - pyomo=6.4.1
- zict=2.0.0 - pyopenssl=22.0.0
- zipp=3.5.0 - pyparsing=3.0.9
- zlib=1.2.11 - pyproj=3.2.1
- zstd=1.4.9 - pypsa=0.20.0
- pip: - pyqt=5.12.3
- countrycode==0.2 - pyqt-impl=5.12.3
- sklearn==0.0 - pyqt5-sip=4.19.18
- tsam==1.1.1 - pyqtchart=5.12
- vresutils==0.3.1 - pyqtwebengine=5.12.1
- pyrsistent=0.18.1
- pyshp=2.3.0
- pysocks=1.7.1
- pytables=3.7.0
- pytest=7.1.2
- python=3.9.13
- python-dateutil=2.8.2
- python-fastjsonschema=2.16.1
- python-tzdata=2022.1
- python-utils=3.3.3
- python_abi=3.9
- pytz=2022.1
- pytz-deprecation-shim=0.1.0.post0
- pyviz_comms=2.2.0
- pyxlsb=1.0.9
- pyyaml=6.0
- pyzmq=23.2.0
- qt=5.12.9
- rasterio=1.2.9
- ratelimiter=1.2.0
- re2=2022.06.01
- readline=8.1.2
- requests=2.28.1
- retry=0.9.2
- rich=12.5.1
- rtree=1.0.0
- s2n=1.0.10
- scikit-learn=1.1.1
- scipy=1.8.1
- scotch=6.0.9
- seaborn=0.11.2
- seaborn-base=0.11.2
- semver=2.13.0
- send2trash=1.8.0
- setuptools=63.2.0
- setuptools-scm=7.0.5
- setuptools_scm=7.0.5
- shapely=1.8.0
- six=1.16.0
- smart_open=6.0.0
- smmap=3.0.5
- snakemake-minimal=7.8.5
- snappy=1.1.9
- sniffio=1.2.0
- snuggs=1.4.7
- sortedcontainers=2.4.0
- soupsieve=2.3.1
- sqlite=3.39.1
- stack_data=0.3.0
- statsmodels=0.13.2
- stopit=1.1.2
- streamlit=1.10.0
- tabula-py=2.2.0
- tabulate=0.8.10
- tblib=1.7.0
- tenacity=8.0.1
- terminado=0.15.0
- threadpoolctl=3.1.0
- tiledb=2.3.4
- tinycss2=1.1.1
- tk=8.6.12
- toml=0.10.2
- tomli=2.0.1
- toolz=0.12.0
- toposort=1.7
- tornado=6.1
- tqdm=4.64.0
- traitlets=5.3.0
- typing-extensions=4.3.0
- typing_extensions=4.3.0
- tzcode=2022a
- tzdata=2022a
- tzlocal=4.2
- unicodedata2=14.0.0
- unidecode=1.3.4
- unixodbc=2.3.10
- urllib3=1.26.10
- validators=0.18.2
- watchdog=2.1.9
- wcwidth=0.2.5
- webencodings=0.5.1
- websocket-client=1.3.3
- wheel=0.37.1
- widgetsnbextension=3.6.1
- wrapt=1.14.1
- xarray=2022.3.0
- xerces-c=3.2.3
- xlrd=2.0.1
- xorg-fixesproto=5.0
- xorg-inputproto=2.3.2
- xorg-kbproto=1.0.7
- xorg-libice=1.0.10
- xorg-libsm=1.2.3
- xorg-libx11=1.7.2
- xorg-libxau=1.0.9
- xorg-libxdmcp=1.1.3
- xorg-libxext=1.3.4
- xorg-libxfixes=5.0.3
- xorg-libxi=1.7.10
- xorg-libxrender=0.9.10
- xorg-libxtst=1.2.3
- xorg-recordproto=1.14.2
- xorg-renderproto=0.11.1
- xorg-xextproto=7.3.0
- xorg-xproto=7.0.31
- xyzservices=2022.6.0
- xz=5.2.5
- yaml=0.2.5
- yte=1.5.1
- zeromq=4.3.4
- zict=2.2.0
- zipp=3.8.0
- zlib=1.2.12
- zstd=1.5.2
- pip:
- countrycode==0.2
- tsam==2.1.0
- vresutils==0.3.1

View File

@ -1,60 +1,62 @@
# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors # SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
# #
# SPDX-License-Identifier: MIT # SPDX-License-Identifier: MIT
name: pypsa-eur name: pypsa-eur
channels: channels:
- conda-forge - conda-forge
- bioconda - bioconda
dependencies: dependencies:
- python>=3.8 - python>=3.8
- pip - pip
- pypsa>=0.19.2 - pypsa>=0.21.3
- atlite>=0.2.5 - atlite>=0.2.9
- dask - dask
- jupyter - jupyter
- nbconvert - nbconvert
# Dependencies of the workflow itself # Dependencies of the workflow itself
- xlrd - xlrd
- openpyxl - openpyxl
- pycountry - pycountry
- seaborn - seaborn
- snakemake-minimal - snakemake-minimal
- memory_profiler - memory_profiler
- yaml - yaml
- pytables - pytables
- lxml - lxml
- powerplantmatching>=0.4.8 - powerplantmatching>=0.5.5
- numpy - numpy<1.24
- pandas - pandas
- geopandas - geopandas>=0.11.0
- xarray - xarray
- netcdf4 - netcdf4
- networkx - networkx
- scipy - scipy
- shapely - shapely<2.0
- progressbar2 - progressbar2
- pyomo - pyomo
- matplotlib - matplotlib<3.6
- proj - proj
- fiona
- country_converter
# Keep in conda environment when calling ipython # Keep in conda environment when calling ipython
- ipython - ipython
# GIS dependencies: # GIS dependencies:
- cartopy - cartopy
- descartes - descartes
- rasterio - rasterio!=1.2.10
# PyPSA-Eur-Sec Dependencies # PyPSA-Eur-Sec Dependencies
- geopy - geopy
- tqdm - tqdm
- pytz - pytz
- country_converter - tabula-py
- tabula-py - pyxlsb
- pip: - pip:
- vresutils>=0.3.1 - vresutils>=0.3.1
- tsam>=1.1.0 - tsam>=1.1.0

View File

@ -1,10 +1,14 @@
# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
# #
# SPDX-License-Identifier: MIT # SPDX-License-Identifier: MIT
import pandas as pd
from pathlib import Path from pathlib import Path
import pandas as pd
REGION_COLS = ["geometry", "name", "x", "y", "country"]
def configure_logging(snakemake, skip_handlers=False): def configure_logging(snakemake, skip_handlers=False):
""" """
@ -27,21 +31,26 @@ def configure_logging(snakemake, skip_handlers=False):
import logging import logging
kwargs = snakemake.config.get('logging', dict()) kwargs = snakemake.config.get("logging", dict()).copy()
kwargs.setdefault("level", "INFO") kwargs.setdefault("level", "INFO")
if skip_handlers is False: if skip_handlers is False:
fallback_path = Path(__file__).parent.joinpath('..', 'logs', f"{snakemake.rule}.log") fallback_path = Path(__file__).parent.joinpath(
logfile = snakemake.log.get('python', snakemake.log[0] if snakemake.log "..", "logs", f"{snakemake.rule}.log"
else fallback_path) )
logfile = snakemake.log.get(
"python", snakemake.log[0] if snakemake.log else fallback_path
)
kwargs.update( kwargs.update(
{'handlers': [ {
# Prefer the 'python' log, otherwise take the first log for each "handlers": [
# Snakemake rule # Prefer the 'python' log, otherwise take the first log for each
logging.FileHandler(logfile), # Snakemake rule
logging.StreamHandler() logging.FileHandler(logfile),
logging.StreamHandler(),
] ]
}) }
)
logging.basicConfig(**kwargs) logging.basicConfig(**kwargs)
@ -79,138 +88,199 @@ def load_network(import_name=None, custom_components=None):
if custom_components is not None: if custom_components is not None:
override_components = pypsa.components.components.copy() override_components = pypsa.components.components.copy()
override_component_attrs = Dict({k : v.copy() for k,v in pypsa.components.component_attrs.items()}) override_component_attrs = Dict(
{k: v.copy() for k, v in pypsa.components.component_attrs.items()}
)
for k, v in custom_components.items(): for k, v in custom_components.items():
override_components.loc[k] = v['component'] override_components.loc[k] = v["component"]
override_component_attrs[k] = pd.DataFrame(columns = ["type","unit","default","description","status"]) override_component_attrs[k] = pd.DataFrame(
for attr, val in v['attributes'].items(): columns=["type", "unit", "default", "description", "status"]
)
for attr, val in v["attributes"].items():
override_component_attrs[k].loc[attr] = val override_component_attrs[k].loc[attr] = val
return pypsa.Network(import_name=import_name, return pypsa.Network(
override_components=override_components, import_name=import_name,
override_component_attrs=override_component_attrs) override_components=override_components,
override_component_attrs=override_component_attrs,
)
def pdbcast(v, h): def pdbcast(v, h):
return pd.DataFrame(v.values.reshape((-1, 1)) * h.values, return pd.DataFrame(
index=v.index, columns=h.index) v.values.reshape((-1, 1)) * h.values, index=v.index, columns=h.index
)
def load_network_for_plots(fn, tech_costs, config, combine_hydro_ps=True): def load_network_for_plots(fn, tech_costs, config, combine_hydro_ps=True):
import pypsa import pypsa
from add_electricity import update_transmission_costs, load_costs from add_electricity import load_costs, update_transmission_costs
n = pypsa.Network(fn) n = pypsa.Network(fn)
n.loads["carrier"] = n.loads.bus.map(n.buses.carrier) + " load" n.loads["carrier"] = n.loads.bus.map(n.buses.carrier) + " load"
n.stores["carrier"] = n.stores.bus.map(n.buses.carrier) n.stores["carrier"] = n.stores.bus.map(n.buses.carrier)
n.links["carrier"] = (n.links.bus0.map(n.buses.carrier) + "-" + n.links.bus1.map(n.buses.carrier)) n.links["carrier"] = (
n.links.bus0.map(n.buses.carrier) + "-" + n.links.bus1.map(n.buses.carrier)
)
n.lines["carrier"] = "AC line" n.lines["carrier"] = "AC line"
n.transformers["carrier"] = "AC transformer" n.transformers["carrier"] = "AC transformer"
n.lines['s_nom'] = n.lines['s_nom_min'] n.lines["s_nom"] = n.lines["s_nom_min"]
n.links['p_nom'] = n.links['p_nom_min'] n.links["p_nom"] = n.links["p_nom_min"]
if combine_hydro_ps: if combine_hydro_ps:
n.storage_units.loc[n.storage_units.carrier.isin({'PHS', 'hydro'}), 'carrier'] = 'hydro+PHS' n.storage_units.loc[
n.storage_units.carrier.isin({"PHS", "hydro"}), "carrier"
] = "hydro+PHS"
# if the carrier was not set on the heat storage units # if the carrier was not set on the heat storage units
# bus_carrier = n.storage_units.bus.map(n.buses.carrier) # bus_carrier = n.storage_units.bus.map(n.buses.carrier)
# n.storage_units.loc[bus_carrier == "heat","carrier"] = "water tanks" # n.storage_units.loc[bus_carrier == "heat","carrier"] = "water tanks"
Nyears = n.snapshot_weightings.objective.sum() / 8760. Nyears = n.snapshot_weightings.objective.sum() / 8760.0
costs = load_costs(Nyears, tech_costs, config['costs'], config['electricity']) costs = load_costs(tech_costs, config["costs"], config["electricity"], Nyears)
update_transmission_costs(n, costs) update_transmission_costs(n, costs)
return n return n
def update_p_nom_max(n): def update_p_nom_max(n):
# if extendable carriers (solar/onwind/...) have capacity >= 0, # if extendable carriers (solar/onwind/...) have capacity >= 0,
# e.g. existing assets from the OPSD project are included to the network, # e.g. existing assets from the OPSD project are included to the network,
# the installed capacity might exceed the expansion limit. # the installed capacity might exceed the expansion limit.
# Hence, we update the assumptions. # Hence, we update the assumptions.
n.generators.p_nom_max = n.generators[['p_nom_min', 'p_nom_max']].max(1) n.generators.p_nom_max = n.generators[["p_nom_min", "p_nom_max"]].max(1)
def aggregate_p_nom(n): def aggregate_p_nom(n):
return pd.concat([ return pd.concat(
n.generators.groupby("carrier").p_nom_opt.sum(), [
n.storage_units.groupby("carrier").p_nom_opt.sum(), n.generators.groupby("carrier").p_nom_opt.sum(),
n.links.groupby("carrier").p_nom_opt.sum(), n.storage_units.groupby("carrier").p_nom_opt.sum(),
n.loads_t.p.groupby(n.loads.carrier,axis=1).sum().mean() n.links.groupby("carrier").p_nom_opt.sum(),
]) n.loads_t.p.groupby(n.loads.carrier, axis=1).sum().mean(),
]
)
def aggregate_p(n): def aggregate_p(n):
return pd.concat([ return pd.concat(
n.generators_t.p.sum().groupby(n.generators.carrier).sum(), [
n.storage_units_t.p.sum().groupby(n.storage_units.carrier).sum(), n.generators_t.p.sum().groupby(n.generators.carrier).sum(),
n.stores_t.p.sum().groupby(n.stores.carrier).sum(), n.storage_units_t.p.sum().groupby(n.storage_units.carrier).sum(),
-n.loads_t.p.sum().groupby(n.loads.carrier).sum() n.stores_t.p.sum().groupby(n.stores.carrier).sum(),
]) -n.loads_t.p.sum().groupby(n.loads.carrier).sum(),
]
)
def aggregate_e_nom(n): def aggregate_e_nom(n):
return pd.concat([ return pd.concat(
(n.storage_units["p_nom_opt"]*n.storage_units["max_hours"]).groupby(n.storage_units["carrier"]).sum(), [
n.stores["e_nom_opt"].groupby(n.stores.carrier).sum() (n.storage_units["p_nom_opt"] * n.storage_units["max_hours"])
]) .groupby(n.storage_units["carrier"])
.sum(),
n.stores["e_nom_opt"].groupby(n.stores.carrier).sum(),
]
)
def aggregate_p_curtailed(n): def aggregate_p_curtailed(n):
return pd.concat([ return pd.concat(
((n.generators_t.p_max_pu.sum().multiply(n.generators.p_nom_opt) - n.generators_t.p.sum()) [
.groupby(n.generators.carrier).sum()), (
((n.storage_units_t.inflow.sum() - n.storage_units_t.p.sum()) (
.groupby(n.storage_units.carrier).sum()) n.generators_t.p_max_pu.sum().multiply(n.generators.p_nom_opt)
]) - n.generators_t.p.sum()
)
.groupby(n.generators.carrier)
.sum()
),
(
(n.storage_units_t.inflow.sum() - n.storage_units_t.p.sum())
.groupby(n.storage_units.carrier)
.sum()
),
]
)
def aggregate_costs(n, flatten=False, opts=None, existing_only=False): def aggregate_costs(n, flatten=False, opts=None, existing_only=False):
components = dict(
components = dict(Link=("p_nom", "p0"), Link=("p_nom", "p0"),
Generator=("p_nom", "p"), Generator=("p_nom", "p"),
StorageUnit=("p_nom", "p"), StorageUnit=("p_nom", "p"),
Store=("e_nom", "p"), Store=("e_nom", "p"),
Line=("s_nom", None), Line=("s_nom", None),
Transformer=("s_nom", None)) Transformer=("s_nom", None),
)
costs = {} costs = {}
for c, (p_nom, p_attr) in zip( for c, (p_nom, p_attr) in zip(
n.iterate_components(components.keys(), skip_empty=False), n.iterate_components(components.keys(), skip_empty=False), components.values()
components.values()
): ):
if c.df.empty: continue if c.df.empty:
if not existing_only: p_nom += "_opt" continue
costs[(c.list_name, 'capital')] = (c.df[p_nom] * c.df.capital_cost).groupby(c.df.carrier).sum() if not existing_only:
p_nom += "_opt"
costs[(c.list_name, "capital")] = (
(c.df[p_nom] * c.df.capital_cost).groupby(c.df.carrier).sum()
)
if p_attr is not None: if p_attr is not None:
p = c.pnl[p_attr].sum() p = c.pnl[p_attr].sum()
if c.name == 'StorageUnit': if c.name == "StorageUnit":
p = p.loc[p > 0] p = p.loc[p > 0]
costs[(c.list_name, 'marginal')] = (p*c.df.marginal_cost).groupby(c.df.carrier).sum() costs[(c.list_name, "marginal")] = (
(p * c.df.marginal_cost).groupby(c.df.carrier).sum()
)
costs = pd.concat(costs) costs = pd.concat(costs)
if flatten: if flatten:
assert opts is not None assert opts is not None
conv_techs = opts['conv_techs'] conv_techs = opts["conv_techs"]
costs = costs.reset_index(level=0, drop=True) costs = costs.reset_index(level=0, drop=True)
costs = costs['capital'].add( costs = costs["capital"].add(
costs['marginal'].rename({t: t + ' marginal' for t in conv_techs}), costs["marginal"].rename({t: t + " marginal" for t in conv_techs}),
fill_value=0. fill_value=0.0,
) )
return costs return costs
def progress_retrieve(url, file): def progress_retrieve(url, file):
import urllib import urllib
from progressbar import ProgressBar from progressbar import ProgressBar
pbar = ProgressBar(0, 100) pbar = ProgressBar(0, 100)
def dlProgress(count, blockSize, totalSize): def dlProgress(count, blockSize, totalSize):
pbar.update( int(count * blockSize * 100 / totalSize) ) pbar.update(int(count * blockSize * 100 / totalSize))
urllib.request.urlretrieve(url, file, reporthook=dlProgress) urllib.request.urlretrieve(url, file, reporthook=dlProgress)
def get_aggregation_strategies(aggregation_strategies):
# default aggregation strategies that cannot be defined in .yaml format must be specified within
# the function, otherwise (when defaults are passed in the function's definition) they get lost
# when custom values are specified in the config.
import numpy as np
from pypsa.networkclustering import _make_consense
bus_strategies = dict(country=_make_consense("Bus", "country"))
bus_strategies.update(aggregation_strategies.get("buses", {}))
generator_strategies = {"build_year": lambda x: 0, "lifetime": lambda x: np.inf}
generator_strategies.update(aggregation_strategies.get("generators", {}))
return bus_strategies, generator_strategies
def mock_snakemake(rulename, **wildcards): def mock_snakemake(rulename, **wildcards):
""" """
This function is expected to be executed from the 'scripts'-directory of ' This function is expected to be executed from the 'scripts'-directory of '
@ -227,20 +297,24 @@ def mock_snakemake(rulename, **wildcards):
keyword arguments fixing the wildcards. Only necessary if wildcards are keyword arguments fixing the wildcards. Only necessary if wildcards are
needed. needed.
""" """
import snakemake as sm
import os import os
import snakemake as sm
from packaging.version import Version, parse
from pypsa.descriptors import Dict from pypsa.descriptors import Dict
from snakemake.script import Snakemake from snakemake.script import Snakemake
script_dir = Path(__file__).parent.resolve() script_dir = Path(__file__).parent.resolve()
assert Path.cwd().resolve() == script_dir, \ assert (
f'mock_snakemake has to be run from the repository scripts directory {script_dir}' Path.cwd().resolve() == script_dir
), f"mock_snakemake has to be run from the repository scripts directory {script_dir}"
os.chdir(script_dir.parent) os.chdir(script_dir.parent)
for p in sm.SNAKEFILE_CHOICES: for p in sm.SNAKEFILE_CHOICES:
if os.path.exists(p): if os.path.exists(p):
snakefile = p snakefile = p
break break
workflow = sm.Workflow(snakefile, overwrite_configfiles=[]) kwargs = dict(rerun_triggers=[]) if parse(sm.__version__) > Version("7.7.0") else {}
workflow = sm.Workflow(snakefile, overwrite_configfiles=[], **kwargs)
workflow.include(snakefile) workflow.include(snakefile)
workflow.global_resources = {} workflow.global_resources = {}
rule = workflow.get_rule(rulename) rule = workflow.get_rule(rulename)
@ -254,9 +328,18 @@ def mock_snakemake(rulename, **wildcards):
io[i] = os.path.abspath(io[i]) io[i] = os.path.abspath(io[i])
make_accessable(job.input, job.output, job.log) make_accessable(job.input, job.output, job.log)
snakemake = Snakemake(job.input, job.output, job.params, job.wildcards, snakemake = Snakemake(
job.threads, job.resources, job.log, job.input,
job.dag.workflow.config, job.rule.name, None,) job.output,
job.params,
job.wildcards,
job.threads,
job.resources,
job.log,
job.dag.workflow.config,
job.rule.name,
None,
)
# create log and output dir if not existent # create log and output dir if not existent
for path in list(snakemake.log) + list(snakemake.output): for path in list(snakemake.log) + list(snakemake.output):
Path(path).parent.mkdir(parents=True, exist_ok=True) Path(path).parent.mkdir(parents=True, exist_ok=True)

File diff suppressed because it is too large

View File

@ -1,4 +1,5 @@
# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
# #
# SPDX-License-Identifier: MIT # SPDX-License-Identifier: MIT
@ -13,7 +14,7 @@ Relevant Settings
costs: costs:
year: year:
USD2013_to_EUR2013: version:
dicountrate: dicountrate:
emission_prices: emission_prices:
@ -32,7 +33,7 @@ Relevant Settings
Inputs Inputs
------ ------
- ``data/costs.csv``: The database of cost assumptions for all included technologies for specific years from various sources; e.g. discount rate, lifetime, investment (CAPEX), fixed operation and maintenance (FOM), variable operation and maintenance (VOM), fuel costs, efficiency, carbon-dioxide intensity. - ``resources/costs.csv``: The database of cost assumptions for all included technologies for specific years from various sources; e.g. discount rate, lifetime, investment (CAPEX), fixed operation and maintenance (FOM), variable operation and maintenance (VOM), fuel costs, efficiency, carbon-dioxide intensity.
Outputs Outputs
------- -------
@ -50,14 +51,16 @@ The rule :mod:`add_extra_components` attaches additional extendable components t
- ``Stores`` of carrier 'H2' and/or 'battery' in combination with ``Links``. If this option is chosen, the script adds extra buses with corresponding carrier where energy ``Stores`` are attached and which are connected to the corresponding power buses via two links, one each for charging and discharging. This leads to three investment variables for the energy capacity, charging and discharging capacity of the storage unit. - ``Stores`` of carrier 'H2' and/or 'battery' in combination with ``Links``. If this option is chosen, the script adds extra buses with corresponding carrier where energy ``Stores`` are attached and which are connected to the corresponding power buses via two links, one each for charging and discharging. This leads to three investment variables for the energy capacity, charging and discharging capacity of the storage unit.
""" """
import logging import logging
from _helpers import configure_logging
import pypsa
import pandas as pd
import numpy as np import numpy as np
import pandas as pd
from add_electricity import (load_costs, add_nice_carrier_names, import pypsa
_add_missing_carriers_from_costs) from _helpers import configure_logging
from add_electricity import (
_add_missing_carriers_from_costs,
add_nice_carrier_names,
load_costs,
)
idx = pd.IndexSlice idx = pd.IndexSlice
@ -65,8 +68,8 @@ logger = logging.getLogger(__name__)
def attach_storageunits(n, costs, elec_opts): def attach_storageunits(n, costs, elec_opts):
carriers = elec_opts['extendable_carriers']['StorageUnit'] carriers = elec_opts["extendable_carriers"]["StorageUnit"]
max_hours = elec_opts['max_hours'] max_hours = elec_opts["max_hours"]
_add_missing_carriers_from_costs(n, costs, carriers) _add_missing_carriers_from_costs(n, costs, carriers)
@ -76,128 +79,168 @@ def attach_storageunits(n, costs, elec_opts):
lookup_dispatch = {"H2": "fuel cell", "battery": "battery inverter"} lookup_dispatch = {"H2": "fuel cell", "battery": "battery inverter"}
for carrier in carriers: for carrier in carriers:
n.madd("StorageUnit", buses_i, ' ' + carrier, roundtrip_correction = 0.5 if carrier == "battery" else 1
bus=buses_i,
carrier=carrier, n.madd(
p_nom_extendable=True, "StorageUnit",
capital_cost=costs.at[carrier, 'capital_cost'], buses_i,
marginal_cost=costs.at[carrier, 'marginal_cost'], " " + carrier,
efficiency_store=costs.at[lookup_store[carrier], 'efficiency'], bus=buses_i,
efficiency_dispatch=costs.at[lookup_dispatch[carrier], 'efficiency'], carrier=carrier,
max_hours=max_hours[carrier], p_nom_extendable=True,
cyclic_state_of_charge=True) capital_cost=costs.at[carrier, "capital_cost"],
marginal_cost=costs.at[carrier, "marginal_cost"],
efficiency_store=costs.at[lookup_store[carrier], "efficiency"]
** roundtrip_correction,
efficiency_dispatch=costs.at[lookup_dispatch[carrier], "efficiency"]
** roundtrip_correction,
max_hours=max_hours[carrier],
cyclic_state_of_charge=True,
)
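In the rewritten attach_storageunits above, roundtrip_correction = 0.5 is used as an exponent so that the battery inverter's round-trip efficiency is split evenly between charging and discharging, while H2 keeps an exponent of 1, presumably because electrolysis and fuel cell efficiencies are already single-direction values. A quick sanity check of that split with an assumed (not sourced) round-trip value:

# split an assumed round-trip efficiency evenly between store and dispatch
eta_roundtrip = 0.90                 # illustrative value, not from costs.csv
eta_store = eta_roundtrip ** 0.5     # ~0.949
eta_dispatch = eta_roundtrip ** 0.5  # ~0.949
assert abs(eta_store * eta_dispatch - eta_roundtrip) < 1e-12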
def attach_stores(n, costs, elec_opts): def attach_stores(n, costs, elec_opts):
carriers = elec_opts['extendable_carriers']['Store'] carriers = elec_opts["extendable_carriers"]["Store"]
_add_missing_carriers_from_costs(n, costs, carriers) _add_missing_carriers_from_costs(n, costs, carriers)
buses_i = n.buses.index buses_i = n.buses.index
bus_sub_dict = {k: n.buses[k].values for k in ['x', 'y', 'country']} bus_sub_dict = {k: n.buses[k].values for k in ["x", "y", "country"]}
if 'H2' in carriers: if "H2" in carriers:
h2_buses_i = n.madd("Bus", buses_i + " H2", carrier="H2", **bus_sub_dict) h2_buses_i = n.madd("Bus", buses_i + " H2", carrier="H2", **bus_sub_dict)
n.madd("Store", h2_buses_i, n.madd(
bus=h2_buses_i, "Store",
carrier='H2', h2_buses_i,
e_nom_extendable=True, bus=h2_buses_i,
e_cyclic=True, carrier="H2",
capital_cost=costs.at["hydrogen storage", "capital_cost"]) e_nom_extendable=True,
e_cyclic=True,
capital_cost=costs.at["hydrogen storage underground", "capital_cost"],
)
n.madd("Link", h2_buses_i + " Electrolysis", n.madd(
bus0=buses_i, "Link",
bus1=h2_buses_i, h2_buses_i + " Electrolysis",
carrier='H2 electrolysis', bus0=buses_i,
p_nom_extendable=True, bus1=h2_buses_i,
efficiency=costs.at["electrolysis", "efficiency"], carrier="H2 electrolysis",
capital_cost=costs.at["electrolysis", "capital_cost"], p_nom_extendable=True,
marginal_cost=costs.at["electrolysis", "marginal_cost"]) efficiency=costs.at["electrolysis", "efficiency"],
capital_cost=costs.at["electrolysis", "capital_cost"],
marginal_cost=costs.at["electrolysis", "marginal_cost"],
)
n.madd("Link", h2_buses_i + " Fuel Cell", n.madd(
bus0=h2_buses_i, "Link",
bus1=buses_i, h2_buses_i + " Fuel Cell",
carrier='H2 fuel cell', bus0=h2_buses_i,
p_nom_extendable=True, bus1=buses_i,
efficiency=costs.at["fuel cell", "efficiency"], carrier="H2 fuel cell",
#NB: fixed cost is per MWel p_nom_extendable=True,
capital_cost=costs.at["fuel cell", "capital_cost"] * costs.at["fuel cell", "efficiency"], efficiency=costs.at["fuel cell", "efficiency"],
marginal_cost=costs.at["fuel cell", "marginal_cost"]) # NB: fixed cost is per MWel
capital_cost=costs.at["fuel cell", "capital_cost"]
* costs.at["fuel cell", "efficiency"],
marginal_cost=costs.at["fuel cell", "marginal_cost"],
)
if 'battery' in carriers: if "battery" in carriers:
b_buses_i = n.madd("Bus", buses_i + " battery", carrier="battery", **bus_sub_dict) b_buses_i = n.madd(
"Bus", buses_i + " battery", carrier="battery", **bus_sub_dict
)
n.madd("Store", b_buses_i, n.madd(
bus=b_buses_i, "Store",
carrier='battery', b_buses_i,
e_cyclic=True, bus=b_buses_i,
e_nom_extendable=True, carrier="battery",
capital_cost=costs.at['battery storage', 'capital_cost'], e_cyclic=True,
marginal_cost=costs.at["battery", "marginal_cost"]) e_nom_extendable=True,
capital_cost=costs.at["battery storage", "capital_cost"],
marginal_cost=costs.at["battery", "marginal_cost"],
)
n.madd("Link", b_buses_i + " charger", n.madd(
bus0=buses_i, "Link",
bus1=b_buses_i, b_buses_i + " charger",
carrier='battery charger', bus0=buses_i,
efficiency=costs.at['battery inverter', 'efficiency'], bus1=b_buses_i,
capital_cost=costs.at['battery inverter', 'capital_cost'], carrier="battery charger",
p_nom_extendable=True, # the efficiencies are "round trip efficiencies"
marginal_cost=costs.at["battery inverter", "marginal_cost"]) efficiency=costs.at["battery inverter", "efficiency"] ** 0.5,
capital_cost=costs.at["battery inverter", "capital_cost"],
p_nom_extendable=True,
marginal_cost=costs.at["battery inverter", "marginal_cost"],
)
n.madd("Link", b_buses_i + " discharger", n.madd(
bus0=b_buses_i, "Link",
bus1=buses_i, b_buses_i + " discharger",
carrier='battery discharger', bus0=b_buses_i,
efficiency=costs.at['battery inverter','efficiency'], bus1=buses_i,
p_nom_extendable=True, carrier="battery discharger",
marginal_cost=costs.at["battery inverter", "marginal_cost"]) efficiency=costs.at["battery inverter", "efficiency"] ** 0.5,
p_nom_extendable=True,
marginal_cost=costs.at["battery inverter", "marginal_cost"],
)
def attach_hydrogen_pipelines(n, costs, elec_opts): def attach_hydrogen_pipelines(n, costs, elec_opts):
ext_carriers = elec_opts['extendable_carriers'] ext_carriers = elec_opts["extendable_carriers"]
as_stores = ext_carriers.get('Store', []) as_stores = ext_carriers.get("Store", [])
if 'H2 pipeline' not in ext_carriers.get('Link',[]): return if "H2 pipeline" not in ext_carriers.get("Link", []):
return
assert 'H2' in as_stores, ("Attaching hydrogen pipelines requires hydrogen " assert "H2" in as_stores, (
"storage to be modelled as Store-Link-Bus combination. See " "Attaching hydrogen pipelines requires hydrogen "
"`config.yaml` at `electricity: extendable_carriers: Store:`.") "storage to be modelled as Store-Link-Bus combination. See "
"`config.yaml` at `electricity: extendable_carriers: Store:`."
)
# determine bus pairs # determine bus pairs
attrs = ["bus0","bus1","length"] attrs = ["bus0", "bus1", "length"]
candidates = pd.concat([n.lines[attrs], n.links.query('carrier=="DC"')[attrs]])\ candidates = pd.concat(
.reset_index(drop=True) [n.lines[attrs], n.links.query('carrier=="DC"')[attrs]]
).reset_index(drop=True)
# remove bus pair duplicates regardless of order of bus0 and bus1 # remove bus pair duplicates regardless of order of bus0 and bus1
h2_links = candidates[~pd.DataFrame(np.sort(candidates[['bus0', 'bus1']])).duplicated()] h2_links = candidates[
~pd.DataFrame(np.sort(candidates[["bus0", "bus1"]])).duplicated()
]
h2_links.index = h2_links.apply(lambda c: f"H2 pipeline {c.bus0}-{c.bus1}", axis=1) h2_links.index = h2_links.apply(lambda c: f"H2 pipeline {c.bus0}-{c.bus1}", axis=1)
# add pipelines # add pipelines
n.madd("Link", n.madd(
h2_links.index, "Link",
bus0=h2_links.bus0.values + " H2", h2_links.index,
bus1=h2_links.bus1.values + " H2", bus0=h2_links.bus0.values + " H2",
p_min_pu=-1, bus1=h2_links.bus1.values + " H2",
p_nom_extendable=True, p_min_pu=-1,
length=h2_links.length.values, p_nom_extendable=True,
capital_cost=costs.at['H2 pipeline','capital_cost']*h2_links.length, length=h2_links.length.values,
efficiency=costs.at['H2 pipeline','efficiency'], capital_cost=costs.at["H2 pipeline", "capital_cost"] * h2_links.length,
carrier="H2 pipeline") efficiency=costs.at["H2 pipeline", "efficiency"],
carrier="H2 pipeline",
)
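The bus-pair de-duplication above (np.sort over the bus0/bus1 columns followed by duplicated) keeps only one candidate pipeline per unordered pair of buses. A small, self-contained illustration of the idiom with invented bus names:

import numpy as np
import pandas as pd

candidates = pd.DataFrame({"bus0": ["A", "B", "C"], "bus1": ["B", "A", "D"]})

# np.sort works row-wise here, so ("A", "B") and ("B", "A") become identical rows
unordered = pd.DataFrame(np.sort(candidates[["bus0", "bus1"]]))
deduped = candidates[~unordered.duplicated()]
print(deduped)  # keeps A-B and C-D, drops the reversed duplicate B-A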
if __name__ == "__main__": if __name__ == "__main__":
if 'snakemake' not in globals(): if "snakemake" not in globals():
from _helpers import mock_snakemake from _helpers import mock_snakemake
snakemake = mock_snakemake('add_extra_components', network='elec',
simpl='', clusters=5) snakemake = mock_snakemake("add_extra_components", simpl="", clusters=5)
configure_logging(snakemake) configure_logging(snakemake)
n = pypsa.Network(snakemake.input.network) n = pypsa.Network(snakemake.input.network)
elec_config = snakemake.config['electricity'] elec_config = snakemake.config["electricity"]
Nyears = n.snapshot_weightings.objective.sum() / 8760. Nyears = n.snapshot_weightings.objective.sum() / 8760.0
costs = load_costs(snakemake.input.tech_costs, snakemake.config['costs'], elec_config, Nyears) costs = load_costs(
snakemake.input.tech_costs, snakemake.config["costs"], elec_config, Nyears
)
attach_storageunits(n, costs, elec_config) attach_storageunits(n, costs, elec_config)
attach_stores(n, costs, elec_config) attach_stores(n, costs, elec_config)
@ -205,4 +248,5 @@ if __name__ == "__main__":
add_nice_carrier_names(n, snakemake.config) add_nice_carrier_names(n, snakemake.config)
n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
n.export_to_netcdf(snakemake.output[0]) n.export_to_netcdf(snakemake.output[0])

View File

@ -1,10 +1,14 @@
# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
# #
# SPDX-License-Identifier: MIT # SPDX-License-Identifier: MIT
# coding: utf-8 # coding: utf-8
""" """
Creates the network topology from an `ENTSO-E map extract <https://github.com/PyPSA/GridKit/tree/master/entsoe>`_ (March 2022) as a PyPSA network. Creates the network topology from an `ENTSO-E map extract.
<https://github.com/PyPSA/GridKit/tree/master/entsoe>`_ (March 2022) as a PyPSA
network.
Relevant Settings Relevant Settings
----------------- -----------------
@ -59,25 +63,24 @@ Outputs
Description Description
----------- -----------
""" """
import logging import logging
from _helpers import configure_logging
import pypsa
import yaml
import pandas as pd
import geopandas as gpd
import numpy as np
import networkx as nx
from scipy import spatial
from scipy.sparse import csgraph
from itertools import product from itertools import product
from shapely.geometry import Point, LineString import geopandas as gpd
import shapely, shapely.prepared, shapely.wkt import networkx as nx
import numpy as np
import pandas as pd
import pypsa
import shapely
import shapely.prepared
import shapely.wkt
import yaml
from _helpers import configure_logging
from scipy import spatial
from scipy.sparse import csgraph
from shapely.geometry import LineString, Point
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -97,48 +100,73 @@ def _get_country(df):
def _find_closest_links(links, new_links, distance_upper_bound=1.5): def _find_closest_links(links, new_links, distance_upper_bound=1.5):
treecoords = np.asarray([np.asarray(shapely.wkt.loads(s).coords)[[0, -1]].flatten() treecoords = np.asarray(
for s in links.geometry]) [
querycoords = np.vstack([new_links[['x1', 'y1', 'x2', 'y2']], np.asarray(shapely.wkt.loads(s).coords)[[0, -1]].flatten()
new_links[['x2', 'y2', 'x1', 'y1']]]) for s in links.geometry
]
)
querycoords = np.vstack(
[new_links[["x1", "y1", "x2", "y2"]], new_links[["x2", "y2", "x1", "y1"]]]
)
tree = spatial.KDTree(treecoords) tree = spatial.KDTree(treecoords)
dist, ind = tree.query(querycoords, distance_upper_bound=distance_upper_bound) dist, ind = tree.query(querycoords, distance_upper_bound=distance_upper_bound)
found_b = ind < len(links) found_b = ind < len(links)
found_i = np.arange(len(new_links)*2)[found_b] % len(new_links) found_i = np.arange(len(new_links) * 2)[found_b] % len(new_links)
return pd.DataFrame(dict(D=dist[found_b], return (
i=links.index[ind[found_b] % len(links)]), pd.DataFrame(
index=new_links.index[found_i]).sort_values(by='D')\ dict(D=dist[found_b], i=links.index[ind[found_b] % len(links)]),
[lambda ds: ~ds.index.duplicated(keep='first')]\ index=new_links.index[found_i],
.sort_index()['i'] )
.sort_values(by="D")[lambda ds: ~ds.index.duplicated(keep="first")]
.sort_index()["i"]
)
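_find_closest_links flattens the two endpoints of every existing link into one four-coordinate vector and queries a KDTree with both orientations of each new link, so a match is found regardless of which end is listed first. A rough, self-contained sketch of that matching step with invented coordinates:

import numpy as np
from scipy import spatial

# endpoint coordinates (x1, y1, x2, y2) of two existing links
existing = np.array([[0.0, 0.0, 1.0, 1.0],
                     [5.0, 5.0, 6.0, 6.0]])

# one new link, given with its endpoints in reversed order
new = np.array([[1.0, 1.0, 0.0, 0.0]])

tree = spatial.KDTree(existing)
query = np.vstack([new, new[:, [2, 3, 0, 1]]])  # both orientations
dist, ind = tree.query(query, distance_upper_bound=1.5)
matched = ind[ind < len(existing)]              # unmatched queries return len(existing)
print(matched)                                  # [0]: the reversed link still maps to existing link 0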
def _load_buses_from_eg(eg_buses, europe_shape, config_elec): def _load_buses_from_eg(eg_buses, europe_shape, config_elec):
buses = (pd.read_csv(eg_buses, quotechar="'", buses = (
true_values=['t'], false_values=['f'], pd.read_csv(
dtype=dict(bus_id="str")) eg_buses,
.set_index("bus_id") quotechar="'",
.drop(['station_id'], axis=1) true_values=["t"],
.rename(columns=dict(voltage='v_nom'))) false_values=["f"],
dtype=dict(bus_id="str"),
)
.set_index("bus_id")
.drop(["station_id"], axis=1)
.rename(columns=dict(voltage="v_nom"))
)
buses['carrier'] = buses.pop('dc').map({True: 'DC', False: 'AC'}) buses["carrier"] = buses.pop("dc").map({True: "DC", False: "AC"})
buses['under_construction'] = buses['under_construction'].fillna(False).astype(bool) buses["under_construction"] = buses["under_construction"].fillna(False).astype(bool)
# remove all buses outside of all countries including exclusive economic zones (offshore) # remove all buses outside of all countries including exclusive economic zones (offshore)
europe_shape = gpd.read_file(europe_shape).loc[0, 'geometry'] europe_shape = gpd.read_file(europe_shape).loc[0, "geometry"]
europe_shape_prepped = shapely.prepared.prep(europe_shape) europe_shape_prepped = shapely.prepared.prep(europe_shape)
buses_in_europe_b = buses[['x', 'y']].apply(lambda p: europe_shape_prepped.contains(Point(p)), axis=1) buses_in_europe_b = buses[["x", "y"]].apply(
lambda p: europe_shape_prepped.contains(Point(p)), axis=1
)
buses_with_v_nom_to_keep_b = buses.v_nom.isin(config_elec['voltages']) | buses.v_nom.isnull() buses_with_v_nom_to_keep_b = (
logger.info("Removing buses with voltages {}".format(pd.Index(buses.v_nom.unique()).dropna().difference(config_elec['voltages']))) buses.v_nom.isin(config_elec["voltages"]) | buses.v_nom.isnull()
)
logger.info(
"Removing buses with voltages {}".format(
pd.Index(buses.v_nom.unique()).dropna().difference(config_elec["voltages"])
)
)
return pd.DataFrame(buses.loc[buses_in_europe_b & buses_with_v_nom_to_keep_b]) return pd.DataFrame(buses.loc[buses_in_europe_b & buses_with_v_nom_to_keep_b])
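The Europe-shape filter above relies on shapely's prepared geometries, which make repeated contains() checks against the same polygon cheap. A minimal sketch with an invented square standing in for the actual Europe shape:

import pandas as pd
import shapely.prepared
from shapely.geometry import Point, Polygon

shape = Polygon([(0, 0), (10, 0), (10, 10), (0, 10)])  # stand-in for europe_shape
prepped = shapely.prepared.prep(shape)

buses = pd.DataFrame({"x": [1.0, 20.0], "y": [1.0, 1.0]}, index=["inside", "outside"])
in_shape = buses[["x", "y"]].apply(lambda p: prepped.contains(Point(p)), axis=1)
print(buses.loc[in_shape])  # only the bus at (1, 1) survives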
def _load_transformers_from_eg(buses, eg_transformers): def _load_transformers_from_eg(buses, eg_transformers):
transformers = (pd.read_csv(eg_transformers, quotechar="'", transformers = pd.read_csv(
true_values=['t'], false_values=['f'], eg_transformers,
dtype=dict(transformer_id='str', bus0='str', bus1='str')) quotechar="'",
.set_index('transformer_id')) true_values=["t"],
false_values=["f"],
dtype=dict(transformer_id="str", bus0="str", bus1="str"),
).set_index("transformer_id")
transformers = _remove_dangling_branches(transformers, buses) transformers = _remove_dangling_branches(transformers, buses)
@ -146,33 +174,40 @@ def _load_transformers_from_eg(buses, eg_transformers):
def _load_converters_from_eg(buses, eg_converters): def _load_converters_from_eg(buses, eg_converters):
converters = (pd.read_csv(eg_converters, quotechar="'", converters = pd.read_csv(
true_values=['t'], false_values=['f'], eg_converters,
dtype=dict(converter_id='str', bus0='str', bus1='str')) quotechar="'",
.set_index('converter_id')) true_values=["t"],
false_values=["f"],
dtype=dict(converter_id="str", bus0="str", bus1="str"),
).set_index("converter_id")
converters = _remove_dangling_branches(converters, buses) converters = _remove_dangling_branches(converters, buses)
converters['carrier'] = 'B2B' converters["carrier"] = "B2B"
return converters return converters
def _load_links_from_eg(buses, eg_links): def _load_links_from_eg(buses, eg_links):
links = (pd.read_csv(eg_links, quotechar="'", true_values=['t'], false_values=['f'], links = pd.read_csv(
dtype=dict(link_id='str', bus0='str', bus1='str', under_construction="bool")) eg_links,
.set_index('link_id')) quotechar="'",
true_values=["t"],
false_values=["f"],
dtype=dict(link_id="str", bus0="str", bus1="str", under_construction="bool"),
).set_index("link_id")
links['length'] /= 1e3 links["length"] /= 1e3
# Skagerrak Link is connected to 132kV bus which is removed in _load_buses_from_eg. # Skagerrak Link is connected to 132kV bus which is removed in _load_buses_from_eg.
# Connect to neighboring 380kV bus # Connect to neighboring 380kV bus
links.loc[links.bus1=='6396', 'bus1'] = '6398' links.loc[links.bus1 == "6396", "bus1"] = "6398"
links = _remove_dangling_branches(links, buses) links = _remove_dangling_branches(links, buses)
# Add DC line parameters # Add DC line parameters
links['carrier'] = 'DC' links["carrier"] = "DC"
return links return links
@ -181,15 +216,21 @@ def _add_links_from_tyndp(buses, links, links_tyndp, europe_shape):
links_tyndp = pd.read_csv(links_tyndp) links_tyndp = pd.read_csv(links_tyndp)
# remove all links from list which lie outside all of the desired countries # remove all links from list which lie outside all of the desired countries
europe_shape = gpd.read_file(europe_shape).loc[0, 'geometry'] europe_shape = gpd.read_file(europe_shape).loc[0, "geometry"]
europe_shape_prepped = shapely.prepared.prep(europe_shape) europe_shape_prepped = shapely.prepared.prep(europe_shape)
x1y1_in_europe_b = links_tyndp[['x1', 'y1']].apply(lambda p: europe_shape_prepped.contains(Point(p)), axis=1) x1y1_in_europe_b = links_tyndp[["x1", "y1"]].apply(
x2y2_in_europe_b = links_tyndp[['x2', 'y2']].apply(lambda p: europe_shape_prepped.contains(Point(p)), axis=1) lambda p: europe_shape_prepped.contains(Point(p)), axis=1
)
x2y2_in_europe_b = links_tyndp[["x2", "y2"]].apply(
lambda p: europe_shape_prepped.contains(Point(p)), axis=1
)
is_within_covered_countries_b = x1y1_in_europe_b & x2y2_in_europe_b is_within_covered_countries_b = x1y1_in_europe_b & x2y2_in_europe_b
if not is_within_covered_countries_b.all(): if not is_within_covered_countries_b.all():
logger.info("TYNDP links outside of the covered area (skipping): " + logger.info(
", ".join(links_tyndp.loc[~ is_within_covered_countries_b, "Name"])) "TYNDP links outside of the covered area (skipping): "
+ ", ".join(links_tyndp.loc[~is_within_covered_countries_b, "Name"])
)
links_tyndp = links_tyndp.loc[is_within_covered_countries_b] links_tyndp = links_tyndp.loc[is_within_covered_countries_b]
if links_tyndp.empty: if links_tyndp.empty:
@ -197,25 +238,32 @@ def _add_links_from_tyndp(buses, links, links_tyndp, europe_shape):
has_replaces_b = links_tyndp.replaces.notnull() has_replaces_b = links_tyndp.replaces.notnull()
oids = dict(Bus=_get_oid(buses), Link=_get_oid(links)) oids = dict(Bus=_get_oid(buses), Link=_get_oid(links))
keep_b = dict(Bus=pd.Series(True, index=buses.index), keep_b = dict(
Link=pd.Series(True, index=links.index)) Bus=pd.Series(True, index=buses.index), Link=pd.Series(True, index=links.index)
for reps in links_tyndp.loc[has_replaces_b, 'replaces']: )
for comps in reps.split(':'): for reps in links_tyndp.loc[has_replaces_b, "replaces"]:
oids_to_remove = comps.split('.') for comps in reps.split(":"):
oids_to_remove = comps.split(".")
c = oids_to_remove.pop(0) c = oids_to_remove.pop(0)
keep_b[c] &= ~oids[c].isin(oids_to_remove) keep_b[c] &= ~oids[c].isin(oids_to_remove)
buses = buses.loc[keep_b['Bus']] buses = buses.loc[keep_b["Bus"]]
links = links.loc[keep_b['Link']] links = links.loc[keep_b["Link"]]
links_tyndp["j"] = _find_closest_links(links, links_tyndp, distance_upper_bound=0.20) links_tyndp["j"] = _find_closest_links(
links, links_tyndp, distance_upper_bound=0.20
)
# Corresponds approximately to 20km tolerances # Corresponds approximately to 20km tolerances
if links_tyndp["j"].notnull().any(): if links_tyndp["j"].notnull().any():
logger.info("TYNDP links already in the dataset (skipping): " + ", ".join(links_tyndp.loc[links_tyndp["j"].notnull(), "Name"])) logger.info(
"TYNDP links already in the dataset (skipping): "
+ ", ".join(links_tyndp.loc[links_tyndp["j"].notnull(), "Name"])
)
links_tyndp = links_tyndp.loc[links_tyndp["j"].isnull()] links_tyndp = links_tyndp.loc[links_tyndp["j"].isnull()]
if links_tyndp.empty: return buses, links if links_tyndp.empty:
return buses, links
tree = spatial.KDTree(buses[['x', 'y']]) tree = spatial.KDTree(buses[["x", "y"]])
_, ind0 = tree.query(links_tyndp[["x1", "y1"]]) _, ind0 = tree.query(links_tyndp[["x1", "y1"]])
ind0_b = ind0 < len(buses) ind0_b = ind0 < len(buses)
links_tyndp.loc[ind0_b, "bus0"] = buses.index[ind0[ind0_b]] links_tyndp.loc[ind0_b, "bus0"] = buses.index[ind0[ind0_b]]
@ -224,24 +272,42 @@ def _add_links_from_tyndp(buses, links, links_tyndp, europe_shape):
ind1_b = ind1 < len(buses) ind1_b = ind1 < len(buses)
links_tyndp.loc[ind1_b, "bus1"] = buses.index[ind1[ind1_b]] links_tyndp.loc[ind1_b, "bus1"] = buses.index[ind1[ind1_b]]
links_tyndp_located_b = links_tyndp["bus0"].notnull() & links_tyndp["bus1"].notnull() links_tyndp_located_b = (
links_tyndp["bus0"].notnull() & links_tyndp["bus1"].notnull()
)
if not links_tyndp_located_b.all(): if not links_tyndp_located_b.all():
logger.warning("Did not find connected buses for TYNDP links (skipping): " + ", ".join(links_tyndp.loc[~links_tyndp_located_b, "Name"])) logger.warning(
"Did not find connected buses for TYNDP links (skipping): "
+ ", ".join(links_tyndp.loc[~links_tyndp_located_b, "Name"])
)
links_tyndp = links_tyndp.loc[links_tyndp_located_b] links_tyndp = links_tyndp.loc[links_tyndp_located_b]
logger.info("Adding the following TYNDP links: " + ", ".join(links_tyndp["Name"])) logger.info("Adding the following TYNDP links: " + ", ".join(links_tyndp["Name"]))
links_tyndp = links_tyndp[["bus0", "bus1"]].assign( links_tyndp = links_tyndp[["bus0", "bus1"]].assign(
carrier='DC', carrier="DC",
p_nom=links_tyndp["Power (MW)"], p_nom=links_tyndp["Power (MW)"],
length=links_tyndp["Length (given) (km)"].fillna(links_tyndp["Length (distance*1.2) (km)"]), length=links_tyndp["Length (given) (km)"].fillna(
links_tyndp["Length (distance*1.2) (km)"]
),
under_construction=True, under_construction=True,
underground=False, underground=False,
geometry=(links_tyndp[["x1", "y1", "x2", "y2"]] geometry=(
.apply(lambda s: str(LineString([[s.x1, s.y1], [s.x2, s.y2]])), axis=1)), links_tyndp[["x1", "y1", "x2", "y2"]].apply(
tags=('"name"=>"' + links_tyndp["Name"] + '", ' + lambda s: str(LineString([[s.x1, s.y1], [s.x2, s.y2]])), axis=1
'"ref"=>"' + links_tyndp["Ref"] + '", ' + )
'"status"=>"' + links_tyndp["status"] + '"') ),
tags=(
'"name"=>"'
+ links_tyndp["Name"]
+ '", '
+ '"ref"=>"'
+ links_tyndp["Ref"]
+ '", '
+ '"status"=>"'
+ links_tyndp["status"]
+ '"'
),
) )
links_tyndp.index = "T" + links_tyndp.index.astype(str) links_tyndp.index = "T" + links_tyndp.index.astype(str)
@ -252,13 +318,25 @@ def _add_links_from_tyndp(buses, links, links_tyndp, europe_shape):
def _load_lines_from_eg(buses, eg_lines): def _load_lines_from_eg(buses, eg_lines):
lines = (pd.read_csv(eg_lines, quotechar="'", true_values=['t'], false_values=['f'], lines = (
dtype=dict(line_id='str', bus0='str', bus1='str', pd.read_csv(
underground="bool", under_construction="bool")) eg_lines,
.set_index('line_id') quotechar="'",
.rename(columns=dict(voltage='v_nom', circuits='num_parallel'))) true_values=["t"],
false_values=["f"],
dtype=dict(
line_id="str",
bus0="str",
bus1="str",
underground="bool",
under_construction="bool",
),
)
.set_index("line_id")
.rename(columns=dict(voltage="v_nom", circuits="num_parallel"))
)
lines['length'] /= 1e3 lines["length"] /= 1e3
lines = _remove_dangling_branches(lines, buses) lines = _remove_dangling_branches(lines, buses)
@ -269,18 +347,20 @@ def _apply_parameter_corrections(n, parameter_corrections):
with open(parameter_corrections) as f: with open(parameter_corrections) as f:
corrections = yaml.safe_load(f) corrections = yaml.safe_load(f)
if corrections is None: return if corrections is None:
return
for component, attrs in corrections.items(): for component, attrs in corrections.items():
df = n.df(component) df = n.df(component)
oid = _get_oid(df) oid = _get_oid(df)
if attrs is None: continue if attrs is None:
continue
for attr, repls in attrs.items(): for attr, repls in attrs.items():
for i, r in repls.items(): for i, r in repls.items():
if i == 'oid': if i == "oid":
r = oid.map(repls["oid"]).dropna() r = oid.map(repls["oid"]).dropna()
elif i == 'index': elif i == "index":
r = pd.Series(repls["index"]) r = pd.Series(repls["index"])
else: else:
raise NotImplementedError() raise NotImplementedError()
@ -289,78 +369,87 @@ def _apply_parameter_corrections(n, parameter_corrections):
def _set_electrical_parameters_lines(lines, config): def _set_electrical_parameters_lines(lines, config):
v_noms = config['electricity']['voltages'] v_noms = config["electricity"]["voltages"]
linetypes = config['lines']['types'] linetypes = config["lines"]["types"]
for v_nom in v_noms: for v_nom in v_noms:
lines.loc[lines["v_nom"] == v_nom, 'type'] = linetypes[v_nom] lines.loc[lines["v_nom"] == v_nom, "type"] = linetypes[v_nom]
lines['s_max_pu'] = config['lines']['s_max_pu'] lines["s_max_pu"] = config["lines"]["s_max_pu"]
return lines return lines
def _set_lines_s_nom_from_linetypes(n): def _set_lines_s_nom_from_linetypes(n):
n.lines['s_nom'] = ( n.lines["s_nom"] = (
np.sqrt(3) * n.lines['type'].map(n.line_types.i_nom) * np.sqrt(3)
n.lines['v_nom'] * n.lines.num_parallel * n.lines["type"].map(n.line_types.i_nom)
* n.lines["v_nom"]
* n.lines.num_parallel
) )
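The s_nom assignment follows the usual three-phase apparent power relation S = sqrt(3) * I * V per circuit, scaled by the number of parallel circuits. A back-of-the-envelope check with assumed numbers (not values from the PyPSA line-type table):

import numpy as np

i_nom = 2.0        # kA per circuit, assumed
v_nom = 380.0      # kV
num_parallel = 2

s_nom = np.sqrt(3) * i_nom * v_nom * num_parallel  # kA * kV = MVA
print(round(s_nom))                                # ~2633 MVA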
def _set_electrical_parameters_links(links, config, links_p_nom): def _set_electrical_parameters_links(links, config, links_p_nom):
if links.empty: return links if links.empty:
return links
p_max_pu = config['links'].get('p_max_pu', 1.) p_max_pu = config["links"].get("p_max_pu", 1.0)
links['p_max_pu'] = p_max_pu links["p_max_pu"] = p_max_pu
links['p_min_pu'] = -p_max_pu links["p_min_pu"] = -p_max_pu
links_p_nom = pd.read_csv(links_p_nom) links_p_nom = pd.read_csv(links_p_nom)
# filter links that are not in operation anymore # filter links that are not in operation anymore
removed_b = links_p_nom.Remarks.str.contains('Shut down|Replaced', na=False) removed_b = links_p_nom.Remarks.str.contains("Shut down|Replaced", na=False)
links_p_nom = links_p_nom[~removed_b] links_p_nom = links_p_nom[~removed_b]
# find closest link for all links in links_p_nom # find closest link for all links in links_p_nom
links_p_nom['j'] = _find_closest_links(links, links_p_nom) links_p_nom["j"] = _find_closest_links(links, links_p_nom)
links_p_nom = links_p_nom.groupby(['j'],as_index=False).agg({'Power (MW)': 'sum'}) links_p_nom = links_p_nom.groupby(["j"], as_index=False).agg({"Power (MW)": "sum"})
p_nom = links_p_nom.dropna(subset=["j"]).set_index("j")["Power (MW)"] p_nom = links_p_nom.dropna(subset=["j"]).set_index("j")["Power (MW)"]
# Don't update p_nom if it's already set # Don't update p_nom if it's already set
p_nom_unset = p_nom.drop(links.index[links.p_nom.notnull()], errors='ignore') if "p_nom" in links else p_nom p_nom_unset = (
p_nom.drop(links.index[links.p_nom.notnull()], errors="ignore")
if "p_nom" in links
else p_nom
)
links.loc[p_nom_unset.index, "p_nom"] = p_nom_unset links.loc[p_nom_unset.index, "p_nom"] = p_nom_unset
return links return links
def _set_electrical_parameters_converters(converters, config): def _set_electrical_parameters_converters(converters, config):
p_max_pu = config['links'].get('p_max_pu', 1.) p_max_pu = config["links"].get("p_max_pu", 1.0)
converters['p_max_pu'] = p_max_pu converters["p_max_pu"] = p_max_pu
converters['p_min_pu'] = -p_max_pu converters["p_min_pu"] = -p_max_pu
converters['p_nom'] = 2000 converters["p_nom"] = 2000
# Converters are combined with links # Converters are combined with links
converters['under_construction'] = False converters["under_construction"] = False
converters['underground'] = False converters["underground"] = False
return converters return converters
def _set_electrical_parameters_transformers(transformers, config): def _set_electrical_parameters_transformers(transformers, config):
config = config['transformers'] config = config["transformers"]
## Add transformer parameters ## Add transformer parameters
transformers["x"] = config.get('x', 0.1) transformers["x"] = config.get("x", 0.1)
transformers["s_nom"] = config.get('s_nom', 2000) transformers["s_nom"] = config.get("s_nom", 2000)
transformers['type'] = config.get('type', '') transformers["type"] = config.get("type", "")
return transformers return transformers
def _remove_dangling_branches(branches, buses): def _remove_dangling_branches(branches, buses):
return pd.DataFrame(branches.loc[branches.bus0.isin(buses.index) & branches.bus1.isin(buses.index)]) return pd.DataFrame(
branches.loc[branches.bus0.isin(buses.index) & branches.bus1.isin(buses.index)]
)
def _remove_unconnected_components(network): def _remove_unconnected_components(network):
@ -370,44 +459,62 @@ def _remove_unconnected_components(network):
component_sizes = component.value_counts() component_sizes = component.value_counts()
components_to_remove = component_sizes.iloc[1:] components_to_remove = component_sizes.iloc[1:]
logger.info("Removing {} unconnected network components with less than {} buses. In total {} buses." logger.info(
.format(len(components_to_remove), components_to_remove.max(), components_to_remove.sum())) "Removing {} unconnected network components with less than {} buses. In total {} buses.".format(
len(components_to_remove),
components_to_remove.max(),
components_to_remove.sum(),
)
)
return network[component == component_sizes.index[0]] return network[component == component_sizes.index[0]]
def _set_countries_and_substations(n, config, country_shapes, offshore_shapes): def _set_countries_and_substations(n, config, country_shapes, offshore_shapes):
buses = n.buses buses = n.buses
def buses_in_shape(shape): def buses_in_shape(shape):
shape = shapely.prepared.prep(shape) shape = shapely.prepared.prep(shape)
return pd.Series( return pd.Series(
np.fromiter((shape.contains(Point(x, y)) np.fromiter(
for x, y in buses.loc[:,["x", "y"]].values), (
dtype=bool, count=len(buses)), shape.contains(Point(x, y))
index=buses.index for x, y in buses.loc[:, ["x", "y"]].values
),
dtype=bool,
count=len(buses),
),
index=buses.index,
) )
countries = config['countries'] countries = config["countries"]
country_shapes = gpd.read_file(country_shapes).set_index('name')['geometry'] country_shapes = gpd.read_file(country_shapes).set_index("name")["geometry"]
offshore_shapes = gpd.read_file(offshore_shapes).set_index('name')['geometry'] # reindexing necessary for supporting empty geo-dataframes
substation_b = buses['symbol'].str.contains('substation|converter station', case=False) offshore_shapes = gpd.read_file(offshore_shapes)
offshore_shapes = offshore_shapes.reindex(columns=["name", "geometry"]).set_index(
"name"
)["geometry"]
substation_b = buses["symbol"].str.contains(
"substation|converter station", case=False
)
def prefer_voltage(x, which): def prefer_voltage(x, which):
index = x.index index = x.index
if len(index) == 1: if len(index) == 1:
return pd.Series(index, index) return pd.Series(index, index)
key = (x.index[0] key = (
if x['v_nom'].isnull().all() x.index[0]
else getattr(x['v_nom'], 'idx' + which)()) if x["v_nom"].isnull().all()
else getattr(x["v_nom"], "idx" + which)()
)
return pd.Series(key, index) return pd.Series(key, index)
gb = buses.loc[substation_b].groupby(['x', 'y'], as_index=False, gb = buses.loc[substation_b].groupby(
group_keys=False, sort=False) ["x", "y"], as_index=False, group_keys=False, sort=False
bus_map_low = gb.apply(prefer_voltage, 'min') )
bus_map_low = gb.apply(prefer_voltage, "min")
lv_b = (bus_map_low == bus_map_low.index).reindex(buses.index, fill_value=False) lv_b = (bus_map_low == bus_map_low.index).reindex(buses.index, fill_value=False)
bus_map_high = gb.apply(prefer_voltage, 'max') bus_map_high = gb.apply(prefer_voltage, "max")
hv_b = (bus_map_high == bus_map_high.index).reindex(buses.index, fill_value=False) hv_b = (bus_map_high == bus_map_high.index).reindex(buses.index, fill_value=False)
onshore_b = pd.Series(False, buses.index) onshore_b = pd.Series(False, buses.index)
@ -418,47 +525,66 @@ def _set_countries_and_substations(n, config, country_shapes, offshore_shapes):
onshore_country_b = buses_in_shape(onshore_shape) onshore_country_b = buses_in_shape(onshore_shape)
onshore_b |= onshore_country_b onshore_b |= onshore_country_b
buses.loc[onshore_country_b, 'country'] = country buses.loc[onshore_country_b, "country"] = country
if country not in offshore_shapes.index: continue if country not in offshore_shapes.index:
continue
offshore_country_b = buses_in_shape(offshore_shapes[country]) offshore_country_b = buses_in_shape(offshore_shapes[country])
offshore_b |= offshore_country_b offshore_b |= offshore_country_b
buses.loc[offshore_country_b, 'country'] = country buses.loc[offshore_country_b, "country"] = country
# Only accept buses as low-voltage substations (where load is attached), if # Only accept buses as low-voltage substations (where load is attached), if
# they have at least one connection which is not under_construction # they have at least one connection which is not under_construction
has_connections_b = pd.Series(False, index=buses.index) has_connections_b = pd.Series(False, index=buses.index)
for b, df in product(('bus0', 'bus1'), (n.lines, n.links)): for b, df in product(("bus0", "bus1"), (n.lines, n.links)):
has_connections_b |= ~ df.groupby(b).under_construction.min() has_connections_b |= ~df.groupby(b).under_construction.min()
buses['substation_lv'] = lv_b & onshore_b & (~ buses['under_construction']) & has_connections_b buses["substation_lv"] = (
buses['substation_off'] = (offshore_b | (hv_b & onshore_b)) & (~ buses['under_construction']) lv_b & onshore_b & (~buses["under_construction"]) & has_connections_b
)
buses["substation_off"] = (offshore_b | (hv_b & onshore_b)) & (
~buses["under_construction"]
)
c_nan_b = buses.country.isnull() c_nan_b = buses.country.isnull()
if c_nan_b.sum() > 0: if c_nan_b.sum() > 0:
c_tag = _get_country(buses.loc[c_nan_b]) c_tag = _get_country(buses.loc[c_nan_b])
c_tag.loc[~c_tag.isin(countries)] = np.nan c_tag.loc[~c_tag.isin(countries)] = np.nan
n.buses.loc[c_nan_b, 'country'] = c_tag n.buses.loc[c_nan_b, "country"] = c_tag
c_tag_nan_b = n.buses.country.isnull() c_tag_nan_b = n.buses.country.isnull()
# Nearest country in path length defines country of still homeless buses # Nearest country in path length defines country of still homeless buses
# Work-around until commit 705119 lands in pypsa release # Work-around until commit 705119 lands in pypsa release
n.transformers['length'] = 0. n.transformers["length"] = 0.0
graph = n.graph(weight='length') graph = n.graph(weight="length")
n.transformers.drop('length', axis=1, inplace=True) n.transformers.drop("length", axis=1, inplace=True)
for b in n.buses.index[c_tag_nan_b]: for b in n.buses.index[c_tag_nan_b]:
df = (pd.DataFrame(dict(pathlength=nx.single_source_dijkstra_path_length(graph, b, cutoff=200))) df = (
.join(n.buses.country).dropna()) pd.DataFrame(
assert not df.empty, "No buses with defined country within 200km of bus `{}`".format(b) dict(
n.buses.at[b, 'country'] = df.loc[df.pathlength.idxmin(), 'country'] pathlength=nx.single_source_dijkstra_path_length(
graph, b, cutoff=200
)
)
)
.join(n.buses.country)
.dropna()
)
assert (
not df.empty
), "No buses with defined country within 200km of bus `{}`".format(b)
n.buses.at[b, "country"] = df.loc[df.pathlength.idxmin(), "country"]
logger.warning("{} buses are not in any country or offshore shape," logger.warning(
" {} have been assigned from the tag of the entsoe map," "{} buses are not in any country or offshore shape,"
" the rest from the next bus in terms of pathlength." " {} have been assigned from the tag of the entsoe map,"
.format(c_nan_b.sum(), c_nan_b.sum() - c_tag_nan_b.sum())) " the rest from the next bus in terms of pathlength.".format(
c_nan_b.sum(), c_nan_b.sum() - c_tag_nan_b.sum()
)
)
return buses return buses
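For buses that still have no country after the shape and tag assignments, the loop above picks the country of the nearest bus in terms of network path length, using Dijkstra with a 200 km cutoff. A toy sketch of that lookup on an invented three-bus graph (bus and country names are made up):

import networkx as nx
import pandas as pd

graph = nx.Graph()
graph.add_edge("b1", "b2", length=50.0)
graph.add_edge("b2", "b3", length=80.0)           # "b3" has no country tag yet
country = pd.Series({"b1": "DE", "b2": "FR"})

lengths = nx.single_source_dijkstra_path_length(graph, "b3", cutoff=200, weight="length")
df = pd.DataFrame({"pathlength": lengths}).join(country.rename("country")).dropna()
print(df.loc[df.pathlength.idxmin(), "country"])  # "FR", since b2 is closest by path length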
@ -467,11 +593,13 @@ def _replace_b2b_converter_at_country_border_by_link(n):
# Affects only the B2B converter in Lithuania at the Polish border at the moment # Affects only the B2B converter in Lithuania at the Polish border at the moment
buscntry = n.buses.country buscntry = n.buses.country
linkcntry = n.links.bus0.map(buscntry) linkcntry = n.links.bus0.map(buscntry)
converters_i = n.links.index[(n.links.carrier == 'B2B') & (linkcntry == n.links.bus1.map(buscntry))] converters_i = n.links.index[
(n.links.carrier == "B2B") & (linkcntry == n.links.bus1.map(buscntry))
]
def findforeignbus(G, i): def findforeignbus(G, i):
cntry = linkcntry.at[i] cntry = linkcntry.at[i]
for busattr in ('bus0', 'bus1'): for busattr in ("bus0", "bus1"):
b0 = n.links.at[i, busattr] b0 = n.links.at[i, busattr]
for b1 in G[b0]: for b1 in G[b0]:
if buscntry[b1] != cntry: if buscntry[b1] != cntry:
@ -484,67 +612,93 @@ def _replace_b2b_converter_at_country_border_by_link(n):
if busattr is not None: if busattr is not None:
comp, line = next(iter(G[b0][b1])) comp, line = next(iter(G[b0][b1]))
if comp != "Line": if comp != "Line":
logger.warning("Unable to replace B2B `{}` expected a Line, but found a {}" logger.warning(
.format(i, comp)) "Unable to replace B2B `{}` expected a Line, but found a {}".format(
i, comp
)
)
continue continue
n.links.at[i, busattr] = b1 n.links.at[i, busattr] = b1
n.links.at[i, 'p_nom'] = min(n.links.at[i, 'p_nom'], n.lines.at[line, 's_nom']) n.links.at[i, "p_nom"] = min(
n.links.at[i, 'carrier'] = 'DC' n.links.at[i, "p_nom"], n.lines.at[line, "s_nom"]
n.links.at[i, 'underwater_fraction'] = 0. )
n.links.at[i, 'length'] = n.lines.at[line, 'length'] n.links.at[i, "carrier"] = "DC"
n.links.at[i, "underwater_fraction"] = 0.0
n.links.at[i, "length"] = n.lines.at[line, "length"]
n.remove("Line", line) n.remove("Line", line)
n.remove("Bus", b0) n.remove("Bus", b0)
logger.info("Replacing B2B converter `{}` together with bus `{}` and line `{}` by an HVDC tie-line {}-{}" logger.info(
.format(i, b0, line, linkcntry.at[i], buscntry.at[b1])) "Replacing B2B converter `{}` together with bus `{}` and line `{}` by an HVDC tie-line {}-{}".format(
i, b0, line, linkcntry.at[i], buscntry.at[b1]
)
)
def _set_links_underwater_fraction(n, offshore_shapes): def _set_links_underwater_fraction(n, offshore_shapes):
if n.links.empty: return if n.links.empty:
return
if not hasattr(n.links, 'geometry'): if not hasattr(n.links, "geometry"):
n.links['underwater_fraction'] = 0. n.links["underwater_fraction"] = 0.0
else: else:
offshore_shape = gpd.read_file(offshore_shapes).unary_union offshore_shape = gpd.read_file(offshore_shapes).unary_union
links = gpd.GeoSeries(n.links.geometry.dropna().map(shapely.wkt.loads)) links = gpd.GeoSeries(n.links.geometry.dropna().map(shapely.wkt.loads))
n.links['underwater_fraction'] = links.intersection(offshore_shape).length / links.length n.links["underwater_fraction"] = (
links.intersection(offshore_shape).length / links.length
)
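_set_links_underwater_fraction estimates how much of each DC link runs offshore by dividing the length of the link geometry clipped to the offshore shape by the total link length. A tiny shapely sketch with invented geometries:

from shapely.geometry import LineString, box

offshore = box(5, -1, 20, 1)            # stand-in for the offshore union
link = LineString([(0, 0), (10, 0)])    # 10 units long, the last 5 inside the box

fraction = link.intersection(offshore).length / link.length
print(fraction)  # 0.5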
def _adjust_capacities_of_under_construction_branches(n, config): def _adjust_capacities_of_under_construction_branches(n, config):
lines_mode = config['lines'].get('under_construction', 'undef') lines_mode = config["lines"].get("under_construction", "undef")
if lines_mode == 'zero': if lines_mode == "zero":
n.lines.loc[n.lines.under_construction, 'num_parallel'] = 0. n.lines.loc[n.lines.under_construction, "num_parallel"] = 0.0
n.lines.loc[n.lines.under_construction, 's_nom'] = 0. n.lines.loc[n.lines.under_construction, "s_nom"] = 0.0
elif lines_mode == 'remove': elif lines_mode == "remove":
n.mremove("Line", n.lines.index[n.lines.under_construction]) n.mremove("Line", n.lines.index[n.lines.under_construction])
elif lines_mode != 'keep': elif lines_mode != "keep":
logger.warning("Unrecognized configuration for `lines: under_construction` = `{}`. Keeping under construction lines.") logger.warning(
"Unrecognized configuration for `lines: under_construction` = `{}`. Keeping under construction lines."
)
links_mode = config['links'].get('under_construction', 'undef') links_mode = config["links"].get("under_construction", "undef")
if links_mode == 'zero': if links_mode == "zero":
n.links.loc[n.links.under_construction, "p_nom"] = 0. n.links.loc[n.links.under_construction, "p_nom"] = 0.0
elif links_mode == 'remove': elif links_mode == "remove":
n.mremove("Link", n.links.index[n.links.under_construction]) n.mremove("Link", n.links.index[n.links.under_construction])
elif links_mode != 'keep': elif links_mode != "keep":
logger.warning("Unrecognized configuration for `links: under_construction` = `{}`. Keeping under construction links.") logger.warning(
"Unrecognized configuration for `links: under_construction` = `{}`. Keeping under construction links."
)
if lines_mode == 'remove' or links_mode == 'remove': if lines_mode == "remove" or links_mode == "remove":
# We might need to remove further unconnected components # We might need to remove further unconnected components
n = _remove_unconnected_components(n) n = _remove_unconnected_components(n)
return n return n
def base_network(eg_buses, eg_converters, eg_transformers, eg_lines, eg_links, def base_network(
links_p_nom, links_tyndp, europe_shape, country_shapes, offshore_shapes, eg_buses,
parameter_corrections, config): eg_converters,
eg_transformers,
eg_lines,
eg_links,
links_p_nom,
links_tyndp,
europe_shape,
country_shapes,
offshore_shapes,
parameter_corrections,
config,
):
buses = _load_buses_from_eg(eg_buses, europe_shape, config['electricity']) buses = _load_buses_from_eg(eg_buses, europe_shape, config["electricity"])
links = _load_links_from_eg(buses, eg_links) links = _load_links_from_eg(buses, eg_links)
if config['links'].get('include_tyndp'): if config["links"].get("include_tyndp"):
buses, links = _add_links_from_tyndp(buses, links, links_tyndp, europe_shape) buses, links = _add_links_from_tyndp(buses, links, links_tyndp, europe_shape)
converters = _load_converters_from_eg(buses, eg_converters) converters = _load_converters_from_eg(buses, eg_converters)
@ -558,9 +712,9 @@ def base_network(eg_buses, eg_converters, eg_transformers, eg_lines, eg_links,
converters = _set_electrical_parameters_converters(converters, config) converters = _set_electrical_parameters_converters(converters, config)
n = pypsa.Network() n = pypsa.Network()
n.name = 'PyPSA-Eur' n.name = "PyPSA-Eur"
n.set_snapshots(pd.date_range(freq='h', **config['snapshots'])) n.set_snapshots(pd.date_range(freq="h", **config["snapshots"]))
n.import_components_from_dataframe(buses, "Bus") n.import_components_from_dataframe(buses, "Bus")
n.import_components_from_dataframe(lines, "Line") n.import_components_from_dataframe(lines, "Line")
@ -584,14 +738,28 @@ def base_network(eg_buses, eg_converters, eg_transformers, eg_lines, eg_links,
return n return n
if __name__ == "__main__": if __name__ == "__main__":
if 'snakemake' not in globals(): if "snakemake" not in globals():
from _helpers import mock_snakemake from _helpers import mock_snakemake
snakemake = mock_snakemake('base_network')
snakemake = mock_snakemake("base_network")
configure_logging(snakemake) configure_logging(snakemake)
n = base_network(snakemake.input.eg_buses, snakemake.input.eg_converters, snakemake.input.eg_transformers, snakemake.input.eg_lines, snakemake.input.eg_links, n = base_network(
snakemake.input.links_p_nom, snakemake.input.links_tyndp, snakemake.input.europe_shape, snakemake.input.country_shapes, snakemake.input.offshore_shapes, snakemake.input.eg_buses,
snakemake.input.parameter_corrections, snakemake.config) snakemake.input.eg_converters,
snakemake.input.eg_transformers,
snakemake.input.eg_lines,
snakemake.input.eg_links,
snakemake.input.links_p_nom,
snakemake.input.links_tyndp,
snakemake.input.europe_shape,
snakemake.input.country_shapes,
snakemake.input.offshore_shapes,
snakemake.input.parameter_corrections,
snakemake.config,
)
n.meta = snakemake.config
n.export_to_netcdf(snakemake.output[0]) n.export_to_netcdf(snakemake.output[0])

View File

@ -1,9 +1,11 @@
# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
# #
# SPDX-License-Identifier: MIT # SPDX-License-Identifier: MIT
""" """
Creates Voronoi shapes for each bus representing both onshore and offshore regions. Creates Voronoi shapes for each bus representing both onshore and offshore
regions.
Relevant Settings Relevant Settings
----------------- -----------------
@ -38,41 +40,94 @@ Outputs
Description Description
----------- -----------
""" """
import logging import logging
from _helpers import configure_logging
import pypsa
import os import os
import pandas as pd
import geopandas as gpd
from vresutils.graph import voronoi_partition_pts import geopandas as gpd
import numpy as np
import pandas as pd
import pypsa
from _helpers import REGION_COLS, configure_logging
from scipy.spatial import Voronoi
from shapely.geometry import Polygon
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
def save_to_geojson(s, fn): def voronoi_partition_pts(points, outline):
if os.path.exists(fn): """
os.unlink(fn) Compute the polygons of a voronoi partition of `points` within the
schema = {**gpd.io.file.infer_schema(s), 'geometry': 'Unknown'} polygon `outline`. Taken from
s.to_file(fn, driver='GeoJSON', schema=schema) https://github.com/FRESNA/vresutils/blob/master/vresutils/graph.py
Attributes
----------
points : Nx2 - ndarray[dtype=float]
outline : Polygon
Returns
-------
polygons : N - ndarray[dtype=Polygon|MultiPolygon]
"""
points = np.asarray(points)
if len(points) == 1:
polygons = [outline]
else:
xmin, ymin = np.amin(points, axis=0)
xmax, ymax = np.amax(points, axis=0)
xspan = xmax - xmin
yspan = ymax - ymin
# to avoid any network positions outside all Voronoi cells, append
# the corners of a rectangle framing these points
vor = Voronoi(
np.vstack(
(
points,
[
[xmin - 3.0 * xspan, ymin - 3.0 * yspan],
[xmin - 3.0 * xspan, ymax + 3.0 * yspan],
[xmax + 3.0 * xspan, ymin - 3.0 * yspan],
[xmax + 3.0 * xspan, ymax + 3.0 * yspan],
],
)
)
)
polygons = []
for i in range(len(points)):
poly = Polygon(vor.vertices[vor.regions[vor.point_region[i]]])
if not poly.is_valid:
poly = poly.buffer(0)
poly = poly.intersection(outline)
polygons.append(poly)
return polygons
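A possible usage sketch for the new voronoi_partition_pts helper, assuming the function is in scope (e.g. imported from build_bus_regions.py); the outline and points below are invented:

import numpy as np
from shapely.geometry import Polygon

outline = Polygon([(0, 0), (2, 0), (2, 2), (0, 2)])
points = np.array([[0.5, 0.5], [1.5, 1.5]])

cells = voronoi_partition_pts(points, outline)   # function defined above
print(round(sum(c.area for c in cells), 2))      # ~4.0: the two cells tile the 2x2 outline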
if __name__ == "__main__": if __name__ == "__main__":
if 'snakemake' not in globals(): if "snakemake" not in globals():
from _helpers import mock_snakemake from _helpers import mock_snakemake
snakemake = mock_snakemake('build_bus_regions')
snakemake = mock_snakemake("build_bus_regions")
configure_logging(snakemake) configure_logging(snakemake)
countries = snakemake.config['countries'] countries = snakemake.config["countries"]
n = pypsa.Network(snakemake.input.base_network) n = pypsa.Network(snakemake.input.base_network)
country_shapes = gpd.read_file(snakemake.input.country_shapes).set_index('name')['geometry'] country_shapes = gpd.read_file(snakemake.input.country_shapes).set_index("name")[
offshore_shapes = gpd.read_file(snakemake.input.offshore_shapes).set_index('name')['geometry'] "geometry"
]
offshore_shapes = gpd.read_file(snakemake.input.offshore_shapes)
offshore_shapes = offshore_shapes.reindex(columns=REGION_COLS).set_index("name")[
"geometry"
]
onshore_regions = [] onshore_regions = []
offshore_regions = [] offshore_regions = []
@ -82,27 +137,42 @@ if __name__ == "__main__":
onshore_shape = country_shapes[country] onshore_shape = country_shapes[country]
onshore_locs = n.buses.loc[c_b & n.buses.substation_lv, ["x", "y"]] onshore_locs = n.buses.loc[c_b & n.buses.substation_lv, ["x", "y"]]
onshore_regions.append(gpd.GeoDataFrame({ onshore_regions.append(
'name': onshore_locs.index, gpd.GeoDataFrame(
'x': onshore_locs['x'], {
'y': onshore_locs['y'], "name": onshore_locs.index,
'geometry': voronoi_partition_pts(onshore_locs.values, onshore_shape), "x": onshore_locs["x"],
'country': country "y": onshore_locs["y"],
})) "geometry": voronoi_partition_pts(
onshore_locs.values, onshore_shape
),
"country": country,
}
)
)
if country not in offshore_shapes.index: continue if country not in offshore_shapes.index:
continue
offshore_shape = offshore_shapes[country] offshore_shape = offshore_shapes[country]
offshore_locs = n.buses.loc[c_b & n.buses.substation_off, ["x", "y"]] offshore_locs = n.buses.loc[c_b & n.buses.substation_off, ["x", "y"]]
offshore_regions_c = gpd.GeoDataFrame({ offshore_regions_c = gpd.GeoDataFrame(
'name': offshore_locs.index, {
'x': offshore_locs['x'], "name": offshore_locs.index,
'y': offshore_locs['y'], "x": offshore_locs["x"],
'geometry': voronoi_partition_pts(offshore_locs.values, offshore_shape), "y": offshore_locs["y"],
'country': country "geometry": voronoi_partition_pts(offshore_locs.values, offshore_shape),
}) "country": country,
}
)
offshore_regions_c = offshore_regions_c.loc[offshore_regions_c.area > 1e-2] offshore_regions_c = offshore_regions_c.loc[offshore_regions_c.area > 1e-2]
offshore_regions.append(offshore_regions_c) offshore_regions.append(offshore_regions_c)
save_to_geojson(pd.concat(onshore_regions, ignore_index=True), snakemake.output.regions_onshore) pd.concat(onshore_regions, ignore_index=True).to_file(
snakemake.output.regions_onshore
save_to_geojson(pd.concat(offshore_regions, ignore_index=True), snakemake.output.regions_offshore) )
if offshore_regions:
pd.concat(offshore_regions, ignore_index=True).to_file(
snakemake.output.regions_offshore
)
else:
offshore_shapes.to_frame().to_file(snakemake.output.regions_offshore)

View File

@ -1,4 +1,5 @@
# SPDX-FileCopyrightText: : 2017-2021 The PyPSA-Eur Authors # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
# #
# SPDX-License-Identifier: MIT # SPDX-License-Identifier: MIT
@ -88,43 +89,42 @@ A **SARAH-2 cutout** can be used to amend the fields ``temperature``, ``influx_t
Description Description
----------- -----------
""" """
import logging import logging
import atlite import atlite
import geopandas as gpd import geopandas as gpd
import pandas as pd import pandas as pd
from _helpers import configure_logging from _helpers import configure_logging
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
if __name__ == "__main__": if __name__ == "__main__":
if 'snakemake' not in globals(): if "snakemake" not in globals():
from _helpers import mock_snakemake from _helpers import mock_snakemake
snakemake = mock_snakemake('build_cutout', cutout='europe-2013-era5')
snakemake = mock_snakemake("build_cutout", cutout="europe-2013-era5")
configure_logging(snakemake) configure_logging(snakemake)
cutout_params = snakemake.config['atlite']['cutouts'][snakemake.wildcards.cutout] cutout_params = snakemake.config["atlite"]["cutouts"][snakemake.wildcards.cutout]
snapshots = pd.date_range(freq='h', **snakemake.config['snapshots']) snapshots = pd.date_range(freq="h", **snakemake.config["snapshots"])
time = [snapshots[0], snapshots[-1]] time = [snapshots[0], snapshots[-1]]
cutout_params['time'] = slice(*cutout_params.get('time', time)) cutout_params["time"] = slice(*cutout_params.get("time", time))
if {'x', 'y', 'bounds'}.isdisjoint(cutout_params): if {"x", "y", "bounds"}.isdisjoint(cutout_params):
# Determine the bounds from bus regions with a buffer of two grid cells # Determine the bounds from bus regions with a buffer of two grid cells
onshore = gpd.read_file(snakemake.input.regions_onshore) onshore = gpd.read_file(snakemake.input.regions_onshore)
offshore = gpd.read_file(snakemake.input.regions_offshore) offshore = gpd.read_file(snakemake.input.regions_offshore)
regions = onshore.append(offshore) regions = pd.concat([onshore, offshore])
d = max(cutout_params.get('dx', 0.25), cutout_params.get('dy', 0.25))*2 d = max(cutout_params.get("dx", 0.25), cutout_params.get("dy", 0.25)) * 2
cutout_params['bounds'] = regions.total_bounds + [-d, -d, d, d] cutout_params["bounds"] = regions.total_bounds + [-d, -d, d, d]
elif {'x', 'y'}.issubset(cutout_params): elif {"x", "y"}.issubset(cutout_params):
cutout_params['x'] = slice(*cutout_params['x']) cutout_params["x"] = slice(*cutout_params["x"])
cutout_params['y'] = slice(*cutout_params['y']) cutout_params["y"] = slice(*cutout_params["y"])
logging.info(f"Preparing cutout with parameters {cutout_params}.") logging.info(f"Preparing cutout with parameters {cutout_params}.")
features = cutout_params.pop('features', None) features = cutout_params.pop("features", None)
cutout = atlite.Cutout(snakemake.output[0], **cutout_params) cutout = atlite.Cutout(snakemake.output[0], **cutout_params)
cutout.prepare(features=features) cutout.prepare(features=features)

View File

@ -1,6 +1,7 @@
#!/usr/bin/env python #!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors # SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
# #
# SPDX-License-Identifier: MIT # SPDX-License-Identifier: MIT
@ -60,36 +61,98 @@ Description
""" """
import logging import logging
from _helpers import configure_logging
import atlite import atlite
import country_converter as coco
import geopandas as gpd import geopandas as gpd
from vresutils import hydro as vhydro import pandas as pd
from _helpers import configure_logging
cc = coco.CountryConverter()
def get_eia_annual_hydro_generation(fn, countries):
# in billion kWh/a = TWh/a
df = pd.read_csv(fn, skiprows=2, index_col=1, na_values=[" ", "--"]).iloc[1:, 1:]
df.index = df.index.str.strip()
former_countries = {
"Former Czechoslovakia": dict(
countries=["Czech Republic", "Slovakia"], start=1980, end=1992
),
"Former Serbia and Montenegro": dict(
countries=["Serbia", "Montenegro"], start=1992, end=2005
),
"Former Yugoslavia": dict(
countries=[
"Slovenia",
"Croatia",
"Bosnia and Herzegovina",
"Serbia",
"Montenegro",
"North Macedonia",
],
start=1980,
end=1991,
),
}
for k, v in former_countries.items():
period = [str(i) for i in range(v["start"], v["end"] + 1)]
ratio = df.loc[v["countries"]].T.dropna().sum()
ratio /= ratio.sum()
for country in v["countries"]:
df.loc[country, period] = df.loc[k, period] * ratio[country]
baltic_states = ["Latvia", "Estonia", "Lithuania"]
df.loc[baltic_states] = (
df.loc[baltic_states].T.fillna(df.loc[baltic_states].mean(axis=1)).T
)
df.loc["Germany"] = df.filter(like="Germany", axis=0).sum()
df.loc["Serbia"] += df.loc["Kosovo"].fillna(0.0)
df = df.loc[~df.index.str.contains("Former")]
df.drop(["Europe", "Germany, West", "Germany, East", "Kosovo"], inplace=True)
df.index = cc.convert(df.index, to="iso2")
df.index.name = "countries"
df = df.T[countries] * 1e6 # in MWh/a
return df
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
if __name__ == "__main__": if __name__ == "__main__":
if 'snakemake' not in globals(): if "snakemake" not in globals():
from _helpers import mock_snakemake from _helpers import mock_snakemake
snakemake = mock_snakemake('build_hydro_profile')
snakemake = mock_snakemake("build_hydro_profile")
configure_logging(snakemake) configure_logging(snakemake)
config_hydro = snakemake.config['renewable']['hydro'] config_hydro = snakemake.config["renewable"]["hydro"]
cutout = atlite.Cutout(snakemake.input.cutout) cutout = atlite.Cutout(snakemake.input.cutout)
countries = snakemake.config['countries'] countries = snakemake.config["countries"]
country_shapes = (gpd.read_file(snakemake.input.country_shapes) country_shapes = (
.set_index('name')['geometry'].reindex(countries)) gpd.read_file(snakemake.input.country_shapes)
country_shapes.index.name = 'countries' .set_index("name")["geometry"]
.reindex(countries)
)
country_shapes.index.name = "countries"
eia_stats = vhydro.get_eia_annual_hydro_generation( fn = snakemake.input.eia_hydro_generation
snakemake.input.eia_hydro_generation).reindex(columns=countries) eia_stats = get_eia_annual_hydro_generation(fn, countries)
inflow = cutout.runoff(shapes=country_shapes,
smooth=True,
lower_threshold_quantile=True,
normalize_using_yearly=eia_stats)
if 'clip_min_inflow' in config_hydro: inflow = cutout.runoff(
inflow = inflow.where(inflow > config_hydro['clip_min_inflow'], 0) shapes=country_shapes,
smooth=True,
lower_threshold_quantile=True,
normalize_using_yearly=eia_stats,
)
if "clip_min_inflow" in config_hydro:
inflow = inflow.where(inflow > config_hydro["clip_min_inflow"], 0)
inflow.to_netcdf(snakemake.output[0]) inflow.to_netcdf(snakemake.output[0])

View File

@ -1,10 +1,16 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2020 @JanFrederickUnnewehr, The PyPSA-Eur Authors # SPDX-FileCopyrightText: : 2020 @JanFrederickUnnewehr, The PyPSA-Eur Authors
# #
# SPDX-License-Identifier: MIT # SPDX-License-Identifier: MIT
""" """
This rule downloads the load data from `Open Power System Data Time series.
This rule downloads the load data from `Open Power System Data Time series <https://data.open-power-system-data.org/time_series/>`_. For all countries in the network, the per-country load time series with suffix ``_load_actual_entsoe_transparency`` are extracted from the dataset. After filling small gaps linearly and large gaps by copying a time slice of a given period, the load data is exported to a ``.csv`` file. <https://data.open-power-system-data.org/time_series/>`_. For all countries in
the network, the per-country load time series with suffix
``_load_actual_entsoe_transparency`` are extracted from the dataset. After
filling small gaps linearly and large gaps by copying a time slice of a given
period, the load data is exported to a ``.csv`` file.
Relevant Settings Relevant Settings
----------------- -----------------
@ -26,22 +32,21 @@ Relevant Settings
Inputs Inputs
------ ------
- ``data/load_raw.csv``:
Outputs Outputs
------- -------
- ``resource/time_series_60min_singleindex_filtered.csv``: - ``resources/load.csv``:
""" """
import logging

logger = logging.getLogger(__name__)

import dateutil
import numpy as np
import pandas as pd
from _helpers import configure_logging
from pandas import Timedelta as Delta
@ -70,24 +75,29 @@ def load_timeseries(fn, years, countries, powerstatistics=True):
    """
    logger.info(f"Retrieving load data from '{fn}'.")

    pattern = "power_statistics" if powerstatistics else "transparency"
    pattern = f"_load_actual_entsoe_{pattern}"
    rename = lambda s: s[: -len(pattern)]
    date_parser = lambda x: dateutil.parser.parse(x, ignoretz=True)
    return (
        pd.read_csv(fn, index_col=0, parse_dates=[0], date_parser=date_parser)
        .filter(like=pattern)
        .rename(columns=rename)
        .dropna(how="all", axis=0)
        .rename(columns={"GB_UKM": "GB"})
        .filter(items=countries)
        .loc[years]
    )
def consecutive_nans(ds):
    return (
        ds.isnull()
        .astype(int)
        .groupby(ds.notnull().astype(int).cumsum()[ds.isnull()])
        .transform("sum")
        .fillna(0)
    )
def fill_large_gaps(ds, shift):
@ -97,94 +107,163 @@ def fill_large_gaps(ds, shift):
    """
    This function fills gaps ranging from 3 to 168 hours (one week).
    """
    shift = Delta(shift)
    nhours = shift / np.timedelta64(1, "h")
    if (consecutive_nans(ds) > nhours).any():
        logger.warning(
            "There exist gaps larger than the time shift used for "
            "copying time slices."
        )
    time_shift = pd.Series(ds.values, ds.index + shift)
    return ds.where(ds.notnull(), time_shift.reindex_like(ds))
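For orientation, a minimal, self-contained sketch of how the two gap-filling stages interact: small gaps are closed by linear interpolation, the remaining large gaps are filled by ``fill_large_gaps`` from a time-shifted slice. The synthetic sine series and the one-week shift are made up for illustration.

# Sketch only: synthetic hourly series with one small and one large gap.
import numpy as np
import pandas as pd

idx = pd.date_range("2013-01-01", periods=24 * 14, freq="h")
demo = pd.Series(np.sin(np.arange(len(idx)) * 2 * np.pi / 24) + 2, index=idx)
demo.iloc[5:7] = np.nan      # small gap, closed by linear interpolation
demo.iloc[200:250] = np.nan  # large gap, copied from one week earlier

demo = demo.interpolate(method="linear", limit=3)
demo = fill_large_gaps(demo, shift="1w")  # uses the function defined above
assert not demo.isna().any()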
def nan_statistics(df):
    def max_consecutive_nans(ds):
        return (
            ds.isnull()
            .astype(int)
            .groupby(ds.notnull().astype(int).cumsum())
            .sum()
            .max()
        )

    consecutive = df.apply(max_consecutive_nans)
    total = df.isnull().sum()
    max_total_per_month = df.isnull().resample("m").sum().max()
    return pd.concat(
        [total, consecutive, max_total_per_month],
        keys=["total", "consecutive", "max_total_per_month"],
        axis=1,
    )
def copy_timeslice(load, cntry, start, stop, delta, fn_load=None):
    start = pd.Timestamp(start)
    stop = pd.Timestamp(stop)
    if start in load.index and stop in load.index:
        if start - delta in load.index and stop - delta in load.index and cntry in load:
            load.loc[start:stop, cntry] = load.loc[
                start - delta : stop - delta, cntry
            ].values
        elif fn_load is not None:
            duration = pd.date_range(freq="h", start=start - delta, end=stop - delta)
            load_raw = load_timeseries(fn_load, duration, [cntry], powerstatistics)
            load.loc[start:stop, cntry] = load_raw.loc[
                start - delta : stop - delta, cntry
            ].values
def manual_adjustment(load, fn_load, powerstatistics, countries):
    """
    Adjust gaps manually for load data from OPSD time-series package.

    1. For the ENTSOE power statistics load data (if powerstatistics is True)

    Kosovo (KV) and Albania (AL) do not exist in the data set. Kosovo gets the
    same load curve as Serbia and Albania the same as Macedonia, both scaled
    by the corresponding ratio of total energy consumptions reported by
    IEA Data browser [0] for the year 2013.

    2. For the ENTSOE transparency load data (if powerstatistics is False)

    Albania (AL) and Macedonia (MK) do not exist in the data set. Both get the
    same load curve as Montenegro, scaled by the corresponding ratio of total energy
    consumptions reported by IEA Data browser [0] for the year 2016.

    [0] https://www.iea.org/data-and-statistics?country=WORLD&fuel=Electricity%20and%20heat&indicator=TotElecCons

    Parameters
    ----------
    load : pd.DataFrame
        Load time-series with UTC timestamps x ISO-2 countries
    powerstatistics: bool
        Whether argument load comprises the electricity consumption data of
        the ENTSOE power statistics or of the ENTSOE transparency map
    fn_load : str
        File name or url location (file format .csv)

    Returns
    -------
    load : pd.DataFrame
        Manually adjusted and interpolated load time-series with UTC
        timestamps x ISO-2 countries
    """
if powerstatistics: if powerstatistics:
if 'MK' in load.columns: if "MK" in load.columns:
if 'AL' not in load.columns or load.AL.isnull().values.all(): if "AL" not in load.columns or load.AL.isnull().values.all():
load['AL'] = load['MK'] * (4.1 / 7.4) load["AL"] = load["MK"] * (4.1 / 7.4)
if 'RS' in load.columns: if "RS" in load.columns:
if 'KV' not in load.columns or load.KV.isnull().values.all(): if "KV" not in load.columns or load.KV.isnull().values.all():
load['KV'] = load['RS'] * (4.8 / 27.) load["KV"] = load["RS"] * (4.8 / 27.0)
copy_timeslice(load, 'GR', '2015-08-11 21:00', '2015-08-15 20:00', Delta(weeks=1)) copy_timeslice(
copy_timeslice(load, 'AT', '2018-12-31 22:00', '2019-01-01 22:00', Delta(days=2)) load, "GR", "2015-08-11 21:00", "2015-08-15 20:00", Delta(weeks=1)
copy_timeslice(load, 'CH', '2010-01-19 07:00', '2010-01-19 22:00', Delta(days=1)) )
copy_timeslice(load, 'CH', '2010-03-28 00:00', '2010-03-28 21:00', Delta(days=1)) copy_timeslice(
load, "AT", "2018-12-31 22:00", "2019-01-01 22:00", Delta(days=2)
)
copy_timeslice(
load, "CH", "2010-01-19 07:00", "2010-01-19 22:00", Delta(days=1)
)
copy_timeslice(
load, "CH", "2010-03-28 00:00", "2010-03-28 21:00", Delta(days=1)
)
# is a WE, so take WE before # is a WE, so take WE before
copy_timeslice(load, 'CH', '2010-10-08 13:00', '2010-10-10 21:00', Delta(weeks=1)) copy_timeslice(
copy_timeslice(load, 'CH', '2010-11-04 04:00', '2010-11-04 22:00', Delta(days=1)) load, "CH", "2010-10-08 13:00", "2010-10-10 21:00", Delta(weeks=1)
copy_timeslice(load, 'NO', '2010-12-09 11:00', '2010-12-09 18:00', Delta(days=1)) )
copy_timeslice(
load, "CH", "2010-11-04 04:00", "2010-11-04 22:00", Delta(days=1)
)
copy_timeslice(
load, "NO", "2010-12-09 11:00", "2010-12-09 18:00", Delta(days=1)
)
# whole january missing # whole january missing
copy_timeslice(load, 'GB', '2009-12-31 23:00', '2010-01-31 23:00', Delta(days=-364)) copy_timeslice(
load,
"GB",
"2010-01-01 00:00",
"2010-01-31 23:00",
Delta(days=-365),
fn_load,
)
# 1.1. at midnight gets special treatment
copy_timeslice(
load,
"IE",
"2016-01-01 00:00",
"2016-01-01 01:00",
Delta(days=-366),
fn_load,
)
copy_timeslice(
load,
"PT",
"2016-01-01 00:00",
"2016-01-01 01:00",
Delta(days=-366),
fn_load,
)
copy_timeslice(
load,
"GB",
"2016-01-01 00:00",
"2016-01-01 01:00",
Delta(days=-366),
fn_load,
)
else: else:
if 'ME' in load: if "ME" in load:
if 'AL' not in load and 'AL' in countries: if "AL" not in load and "AL" in countries:
load['AL'] = load.ME * (5.7/2.9) load["AL"] = load.ME * (5.7 / 2.9)
if 'MK' not in load and 'MK' in countries: if "MK" not in load and "MK" in countries:
load['MK'] = load.ME * (6.7/2.9) load["MK"] = load.ME * (6.7 / 2.9)
copy_timeslice(load, 'BG', '2018-10-27 21:00', '2018-10-28 22:00', Delta(weeks=1)) copy_timeslice(
load, "BG", "2018-10-27 21:00", "2018-10-28 22:00", Delta(weeks=1)
)
if 'UA' in countries: if 'UA' in countries:
copy_timeslice(load, 'UA', '2013-01-25 14:00', '2013-01-28 21:00', Delta(weeks=1)) copy_timeslice(load, 'UA', '2013-01-25 14:00', '2013-01-28 21:00', Delta(weeks=1))
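The country scaling applied above amounts to copying a neighbouring load profile and rescaling it by the ratio of the two countries' annual consumptions. A tiny sketch with the Kosovo/Serbia ratio used in the code; the hourly values are made up:

# Sketch only: derive a missing country profile from a neighbour, scaled by
# the ratio of annual consumptions reported by the IEA (here KV from RS).
import pandas as pd

annual_consumption_twh = {"RS": 27.0, "KV": 4.8}      # values as in the code above
rs = pd.Series([4000.0, 4200.0, 3900.0], name="RS")   # made-up hourly MW values
kv = rs * (annual_consumption_twh["KV"] / annual_consumption_twh["RS"])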
@ -195,18 +274,19 @@ def manual_adjustment(load, powerstatistics, countries):
if __name__ == "__main__": if __name__ == "__main__":
if 'snakemake' not in globals(): if "snakemake" not in globals():
from _helpers import mock_snakemake from _helpers import mock_snakemake
snakemake = mock_snakemake('build_load_data')
snakemake = mock_snakemake("build_load_data")
configure_logging(snakemake) configure_logging(snakemake)
powerstatistics = snakemake.config['load']['power_statistics'] powerstatistics = snakemake.config["load"]["power_statistics"]
interpolate_limit = snakemake.config['load']['interpolate_limit'] interpolate_limit = snakemake.config["load"]["interpolate_limit"]
countries = snakemake.config['countries'] countries = snakemake.config["countries"]
snapshots = pd.date_range(freq='h', **snakemake.config['snapshots']) snapshots = pd.date_range(freq="h", **snakemake.config["snapshots"])
years = slice(snapshots[0], snapshots[-1]) years = slice(snapshots[0], snapshots[-1])
time_shift = snakemake.config['load']['time_shift_for_large_gaps'] time_shift = snakemake.config["load"]["time_shift_for_large_gaps"]
load = load_timeseries(snakemake.input[0], years, countries, powerstatistics) load = load_timeseries(snakemake.input[0], years, countries, powerstatistics)
@ -220,20 +300,21 @@ if __name__ == "__main__":
# https://www.iea.org/data-and-statistics/data-browser/?country=MOLDOVA&fuel=Energy%20consumption&indicator=TotElecCons # https://www.iea.org/data-and-statistics/data-browser/?country=MOLDOVA&fuel=Energy%20consumption&indicator=TotElecCons
load['MD'] = 6.2e6*(load_ua/load_ua.sum()) load['MD'] = 6.2e6*(load_ua/load_ua.sum())
if snakemake.config['load']['manual_adjustments']: if snakemake.config["load"]["manual_adjustments"]:
load = manual_adjustment(load, powerstatistics, countries) load = manual_adjustment(load, snakemake.input[0], powerstatistics, countries)
logger.info(f"Linearly interpolate gaps of size {interpolate_limit} and less.") logger.info(f"Linearly interpolate gaps of size {interpolate_limit} and less.")
load = load.interpolate(method='linear', limit=interpolate_limit) load = load.interpolate(method="linear", limit=interpolate_limit)
logger.info("Filling larger gaps by copying time-slices of period " logger.info(
f"'{time_shift}'.") "Filling larger gaps by copying time-slices of period " f"'{time_shift}'."
)
load = load.apply(fill_large_gaps, shift=time_shift) load = load.apply(fill_large_gaps, shift=time_shift)
assert not load.isna().any().any(), ( assert not load.isna().any().any(), (
'Load data contains nans. Adjust the parameters ' "Load data contains nans. Adjust the parameters "
'`time_shift_for_large_gaps` or modify the `manual_adjustment` function ' "`time_shift_for_large_gaps` or modify the `manual_adjustment` function "
'for implementing the needed load data modifications.') "for implementing the needed load data modifications."
)
load.to_csv(snakemake.output[0]) load.to_csv(snakemake.output[0])
@ -1,9 +1,13 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Rasters the vector data of the `Natura 2000
<https://en.wikipedia.org/wiki/Natura_2000>`_ natural protection areas onto all
cutout regions.

Relevant Settings
-----------------

@ -36,15 +40,14 @@ Outputs

Description
-----------
"""
import logging

import atlite
import geopandas as gpd
import rasterio as rio
from _helpers import configure_logging
from rasterio.features import geometry_mask
from rasterio.warp import transform_bounds
@ -56,11 +59,11 @@ def determine_cutout_xXyY(cutout_name):
    assert cutout.crs.to_epsg() == 4326
    x, X, y, Y = cutout.extent
    dx, dy = cutout.dx, cutout.dy
    return [x - dx / 2.0, X + dx / 2.0, y - dy / 2.0, Y + dy / 2.0]


def get_transform_and_shape(bounds, res):
    left, bottom = [(b // res) * res for b in bounds[:2]]
    right, top = [(b // res + 1) * res for b in bounds[2:]]
    shape = int((top - bottom) // res), int((right - left) / res)
    transform = rio.Affine(res, 0, left, 0, -res, top)
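As a rough, self-contained sketch of what the main routine below does with these helpers: the protection-area shapes are reprojected to EPSG:3035 and burnt into a boolean raster with ``rasterio.features.geometry_mask``, assuming ``get_transform_and_shape`` returns the affine transform and raster shape as used below. The file name and the small toy extent are placeholders.

# Sketch (placeholder file name, toy extent in EPSG:3035 metres).
import geopandas as gpd
import rasterio as rio
from rasterio.features import geometry_mask

shapes = gpd.read_file("natura.gpkg").to_crs(3035)  # placeholder input
bounds = (4500000, 2600000, 4600000, 2700000)       # (left, bottom, right, top)
transform, out_shape = get_transform_and_shape(bounds, res=100)
raster = (~geometry_mask(shapes.geometry, out_shape, transform)).astype(rio.uint8)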
@ -68,25 +71,32 @@ def get_transform_and_shape(bounds, res):
if __name__ == "__main__": if __name__ == "__main__":
if 'snakemake' not in globals(): if "snakemake" not in globals():
from _helpers import mock_snakemake from _helpers import mock_snakemake
snakemake = mock_snakemake('build_natura_raster')
snakemake = mock_snakemake("build_natura_raster")
configure_logging(snakemake) configure_logging(snakemake)
paths, config, wildcards, logs, out = retrieve_snakemake_keys(snakemake) cutouts = snakemake.input.cutouts
cutouts = paths.cutouts
xs, Xs, ys, Ys = zip(*(determine_cutout_xXyY(cutout) for cutout in cutouts)) xs, Xs, ys, Ys = zip(*(determine_cutout_xXyY(cutout) for cutout in cutouts))
bounds = transform_bounds(4326, 3035, min(xs), min(ys), max(Xs), max(Ys)) bounds = transform_bounds(4326, 3035, min(xs), min(ys), max(Xs), max(Ys))
transform, out_shape = get_transform_and_shape(bounds, res=100) transform, out_shape = get_transform_and_shape(bounds, res=100)
# adjusted boundaries # adjusted boundaries
shapes = gpd.read_file(paths.natura).to_crs(3035) shapes = gpd.read_file(snakemake.input.natura).to_crs(3035)
raster = ~geometry_mask(shapes.geometry, out_shape[::-1], transform) raster = ~geometry_mask(shapes.geometry, out_shape, transform)
raster = raster.astype(rio.uint8) raster = raster.astype(rio.uint8)
with rio.open(out[0], 'w', driver='GTiff', dtype=rio.uint8, with rio.open(
count=1, transform=transform, crs=3035, compress='lzw', snakemake.output[0],
width=raster.shape[1], height=raster.shape[0]) as dst: "w",
driver="GTiff",
dtype=rio.uint8,
count=1,
transform=transform,
crs=3035,
compress="lzw",
width=raster.shape[1],
height=raster.shape[0],
) as dst:
dst.write(raster, indexes=1) dst.write(raster, indexes=1)
@ -1,10 +1,15 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT

# coding: utf-8
"""
Retrieves conventional powerplant capacities and locations from
`powerplantmatching <https://github.com/FRESNA/powerplantmatching>`_, assigns
these to buses and creates a ``.csv`` file. It is possible to amend the
powerplant database with custom entries provided in
``data/custom_powerplants.csv``.

Relevant Settings
-----------------

@ -68,18 +73,15 @@ The configuration options ``electricity: powerplants_filter`` and ``electricity:
    powerplants_filter: Country not in ['Germany'] and YearCommissioned <= 2015
    custom_powerplants: YearCommissioned <= 2015
"""
import logging

import pandas as pd
import powerplantmatching as pm
import pypsa
from _helpers import configure_logging
from powerplantmatching.export import map_country_bus

logger = logging.getLogger(__name__)
@ -87,56 +89,78 @@ logger = logging.getLogger(__name__)
def add_custom_powerplants(ppl, custom_powerplants, custom_ppl_query=False): def add_custom_powerplants(ppl, custom_powerplants, custom_ppl_query=False):
if not custom_ppl_query: if not custom_ppl_query:
return ppl return ppl
add_ppls = pd.read_csv(custom_powerplants, index_col=0, add_ppls = pd.read_csv(custom_powerplants, index_col=0, dtype={"bus": "str"})
dtype={'bus': 'str'})
if isinstance(custom_ppl_query, str): if isinstance(custom_ppl_query, str):
add_ppls.query(custom_ppl_query, inplace=True) add_ppls.query(custom_ppl_query, inplace=True)
return pd.concat([ppl, add_ppls], sort=False, ignore_index=True, verify_integrity=True) return pd.concat(
[ppl, add_ppls], sort=False, ignore_index=True, verify_integrity=True
)
def replace_natural_gas_technology(df):
mapping = {"Steam Turbine": "OCGT", "Combustion Engine": "OCGT"}
tech = df.Technology.replace(mapping).fillna("OCGT")
return df.Technology.where(df.Fueltype != "Natural Gas", tech)
def replace_natural_gas_fueltype(df):
return df.Fueltype.where(df.Fueltype != "Natural Gas", df.Technology)
if __name__ == "__main__": if __name__ == "__main__":
if 'snakemake' not in globals(): if "snakemake" not in globals():
from _helpers import mock_snakemake from _helpers import mock_snakemake
snakemake = mock_snakemake('build_powerplants')
snakemake = mock_snakemake("build_powerplants")
configure_logging(snakemake) configure_logging(snakemake)
n = pypsa.Network(snakemake.input.base_network) n = pypsa.Network(snakemake.input.base_network)
countries = n.buses.country.unique() countries = n.buses.country.unique()
ppl = (pm.powerplants(from_url=True) ppl = (
.powerplant.fill_missing_decommyears() pm.powerplants(from_url=True)
.powerplant.convert_country_to_alpha2() .powerplant.fill_missing_decommissioning_years()
.query('Fueltype not in ["Solar", "Wind"] and Country in @countries') .powerplant.convert_country_to_alpha2()
.replace({'Technology': {'Steam Turbine': 'OCGT'}}) .query('Fueltype not in ["Solar", "Wind"] and Country in @countries')
.assign(Fueltype=lambda df: ( .assign(Technology=replace_natural_gas_technology)
df.Fueltype .assign(Fueltype=replace_natural_gas_fueltype)
.where(df.Fueltype != 'Natural Gas', )
df.Technology.replace('Steam Turbine',
'OCGT').fillna('OCGT')))))
ppl_query = snakemake.config['electricity']['powerplants_filter'] # Correct bioenergy for countries where possible
opsd = pm.data.OPSD_VRE().powerplant.convert_country_to_alpha2()
opsd = opsd.query('Country in @countries and Fueltype == "Bioenergy"')
opsd["Name"] = "Biomass"
available_countries = opsd.Country.unique()
ppl = ppl.query('not (Country in @available_countries and Fueltype == "Bioenergy")')
ppl = pd.concat([ppl, opsd])
ppl_query = snakemake.config["electricity"]["powerplants_filter"]
if isinstance(ppl_query, str): if isinstance(ppl_query, str):
ppl.query(ppl_query, inplace=True) ppl.query(ppl_query, inplace=True)
# add carriers from own powerplant files: # add carriers from own powerplant files:
custom_ppl_query = snakemake.config['electricity']['custom_powerplants'] custom_ppl_query = snakemake.config["electricity"]["custom_powerplants"]
ppl = add_custom_powerplants(ppl, snakemake.input.custom_powerplants, custom_ppl_query) ppl = add_custom_powerplants(
ppl, snakemake.input.custom_powerplants, custom_ppl_query
)
cntries_without_ppl = [c for c in countries if c not in ppl.Country.unique()] countries_wo_ppl = set(countries) - set(ppl.Country.unique())
if countries_wo_ppl:
logging.warning(f"No powerplants known in: {', '.join(countries_wo_ppl)}")
for c in countries: substations = n.buses.query("substation_lv")
substation_i = n.buses.query('substation_lv and country == @c').index ppl = map_country_bus(ppl, substations)
kdtree = KDTree(n.buses.loc[substation_i, ['x','y']].values)
ppl_i = ppl.query('Country == @c').index
tree_i = kdtree.query(ppl.loc[ppl_i, ['lon','lat']].values)[1]
ppl.loc[ppl_i, 'bus'] = substation_i.append(pd.Index([np.nan]))[tree_i]
if cntries_without_ppl:
logging.warning(f"No powerplants known in: {', '.join(cntries_without_ppl)}")
bus_null_b = ppl["bus"].isnull() bus_null_b = ppl["bus"].isnull()
if bus_null_b.any(): if bus_null_b.any():
logging.warning(f"Couldn't find close bus for {bus_null_b.sum()} powerplants") logging.warning(
f"Couldn't find close bus for {bus_null_b.sum()} powerplants. "
"Removing them from the powerplants list."
)
ppl = ppl[~bus_null_b]
ppl.to_csv(snakemake.output[0]) # TODO: This has to fixed in PPM, some powerplants are still duplicated
cumcount = ppl.groupby(["bus", "Fueltype"]).cumcount() + 1
ppl.Name = ppl.Name.where(cumcount == 1, ppl.Name + " " + cumcount.astype(str))
ppl.reset_index(drop=True).to_csv(snakemake.output[0])
@ -1,15 +1,17 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Calculates for each network node the (i) installable capacity (based on
land-use), (ii) the available generation time series (based on weather data),
and (iii) the average distance from the node for onshore wind, AC-connected
offshore wind, DC-connected offshore wind and solar PV generators. In addition,
for offshore wind it calculates the fraction of the grid connection which is
under water.

.. note:: Hydroelectric profiles are built in script :mod:`build_hydro_profiles`.

@ -177,86 +179,109 @@ node (`p_nom_max`): ``simple`` and ``conservative``:

    - ``conservative`` ascertains the nodal limit by increasing capacities
      proportional to the layout until the limit of an individual grid cell is
      reached.
"""
import functools
import logging
import time

import atlite
import geopandas as gpd
import numpy as np
import progressbar as pgb
import xarray as xr
from _helpers import configure_logging
from dask.distributed import Client, LocalCluster
from pypsa.geo import haversine
from shapely.geometry import LineString

logger = logging.getLogger(__name__)
if __name__ == '__main__': if __name__ == "__main__":
if 'snakemake' not in globals(): if "snakemake" not in globals():
from _helpers import mock_snakemake from _helpers import mock_snakemake
snakemake = mock_snakemake('build_renewable_profiles', technology='solar')
snakemake = mock_snakemake("build_renewable_profiles", technology="solar")
configure_logging(snakemake) configure_logging(snakemake)
pgb.streams.wrap_stderr() pgb.streams.wrap_stderr()
nprocesses = snakemake.config['atlite'].get('nprocesses') nprocesses = int(snakemake.threads)
noprogress = not snakemake.config['atlite'].get('show_progress', True) noprogress = not snakemake.config["atlite"].get("show_progress", False)
config = snakemake.config['renewable'][snakemake.wildcards.technology] config = snakemake.config["renewable"][snakemake.wildcards.technology]
resource = config['resource'] # pv panel config / wind turbine config resource = config["resource"] # pv panel config / wind turbine config
correction_factor = config.get('correction_factor', 1.) correction_factor = config.get("correction_factor", 1.0)
capacity_per_sqkm = config['capacity_per_sqkm'] capacity_per_sqkm = config["capacity_per_sqkm"]
p_nom_max_meth = config.get('potential', 'conservative') p_nom_max_meth = config.get("potential", "conservative")
if isinstance(config.get("corine", {}), list): if isinstance(config.get("corine", {}), list):
config['corine'] = {'grid_codes': config['corine']} config["corine"] = {"grid_codes": config["corine"]}
if correction_factor != 1.: if correction_factor != 1.0:
logger.info(f'correction_factor is set as {correction_factor}') logger.info(f"correction_factor is set as {correction_factor}")
cluster = LocalCluster(n_workers=nprocesses, threads_per_worker=1)
client = Client(cluster, asynchronous=True)
cutout = atlite.Cutout(snakemake.input['cutout']) cutout = atlite.Cutout(snakemake.input["cutout"])
regions = gpd.read_file(snakemake.input.regions).set_index('name').rename_axis('bus') regions = gpd.read_file(snakemake.input.regions)
assert not regions.empty, (
f"List of regions in {snakemake.input.regions} is empty, please "
"disable the corresponding renewable technology"
)
# do not pull up, set_index does not work if geo dataframe is empty
regions = regions.set_index("name").rename_axis("bus")
buses = regions.index buses = regions.index
excluder = atlite.ExclusionContainer(crs=3035, res=100) res = config.get("excluder_resolution", 100)
excluder = atlite.ExclusionContainer(crs=3035, res=res)
if config['natura']: if config["natura"]:
excluder.add_raster(snakemake.input.natura, nodata=0, allow_no_overlap=True) excluder.add_raster(snakemake.input.natura, nodata=0, allow_no_overlap=True)
corine = config.get("corine", {}) corine = config.get("corine", {})
if "grid_codes" in corine: if "grid_codes" in corine:
codes = corine["grid_codes"] codes = corine["grid_codes"]
excluder.add_raster(snakemake.input.corine, codes=codes, invert=True, crs=3035) excluder.add_raster(snakemake.input.corine, codes=codes, invert=True, crs=3035)
if corine.get("distance", 0.) > 0.: if corine.get("distance", 0.0) > 0.0:
codes = corine["distance_grid_codes"] codes = corine["distance_grid_codes"]
buffer = corine["distance"] buffer = corine["distance"]
excluder.add_raster(snakemake.input.corine, codes=codes, buffer=buffer, crs=3035) excluder.add_raster(
snakemake.input.corine, codes=codes, buffer=buffer, crs=3035
)
if "ship_threshold" in config:
shipping_threshold = (
config["ship_threshold"] * 8760 * 6
        )  # approximation: the underlying raster covers 6 years of hourly collected data
func = functools.partial(np.less, shipping_threshold)
excluder.add_raster(
snakemake.input.ship_density, codes=func, crs=4326, allow_no_overlap=True
)
if "max_depth" in config: if "max_depth" in config:
# lambda not supported for atlite + multiprocessing # lambda not supported for atlite + multiprocessing
# use named function np.greater with partially frozen argument instead # use named function np.greater with partially frozen argument instead
# and exclude areas where: -max_depth > grid cell depth # and exclude areas where: -max_depth > grid cell depth
func = functools.partial(np.greater,-config['max_depth']) func = functools.partial(np.greater, -config["max_depth"])
excluder.add_raster(snakemake.input.gebco, codes=func, crs=4236, nodata=-1000) excluder.add_raster(snakemake.input.gebco, codes=func, crs=4326, nodata=-1000)
if 'min_shore_distance' in config: if "min_shore_distance" in config:
buffer = config['min_shore_distance'] buffer = config["min_shore_distance"]
excluder.add_geometry(snakemake.input.country_shapes, buffer=buffer) excluder.add_geometry(snakemake.input.country_shapes, buffer=buffer)
if 'max_shore_distance' in config: if "max_shore_distance" in config:
buffer = config['max_shore_distance'] buffer = config["max_shore_distance"]
excluder.add_geometry(snakemake.input.country_shapes, buffer=buffer, invert=True) excluder.add_geometry(
snakemake.input.country_shapes, buffer=buffer, invert=True
)
kwargs = dict(nprocesses=nprocesses, disable_progressbar=noprogress) kwargs = dict(nprocesses=nprocesses, disable_progressbar=noprogress)
if noprogress: if noprogress:
logger.info('Calculate landuse availabilities...') logger.info("Calculate landuse availabilities...")
start = time.time() start = time.time()
availability = cutout.availabilitymatrix(regions, excluder, **kwargs) availability = cutout.availabilitymatrix(regions, excluder, **kwargs)
duration = time.time() - start duration = time.time() - start
logger.info(f'Completed availability calculation ({duration:2.2f}s)') logger.info(f"Completed availability calculation ({duration:2.2f}s)")
else: else:
availability = cutout.availabilitymatrix(regions, excluder, **kwargs) availability = cutout.availabilitymatrix(regions, excluder, **kwargs)
@ -267,35 +292,41 @@ if __name__ == '__main__':
availability.loc[availability_MDUA.coords] = availability_MDUA availability.loc[availability_MDUA.coords] = availability_MDUA
area = cutout.grid.to_crs(3035).area / 1e6 area = cutout.grid.to_crs(3035).area / 1e6
area = xr.DataArray(area.values.reshape(cutout.shape), area = xr.DataArray(
[cutout.coords['y'], cutout.coords['x']]) area.values.reshape(cutout.shape), [cutout.coords["y"], cutout.coords["x"]]
)
potential = capacity_per_sqkm * availability.sum('bus') * area potential = capacity_per_sqkm * availability.sum("bus") * area
func = getattr(cutout, resource.pop('method')) func = getattr(cutout, resource.pop("method"))
resource['dask_kwargs'] = {'num_workers': nprocesses} resource["dask_kwargs"] = {"scheduler": client}
capacity_factor = correction_factor * func(capacity_factor=True, **resource) capacity_factor = correction_factor * func(capacity_factor=True, **resource)
layout = capacity_factor * area * capacity_per_sqkm layout = capacity_factor * area * capacity_per_sqkm
profile, capacities = func(matrix=availability.stack(spatial=['y','x']), profile, capacities = func(
layout=layout, index=buses, matrix=availability.stack(spatial=["y", "x"]),
per_unit=True, return_capacity=True, **resource) layout=layout,
index=buses,
per_unit=True,
return_capacity=True,
**resource,
)
logger.info(f"Calculating maximal capacity per bus (method '{p_nom_max_meth}')") logger.info(f"Calculating maximal capacity per bus (method '{p_nom_max_meth}')")
if p_nom_max_meth == 'simple': if p_nom_max_meth == "simple":
p_nom_max = capacity_per_sqkm * availability @ area p_nom_max = capacity_per_sqkm * availability @ area
elif p_nom_max_meth == 'conservative': elif p_nom_max_meth == "conservative":
max_cap_factor = capacity_factor.where(availability!=0).max(['x', 'y']) max_cap_factor = capacity_factor.where(availability != 0).max(["x", "y"])
p_nom_max = capacities / max_cap_factor p_nom_max = capacities / max_cap_factor
else: else:
raise AssertionError('Config key `potential` should be one of "simple" ' raise AssertionError(
f'(default) or "conservative", not "{p_nom_max_meth}"') 'Config key `potential` should be one of "simple" '
f'(default) or "conservative", not "{p_nom_max_meth}"'
)
logger.info("Calculate average distances.")
layoutmatrix = (layout * availability).stack(spatial=["y", "x"])
coords = cutout.grid[["x", "y"]]
logger.info('Calculate average distances.') bus_coords = regions[["x", "y"]]
layoutmatrix = (layout * availability).stack(spatial=['y','x'])
coords = cutout.grid[['x', 'y']]
bus_coords = regions[['x', 'y']]
average_distance = [] average_distance = []
centre_of_mass = [] centre_of_mass = []
@ -304,39 +335,45 @@ if __name__ == '__main__':
nz_b = row != 0 nz_b = row != 0
row = row[nz_b] row = row[nz_b]
co = coords[nz_b] co = coords[nz_b]
distances = haversine(bus_coords.loc[bus], co) distances = haversine(bus_coords.loc[bus], co)
average_distance.append((distances * (row / row.sum())).sum()) average_distance.append((distances * (row / row.sum())).sum())
centre_of_mass.append(co.values.T @ (row / row.sum())) centre_of_mass.append(co.values.T @ (row / row.sum()))
average_distance = xr.DataArray(average_distance, [buses]) average_distance = xr.DataArray(average_distance, [buses])
centre_of_mass = xr.DataArray(centre_of_mass, [buses, ('spatial', ['x', 'y'])]) centre_of_mass = xr.DataArray(centre_of_mass, [buses, ("spatial", ["x", "y"])])
ds = xr.merge([(correction_factor * profile).rename('profile'),
capacities.rename('weight'),
p_nom_max.rename('p_nom_max'),
potential.rename('potential'),
average_distance.rename('average_distance')])
ds = xr.merge(
[
(correction_factor * profile).rename("profile"),
capacities.rename("weight"),
p_nom_max.rename("p_nom_max"),
potential.rename("potential"),
average_distance.rename("average_distance"),
]
)
if snakemake.wildcards.technology.startswith("offwind"): if snakemake.wildcards.technology.startswith("offwind"):
logger.info('Calculate underwater fraction of connections.') logger.info("Calculate underwater fraction of connections.")
offshore_shape = gpd.read_file(snakemake.input['offshore_shapes']).unary_union offshore_shape = gpd.read_file(snakemake.input["offshore_shapes"]).unary_union
underwater_fraction = [] underwater_fraction = []
for bus in buses: for bus in buses:
p = centre_of_mass.sel(bus=bus).data p = centre_of_mass.sel(bus=bus).data
line = LineString([p, regions.loc[bus, ['x', 'y']]]) line = LineString([p, regions.loc[bus, ["x", "y"]]])
frac = line.intersection(offshore_shape).length/line.length frac = line.intersection(offshore_shape).length / line.length
underwater_fraction.append(frac) underwater_fraction.append(frac)
ds['underwater_fraction'] = xr.DataArray(underwater_fraction, [buses]) ds["underwater_fraction"] = xr.DataArray(underwater_fraction, [buses])
# select only buses with some capacity and minimal capacity factor # select only buses with some capacity and minimal capacity factor
ds = ds.sel(bus=((ds['profile'].mean('time') > config.get('min_p_max_pu', 0.)) & ds = ds.sel(
(ds['p_nom_max'] > config.get('min_p_nom_max', 0.)))) bus=(
(ds["profile"].mean("time") > config.get("min_p_max_pu", 0.0))
& (ds["p_nom_max"] > config.get("min_p_nom_max", 0.0))
)
)
if 'clip_p_max_pu' in config: if "clip_p_max_pu" in config:
min_p_max_pu = config['clip_p_max_pu'] min_p_max_pu = config["clip_p_max_pu"]
ds['profile'] = ds['profile'].where(ds['profile'] >= min_p_max_pu, 0) ds["profile"] = ds["profile"].where(ds["profile"] >= min_p_max_pu, 0)
ds.to_netcdf(snakemake.output.profile) ds.to_netcdf(snakemake.output.profile)
@ -1,9 +1,12 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Creates GIS shape files of the countries, exclusive economic zones and `NUTS3
<https://en.wikipedia.org/wiki/Nomenclature_of_Territorial_Units_for_Statistics>`_
areas.

Relevant Settings
-----------------

@ -64,23 +67,20 @@ Outputs

Description
-----------
"""
import logging
from functools import reduce
from itertools import takewhile
from operator import attrgetter

import geopandas as gpd
import numpy as np
import pandas as pd
import pycountry as pyc
from _helpers import configure_logging
from shapely.geometry import MultiPolygon, Polygon
from shapely.ops import unary_union

logger = logging.getLogger(__name__)
@ -95,140 +95,187 @@ def _get_country(target, **keys):
def _simplify_polys(polys, minarea=0.1, tolerance=0.01, filterremote=True): def _simplify_polys(polys, minarea=0.1, tolerance=0.01, filterremote=True):
if isinstance(polys, MultiPolygon): if isinstance(polys, MultiPolygon):
polys = sorted(polys.geoms, key=attrgetter('area'), reverse=True) polys = sorted(polys.geoms, key=attrgetter("area"), reverse=True)
mainpoly = polys[0] mainpoly = polys[0]
mainlength = np.sqrt(mainpoly.area/(2.*np.pi)) mainlength = np.sqrt(mainpoly.area / (2.0 * np.pi))
if mainpoly.area > minarea: if mainpoly.area > minarea:
polys = MultiPolygon([p polys = MultiPolygon(
for p in takewhile(lambda p: p.area > minarea, polys) [
if not filterremote or (mainpoly.distance(p) < mainlength)]) p
for p in takewhile(lambda p: p.area > minarea, polys)
if not filterremote or (mainpoly.distance(p) < mainlength)
]
)
else: else:
polys = mainpoly polys = mainpoly
return polys.simplify(tolerance=tolerance) return polys.simplify(tolerance=tolerance)
def countries(naturalearth, country_list): def countries(naturalearth, country_list):
if 'RS' in country_list: country_list.append('KV') if "RS" in country_list:
country_list.append("KV")
df = gpd.read_file(naturalearth) df = gpd.read_file(naturalearth)
# Names are a hassle in naturalearth, try several fields # Names are a hassle in naturalearth, try several fields
fieldnames = (df[x].where(lambda s: s!='-99') for x in ('ISO_A2', 'WB_A2', 'ADM0_A3')) fieldnames = (
df['name'] = reduce(lambda x,y: x.fillna(y), fieldnames, next(fieldnames)).str[0:2] df[x].where(lambda s: s != "-99") for x in ("ISO_A2", "WB_A2", "ADM0_A3")
)
df["name"] = reduce(lambda x, y: x.fillna(y), fieldnames, next(fieldnames)).str[0:2]
df = df.loc[df.name.isin(country_list) & ((df['scalerank'] == 0) | (df['scalerank'] == 5))] df = df.loc[
s = df.set_index('name')['geometry'].map(_simplify_polys) df.name.isin(country_list) & ((df["scalerank"] == 0) | (df["scalerank"] == 5))
if 'RS' in country_list: s['RS'] = s['RS'].union(s.pop('KV')) ]
s = df.set_index("name")["geometry"].map(_simplify_polys)
if "RS" in country_list:
s["RS"] = s["RS"].union(s.pop("KV"))
# cleanup shape union
s["RS"] = Polygon(s["RS"].exterior.coords)
return s return s
def eez(country_shapes, eez, country_list): def eez(country_shapes, eez, country_list):
df = gpd.read_file(eez) df = gpd.read_file(eez)
df = df.loc[df['ISO_3digit'].isin([_get_country('alpha_3', alpha_2=c) for c in country_list])] df = df.loc[
df['name'] = df['ISO_3digit'].map(lambda c: _get_country('alpha_2', alpha_3=c)) df["ISO_3digit"].isin(
s = df.set_index('name').geometry.map(lambda s: _simplify_polys(s, filterremote=False)) [_get_country("alpha_3", alpha_2=c) for c in country_list]
s = gpd.GeoSeries({k:v for k,v in s.iteritems() if v.distance(country_shapes[k]) < 1e-3}) )
]
df["name"] = df["ISO_3digit"].map(lambda c: _get_country("alpha_2", alpha_3=c))
s = df.set_index("name").geometry.map(
lambda s: _simplify_polys(s, filterremote=False)
)
s = gpd.GeoSeries(
{k: v for k, v in s.items() if v.distance(country_shapes[k]) < 1e-3}
)
s = s.to_frame("geometry")
s.index.name = "name" s.index.name = "name"
return s return s
def country_cover(country_shapes, eez_shapes=None): def country_cover(country_shapes, eez_shapes=None):
shapes = list(country_shapes) shapes = country_shapes
if eez_shapes is not None: if eez_shapes is not None:
shapes += list(eez_shapes) shapes = pd.concat([shapes, eez_shapes])
europe_shape = unary_union(shapes) europe_shape = unary_union(shapes)
if isinstance(europe_shape, MultiPolygon): if isinstance(europe_shape, MultiPolygon):
europe_shape = max(europe_shape, key=attrgetter('area')) europe_shape = max(europe_shape, key=attrgetter("area"))
return Polygon(shell=europe_shape.exterior) return Polygon(shell=europe_shape.exterior)
def nuts3(country_shapes, nuts3, nuts3pop, nuts3gdp, ch_cantons, ch_popgdp): def nuts3(country_shapes, nuts3, nuts3pop, nuts3gdp, ch_cantons, ch_popgdp):
df = gpd.read_file(nuts3) df = gpd.read_file(nuts3)
df = df.loc[df['STAT_LEVL_'] == 3] df = df.loc[df["STAT_LEVL_"] == 3]
df['geometry'] = df['geometry'].map(_simplify_polys) df["geometry"] = df["geometry"].map(_simplify_polys)
df = df.rename(columns={'NUTS_ID': 'id'})[['id', 'geometry']].set_index('id') df = df.rename(columns={"NUTS_ID": "id"})[["id", "geometry"]].set_index("id")
pop = pd.read_table(nuts3pop, na_values=[':'], delimiter=' ?\t', engine='python') pop = pd.read_table(nuts3pop, na_values=[":"], delimiter=" ?\t", engine="python")
pop = (pop pop = (
.set_index(pd.MultiIndex.from_tuples(pop.pop('unit,geo\\time').str.split(','))).loc['THS'] pop.set_index(
.applymap(lambda x: pd.to_numeric(x, errors='coerce')) pd.MultiIndex.from_tuples(pop.pop("unit,geo\\time").str.split(","))
.fillna(method='bfill', axis=1))['2014'] )
.loc["THS"]
.applymap(lambda x: pd.to_numeric(x, errors="coerce"))
.fillna(method="bfill", axis=1)
)["2014"]
gdp = pd.read_table(nuts3gdp, na_values=[':'], delimiter=' ?\t', engine='python') gdp = pd.read_table(nuts3gdp, na_values=[":"], delimiter=" ?\t", engine="python")
gdp = (gdp gdp = (
.set_index(pd.MultiIndex.from_tuples(gdp.pop('unit,geo\\time').str.split(','))).loc['EUR_HAB'] gdp.set_index(
.applymap(lambda x: pd.to_numeric(x, errors='coerce')) pd.MultiIndex.from_tuples(gdp.pop("unit,geo\\time").str.split(","))
.fillna(method='bfill', axis=1))['2014'] )
.loc["EUR_HAB"]
.applymap(lambda x: pd.to_numeric(x, errors="coerce"))
.fillna(method="bfill", axis=1)
)["2014"]
cantons = pd.read_csv(ch_cantons) cantons = pd.read_csv(ch_cantons)
cantons = cantons.set_index(cantons['HASC'].str[3:])['NUTS'] cantons = cantons.set_index(cantons["HASC"].str[3:])["NUTS"]
cantons = cantons.str.pad(5, side='right', fillchar='0') cantons = cantons.str.pad(5, side="right", fillchar="0")
swiss = pd.read_excel(ch_popgdp, skiprows=3, index_col=0) swiss = pd.read_excel(ch_popgdp, skiprows=3, index_col=0)
swiss.columns = swiss.columns.to_series().map(cantons) swiss.columns = swiss.columns.to_series().map(cantons)
swiss_pop = pd.to_numeric(swiss.loc['Residents in 1000', 'CH040':]) swiss_pop = pd.to_numeric(swiss.loc["Residents in 1000", "CH040":])
pop = pd.concat([pop, swiss_pop]) pop = pd.concat([pop, swiss_pop])
swiss_gdp = pd.to_numeric(swiss.loc['Gross domestic product per capita in Swiss francs', 'CH040':]) swiss_gdp = pd.to_numeric(
swiss.loc["Gross domestic product per capita in Swiss francs", "CH040":]
)
gdp = pd.concat([gdp, swiss_gdp]) gdp = pd.concat([gdp, swiss_gdp])
df = df.join(pd.DataFrame(dict(pop=pop, gdp=gdp))) df = df.join(pd.DataFrame(dict(pop=pop, gdp=gdp)))
df['country'] = df.index.to_series().str[:2].replace(dict(UK='GB', EL='GR')) df["country"] = df.index.to_series().str[:2].replace(dict(UK="GB", EL="GR"))
excludenuts = pd.Index(('FRA10', 'FRA20', 'FRA30', 'FRA40', 'FRA50', excludenuts = pd.Index(
'PT200', 'PT300', (
'ES707', 'ES703', 'ES704','ES705', 'ES706', 'ES708', 'ES709', "FRA10",
'FI2', 'FR9')) "FRA20",
excludecountry = pd.Index(('MT', 'TR', 'LI', 'IS', 'CY', 'KV')) "FRA30",
"FRA40",
"FRA50",
"PT200",
"PT300",
"ES707",
"ES703",
"ES704",
"ES705",
"ES706",
"ES708",
"ES709",
"FI2",
"FR9",
)
)
excludecountry = pd.Index(("MT", "TR", "LI", "IS", "CY", "KV"))
df = df.loc[df.index.difference(excludenuts)] df = df.loc[df.index.difference(excludenuts)]
df = df.loc[~df.country.isin(excludecountry)] df = df.loc[~df.country.isin(excludecountry)]
manual = gpd.GeoDataFrame( manual = gpd.GeoDataFrame(
[['BA1', 'BA', 3871.], [["BA1", "BA", 3871.0], ["RS1", "RS", 7210.0], ["AL1", "AL", 2893.0]],
['RS1', 'RS', 7210.], columns=["NUTS_ID", "country", "pop"],
['AL1', 'AL', 2893.]], ).set_index("NUTS_ID")
columns=['NUTS_ID', 'country', 'pop'] manual["geometry"] = manual["country"].map(country_shapes)
).set_index('NUTS_ID')
manual['geometry'] = manual['country'].map(country_shapes)
manual = manual.dropna() manual = manual.dropna()
df = pd.concat([df, manual], sort=False) df = pd.concat([df, manual], sort=False)
df.loc['ME000', 'pop'] = 650. df.loc["ME000", "pop"] = 650.0
return df return df
def save_to_geojson(df, fn):
if os.path.exists(fn):
os.unlink(fn)
if not isinstance(df, gpd.GeoDataFrame):
df = gpd.GeoDataFrame(dict(geometry=df))
df = df.reset_index()
schema = {**gpd.io.file.infer_schema(df), 'geometry': 'Unknown'}
df.to_file(fn, driver='GeoJSON', schema=schema)
if __name__ == "__main__": if __name__ == "__main__":
if 'snakemake' not in globals(): if "snakemake" not in globals():
from _helpers import mock_snakemake from _helpers import mock_snakemake
snakemake = mock_snakemake('build_shapes')
snakemake = mock_snakemake("build_shapes")
configure_logging(snakemake) configure_logging(snakemake)
country_shapes = countries(snakemake.input.naturalearth, snakemake.config['countries']) country_shapes = countries(
save_to_geojson(country_shapes, snakemake.output.country_shapes) snakemake.input.naturalearth, snakemake.config["countries"]
)
country_shapes.reset_index().to_file(snakemake.output.country_shapes)
offshore_shapes = eez(country_shapes, snakemake.input.eez, snakemake.config['countries']) offshore_shapes = eez(
save_to_geojson(offshore_shapes, snakemake.output.offshore_shapes) country_shapes, snakemake.input.eez, snakemake.config["countries"]
)
offshore_shapes.reset_index().to_file(snakemake.output.offshore_shapes)
europe_shape = country_cover(country_shapes, offshore_shapes) europe_shape = gpd.GeoDataFrame(
save_to_geojson(gpd.GeoSeries(europe_shape), snakemake.output.europe_shape) geometry=[country_cover(country_shapes, offshore_shapes.geometry)]
)
europe_shape.reset_index().to_file(snakemake.output.europe_shape)
nuts3_shapes = nuts3(country_shapes, snakemake.input.nuts3, snakemake.input.nuts3pop, nuts3_shapes = nuts3(
snakemake.input.nuts3gdp, snakemake.input.ch_cantons, snakemake.input.ch_popgdp) country_shapes,
snakemake.input.nuts3,
save_to_geojson(nuts3_shapes, snakemake.output.nuts3_shapes) snakemake.input.nuts3pop,
snakemake.input.nuts3gdp,
snakemake.input.ch_cantons,
snakemake.input.ch_popgdp,
)
nuts3_shapes.reset_index().to_file(snakemake.output.nuts3_shapes)
@ -0,0 +1,68 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Transforms the global ship density data from
<https://datacatalog.worldbank.org/search/dataset/0037580/Global-Shipping-Traffic-Density>
to the size of the considered cutout. The global ship density raster is later
used for the exclusion when calculating the offshore potentials.
Relevant Settings
-----------------
.. code:: yaml
renewable:
{technology}:
cutout:
.. seealso::
Documentation of the configuration file ``config.yaml`` at
:ref:`renewable_cf`
Inputs
------
- ``data/bundle/shipdensity/shipdensity_global.zip``: `Global ship density from <https://datacatalog.worldbank.org/search/dataset/0037580/Global-Shipping-Traffic-Density>`.
Outputs
-------
- ``resources/europe_shipdensity_raster.nc``: Reduced version of `Global ship density from <https://datacatalog.worldbank.org/search/dataset/0037580/` to reduce computation time.
Description
-----------
"""
import logging
import os
import zipfile
import xarray as xr
from _helpers import configure_logging
from build_natura_raster import determine_cutout_xXyY
logger = logging.getLogger(__name__)
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake("build_ship_raster")
configure_logging(snakemake)
cutouts = snakemake.input.cutouts
xs, Xs, ys, Ys = zip(*(determine_cutout_xXyY(cutout) for cutout in cutouts))
with zipfile.ZipFile(snakemake.input.ship_density) as zip_f:
zip_f.extract("shipdensity_global.tif")
with xr.open_rasterio("shipdensity_global.tif") as ship_density:
ship_density = ship_density.drop(["band"]).sel(
x=slice(min(xs), max(Xs)), y=slice(max(Ys), min(ys))
)
ship_density.to_netcdf(snakemake.output[0])
os.remove("shipdensity_global.tif")
@ -1,21 +1,23 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT

# coding: utf-8
"""
Creates networks clustered to ``{cluster}`` number of zones with aggregated
buses, generators and transmission corridors.

Relevant Settings
-----------------

.. code:: yaml

    clustering:
      cluster_network:
      aggregation_strategies:

    focus_weights:

    solving:
        solver:

@ -118,30 +120,28 @@ Exemplary unsolved network clustered to 37 nodes:

.. image:: ../img/elec_s_37.png
    :scale: 40 %
    :align: center
"""
import logging
import warnings
from functools import reduce

import geopandas as gpd
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pyomo.environ as po
import pypsa
import seaborn as sns
from _helpers import configure_logging, get_aggregation_strategies, update_p_nom_max
from pypsa.networkclustering import (
    busmap_by_greedy_modularity,
    busmap_by_hac,
    busmap_by_kmeans,
    get_clustering_from_busmap,
)

warnings.filterwarnings(action="ignore", category=UserWarning)

from add_electricity import load_costs
@ -150,19 +150,21 @@ idx = pd.IndexSlice
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
def normed(x): return (x/x.sum()).fillna(0.) def normed(x):
return (x / x.sum()).fillna(0.0)
def weighting_for_country(n, x): def weighting_for_country(n, x):
conv_carriers = {'OCGT','CCGT','PHS', 'hydro'} conv_carriers = {"OCGT", "CCGT", "PHS", "hydro"}
gen = (n gen = n.generators.loc[n.generators.carrier.isin(conv_carriers)].groupby(
.generators.loc[n.generators.carrier.isin(conv_carriers)] "bus"
.groupby('bus').p_nom.sum() ).p_nom.sum().reindex(n.buses.index, fill_value=0.0) + n.storage_units.loc[
.reindex(n.buses.index, fill_value=0.) + n.storage_units.carrier.isin(conv_carriers)
n ].groupby(
.storage_units.loc[n.storage_units.carrier.isin(conv_carriers)] "bus"
.groupby('bus').p_nom.sum() ).p_nom.sum().reindex(
.reindex(n.buses.index, fill_value=0.)) n.buses.index, fill_value=0.0
)
load = n.loads_t.p_set.mean().groupby(n.loads.bus).sum() load = n.loads_t.p_set.mean().groupby(n.loads.bus).sum()
b_i = x.index b_i = x.index
@ -170,150 +172,278 @@ def weighting_for_country(n, x):
l = normed(load.reindex(b_i, fill_value=0)) l = normed(load.reindex(b_i, fill_value=0))
w = g + l w = g + l
return (w * (100. / w.max())).clip(lower=1.).astype(int) return (w * (100.0 / w.max())).clip(lower=1.0).astype(int)
def get_feature_for_hac(n, buses_i=None, feature=None):
if buses_i is None:
buses_i = n.buses.index
if feature is None:
feature = "solar+onwind-time"
carriers = feature.split("-")[0].split("+")
if "offwind" in carriers:
carriers.remove("offwind")
carriers = np.append(
carriers, network.generators.carrier.filter(like="offwind").unique()
)
if feature.split("-")[1] == "cap":
feature_data = pd.DataFrame(index=buses_i, columns=carriers)
for carrier in carriers:
gen_i = n.generators.query("carrier == @carrier").index
attach = (
n.generators_t.p_max_pu[gen_i]
.mean()
.rename(index=n.generators.loc[gen_i].bus)
)
feature_data[carrier] = attach
if feature.split("-")[1] == "time":
feature_data = pd.DataFrame(columns=buses_i)
for carrier in carriers:
gen_i = n.generators.query("carrier == @carrier").index
attach = n.generators_t.p_max_pu[gen_i].rename(
columns=n.generators.loc[gen_i].bus
)
feature_data = pd.concat([feature_data, attach], axis=0)[buses_i]
feature_data = feature_data.T
# timestamp raises error in sklearn >= v1.2:
feature_data.columns = feature_data.columns.astype(str)
feature_data = feature_data.fillna(0)
return feature_data
def distribute_clusters(n, n_clusters, focus_weights=None, solver_name="cbc"):
    """
    Determine the number of clusters per country.
    """
    L = (
        n.loads_t.p_set.mean()
        .groupby(n.loads.bus)
        .sum()
        .groupby([n.buses.country, n.buses.sub_network])
        .sum()
        .pipe(normed)
    )

    N = n.buses.groupby(["country", "sub_network"]).size()

    assert (
        n_clusters >= len(N) and n_clusters <= N.sum()
    ), f"Number of clusters must be {len(N)} <= n_clusters <= {N.sum()} for this selection of countries."

    if focus_weights is not None:
        total_focus = sum(list(focus_weights.values()))

        assert (
            total_focus <= 1.0
        ), "The sum of focus weights must be less than or equal to 1."

        for country, weight in focus_weights.items():
            L[country] = weight / len(L[country])

        remainder = [
            c not in focus_weights.keys() for c in L.index.get_level_values("country")
        ]
        L[remainder] = L.loc[remainder].pipe(normed) * (1 - total_focus)

        logger.warning("Using custom focus weights for determining number of clusters.")

    assert np.isclose(
        L.sum(), 1.0, rtol=1e-3
    ), f"Country weights L must sum up to 1.0 when distributing clusters. Is {L.sum()}."

    m = po.ConcreteModel()

    def n_bounds(model, *n_id):
        return (1, N[n_id])

    m.n = po.Var(list(L.index), bounds=n_bounds, domain=po.Integers)
    m.tot = po.Constraint(expr=(po.summation(m.n) == n_clusters))
    m.objective = po.Objective(
        expr=sum((m.n[i] - L.loc[i] * n_clusters) ** 2 for i in L.index),
        sense=po.minimize,
    )

    opt = po.SolverFactory(solver_name)
    if not opt.has_capability("quadratic_objective"):
        logger.warning(
            f"The configured solver `{solver_name}` does not support quadratic objectives. Falling back to `ipopt`."
        )
        opt = po.SolverFactory("ipopt")

    results = opt.solve(m)
    assert (
        results["Solver"][0]["Status"] == "ok"
    ), f"Solver returned non-optimally: {results}"

    return pd.Series(m.n.get_values(), index=L.index).round().astype(int)
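
# Minimal usage sketch (hypothetical numbers, assuming a prepared pypsa.Network `n`):
# distribute_clusters solves a small quadratic integer program
#   min sum_i (n_i - L_i * n_clusters)^2   s.t.  sum_i n_i == n_clusters,
#   1 <= n_i <= number of buses in (country, sub_network) i,
# so each country/sub-network gets a cluster count roughly proportional to its
# mean load share L_i.
#
# n_per_country = distribute_clusters(n, n_clusters=37, solver_name="ipopt")
# assert n_per_country.sum() == 37
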
def busmap_for_n_clusters(
    n,
    n_clusters,
    solver_name,
    focus_weights=None,
    algorithm="kmeans",
    feature=None,
    **algorithm_kwds,
):
    if algorithm == "kmeans":
        algorithm_kwds.setdefault("n_init", 1000)
        algorithm_kwds.setdefault("max_iter", 30000)
        algorithm_kwds.setdefault("tol", 1e-6)
        algorithm_kwds.setdefault("random_state", 0)

def fix_country_assignment_for_hac(n):
from scipy.sparse import csgraph
# overwrite country of nodes that are disconnected from their country-topology
for country in n.buses.country.unique():
m = n[n.buses.country == country].copy()
_, labels = csgraph.connected_components(
m.adjacency_matrix(), directed=False
)
component = pd.Series(labels, index=m.buses.index)
component_sizes = component.value_counts()
if len(component_sizes) > 1:
disconnected_bus = component[
component == component_sizes.index[-1]
].index[0]
neighbor_bus = n.lines.query(
"bus0 == @disconnected_bus or bus1 == @disconnected_bus"
).iloc[0][["bus0", "bus1"]]
new_country = list(
set(n.buses.loc[neighbor_bus].country) - set([country])
)[0]
logger.info(
f"overwriting country `{country}` of bus `{disconnected_bus}` "
f"to new country `{new_country}`, because it is disconnected "
"from its initial inter-country transmission grid."
)
n.buses.at[disconnected_bus, "country"] = new_country
return n
if algorithm == "hac":
feature = get_feature_for_hac(n, buses_i=n.buses.index, feature=feature)
n = fix_country_assignment_for_hac(n)
if (algorithm != "hac") and (feature is not None):
logger.warning(
f"Keyword argument feature is only valid for algorithm `hac`. "
f"Given feature `{feature}` will be ignored."
)
    n.determine_network_topology()

    n_clusters = distribute_clusters(
        n, n_clusters, focus_weights=focus_weights, solver_name=solver_name
    )

    def busmap_for_country(x):
        prefix = x.name[0] + x.name[1] + " "
        logger.debug(f"Determining busmap for country {prefix[:-1]}")
        if len(x) == 1:
            return pd.Series(prefix + "0", index=x.index)
        weight = weighting_for_country(n, x)

        if algorithm == "kmeans":
            return prefix + busmap_by_kmeans(
                n, weight, n_clusters[x.name], buses_i=x.index, **algorithm_kwds
            )
        elif algorithm == "hac":
            return prefix + busmap_by_hac(
                n, n_clusters[x.name], buses_i=x.index, feature=feature.loc[x.index]
            )
        elif algorithm == "modularity":
            return prefix + busmap_by_greedy_modularity(
                n, n_clusters[x.name], buses_i=x.index
            )
        else:
            raise ValueError(
                f"`algorithm` must be one of 'kmeans' or 'hac'. Is {algorithm}."
            )

    return (
        n.buses.groupby(["country", "sub_network"], group_keys=False)
        .apply(busmap_for_country)
        .squeeze()
        .rename("busmap")
    )
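
# Usage sketch (assumes `n` and the module-level helpers are in scope): the function
# returns a pd.Series mapping every original bus to its cluster label, built from the
# country, the sub-network id and a running index, e.g. "DE0 3".
#
# busmap = busmap_for_n_clusters(n, n_clusters=37, solver_name="cbc", algorithm="kmeans")
# busmap.value_counts().size  # == 37
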
def clustering_for_n_clusters(
    n,
    n_clusters,
    custom_busmap=False,
    aggregate_carriers=None,
    line_length_factor=1.25,
    aggregation_strategies=dict(),
    solver_name="cbc",
    algorithm="hac",
    feature=None,
    extended_link_costs=0,
    focus_weights=None,
):
    bus_strategies, generator_strategies = get_aggregation_strategies(
        aggregation_strategies
    )

    if not isinstance(custom_busmap, pd.Series):
        busmap = busmap_for_n_clusters(
            n, n_clusters, solver_name, focus_weights, algorithm, feature
        )
    else:
        busmap = custom_busmap

    clustering = get_clustering_from_busmap(
        n,
        busmap,
        bus_strategies=bus_strategies,
        aggregate_generators_weighted=True,
        aggregate_generators_carriers=aggregate_carriers,
        aggregate_one_ports=["Load", "StorageUnit"],
        line_length_factor=line_length_factor,
        generator_strategies=generator_strategies,
        scale_link_capital_costs=False,
    )

    if not n.links.empty:
        nc = clustering.network
        nc.links["underwater_fraction"] = (
            n.links.eval("underwater_fraction * length").div(nc.links.length).dropna()
        )
        nc.links["capital_cost"] = nc.links["capital_cost"].add(
            (nc.links.length - n.links.length).clip(lower=0).mul(extended_link_costs),
            fill_value=0,
        )

    return clustering
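
# Sketch of the expected `aggregation_strategies` argument (keys are illustrative and
# mirror the config layout rather than an exhaustive list): per-component dictionaries
# of column -> pandas aggregation function, which get_aggregation_strategies splits
# into bus and generator strategies.
#
# aggregation_strategies = {
#     "generators": {"p_nom_max": pd.Series.sum, "p_nom_min": pd.Series.sum},
#     "buses": {"v_nom": pd.Series.max},
# }
# clustering = clustering_for_n_clusters(
#     n, 37, aggregation_strategies=aggregation_strategies, algorithm="kmeans"
# )
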
def save_to_geojson(s, fn):
if os.path.exists(fn):
os.unlink(fn)
df = s.reset_index()
schema = {**gpd.io.file.infer_schema(df), 'geometry': 'Unknown'}
df.to_file(fn, driver='GeoJSON', schema=schema)
def cluster_regions(busmaps, input=None, output=None):
    busmap = reduce(lambda x, y: x.map(y), busmaps[1:], busmaps[0])

    for which in ("regions_onshore", "regions_offshore"):
        regions = gpd.read_file(getattr(input, which))
        regions = regions.reindex(columns=["name", "geometry"]).set_index("name")
        regions_c = regions.dissolve(busmap)
        regions_c.index.name = "name"
        regions_c = regions_c.reset_index()
        regions_c.to_file(getattr(output, which))
def plot_busmap_for_n_clusters(n, n_clusters, fn=None):
@ -322,69 +452,112 @@ def plot_busmap_for_n_clusters(n, n_clusters, fn=None):
    cr = sns.color_palette("hls", len(cs))
    n.plot(bus_colors=busmap.map(dict(zip(cs, cr))))
    if fn is not None:
        plt.savefig(fn, bbox_inches="tight")
    del cs, cr

if __name__ == "__main__":
    if "snakemake" not in globals():
        from _helpers import mock_snakemake

        snakemake = mock_snakemake("cluster_network", simpl="", clusters="5")
    configure_logging(snakemake)

    n = pypsa.Network(snakemake.input.network)

    focus_weights = snakemake.config.get("focus_weights", None)

    renewable_carriers = pd.Index(
        [
            tech
            for tech in n.generators.carrier.unique()
            if tech in snakemake.config["renewable"]
        ]
    )

    exclude_carriers = snakemake.config["clustering"]["cluster_network"].get(
        "exclude_carriers", []
    )
    aggregate_carriers = set(n.generators.carrier) - set(exclude_carriers)
    if snakemake.wildcards.clusters.endswith("m"):
        n_clusters = int(snakemake.wildcards.clusters[:-1])
        aggregate_carriers = snakemake.config["electricity"].get(
            "conventional_carriers"
        )
    elif snakemake.wildcards.clusters == "all":
        n_clusters = len(n.buses)
    else:
        n_clusters = int(snakemake.wildcards.clusters)

    if n_clusters == len(n.buses):
        # Fast-path if no clustering is necessary
        busmap = n.buses.index.to_series()
        linemap = n.lines.index.to_series()
        clustering = pypsa.networkclustering.Clustering(
            n, busmap, linemap, linemap, pd.Series(dtype="O")
        )
    else:
        line_length_factor = snakemake.config["lines"]["length_factor"]
        Nyears = n.snapshot_weightings.objective.sum() / 8760

        hvac_overhead_cost = load_costs(
            snakemake.input.tech_costs,
            snakemake.config["costs"],
            snakemake.config["electricity"],
            Nyears,
        ).at["HVAC overhead", "capital_cost"]

        def consense(x):
            v = x.iat[0]
            assert (
                x == v
            ).all() or x.isnull().all(), "The `potential` configuration option must agree for all renewable carriers, for now!"
            return v

        aggregation_strategies = snakemake.config["clustering"].get(
            "aggregation_strategies", {}
        )
        # translate str entries of aggregation_strategies to pd.Series functions:
        aggregation_strategies = {
            p: {k: getattr(pd.Series, v) for k, v in aggregation_strategies[p].items()}
            for p in aggregation_strategies.keys()
        }
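
        # Example of the translation above (hypothetical config values): an entry like
        # {"generators": {"p_nom_max": "sum", "p_nom_min": "sum"}} becomes
        # {"generators": {"p_nom_max": pd.Series.sum, "p_nom_min": pd.Series.sum}},
        # i.e. plain strings are looked up as pd.Series methods via getattr.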
custom_busmap = snakemake.config["enable"].get("custom_busmap", False) custom_busmap = snakemake.config["enable"].get("custom_busmap", False)
if custom_busmap: if custom_busmap:
custom_busmap = pd.read_csv(snakemake.input.custom_busmap, index_col=0, squeeze=True) custom_busmap = pd.read_csv(
snakemake.input.custom_busmap, index_col=0, squeeze=True
)
custom_busmap.index = custom_busmap.index.astype(str) custom_busmap.index = custom_busmap.index.astype(str)
logger.info(f"Imported custom busmap from {snakemake.input.custom_busmap}") logger.info(f"Imported custom busmap from {snakemake.input.custom_busmap}")
clustering = clustering_for_n_clusters(n, n_clusters, custom_busmap, aggregate_carriers, cluster_config = snakemake.config.get("clustering", {}).get(
line_length_factor, potential_mode, "cluster_network", {}
snakemake.config['solving']['solver']['name'], )
"kmeans", hvac_overhead_cost, focus_weights) clustering = clustering_for_n_clusters(
n,
n_clusters,
custom_busmap,
aggregate_carriers,
line_length_factor,
aggregation_strategies,
snakemake.config["solving"]["solver"]["name"],
cluster_config.get("algorithm", "hac"),
cluster_config.get("feature", "solar+onwind-time"),
hvac_overhead_cost,
focus_weights,
)
update_p_nom_max(n) update_p_nom_max(clustering.network)
clustering.network.meta = dict(
snakemake.config, **dict(wildcards=dict(snakemake.wildcards))
)
clustering.network.export_to_netcdf(snakemake.output.network) clustering.network.export_to_netcdf(snakemake.output.network)
for attr in ('busmap', 'linemap'): #also available: linemap_positive, linemap_negative for attr in (
"busmap",
"linemap",
): # also available: linemap_positive, linemap_negative
getattr(clustering, attr).to_csv(snakemake.output[attr]) getattr(clustering, attr).to_csv(snakemake.output[attr])
cluster_regions((clustering.busmap,), snakemake.input, snakemake.output) cluster_regions((clustering.busmap,), snakemake.input, snakemake.output)
View File
@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
@ -11,8 +12,9 @@ Relevant Settings
.. code:: yaml

    costs:
        year:
        version:
        fill_values:
        marginal_cost:
        capital_cost:

@ -32,9 +34,9 @@ Outputs

Description
-----------

The following rule can be used to summarize the results in separate .csv files:

.. code:: bash

    snakemake results/summaries/elec_s_all_lall_Co2L-3H_all
                                         clusters

@ -45,28 +47,26 @@ The following rule can be used to summarize the results in seperate .csv files:

the line volume/cost cap field can be set to one of the following:

* ``lv1.25`` for a particular line volume extension by 25%
* ``lc1.25`` for a line cost extension by 25 %
* ``lall`` for all evaluated caps
* ``lvall`` for all line volume caps
* ``lcall`` for all line cost caps

Replacing '/summaries/' with '/plots/' creates nice colored maps of the results.
"""
import logging
import os

import pandas as pd
import pypsa
from _helpers import configure_logging
from add_electricity import load_costs, update_transmission_costs

idx = pd.IndexSlice

logger = logging.getLogger(__name__)

opt_name = {"Store": "e", "Line": "s", "Transformer": "s"}
def _add_indexed_rows(df, raw_index):
@ -78,102 +78,149 @@ def _add_indexed_rows(df, raw_index):
def assign_carriers(n):
    if "carrier" not in n.loads:
        n.loads["carrier"] = "electricity"

    for carrier in ["transport", "heat", "urban heat"]:
        n.loads.loc[n.loads.index.str.contains(carrier), "carrier"] = carrier

    n.storage_units["carrier"].replace(
        {"hydro": "hydro+PHS", "PHS": "hydro+PHS"}, inplace=True
    )

    if "carrier" not in n.lines:
        n.lines["carrier"] = "AC"

    n.lines["carrier"].replace({"AC": "lines"}, inplace=True)

    if n.links.empty:
        n.links["carrier"] = pd.Series(dtype=str)
    n.links["carrier"].replace({"DC": "lines"}, inplace=True)

    if (
        "EU gas store" in n.stores.index
        and n.stores.loc["EU gas Store", "carrier"] == ""
    ):
        n.stores.loc["EU gas Store", "carrier"] = "gas Store"

def calculate_costs(n, label, costs):
    for c in n.iterate_components(
        n.branch_components | n.controllable_one_port_components ^ {"Load"}
    ):
        capital_costs = c.df.capital_cost * c.df[opt_name.get(c.name, "p") + "_nom_opt"]
        capital_costs_grouped = capital_costs.groupby(c.df.carrier).sum()

        # Index tuple(s) indicating the newly to-be-added row(s)
        raw_index = tuple(
            [[c.list_name], ["capital"], list(capital_costs_grouped.index)]
        )
        costs = _add_indexed_rows(costs, raw_index)

        costs.loc[idx[raw_index], label] = capital_costs_grouped.values

        if c.name == "Link":
            p = c.pnl.p0.multiply(n.snapshot_weightings.generators, axis=0).sum()
        elif c.name == "Line":
            continue
        elif c.name == "StorageUnit":
            p_all = c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0)
            p_all[p_all < 0.0] = 0.0
            p = p_all.sum()
        else:
            p = c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0).sum()

        marginal_costs = p * c.df.marginal_cost

        marginal_costs_grouped = marginal_costs.groupby(c.df.carrier).sum()

        costs = costs.reindex(
            costs.index.union(
                pd.MultiIndex.from_product(
                    [[c.list_name], ["marginal"], marginal_costs_grouped.index]
                )
            )
        )

        costs.loc[
            idx[c.list_name, "marginal", list(marginal_costs_grouped.index)], label
        ] = marginal_costs_grouped.values

    return costs

def calculate_curtailment(n, label, curtailment):
    avail = (
        n.generators_t.p_max_pu.multiply(n.generators.p_nom_opt)
        .sum()
        .groupby(n.generators.carrier)
        .sum()
    )
    used = n.generators_t.p.sum().groupby(n.generators.carrier).sum()

    curtailment[label] = (((avail - used) / avail) * 100).round(3)

    return curtailment
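
# Worked example (hypothetical values): if a carrier could have produced
# avail = 100 MWh given p_max_pu and p_nom_opt, but only used = 90 MWh were
# dispatched, the table records ((100 - 90) / 100) * 100 = 10.0 (percent curtailed).
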
def calculate_energy(n, label, energy):
    for c in n.iterate_components(n.one_port_components | n.branch_components):
        if c.name in {"Generator", "Load", "ShuntImpedance"}:
            c_energies = (
                c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0)
                .sum()
                .multiply(c.df.sign)
                .groupby(c.df.carrier)
                .sum()
            )
        elif c.name in {"StorageUnit", "Store"}:
            c_energies = (
                c.pnl.p.multiply(n.snapshot_weightings.stores, axis=0)
                .sum()
                .multiply(c.df.sign)
                .groupby(c.df.carrier)
                .sum()
            )
        else:
            c_energies = (
                (
                    -c.pnl.p1.multiply(n.snapshot_weightings.generators, axis=0).sum()
                    - c.pnl.p0.multiply(n.snapshot_weightings.generators, axis=0).sum()
                )
                .groupby(c.df.carrier)
                .sum()
            )

        energy = include_in_summary(energy, [c.list_name], label, c_energies)

    return energy

def include_in_summary(summary, multiindexprefix, label, item):
    # Index tuple(s) indicating the newly to-be-added row(s)
    raw_index = tuple([multiindexprefix, list(item.index)])
    summary = _add_indexed_rows(summary, raw_index)

    summary.loc[idx[raw_index], label] = item.values
    return summary

def calculate_capacity(n, label, capacity):
    for c in n.iterate_components(n.one_port_components):
        if "p_nom_opt" in c.df.columns:
            c_capacities = (
                abs(c.df.p_nom_opt.multiply(c.df.sign)).groupby(c.df.carrier).sum()
            )
            capacity = include_in_summary(capacity, [c.list_name], label, c_capacities)
        elif "e_nom_opt" in c.df.columns:
            c_capacities = (
                abs(c.df.e_nom_opt.multiply(c.df.sign)).groupby(c.df.carrier).sum()
            )
            capacity = include_in_summary(capacity, [c.list_name], label, c_capacities)

    for c in n.iterate_components(n.passive_branch_components):
        c_capacities = c.df["s_nom_opt"].groupby(c.df.carrier).sum()
        capacity = include_in_summary(capacity, [c.list_name], label, c_capacities)

    for c in n.iterate_components(n.controllable_branch_components):
@ -182,16 +229,20 @@ def calculate_capacity(n,label,capacity):
    return capacity

def calculate_supply(n, label, supply):
    """
    calculate the max dispatch of each component at the buses where the loads
    are attached.
    """
    load_types = n.buses.carrier.unique()

    for i in load_types:
        buses = n.buses.query("carrier == @i").index

        bus_map = pd.Series(False, index=n.buses.index)

        bus_map.loc[buses] = True

@ -202,43 +253,57 @@ def calculate_supply(n, label, supply):
            if len(items) == 0 or c.pnl.p.empty:
                continue

            s = (
                c.pnl.p[items]
                .max()
                .multiply(c.df.loc[items, "sign"])
                .groupby(c.df.loc[items, "carrier"])
                .sum()
            )

            # Index tuple(s) indicating the newly to-be-added row(s)
            raw_index = tuple([[i], [c.list_name], list(s.index)])
            supply = _add_indexed_rows(supply, raw_index)

            supply.loc[idx[raw_index], label] = s.values

        for c in n.iterate_components(n.branch_components):
            for end in ["0", "1"]:
                items = c.df.index[c.df["bus" + end].map(bus_map)]

                if len(items) == 0 or c.pnl["p" + end].empty:
                    continue

                # lots of sign compensation for direction and to do maximums
                s = (-1) ** (1 - int(end)) * (
                    (-1) ** int(end) * c.pnl["p" + end][items]
                ).max().groupby(c.df.loc[items, "carrier"]).sum()

                supply = supply.reindex(
                    supply.index.union(
                        pd.MultiIndex.from_product([[i], [c.list_name], s.index])
                    )
                )
                supply.loc[idx[i, c.list_name, list(s.index)], label] = s.values

    return supply

def calculate_supply_energy(n, label, supply_energy):
    """
    calculate the total dispatch of each component at the buses where the loads
    are attached.
    """
    load_types = n.buses.carrier.unique()

    for i in load_types:
        buses = n.buses.query("carrier == @i").index

        bus_map = pd.Series(False, index=n.buses.index)

        bus_map.loc[buses] = True

@ -249,55 +314,83 @@ def calculate_supply_energy(n, label, supply_energy):
            if len(items) == 0 or c.pnl.p.empty:
                continue

            s = (
                c.pnl.p[items]
                .sum()
                .multiply(c.df.loc[items, "sign"])
                .groupby(c.df.loc[items, "carrier"])
                .sum()
            )

            # Index tuple(s) indicating the newly to-be-added row(s)
            raw_index = tuple([[i], [c.list_name], list(s.index)])
            supply_energy = _add_indexed_rows(supply_energy, raw_index)

            supply_energy.loc[idx[raw_index], label] = s.values

        for c in n.iterate_components(n.branch_components):
            for end in ["0", "1"]:
                items = c.df.index[c.df["bus" + end].map(bus_map)]

                if len(items) == 0 or c.pnl["p" + end].empty:
                    continue

                s = (-1) * c.pnl["p" + end][items].sum().groupby(
                    c.df.loc[items, "carrier"]
                ).sum()

                supply_energy = supply_energy.reindex(
                    supply_energy.index.union(
                        pd.MultiIndex.from_product([[i], [c.list_name], s.index])
                    )
                )
                supply_energy.loc[idx[i, c.list_name, list(s.index)], label] = s.values

    return supply_energy

def calculate_metrics(n, label, metrics):
    metrics = metrics.reindex(
        metrics.index.union(
            pd.Index(
                [
                    "line_volume",
                    "line_volume_limit",
                    "line_volume_AC",
                    "line_volume_DC",
                    "line_volume_shadow",
                    "co2_shadow",
                ]
            )
        )
    )

    metrics.at["line_volume_DC", label] = (n.links.length * n.links.p_nom_opt)[
        n.links.carrier == "DC"
    ].sum()
    metrics.at["line_volume_AC", label] = (n.lines.length * n.lines.s_nom_opt).sum()
    metrics.at["line_volume", label] = metrics.loc[
        ["line_volume_AC", "line_volume_DC"], label
    ].sum()

    if hasattr(n, "line_volume_limit"):
        metrics.at["line_volume_limit", label] = n.line_volume_limit

    if hasattr(n, "line_volume_limit_dual"):
        metrics.at["line_volume_shadow", label] = n.line_volume_limit_dual

    if "CO2Limit" in n.global_constraints.index:
        metrics.at["co2_shadow", label] = n.global_constraints.at["CO2Limit", "mu"]

    return metrics
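
# Worked example (hypothetical values): two AC lines of 100 km at s_nom_opt = 2000 MW
# and 200 km at 1000 MW give line_volume_AC = 100 * 2000 + 200 * 1000 = 400000 MW*km;
# line_volume adds the analogous DC link volume on top.
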
def calculate_prices(n, label, prices):
    bus_type = pd.Series(n.buses.index.str[3:], n.buses.index).replace(
        "", "electricity"
    )

    prices = prices.reindex(prices.index.union(bus_type.value_counts().index))

@ -307,19 +400,37 @@ def calculate_prices(n,label,prices):
    return prices

def calculate_weighted_prices(n, label, weighted_prices):
    logger.warning("Weighted prices don't include storage units as loads")

    weighted_prices = weighted_prices.reindex(
        pd.Index(
            [
                "electricity",
                "heat",
                "space heat",
                "urban heat",
                "space urban heat",
                "gas",
                "H2",
            ]
        )
    )

    link_loads = {
        "electricity": [
            "heat pump",
            "resistive heater",
            "battery charger",
            "H2 Electrolysis",
        ],
        "heat": ["water tanks charger"],
        "urban heat": ["water tanks charger"],
        "space heat": [],
        "space urban heat": [],
        "gas": ["OCGT", "gas boiler", "CHP electric", "CHP heat"],
        "H2": ["Sabatier", "H2 Fuel Cell"],
    }

    for carrier in link_loads:
@ -328,64 +439,77 @@ def calculate_weighted_prices(n,label,weighted_prices):
        elif carrier[:5] == "space":
            suffix = carrier[5:]
        else:
            suffix = " " + carrier

        buses = n.buses.index[n.buses.index.str[2:] == suffix]

        if buses.empty:
            continue

        if carrier in ["H2", "gas"]:
            load = pd.DataFrame(index=n.snapshots, columns=buses, data=0.0)
        elif carrier[:5] == "space":
            load = heat_demand_df[buses.str[:2]].rename(
                columns=lambda i: str(i) + suffix
            )
        else:
            load = n.loads_t.p_set[buses]

        for tech in link_loads[carrier]:
            names = n.links.index[n.links.index.to_series().str[-len(tech) :] == tech]

            if names.empty:
                continue

            load += (
                n.links_t.p0[names]
                .groupby(n.links.loc[names, "bus0"], axis=1)
                .sum(axis=1)
            )

        # Add H2 Store when charging
        if carrier == "H2":
            stores = (
                n.stores_t.p[buses + " Store"]
                .groupby(n.stores.loc[buses + " Store", "bus"], axis=1)
                .sum(axis=1)
            )
            stores[stores > 0.0] = 0.0
            load += -stores

        weighted_prices.loc[carrier, label] = (
            load * n.buses_t.marginal_price[buses]
        ).sum().sum() / load.sum().sum()

        if carrier[:5] == "space":
            print(load * n.buses_t.marginal_price[buses])

    return weighted_prices

outputs = ["costs", outputs = [
"curtailment", "costs",
"energy", "curtailment",
"capacity", "energy",
"supply", "capacity",
"supply_energy", "supply",
"prices", "supply_energy",
"weighted_prices", "prices",
"metrics", "weighted_prices",
] "metrics",
]
def make_summaries(networks_dict, paths, config, country="all"):
    columns = pd.MultiIndex.from_tuples(
        networks_dict.keys(), names=["simpl", "clusters", "ll", "opts"]
    )

    dfs = {}

    for output in outputs:
        dfs[output] = pd.DataFrame(columns=columns, dtype=float)

    for label, filename in networks_dict.items():
        print(label, filename)
@ -399,12 +523,12 @@ def make_summaries(networks_dict, paths, config, country='all'):
            logger.warning("Skipping {filename}".format(filename=filename))
            continue

        if country != "all":
            n = n[n.buses.country == country]

        Nyears = n.snapshot_weightings.objective.sum() / 8760.0
        costs = load_costs(paths[0], config["costs"], config["electricity"], Nyears)
        update_transmission_costs(n, costs)

        assign_carriers(n)

if __name__ == "__main__": if __name__ == "__main__":
if 'snakemake' not in globals(): if "snakemake" not in globals():
from _helpers import mock_snakemake from _helpers import mock_snakemake
snakemake = mock_snakemake('make_summary', network='elec', simpl='',
clusters='5', ll='copt', opts='Co2L-24H', country='all') snakemake = mock_snakemake(
network_dir = os.path.join('..', 'results', 'networks') "make_summary",
simpl="",
clusters="5",
ll="copt",
opts="Co2L-24H",
country="all",
)
network_dir = os.path.join(
"..", "results", "networks", snakemake.config["run"]["name"]
)
else: else:
network_dir = os.path.join('results', 'networks') network_dir = os.path.join(
"results", "networks", snakemake.config["run"]["name"]
)
configure_logging(snakemake) configure_logging(snakemake)
config = snakemake.config config = snakemake.config
@ -444,14 +579,18 @@ if __name__ == "__main__":
else: else:
ll = [wildcards.ll] ll = [wildcards.ll]
networks_dict = {(simpl,clusters,l,opts) : networks_dict = {
os.path.join(network_dir, f'elec_s{simpl}_' (simpl, clusters, l, opts): os.path.join(
f'{clusters}_ec_l{l}_{opts}.nc') network_dir, f"elec_s{simpl}_" f"{clusters}_ec_l{l}_{opts}.nc"
for simpl in expand_from_wildcard("simpl", config) )
for clusters in expand_from_wildcard("clusters", config) for simpl in expand_from_wildcard("simpl", config)
for l in ll for clusters in expand_from_wildcard("clusters", config)
for opts in expand_from_wildcard("opts", config)} for l in ll
for opts in expand_from_wildcard("opts", config)
}
dfs = make_summaries(networks_dict, snakemake.input, config, country=wildcards.country) dfs = make_summaries(
networks_dict, snakemake.input, config, country=wildcards.country
)
to_csv(dfs, snakemake.output[0]) to_csv(dfs, snakemake.output[0])
View File
@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
@ -16,21 +17,24 @@ Outputs
Description
-----------

"""
import logging

import cartopy.crs as ccrs
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from _helpers import (
    aggregate_costs,
    aggregate_p,
    configure_logging,
    load_network_for_plots,
)
from matplotlib.legend_handler import HandlerPatch
from matplotlib.patches import Circle, Ellipse

to_rgba = mpl.colors.colorConverter.to_rgba

logger = logging.getLogger(__name__)
@ -38,253 +42,368 @@ logger = logging.getLogger(__name__)
def make_handler_map_to_scale_circles_as_in(ax, dont_resize_actively=False): def make_handler_map_to_scale_circles_as_in(ax, dont_resize_actively=False):
fig = ax.get_figure() fig = ax.get_figure()
def axes2pt(): def axes2pt():
return np.diff(ax.transData.transform([(0,0), (1,1)]), axis=0)[0] * (72./fig.dpi) return np.diff(ax.transData.transform([(0, 0), (1, 1)]), axis=0)[0] * (
72.0 / fig.dpi
)
ellipses = [] ellipses = []
if not dont_resize_actively: if not dont_resize_actively:
def update_width_height(event): def update_width_height(event):
dist = axes2pt() dist = axes2pt()
for e, radius in ellipses: e.width, e.height = 2. * radius * dist for e, radius in ellipses:
fig.canvas.mpl_connect('resize_event', update_width_height) e.width, e.height = 2.0 * radius * dist
ax.callbacks.connect('xlim_changed', update_width_height)
ax.callbacks.connect('ylim_changed', update_width_height)
def legend_circle_handler(legend, orig_handle, xdescent, ydescent, fig.canvas.mpl_connect("resize_event", update_width_height)
width, height, fontsize): ax.callbacks.connect("xlim_changed", update_width_height)
w, h = 2. * orig_handle.get_radius() * axes2pt() ax.callbacks.connect("ylim_changed", update_width_height)
e = Ellipse(xy=(0.5*width-0.5*xdescent, 0.5*height-0.5*ydescent), width=w, height=w)
def legend_circle_handler(
legend, orig_handle, xdescent, ydescent, width, height, fontsize
):
w, h = 2.0 * orig_handle.get_radius() * axes2pt()
e = Ellipse(
xy=(0.5 * width - 0.5 * xdescent, 0.5 * height - 0.5 * ydescent),
width=w,
height=w,
)
ellipses.append((e, orig_handle.get_radius())) ellipses.append((e, orig_handle.get_radius()))
return e return e
return {Circle: HandlerPatch(patch_func=legend_circle_handler)} return {Circle: HandlerPatch(patch_func=legend_circle_handler)}
def make_legend_circles_for(sizes, scale=1.0, **kw): def make_legend_circles_for(sizes, scale=1.0, **kw):
return [Circle((0,0), radius=(s/scale)**0.5, **kw) for s in sizes] return [Circle((0, 0), radius=(s / scale) ** 0.5, **kw) for s in sizes]
def set_plot_style(): def set_plot_style():
plt.style.use(['classic', 'seaborn-white', plt.style.use(
{'axes.grid': False, 'grid.linestyle': '--', 'grid.color': u'0.6', [
'hatch.color': 'white', "classic",
'patch.linewidth': 0.5, "seaborn-white",
'font.size': 12, {
'legend.fontsize': 'medium', "axes.grid": False,
'lines.linewidth': 1.5, "grid.linestyle": "--",
'pdf.fonttype': 42, "grid.color": "0.6",
}]) "hatch.color": "white",
"patch.linewidth": 0.5,
"font.size": 12,
"legend.fontsize": "medium",
"lines.linewidth": 1.5,
"pdf.fonttype": 42,
},
]
)
def plot_map(n, ax=None, attribute='p_nom', opts={}): def plot_map(n, opts, ax=None, attribute="p_nom"):
if ax is None: if ax is None:
ax = plt.gca() ax = plt.gca()
## DATA ## DATA
line_colors = {'cur': "purple", line_colors = {
'exp': mpl.colors.rgb2hex(to_rgba("red", 0.7), True)} "cur": "purple",
tech_colors = opts['tech_colors'] "exp": mpl.colors.rgb2hex(to_rgba("red", 0.7), True),
}
tech_colors = opts["tech_colors"]
if attribute == 'p_nom': if attribute == "p_nom":
# bus_sizes = n.generators_t.p.sum().loc[n.generators.carrier == "load"].groupby(n.generators.bus).sum() # bus_sizes = n.generators_t.p.sum().loc[n.generators.carrier == "load"].groupby(n.generators.bus).sum()
bus_sizes = pd.concat((n.generators.query('carrier != "load"').groupby(['bus', 'carrier']).p_nom_opt.sum(), bus_sizes = pd.concat(
n.storage_units.groupby(['bus', 'carrier']).p_nom_opt.sum())) (
n.generators.query('carrier != "load"')
.groupby(["bus", "carrier"])
.p_nom_opt.sum(),
n.storage_units.groupby(["bus", "carrier"]).p_nom_opt.sum(),
)
)
line_widths_exp = n.lines.s_nom_opt line_widths_exp = n.lines.s_nom_opt
line_widths_cur = n.lines.s_nom_min line_widths_cur = n.lines.s_nom_min
link_widths_exp = n.links.p_nom_opt link_widths_exp = n.links.p_nom_opt
link_widths_cur = n.links.p_nom_min link_widths_cur = n.links.p_nom_min
else: else:
raise 'plotting of {} has not been implemented yet'.format(attribute) raise "plotting of {} has not been implemented yet".format(attribute)
line_colors_with_alpha = \
((line_widths_cur / n.lines.s_nom > 1e-3)
.map({True: line_colors['cur'], False: to_rgba(line_colors['cur'], 0.)}))
link_colors_with_alpha = \
((link_widths_cur / n.links.p_nom > 1e-3)
.map({True: line_colors['cur'], False: to_rgba(line_colors['cur'], 0.)}))
line_colors_with_alpha = (line_widths_cur / n.lines.s_nom > 1e-3).map(
{True: line_colors["cur"], False: to_rgba(line_colors["cur"], 0.0)}
)
link_colors_with_alpha = (link_widths_cur / n.links.p_nom > 1e-3).map(
{True: line_colors["cur"], False: to_rgba(line_colors["cur"], 0.0)}
)
## FORMAT ## FORMAT
linewidth_factor = opts['map'][attribute]['linewidth_factor'] linewidth_factor = opts["map"][attribute]["linewidth_factor"]
bus_size_factor = opts['map'][attribute]['bus_size_factor'] bus_size_factor = opts["map"][attribute]["bus_size_factor"]
## PLOT ## PLOT
n.plot(line_widths=line_widths_exp/linewidth_factor, n.plot(
link_widths=link_widths_exp/linewidth_factor, line_widths=line_widths_exp / linewidth_factor,
line_colors=line_colors['exp'], link_widths=link_widths_exp / linewidth_factor,
link_colors=line_colors['exp'], line_colors=line_colors["exp"],
bus_sizes=bus_sizes/bus_size_factor, link_colors=line_colors["exp"],
bus_colors=tech_colors, bus_sizes=bus_sizes / bus_size_factor,
boundaries=map_boundaries, bus_colors=tech_colors,
color_geomap=True, geomap=True, boundaries=map_boundaries,
ax=ax) color_geomap=True,
n.plot(line_widths=line_widths_cur/linewidth_factor, geomap=True,
link_widths=link_widths_cur/linewidth_factor, ax=ax,
line_colors=line_colors_with_alpha, )
link_colors=link_colors_with_alpha, n.plot(
bus_sizes=0, line_widths=line_widths_cur / linewidth_factor,
boundaries=map_boundaries, link_widths=link_widths_cur / linewidth_factor,
color_geomap=True, geomap=False, line_colors=line_colors_with_alpha,
ax=ax) link_colors=link_colors_with_alpha,
ax.set_aspect('equal') bus_sizes=0,
ax.axis('off') boundaries=map_boundaries,
color_geomap=True,
geomap=True,
ax=ax,
)
ax.set_aspect("equal")
ax.axis("off")
# Rasterize basemap # Rasterize basemap
# TODO : Check if this also works with cartopy # TODO : Check if this also works with cartopy
for c in ax.collections[:2]: c.set_rasterized(True) for c in ax.collections[:2]:
c.set_rasterized(True)
# LEGEND # LEGEND
handles = [] handles = []
labels = [] labels = []
for s in (10, 1): for s in (10, 1):
handles.append(plt.Line2D([0],[0],color=line_colors['exp'], handles.append(
linewidth=s*1e3/linewidth_factor)) plt.Line2D(
[0], [0], color=line_colors["exp"], linewidth=s * 1e3 / linewidth_factor
)
)
labels.append("{} GW".format(s)) labels.append("{} GW".format(s))
l1_1 = ax.legend(handles, labels, l1_1 = ax.legend(
loc="upper left", bbox_to_anchor=(0.24, 1.01), handles,
frameon=False, labels,
labelspacing=0.8, handletextpad=1.5, loc="upper left",
title='Transmission Exp./Exist. ') bbox_to_anchor=(0.24, 1.01),
frameon=False,
labelspacing=0.8,
handletextpad=1.5,
title="Transmission Exp./Exist. ",
)
ax.add_artist(l1_1) ax.add_artist(l1_1)
handles = [] handles = []
labels = [] labels = []
for s in (10, 5): for s in (10, 5):
handles.append(plt.Line2D([0],[0],color=line_colors['cur'], handles.append(
linewidth=s*1e3/linewidth_factor)) plt.Line2D(
[0], [0], color=line_colors["cur"], linewidth=s * 1e3 / linewidth_factor
)
)
labels.append("/") labels.append("/")
l1_2 = ax.legend(handles, labels, l1_2 = ax.legend(
loc="upper left", bbox_to_anchor=(0.26, 1.01), handles,
frameon=False, labels,
labelspacing=0.8, handletextpad=0.5, loc="upper left",
title=' ') bbox_to_anchor=(0.26, 1.01),
frameon=False,
labelspacing=0.8,
handletextpad=0.5,
title=" ",
)
ax.add_artist(l1_2) ax.add_artist(l1_2)
handles = make_legend_circles_for([10e3, 5e3, 1e3], scale=bus_size_factor, facecolor="w") handles = make_legend_circles_for(
[10e3, 5e3, 1e3], scale=bus_size_factor, facecolor="w"
)
labels = ["{} GW".format(s) for s in (10, 5, 3)] labels = ["{} GW".format(s) for s in (10, 5, 3)]
l2 = ax.legend(handles, labels, l2 = ax.legend(
loc="upper left", bbox_to_anchor=(0.01, 1.01), handles,
frameon=False, labelspacing=1.0, labels,
title='Generation', loc="upper left",
handler_map=make_handler_map_to_scale_circles_as_in(ax)) bbox_to_anchor=(0.01, 1.01),
frameon=False,
labelspacing=1.0,
title="Generation",
handler_map=make_handler_map_to_scale_circles_as_in(ax),
)
ax.add_artist(l2) ax.add_artist(l2)
techs = (bus_sizes.index.levels[1]).intersection(pd.Index(opts['vre_techs'] + opts['conv_techs'] + opts['storage_techs'])) techs = (bus_sizes.index.levels[1]).intersection(
pd.Index(opts["vre_techs"] + opts["conv_techs"] + opts["storage_techs"])
)
handles = [] handles = []
labels = [] labels = []
for t in techs: for t in techs:
handles.append(plt.Line2D([0], [0], color=tech_colors[t], marker='o', markersize=8, linewidth=0)) handles.append(
labels.append(opts['nice_names'].get(t, t)) plt.Line2D(
l3 = ax.legend(handles, labels, loc="upper center", bbox_to_anchor=(0.5, -0.), # bbox_to_anchor=(0.72, -0.05), [0], [0], color=tech_colors[t], marker="o", markersize=8, linewidth=0
handletextpad=0., columnspacing=0.5, ncol=4, title='Technology') )
)
labels.append(opts["nice_names"].get(t, t))
l3 = ax.legend(
handles,
labels,
loc="upper center",
bbox_to_anchor=(0.5, -0.0), # bbox_to_anchor=(0.72, -0.05),
handletextpad=0.0,
columnspacing=0.5,
ncol=4,
title="Technology",
)
return fig return fig
def plot_total_energy_pie(n, ax=None): def plot_total_energy_pie(n, opts, ax=None):
if ax is None: ax = plt.gca() if ax is None:
ax = plt.gca()
ax.set_title('Energy per technology', fontdict=dict(fontsize="medium")) ax.set_title("Energy per technology", fontdict=dict(fontsize="medium"))
e_primary = aggregate_p(n).drop('load', errors='ignore').loc[lambda s: s>0] e_primary = aggregate_p(n).drop("load", errors="ignore").loc[lambda s: s > 0]
patches, texts, autotexts = ax.pie(e_primary, patches, texts, autotexts = ax.pie(
e_primary,
startangle=90, startangle=90,
labels = e_primary.rename(opts['nice_names']).index, labels=e_primary.rename(opts["nice_names"]).index,
autopct='%.0f%%', autopct="%.0f%%",
shadow=False, shadow=False,
colors = [opts['tech_colors'][tech] for tech in e_primary.index]) colors=[opts["tech_colors"][tech] for tech in e_primary.index],
)
for t1, t2, i in zip(texts, autotexts, e_primary.index): for t1, t2, i in zip(texts, autotexts, e_primary.index):
if e_primary.at[i] < 0.04 * e_primary.sum(): if e_primary.at[i] < 0.04 * e_primary.sum():
t1.remove() t1.remove()
t2.remove() t2.remove()
def plot_total_cost_bar(n, ax=None):
if ax is None: ax = plt.gca() def plot_total_cost_bar(n, opts, ax=None):
if ax is None:
ax = plt.gca()
total_load = (n.snapshot_weightings.generators * n.loads_t.p.sum(axis=1)).sum() total_load = (n.snapshot_weightings.generators * n.loads_t.p.sum(axis=1)).sum()
tech_colors = opts['tech_colors'] tech_colors = opts["tech_colors"]
def split_costs(n): def split_costs(n):
costs = aggregate_costs(n).reset_index(level=0, drop=True) costs = aggregate_costs(n).reset_index(level=0, drop=True)
costs_ex = aggregate_costs(n, existing_only=True).reset_index(level=0, drop=True) costs_ex = aggregate_costs(n, existing_only=True).reset_index(
return (costs['capital'].add(costs['marginal'], fill_value=0.), level=0, drop=True
costs_ex['capital'], costs['capital'] - costs_ex['capital'], costs['marginal']) )
return (
costs["capital"].add(costs["marginal"], fill_value=0.0),
costs_ex["capital"],
costs["capital"] - costs_ex["capital"],
costs["marginal"],
)
costs, costs_cap_ex, costs_cap_new, costs_marg = split_costs(n) costs, costs_cap_ex, costs_cap_new, costs_marg = split_costs(n)
costs_graph = pd.DataFrame(dict(a=costs.drop('load', errors='ignore')), costs_graph = pd.DataFrame(
index=['AC-AC', 'AC line', 'onwind', 'offwind-ac', dict(a=costs.drop("load", errors="ignore")),
'offwind-dc', 'solar', 'OCGT','CCGT', 'battery', 'H2']).dropna() index=[
bottom = np.array([0., 0.]) "AC-AC",
"AC line",
"onwind",
"offwind-ac",
"offwind-dc",
"solar",
"OCGT",
"CCGT",
"battery",
"H2",
],
).dropna()
bottom = np.array([0.0, 0.0])
texts = [] texts = []
for i,ind in enumerate(costs_graph.index): for i, ind in enumerate(costs_graph.index):
data = np.asarray(costs_graph.loc[ind])/total_load data = np.asarray(costs_graph.loc[ind]) / total_load
ax.bar([0.5], data, bottom=bottom, color=tech_colors[ind], ax.bar([0.5], data, bottom=bottom, color=tech_colors[ind], width=0.7, zorder=-1)
width=0.7, zorder=-1)
bottom_sub = bottom bottom_sub = bottom
bottom = bottom+data bottom = bottom + data
if ind in opts['conv_techs'] + ['AC line']: if ind in opts["conv_techs"] + ["AC line"]:
for c in [costs_cap_ex, costs_marg]: for c in [costs_cap_ex, costs_marg]:
if ind in c: if ind in c:
data_sub = np.asarray([c.loc[ind]])/total_load data_sub = np.asarray([c.loc[ind]]) / total_load
ax.bar([0.5], data_sub, linewidth=0, ax.bar(
bottom=bottom_sub, color=tech_colors[ind], [0.5],
width=0.7, zorder=-1, alpha=0.8) data_sub,
linewidth=0,
bottom=bottom_sub,
color=tech_colors[ind],
width=0.7,
zorder=-1,
alpha=0.8,
)
bottom_sub += data_sub bottom_sub += data_sub
if abs(data[-1]) < 5: if abs(data[-1]) < 5:
continue continue
text = ax.text(1.1,(bottom-0.5*data)[-1]-3,opts['nice_names'].get(ind,ind)) text = ax.text(
1.1, (bottom - 0.5 * data)[-1] - 3, opts["nice_names"].get(ind, ind)
)
texts.append(text) texts.append(text)
ax.set_ylabel("Average system cost [Eur/MWh]") ax.set_ylabel("Average system cost [Eur/MWh]")
ax.set_ylim([0, opts.get('costs_max', 80)]) ax.set_ylim([0, opts.get("costs_max", 80)])
ax.set_xlim([0, 1]) ax.set_xlim([0, 1])
ax.set_xticklabels([]) ax.set_xticklabels([])
ax.grid(True, axis="y", color='k', linestyle='dotted') ax.grid(True, axis="y", color="k", linestyle="dotted")
if __name__ == "__main__": if __name__ == "__main__":
if 'snakemake' not in globals(): if "snakemake" not in globals():
from _helpers import mock_snakemake from _helpers import mock_snakemake
snakemake = mock_snakemake('plot_network', network='elec', simpl='',
clusters='5', ll='copt', opts='Co2L-24H', snakemake = mock_snakemake(
attr='p_nom', ext="pdf") "plot_network",
simpl="",
clusters="5",
ll="copt",
opts="Co2L-24H",
attr="p_nom",
ext="pdf",
)
configure_logging(snakemake) configure_logging(snakemake)
set_plot_style() set_plot_style()
paths, config, wildcards, logs, out = retrieve_snakemake_keys(snakemake) config, wildcards = snakemake.config, snakemake.wildcards
map_figsize = config['map']['figsize'] map_figsize = config["plotting"]["map"]["figsize"]
map_boundaries = config['map']['boundaries'] map_boundaries = config["plotting"]["map"]["boundaries"]
n = load_network_for_plots(paths.network, paths.tech_costs, config) n = load_network_for_plots(
snakemake.input.network, snakemake.input.tech_costs, config
)
scenario_opts = wildcards.opts.split('-') scenario_opts = wildcards.opts.split("-")
fig, ax = plt.subplots(figsize=map_figsize, subplot_kw={"projection": ccrs.PlateCarree()}) fig, ax = plt.subplots(
plot_map(n, ax, wildcards.attr, config) figsize=map_figsize, subplot_kw={"projection": ccrs.PlateCarree()}
)
plot_map(n, config["plotting"], ax=ax, attribute=wildcards.attr)
fig.savefig(out.only_map, dpi=150, bbox_inches='tight') fig.savefig(snakemake.output.only_map, dpi=150, bbox_inches="tight")
ax1 = fig.add_axes([-0.115, 0.625, 0.2, 0.2]) ax1 = fig.add_axes([-0.115, 0.625, 0.2, 0.2])
plot_total_energy_pie(n, ax1) plot_total_energy_pie(n, config["plotting"], ax=ax1)
ax2 = fig.add_axes([-0.075, 0.1, 0.1, 0.45]) ax2 = fig.add_axes([-0.075, 0.1, 0.1, 0.45])
plot_total_cost_bar(n, ax2) plot_total_cost_bar(n, config["plotting"], ax=ax2)
ll = wildcards.ll ll = wildcards.ll
ll_type = ll[0] ll_type = ll[0]
ll_factor = ll[1:] ll_factor = ll[1:]
lbl = dict(c='line cost', v='line volume')[ll_type] lbl = dict(c="line cost", v="line volume")[ll_type]
amnt = '{ll} x today\'s'.format(ll=ll_factor) if ll_factor != 'opt' else 'optimal' amnt = "{ll} x today's".format(ll=ll_factor) if ll_factor != "opt" else "optimal"
fig.suptitle('Expansion to {amount} {label} at {clusters} clusters' fig.suptitle(
.format(amount=amnt, label=lbl, clusters=wildcards.clusters)) "Expansion to {amount} {label} at {clusters} clusters".format(
amount=amnt, label=lbl, clusters=wildcards.clusters
)
)
fig.savefig(out.ext, transparent=True, bbox_inches='tight') fig.savefig(snakemake.output.ext, transparent=True, bbox_inches="tight")
View File
@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
@ -16,14 +17,13 @@ Outputs
Description
-----------

"""
import logging

import matplotlib.pyplot as plt
import pandas as pd
import pypsa
from _helpers import configure_logging

logger = logging.getLogger(__name__)
@ -31,11 +31,13 @@ logger = logging.getLogger(__name__)
def cum_p_nom_max(net, tech, country=None): def cum_p_nom_max(net, tech, country=None):
carrier_b = net.generators.carrier == tech carrier_b = net.generators.carrier == tech
generators = pd.DataFrame(dict( generators = pd.DataFrame(
p_nom_max=net.generators.loc[carrier_b, 'p_nom_max'], dict(
p_max_pu=net.generators_t.p_max_pu.loc[:,carrier_b].mean(), p_nom_max=net.generators.loc[carrier_b, "p_nom_max"],
country=net.generators.loc[carrier_b, 'bus'].map(net.buses.country) p_max_pu=net.generators_t.p_max_pu.loc[:, carrier_b].mean(),
)).sort_values("p_max_pu", ascending=False) country=net.generators.loc[carrier_b, "bus"].map(net.buses.country),
)
).sort_values("p_max_pu", ascending=False)
if country is not None: if country is not None:
generators = generators.loc[generators.country == country] generators = generators.loc[generators.country == country]
@ -46,33 +48,38 @@ def cum_p_nom_max(net, tech, country=None):
if __name__ == "__main__": if __name__ == "__main__":
if 'snakemake' not in globals(): if "snakemake" not in globals():
from _helpers import mock_snakemake from _helpers import mock_snakemake
snakemake = mock_snakemake('plot_p_nom_max', network='elec', simpl='',
techs='solar,onwind,offwind-dc', ext='png',
clusts= '5,full', country= 'all')
configure_logging(snakemake)
paths, config, wildcards, logs, out = retrieve_snakemake_keys(snakemake) snakemake = mock_snakemake(
"plot_p_nom_max",
simpl="",
techs="solar,onwind,offwind-dc",
ext="png",
clusts="5,full",
country="all",
)
configure_logging(snakemake)
plot_kwds = dict(drawstyle="steps-post") plot_kwds = dict(drawstyle="steps-post")
clusters = wildcards.clusts.split(',') clusters = snakemake.wildcards.clusts.split(",")
techs = wildcards.techs.split(',') techs = snakemake.wildcards.techs.split(",")
country = wildcards.country country = snakemake.wildcards.country
if country == 'all': if country == "all":
country = None country = None
else: else:
plot_kwds['marker'] = 'x' plot_kwds["marker"] = "x"
fig, axes = plt.subplots(1, len(techs)) fig, axes = plt.subplots(1, len(techs))
for j, cluster in enumerate(clusters): for j, cluster in enumerate(clusters):
net = pypsa.Network(paths[j]) net = pypsa.Network(snakemake.input[j])
for i, tech in enumerate(techs): for i, tech in enumerate(techs):
cum_p_nom_max(net, tech, country).plot(x="p_max_pu", y="cum_p_nom_max", cum_p_nom_max(net, tech, country).plot(
label=cluster, ax=axes[i], **plot_kwds) x="p_max_pu", y="cum_p_nom_max", label=cluster, ax=axes[i], **plot_kwds
)
for i, tech in enumerate(techs): for i, tech in enumerate(techs):
ax = axes[i] ax = axes[i]
@ -81,4 +88,4 @@ if __name__ == "__main__":
plt.legend(title="Cluster level") plt.legend(title="Cluster level")
fig.savefig(out[0], transparent=True, bbox_inches='tight') fig.savefig(snakemake.output[0], transparent=True, bbox_inches="tight")
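For readers reconstructing what ``cum_p_nom_max`` plots, its core is a capacity-factor-sorted cumulative sum; a small pandas sketch with invented numbers (the column names follow the function above, the unit handling is omitted):

import pandas as pd

generators = pd.DataFrame(
    {"p_nom_max": [100.0, 50.0, 200.0], "p_max_pu": [0.35, 0.20, 0.28]}
).sort_values("p_max_pu", ascending=False)

# Cumulative installable potential, ordered from the best to the worst site.
generators["cum_p_nom_max"] = generators["p_nom_max"].cumsum()
print(generators)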

@ -1,4 +1,5 @@
# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
# #
# SPDX-License-Identifier: MIT # SPDX-License-Identifier: MIT
@ -16,15 +17,14 @@ Outputs
Description Description
----------- -----------
""" """
import os
import logging import logging
from _helpers import configure_logging, retrieve_snakemake_keys import os
import pandas as pd
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
import pandas as pd
from _helpers import configure_logging
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -52,22 +52,37 @@ def rename_techs(label):
return label return label
preferred_order = pd.Index(["transmission lines","hydroelectricity","hydro reservoir","run of river","pumped hydro storage","onshore wind","offshore wind ac", "offshore wind dc","solar PV","solar thermal","OCGT","hydrogen storage","battery storage"]) preferred_order = pd.Index(
[
"transmission lines",
"hydroelectricity",
"hydro reservoir",
"run of river",
"pumped hydro storage",
"onshore wind",
"offshore wind ac",
"offshore wind dc",
"solar PV",
"solar thermal",
"OCGT",
"hydrogen storage",
"battery storage",
]
)
def plot_costs(infn, config, fn=None): def plot_costs(infn, config, fn=None):
## For now ignore the simpl header ## For now ignore the simpl header
cost_df = pd.read_csv(infn,index_col=list(range(3)),header=[1,2,3]) cost_df = pd.read_csv(infn, index_col=list(range(3)), header=[1, 2, 3])
df = cost_df.groupby(cost_df.index.get_level_values(2)).sum() df = cost_df.groupby(cost_df.index.get_level_values(2)).sum()
#convert to billions # convert to billions
df = df/1e9 df = df / 1e9
df = df.groupby(df.index.map(rename_techs)).sum() df = df.groupby(df.index.map(rename_techs)).sum()
to_drop = df.index[df.max(axis=1) < config['plotting']['costs_threshold']] to_drop = df.index[df.max(axis=1) < config["plotting"]["costs_threshold"]]
print("dropping") print("dropping")
@ -77,22 +92,28 @@ def plot_costs(infn, config, fn=None):
print(df.sum()) print(df.sum())
new_index = (preferred_order&df.index).append(df.index.difference(preferred_order)) new_index = (preferred_order.intersection(df.index)).append(
df.index.difference(preferred_order)
)
new_columns = df.sum().sort_values().index new_columns = df.sum().sort_values().index
fig, ax = plt.subplots() fig, ax = plt.subplots()
fig.set_size_inches((12,8)) fig.set_size_inches((12, 8))
df.loc[new_index,new_columns].T.plot(kind="bar",ax=ax,stacked=True,color=[config['plotting']['tech_colors'][i] for i in new_index]) df.loc[new_index, new_columns].T.plot(
kind="bar",
ax=ax,
stacked=True,
color=[config["plotting"]["tech_colors"][i] for i in new_index],
)
handles, labels = ax.get_legend_handles_labels()
handles,labels = ax.get_legend_handles_labels()
handles.reverse() handles.reverse()
labels.reverse() labels.reverse()
ax.set_ylim([0,config['plotting']['costs_max']]) ax.set_ylim([0, config["plotting"]["costs_max"]])
ax.set_ylabel("System Cost [EUR billion per year]") ax.set_ylabel("System Cost [EUR billion per year]")
@ -100,8 +121,7 @@ def plot_costs(infn, config, fn=None):
ax.grid(axis="y") ax.grid(axis="y")
ax.legend(handles,labels,ncol=4,loc="upper left") ax.legend(handles, labels, ncol=4, loc="upper left")
fig.tight_layout() fig.tight_layout()
@ -110,17 +130,16 @@ def plot_costs(infn, config, fn=None):
def plot_energy(infn, config, fn=None): def plot_energy(infn, config, fn=None):
energy_df = pd.read_csv(infn, index_col=list(range(2)), header=[1, 2, 3])
energy_df = pd.read_csv(infn, index_col=list(range(2)),header=[1,2,3])
df = energy_df.groupby(energy_df.index.get_level_values(1)).sum() df = energy_df.groupby(energy_df.index.get_level_values(1)).sum()
#convert MWh to TWh # convert MWh to TWh
df = df/1e6 df = df / 1e6
df = df.groupby(df.index.map(rename_techs)).sum() df = df.groupby(df.index.map(rename_techs)).sum()
to_drop = df.index[df.abs().max(axis=1) < config['plotting']['energy_threshold']] to_drop = df.index[df.abs().max(axis=1) < config["plotting"]["energy_threshold"]]
print("dropping") print("dropping")
@ -130,22 +149,28 @@ def plot_energy(infn, config, fn=None):
print(df.sum()) print(df.sum())
new_index = (preferred_order&df.index).append(df.index.difference(preferred_order)) new_index = (preferred_order.intersection(df.index)).append(
df.index.difference(preferred_order)
)
new_columns = df.columns.sort_values() new_columns = df.columns.sort_values()
fig, ax = plt.subplots() fig, ax = plt.subplots()
fig.set_size_inches((12,8)) fig.set_size_inches((12, 8))
df.loc[new_index,new_columns].T.plot(kind="bar",ax=ax,stacked=True,color=[config['plotting']['tech_colors'][i] for i in new_index]) df.loc[new_index, new_columns].T.plot(
kind="bar",
ax=ax,
stacked=True,
color=[config["plotting"]["tech_colors"][i] for i in new_index],
)
handles, labels = ax.get_legend_handles_labels()
handles,labels = ax.get_legend_handles_labels()
handles.reverse() handles.reverse()
labels.reverse() labels.reverse()
ax.set_ylim([config['plotting']['energy_min'], config['plotting']['energy_max']]) ax.set_ylim([config["plotting"]["energy_min"], config["plotting"]["energy_max"]])
ax.set_ylabel("Energy [TWh/a]") ax.set_ylabel("Energy [TWh/a]")
@ -153,8 +178,7 @@ def plot_energy(infn, config, fn=None):
ax.grid(axis="y") ax.grid(axis="y")
ax.legend(handles,labels,ncol=4,loc="upper left") ax.legend(handles, labels, ncol=4, loc="upper left")
fig.tight_layout() fig.tight_layout()
@ -163,19 +187,30 @@ def plot_energy(infn, config, fn=None):
if __name__ == "__main__": if __name__ == "__main__":
if 'snakemake' not in globals(): if "snakemake" not in globals():
from _helpers import mock_snakemake from _helpers import mock_snakemake
snakemake = mock_snakemake('plot_summary', summary='energy', network='elec',
simpl='', clusters=5, ll='copt', opts='Co2L-24H', snakemake = mock_snakemake(
attr='', ext='png', country='all') "plot_summary",
summary="energy",
simpl="",
clusters=5,
ll="copt",
opts="Co2L-24H",
attr="",
ext="png",
country="all",
)
configure_logging(snakemake) configure_logging(snakemake)
paths, config, wildcards, logs, out = retrieve_snakemake_keys(snakemake) config = snakemake.config
summary = wildcards.summary summary = snakemake.wildcards.summary
try: try:
func = globals()[f"plot_{summary}"] func = globals()[f"plot_{summary}"]
except KeyError: except KeyError:
raise RuntimeError(f"plotting function for {summary} has not been defined") raise RuntimeError(f"plotting function for {summary} has not been defined")
func(os.path.join(paths[0], f"{summary}.csv"), config, out[0]) func(
os.path.join(snakemake.input[0], f"{summary}.csv"), config, snakemake.output[0]
)
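The ``preferred_order.intersection(...).append(...)`` idiom used by both plotting functions simply lists known technologies first and appends everything else; a standalone sketch with invented labels:

import pandas as pd

preferred_order = pd.Index(["transmission lines", "onshore wind", "solar PV"])
index = pd.Index(["battery storage", "onshore wind", "solar PV"])

# Known technologies first, remaining entries appended afterwards.
new_index = preferred_order.intersection(index).append(index.difference(preferred_order))
print(list(new_index))  # ['onshore wind', 'solar PV', 'battery storage']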

@ -1,11 +1,14 @@
#!/usr/bin/env python #!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors # SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
# #
# SPDX-License-Identifier: MIT # SPDX-License-Identifier: MIT
""" """
Extracts capacities of HVDC links from `Wikipedia <https://en.wikipedia.org/wiki/List_of_HVDC_projects>`_. Extracts capacities of HVDC links from `Wikipedia.
<https://en.wikipedia.org/wiki/List_of_HVDC_projects>`_.
Relevant Settings Relevant Settings
----------------- -----------------
@ -33,13 +36,12 @@ Description
----------- -----------
*None* *None*
""" """
import logging import logging
from _helpers import configure_logging, retrieve_snakemake_keys
import pandas as pd import pandas as pd
from _helpers import configure_logging
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -49,31 +51,45 @@ def multiply(s):
def extract_coordinates(s): def extract_coordinates(s):
regex = (r"(\d{1,2})°(\d{1,2})(\d{1,2})″(N|S) " regex = (
r"(\d{1,2})°(\d{1,2})(\d{1,2})″(E|W)") r"(\d{1,2})°(\d{1,2})(\d{1,2})″(N|S) " r"(\d{1,2})°(\d{1,2})(\d{1,2})″(E|W)"
)
e = s.str.extract(regex, expand=True) e = s.str.extract(regex, expand=True)
lat = (e[0].astype(float) + (e[1].astype(float) + e[2].astype(float)/60.)/60.)*e[3].map({'N': +1., 'S': -1.}) lat = (
lon = (e[4].astype(float) + (e[5].astype(float) + e[6].astype(float)/60.)/60.)*e[7].map({'E': +1., 'W': -1.}) e[0].astype(float) + (e[1].astype(float) + e[2].astype(float) / 60.0) / 60.0
) * e[3].map({"N": +1.0, "S": -1.0})
lon = (
e[4].astype(float) + (e[5].astype(float) + e[6].astype(float) / 60.0) / 60.0
) * e[7].map({"E": +1.0, "W": -1.0})
return lon, lat return lon, lat
if __name__ == "__main__": if __name__ == "__main__":
if 'snakemake' not in globals(): if "snakemake" not in globals():
from _helpers import mock_snakemake #rule must be enabled in config from _helpers import mock_snakemake # rule must be enabled in config
snakemake = mock_snakemake('prepare_links_p_nom', simpl='', network='elec')
snakemake = mock_snakemake("prepare_links_p_nom", simpl="")
configure_logging(snakemake) configure_logging(snakemake)
paths, config, wildcards, logs, out = retrieve_snakemake_keys(snakemake) links_p_nom = pd.read_html(
"https://en.wikipedia.org/wiki/List_of_HVDC_projects", header=0, match="SwePol"
links_p_nom = pd.read_html('https://en.wikipedia.org/wiki/List_of_HVDC_projects', header=0, match="SwePol")[0] )[0]
mw = "Power (MW)" mw = "Power (MW)"
m_b = links_p_nom[mw].str.contains('x').fillna(False) m_b = links_p_nom[mw].str.contains("x").fillna(False)
links_p_nom.loc[m_b, mw] = links_p_nom.loc[m_b, mw].str.split('x').pipe(multiply) links_p_nom.loc[m_b, mw] = links_p_nom.loc[m_b, mw].str.split("x").pipe(multiply)
links_p_nom[mw] = links_p_nom[mw].str.extract("[-/]?([\d.]+)", expand=False).astype(float) links_p_nom[mw] = (
links_p_nom[mw].str.extract("[-/]?([\d.]+)", expand=False).astype(float)
)
links_p_nom['x1'], links_p_nom['y1'] = extract_coordinates(links_p_nom['Converterstation 1']) links_p_nom["x1"], links_p_nom["y1"] = extract_coordinates(
links_p_nom['x2'], links_p_nom['y2'] = extract_coordinates(links_p_nom['Converterstation 2']) links_p_nom["Converterstation 1"]
)
links_p_nom["x2"], links_p_nom["y2"] = extract_coordinates(
links_p_nom["Converterstation 2"]
)
links_p_nom.dropna(subset=['x1', 'y1', 'x2', 'y2']).to_csv(out[0], index=False) links_p_nom.dropna(subset=["x1", "y1", "x2", "y2"]).to_csv(
snakemake.output[0], index=False
)
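The coordinate handling above turns degree/minute/second strings into decimal degrees; as a quick hand-worked check (not part of the script), 55°30′0″N becomes:

# 55 + (30 + 0/60)/60 = 55.5; S and W hemispheres flip the sign.
deg, minutes, seconds, hemisphere = 55.0, 30.0, 0.0, "N"
lat = (deg + (minutes + seconds / 60.0) / 60.0) * {"N": 1.0, "S": -1.0}[hemisphere]
print(lat)  # 55.5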

@ -1,10 +1,12 @@
# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
# #
# SPDX-License-Identifier: MIT # SPDX-License-Identifier: MIT
# coding: utf-8 # coding: utf-8
""" """
Prepare PyPSA network for solving according to :ref:`opts` and :ref:`ll`, such as Prepare PyPSA network for solving according to :ref:`opts` and :ref:`ll`, such
as.
- adding an annual **limit** of carbon-dioxide emissions, - adding an annual **limit** of carbon-dioxide emissions,
- adding an exogenous **price** per tonne emissions of carbon-dioxide (or other kinds), - adding an exogenous **price** per tonne emissions of carbon-dioxide (or other kinds),
@ -20,9 +22,10 @@ Relevant Settings
.. code:: yaml .. code:: yaml
costs: costs:
year:
version:
fill_values:
emission_prices: emission_prices:
USD2013_to_EUR2013:
discountrate:
marginal_cost: marginal_cost:
capital_cost: capital_cost:
@ -37,7 +40,7 @@ Relevant Settings
Inputs Inputs
------ ------
- ``data/costs.csv``: The database of cost assumptions for all included technologies for specific years from various sources; e.g. discount rate, lifetime, investment (CAPEX), fixed operation and maintenance (FOM), variable operation and maintenance (VOM), fuel costs, efficiency, carbon-dioxide intensity. - ``resources/costs.csv``: The database of cost assumptions for all included technologies for specific years from various sources; e.g. discount rate, lifetime, investment (CAPEX), fixed operation and maintenance (FOM), variable operation and maintenance (VOM), fuel costs, efficiency, carbon-dioxide intensity.
- ``networks/elec_s{simpl}_{clusters}.nc``: confer :ref:`cluster` - ``networks/elec_s{simpl}_{clusters}.nc``: confer :ref:`cluster`
Outputs Outputs
@ -52,17 +55,15 @@ Description
The rule :mod:`prepare_all_networks` runs The rule :mod:`prepare_all_networks` runs
for all ``scenario`` s in the configuration file for all ``scenario`` s in the configuration file
the rule :mod:`prepare_network`. the rule :mod:`prepare_network`.
""" """
import logging import logging
from _helpers import configure_logging
import re import re
import pypsa
import numpy as np import numpy as np
import pandas as pd import pandas as pd
import pypsa
from _helpers import configure_logging
from add_electricity import load_costs, update_transmission_costs from add_electricity import load_costs, update_transmission_costs
idx = pd.IndexSlice idx = pd.IndexSlice
@ -70,55 +71,84 @@ idx = pd.IndexSlice
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
def add_co2limit(n, co2limit, Nyears=1.): def add_co2limit(n, co2limit, Nyears=1.0):
n.add(
n.add("GlobalConstraint", "CO2Limit", "GlobalConstraint",
carrier_attribute="co2_emissions", sense="<=", "CO2Limit",
constant=co2limit * Nyears) carrier_attribute="co2_emissions",
sense="<=",
constant=co2limit * Nyears,
)
def add_emission_prices(n, emission_prices={'co2': 0.}, exclude_co2=False): def add_gaslimit(n, gaslimit, Nyears=1.0):
if exclude_co2: emission_prices.pop('co2') sel = n.carriers.index.intersection(["OCGT", "CCGT", "CHP"])
ep = (pd.Series(emission_prices).rename(lambda x: x+'_emissions') * n.carriers.loc[sel, "gas_usage"] = 1.0
n.carriers.filter(like='_emissions')).sum(axis=1)
n.add(
"GlobalConstraint",
"GasLimit",
carrier_attribute="gas_usage",
sense="<=",
constant=gaslimit * Nyears,
)
def add_emission_prices(n, emission_prices={"co2": 0.0}, exclude_co2=False):
if exclude_co2:
emission_prices.pop("co2")
ep = (
pd.Series(emission_prices).rename(lambda x: x + "_emissions")
* n.carriers.filter(like="_emissions")
).sum(axis=1)
gen_ep = n.generators.carrier.map(ep) / n.generators.efficiency gen_ep = n.generators.carrier.map(ep) / n.generators.efficiency
n.generators['marginal_cost'] += gen_ep n.generators["marginal_cost"] += gen_ep
su_ep = n.storage_units.carrier.map(ep) / n.storage_units.efficiency_dispatch su_ep = n.storage_units.carrier.map(ep) / n.storage_units.efficiency_dispatch
n.storage_units['marginal_cost'] += su_ep n.storage_units["marginal_cost"] += su_ep
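Put differently, the adder computed above is the carrier's specific emissions times the emission price, divided by the conversion efficiency; a back-of-the-envelope check with invented numbers:

co2_price = 25.0      # EUR per tonne of CO2 (invented)
co2_intensity = 0.2   # tonne CO2 per MWh of fuel (invented)
efficiency = 0.4      # MWh electricity per MWh of fuel

marginal_cost_adder = co2_intensity * co2_price / efficiency
print(marginal_cost_adder)  # 12.5 EUR per MWh of electricity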
def set_line_s_max_pu(n, s_max_pu = 0.7): def set_line_s_max_pu(n, s_max_pu=0.7):
n.lines['s_max_pu'] = s_max_pu n.lines["s_max_pu"] = s_max_pu
logger.info(f"N-1 security margin of lines set to {s_max_pu}") logger.info(f"N-1 security margin of lines set to {s_max_pu}")
def set_transmission_limit(n, ll_type, factor, costs, Nyears=1): def set_transmission_limit(n, ll_type, factor, costs, Nyears=1):
links_dc_b = n.links.carrier == 'DC' if not n.links.empty else pd.Series() links_dc_b = n.links.carrier == "DC" if not n.links.empty else pd.Series()
_lines_s_nom = (np.sqrt(3) * n.lines.type.map(n.line_types.i_nom) * _lines_s_nom = (
n.lines.num_parallel * n.lines.bus0.map(n.buses.v_nom)) np.sqrt(3)
lines_s_nom = n.lines.s_nom.where(n.lines.type == '', _lines_s_nom) * n.lines.type.map(n.line_types.i_nom)
* n.lines.num_parallel
* n.lines.bus0.map(n.buses.v_nom)
)
lines_s_nom = n.lines.s_nom.where(n.lines.type == "", _lines_s_nom)
col = "capital_cost" if ll_type == "c" else "length"
ref = (
lines_s_nom @ n.lines[col]
+ n.links.loc[links_dc_b, "p_nom"] @ n.links.loc[links_dc_b, col]
)
col = 'capital_cost' if ll_type == 'c' else 'length' update_transmission_costs(n, costs)
ref = (lines_s_nom @ n.lines[col] +
n.links.loc[links_dc_b, "p_nom"] @ n.links.loc[links_dc_b, col])
update_transmission_costs(n, costs, simple_hvdc_costs=False) if factor == "opt" or float(factor) > 1.0:
n.lines["s_nom_min"] = lines_s_nom
n.lines["s_nom_extendable"] = True
if factor == 'opt' or float(factor) > 1.0: n.links.loc[links_dc_b, "p_nom_min"] = n.links.loc[links_dc_b, "p_nom"]
n.lines['s_nom_min'] = lines_s_nom n.links.loc[links_dc_b, "p_nom_extendable"] = True
n.lines['s_nom_extendable'] = True
n.links.loc[links_dc_b, 'p_nom_min'] = n.links.loc[links_dc_b, 'p_nom'] if factor != "opt":
n.links.loc[links_dc_b, 'p_nom_extendable'] = True con_type = "expansion_cost" if ll_type == "c" else "volume_expansion"
if factor != 'opt':
con_type = 'expansion_cost' if ll_type == 'c' else 'volume_expansion'
rhs = float(factor) * ref rhs = float(factor) * ref
n.add('GlobalConstraint', f'l{ll_type}_limit', n.add(
type=f'transmission_{con_type}_limit', "GlobalConstraint",
sense='<=', constant=rhs, carrier_attribute='AC, DC') f"l{ll_type}_limit",
type=f"transmission_{con_type}_limit",
sense="<=",
constant=rhs,
carrier_attribute="AC, DC",
)
return n return n
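As an aside, the reference value ``ref`` built above is a plain capacity-weighted sum (MW·km for volume limits, capital-cost-weighted MW for cost limits), and the wildcard factor scales it; with invented numbers:

import pandas as pd

s_nom = pd.Series([1000.0, 500.0])  # MW, invented line ratings
length = pd.Series([200.0, 100.0])  # km

ref = s_nom @ length   # 1000*200 + 500*100 = 250000 MW*km
rhs = 1.25 * ref       # with ll="v1.25" this would allow 25% more volume
print(ref, rhs)        # 250000.0 312500.0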
@ -132,7 +162,7 @@ def average_every_nhours(n, offset):
m.snapshot_weightings = snapshot_weightings m.snapshot_weightings = snapshot_weightings
for c in n.iterate_components(): for c in n.iterate_components():
pnl = getattr(m, c.list_name+"_t") pnl = getattr(m, c.list_name + "_t")
for k, df in c.pnl.items(): for k, df in c.pnl.items():
if not df.empty: if not df.empty:
pnl[k] = df.resample(offset).mean() pnl[k] = df.resample(offset).mean()
@ -145,8 +175,9 @@ def apply_time_segmentation(n, segments, solver_name="cbc"):
try: try:
import tsam.timeseriesaggregation as tsam import tsam.timeseriesaggregation as tsam
except: except:
raise ModuleNotFoundError("Optional dependency 'tsam' not found." raise ModuleNotFoundError(
"Install via 'pip install tsam'") "Optional dependency 'tsam' not found." "Install via 'pip install tsam'"
)
p_max_pu_norm = n.generators_t.p_max_pu.max() p_max_pu_norm = n.generators_t.p_max_pu.max()
p_max_pu = n.generators_t.p_max_pu / p_max_pu_norm p_max_pu = n.generators_t.p_max_pu / p_max_pu_norm
@ -159,9 +190,14 @@ def apply_time_segmentation(n, segments, solver_name="cbc"):
raw = pd.concat([p_max_pu, load, inflow], axis=1, sort=False) raw = pd.concat([p_max_pu, load, inflow], axis=1, sort=False)
agg = tsam.TimeSeriesAggregation(raw, hoursPerPeriod=len(raw), agg = tsam.TimeSeriesAggregation(
noTypicalPeriods=1, noSegments=int(segments), raw,
segmentation=True, solver=solver_name) hoursPerPeriod=len(raw),
noTypicalPeriods=1,
noSegments=int(segments),
segmentation=True,
solver=solver_name,
)
segmented = agg.createTypicalPeriods() segmented = agg.createTypicalPeriods()
@ -169,8 +205,10 @@ def apply_time_segmentation(n, segments, solver_name="cbc"):
offsets = np.insert(np.cumsum(weightings[:-1]), 0, 0) offsets = np.insert(np.cumsum(weightings[:-1]), 0, 0)
snapshots = [n.snapshots[0] + pd.Timedelta(f"{offset}h") for offset in offsets] snapshots = [n.snapshots[0] + pd.Timedelta(f"{offset}h") for offset in offsets]
n.set_snapshots(pd.DatetimeIndex(snapshots, name='name')) n.set_snapshots(pd.DatetimeIndex(snapshots, name="name"))
n.snapshot_weightings = pd.Series(weightings, index=snapshots, name="weightings", dtype="float64") n.snapshot_weightings = pd.Series(
weightings, index=snapshots, name="weightings", dtype="float64"
)
segmented.index = snapshots segmented.index = snapshots
n.generators_t.p_max_pu = segmented[n.generators_t.p_max_pu.columns] * p_max_pu_norm n.generators_t.p_max_pu = segmented[n.generators_t.p_max_pu.columns] * p_max_pu_norm
@ -179,49 +217,57 @@ def apply_time_segmentation(n, segments, solver_name="cbc"):
return n return n
def enforce_autarky(n, only_crossborder=False): def enforce_autarky(n, only_crossborder=False):
if only_crossborder: if only_crossborder:
lines_rm = n.lines.loc[ lines_rm = n.lines.loc[
n.lines.bus0.map(n.buses.country) != n.lines.bus0.map(n.buses.country) != n.lines.bus1.map(n.buses.country)
n.lines.bus1.map(n.buses.country) ].index
].index
links_rm = n.links.loc[ links_rm = n.links.loc[
n.links.bus0.map(n.buses.country) != n.links.bus0.map(n.buses.country) != n.links.bus1.map(n.buses.country)
n.links.bus1.map(n.buses.country) ].index
].index
else: else:
lines_rm = n.lines.index lines_rm = n.lines.index
links_rm = n.links.loc[n.links.carrier=="DC"].index links_rm = n.links.loc[n.links.carrier == "DC"].index
n.mremove("Line", lines_rm) n.mremove("Line", lines_rm)
n.mremove("Link", links_rm) n.mremove("Link", links_rm)
def set_line_nom_max(n, s_nom_max_set=np.inf, p_nom_max_set=np.inf): def set_line_nom_max(n, s_nom_max_set=np.inf, p_nom_max_set=np.inf):
n.lines.s_nom_max.clip(upper=s_nom_max_set, inplace=True) n.lines.s_nom_max.clip(upper=s_nom_max_set, inplace=True)
n.links.p_nom_max.clip(upper=p_nom_max_set, inplace=True) n.links.p_nom_max.clip(upper=p_nom_max_set, inplace=True)
if __name__ == "__main__": if __name__ == "__main__":
if 'snakemake' not in globals(): if "snakemake" not in globals():
from _helpers import mock_snakemake from _helpers import mock_snakemake
snakemake = mock_snakemake('prepare_network', network='elec', simpl='',
clusters='40', ll='v0.3', opts='Co2L-24H') snakemake = mock_snakemake(
"prepare_network", simpl="", clusters="40", ll="v0.3", opts="Co2L-24H"
)
configure_logging(snakemake) configure_logging(snakemake)
opts = snakemake.wildcards.opts.split('-') opts = snakemake.wildcards.opts.split("-")
n = pypsa.Network(snakemake.input[0]) n = pypsa.Network(snakemake.input[0])
Nyears = n.snapshot_weightings.objective.sum() / 8760. Nyears = n.snapshot_weightings.objective.sum() / 8760.0
costs = load_costs(snakemake.input.tech_costs, snakemake.config['costs'], snakemake.config['electricity'], Nyears) costs = load_costs(
snakemake.input.tech_costs,
snakemake.config["costs"],
snakemake.config["electricity"],
Nyears,
)
set_line_s_max_pu(n, snakemake.config['lines']['s_max_pu']) set_line_s_max_pu(n, snakemake.config["lines"]["s_max_pu"])
for o in opts: for o in opts:
m = re.match(r'^\d+h$', o, re.IGNORECASE) m = re.match(r"^\d+h$", o, re.IGNORECASE)
if m is not None: if m is not None:
n = average_every_nhours(n, m.group(0)) n = average_every_nhours(n, m.group(0))
break break
for o in opts: for o in opts:
m = re.match(r'^\d+seg$', o, re.IGNORECASE) m = re.match(r"^\d+seg$", o, re.IGNORECASE)
if m is not None: if m is not None:
solver_name = snakemake.config["solving"]["solver"]["name"] solver_name = snakemake.config["solving"]["solver"]["name"]
n = apply_time_segmentation(n, m.group(0)[:-3], solver_name) n = apply_time_segmentation(n, m.group(0)[:-3], solver_name)
@ -231,10 +277,24 @@ if __name__ == "__main__":
if "Co2L" in o: if "Co2L" in o:
m = re.findall("[0-9]*\.?[0-9]+$", o) m = re.findall("[0-9]*\.?[0-9]+$", o)
if len(m) > 0: if len(m) > 0:
co2limit = float(m[0]) * snakemake.config['electricity']['co2base'] co2limit = float(m[0]) * snakemake.config["electricity"]["co2base"]
add_co2limit(n, co2limit, Nyears) add_co2limit(n, co2limit, Nyears)
logger.info("Setting CO2 limit according to wildcard value.")
else: else:
add_co2limit(n, snakemake.config['electricity']['co2limit'], Nyears) add_co2limit(n, snakemake.config["electricity"]["co2limit"], Nyears)
logger.info("Setting CO2 limit according to config value.")
break
for o in opts:
if "CH4L" in o:
m = re.findall("[0-9]*\.?[0-9]+$", o)
if len(m) > 0:
limit = float(m[0]) * 1e6
add_gaslimit(n, limit, Nyears)
logger.info("Setting gas usage limit according to wildcard value.")
else:
add_gaslimit(n, snakemake.config["electricity"].get("gaslimit"), Nyears)
logger.info("Setting gas usage limit according to config value.")
break break
for o in opts: for o in opts:
@ -243,7 +303,7 @@ if __name__ == "__main__":
if oo[0].startswith(tuple(suptechs)): if oo[0].startswith(tuple(suptechs)):
carrier = oo[0] carrier = oo[0]
# handles only p_nom_max as stores and lines have no potentials # handles only p_nom_max as stores and lines have no potentials
attr_lookup = {"p": "p_nom_max", "c": "capital_cost"} attr_lookup = {"p": "p_nom_max", "c": "capital_cost", "m": "marginal_cost"}
attr = attr_lookup[oo[1][0]] attr = attr_lookup[oo[1][0]]
factor = float(oo[1][1:]) factor = float(oo[1][1:])
if carrier == "AC": # lines do not have carrier if carrier == "AC": # lines do not have carrier
@ -252,20 +312,32 @@ if __name__ == "__main__":
comps = {"Generator", "Link", "StorageUnit", "Store"} comps = {"Generator", "Link", "StorageUnit", "Store"}
for c in n.iterate_components(comps): for c in n.iterate_components(comps):
sel = c.df.carrier.str.contains(carrier) sel = c.df.carrier.str.contains(carrier)
c.df.loc[sel,attr] *= factor c.df.loc[sel, attr] *= factor
if 'Ep' in opts: for o in opts:
add_emission_prices(n, snakemake.config['costs']['emission_prices']) if "Ep" in o:
m = re.findall("[0-9]*\.?[0-9]+$", o)
if len(m) > 0:
logger.info("Setting emission prices according to wildcard value.")
add_emission_prices(n, dict(co2=float(m[0])))
else:
logger.info("Setting emission prices according to config value.")
add_emission_prices(n, snakemake.config["costs"]["emission_prices"])
break
ll_type, factor = snakemake.wildcards.ll[0], snakemake.wildcards.ll[1:] ll_type, factor = snakemake.wildcards.ll[0], snakemake.wildcards.ll[1:]
set_transmission_limit(n, ll_type, factor, costs, Nyears) set_transmission_limit(n, ll_type, factor, costs, Nyears)
set_line_nom_max(n, s_nom_max_set=snakemake.config["lines"].get("s_nom_max", np.inf), set_line_nom_max(
 p_nom_max_set=snakemake.config["links"].get("p_nom_max", np.inf)) n,
 s_nom_max_set=snakemake.config["lines"].get("s_nom_max", np.inf),
 p_nom_max_set=snakemake.config["links"].get("p_nom_max", np.inf),
)
if "ATK" in opts: if "ATK" in opts:
enforce_autarky(n) enforce_autarky(n)
elif "ATKc" in opts: elif "ATKc" in opts:
enforce_autarky(n, only_crossborder=True) enforce_autarky(n, only_crossborder=True)
n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
n.export_to_netcdf(snakemake.output[0]) n.export_to_netcdf(snakemake.output[0])
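To make the option handling above easier to follow, here is a compressed, hedged sketch of how one ``opts`` wildcard string is split and matched (the value is invented, and the real script handles more options than shown):

import re

for o in "Co2L0.05-3H-Ep".split("-"):
    if re.match(r"^\d+h$", o, re.IGNORECASE):
        print(f"average snapshots to a {o} resolution")
    elif o.startswith("Co2L"):
        m = re.findall(r"[0-9]*\.?[0-9]+$", o)
        print("CO2 limit from", "wildcard share " + m[0] if m else "config")
    elif o.startswith("Ep"):
        print("add emission prices")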

@ -1,5 +1,6 @@
# Copyright 2019-2020 Fabian Hofmann (FIAS) # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors # Copyright 2019-2022 Fabian Hofmann (TUB, FIAS)
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
# #
# SPDX-License-Identifier: MIT # SPDX-License-Identifier: MIT
@ -11,7 +12,7 @@ The data bundle (1.4 GB) contains common GIS datasets like NUTS3 shapes, EEZ sha
This rule downloads the data bundle from `zenodo <https://doi.org/10.5281/zenodo.3517935>`_ and extracts it in the ``data`` sub-directory, such that all files of the bundle are stored in the ``data/bundle`` subdirectory. This rule downloads the data bundle from `zenodo <https://doi.org/10.5281/zenodo.3517935>`_ and extracts it in the ``data`` sub-directory, such that all files of the bundle are stored in the ``data/bundle`` subdirectory.
The :ref:`tutorial` uses a smaller `data bundle <https://zenodo.org/record/3517921/files/pypsa-eur-tutorial-data-bundle.tar.xz>`_ than required for the full model (19 MB) The :ref:`tutorial` uses a smaller `data bundle <https://zenodo.org/record/3517921/files/pypsa-eur-tutorial-data-bundle.tar.xz>`_ than required for the full model (188 MB)
.. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.3517921.svg .. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.3517921.svg
:target: https://doi.org/10.5281/zenodo.3517921 :target: https://doi.org/10.5281/zenodo.3517921
@ -28,29 +29,32 @@ The :ref:`tutorial` uses a smaller `data bundle <https://zenodo.org/record/35179
**Outputs** **Outputs**
- ``cutouts/bundle``: input data collected from various sources - ``data/bundle``: input data collected from various sources
""" """
import logging import logging
from _helpers import progress_retrieve, configure_logging
import tarfile import tarfile
from pathlib import Path from pathlib import Path
from _helpers import configure_logging, progress_retrieve
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
if __name__ == "__main__": if __name__ == "__main__":
if 'snakemake' not in globals(): if "snakemake" not in globals():
from _helpers import mock_snakemake from _helpers import mock_snakemake
snakemake = mock_snakemake('retrieve_databundle')
rootpath = '..'
else:
rootpath = '.'
configure_logging(snakemake) # TODO Make logging compatible with progressbar (see PR #102)
if snakemake.config['tutorial']: snakemake = mock_snakemake("retrieve_databundle")
rootpath = ".."
else:
rootpath = "."
configure_logging(
snakemake
) # TODO Make logging compatible with progressbar (see PR #102)
if snakemake.config["tutorial"]:
url = "https://zenodo.org/record/3517921/files/pypsa-eur-tutorial-data-bundle.tar.xz" url = "https://zenodo.org/record/3517921/files/pypsa-eur-tutorial-data-bundle.tar.xz"
else: else:
url = "https://zenodo.org/record/3517935/files/pypsa-eur-data-bundle.tar.xz" url = "https://zenodo.org/record/3517935/files/pypsa-eur-data-bundle.tar.xz"

@ -1,31 +1,34 @@
# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
# #
# SPDX-License-Identifier: MIT # SPDX-License-Identifier: MIT
# coding: utf-8 # coding: utf-8
""" """
Lifts electrical transmission network to a single 380 kV voltage layer, Lifts electrical transmission network to a single 380 kV voltage layer, removes
removes dead-ends of the network, dead-ends of the network, and reduces multi-hop HVDC connections to a single
and reduces multi-hop HVDC connections to a single link. link.
Relevant Settings Relevant Settings
----------------- -----------------
.. code:: yaml .. code:: yaml
clustering:
simplify_network:
cluster_network:
aggregation_strategies:
costs: costs:
USD2013_to_EUR2013: year:
discountrate: version:
fill_values:
marginal_cost: marginal_cost:
capital_cost: capital_cost:
electricity: electricity:
max_hours: max_hours:
renewables: (keys)
{technology}:
potential:
lines: lines:
length_factor: length_factor:
@ -44,7 +47,7 @@ Relevant Settings
Inputs Inputs
------ ------
- ``data/costs.csv``: The database of cost assumptions for all included technologies for specific years from various sources; e.g. discount rate, lifetime, investment (CAPEX), fixed operation and maintenance (FOM), variable operation and maintenance (VOM), fuel costs, efficiency, carbon-dioxide intensity. - ``resources/costs.csv``: The database of cost assumptions for all included technologies for specific years from various sources; e.g. discount rate, lifetime, investment (CAPEX), fixed operation and maintenance (FOM), variable operation and maintenance (VOM), fuel costs, efficiency, carbon-dioxide intensity.
- ``resources/regions_onshore.geojson``: confer :ref:`busregions` - ``resources/regions_onshore.geojson``: confer :ref:`busregions`
- ``resources/regions_offshore.geojson``: confer :ref:`busregions` - ``resources/regions_offshore.geojson``: confer :ref:`busregions`
- ``networks/elec.nc``: confer :ref:`electricity` - ``networks/elec.nc``: confer :ref:`electricity`
@ -75,7 +78,7 @@ The rule :mod:`simplify_network` does up to four things:
1. Create an equivalent transmission network in which all voltage levels are mapped to the 380 kV level by the function ``simplify_network(...)``. 1. Create an equivalent transmission network in which all voltage levels are mapped to the 380 kV level by the function ``simplify_network(...)``.
2. DC only sub-networks that are connected at only two buses to the AC network are reduced to a single representative link in the function ``simplify_links(...)``. The components attached to buses in between are moved to the nearest endpoint. The grid connection cost of offshore wind generators are added to the captial costs of the generator. 2. DC only sub-networks that are connected at only two buses to the AC network are reduced to a single representative link in the function ``simplify_links(...)``. The components attached to buses in between are moved to the nearest endpoint. The grid connection cost of offshore wind generators are added to the capital costs of the generator.
3. Stub lines and links, i.e. dead-ends of the network, are sequentially removed from the network in the function ``remove_stubs(...)``. Components are moved along. 3. Stub lines and links, i.e. dead-ends of the network, are sequentially removed from the network in the function ``remove_stubs(...)``. Components are moved along.
@ -83,54 +86,60 @@ The rule :mod:`simplify_network` does up to four things:
""" """
import logging import logging
from _helpers import configure_logging, update_p_nom_max
from cluster_network import clustering_for_n_clusters, cluster_regions
from add_electricity import load_costs
import pandas as pd
import numpy as np
import scipy as sp
from scipy.sparse.csgraph import connected_components, dijkstra
from functools import reduce from functools import reduce
import numpy as np
import pandas as pd
import pypsa import pypsa
import scipy as sp
from _helpers import configure_logging, get_aggregation_strategies, update_p_nom_max
from add_electricity import load_costs
from cluster_network import cluster_regions, clustering_for_n_clusters
from pypsa.io import import_components_from_dataframe, import_series_from_dataframe from pypsa.io import import_components_from_dataframe, import_series_from_dataframe
from pypsa.networkclustering import busmap_by_stubs, aggregategenerators, aggregateoneport, get_clustering_from_busmap, _make_consense from pypsa.networkclustering import (
aggregategenerators,
aggregateoneport,
busmap_by_stubs,
get_clustering_from_busmap,
)
from scipy.sparse.csgraph import connected_components, dijkstra
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
def simplify_network_to_380(n): def simplify_network_to_380(n):
## All goes to v_nom == 380 """
Fix all lines to a voltage level of 380 kV and remove all transformers.
The function preserves the transmission capacity for each line while updating
its voltage level, line type and number of parallel bundles (num_parallel).
Transformers are removed and connected components are moved from their
starting bus to their ending bus. The corresponding starting buses are
removed as well.
"""
logger.info("Mapping all network lines onto a single 380kV layer") logger.info("Mapping all network lines onto a single 380kV layer")
n.buses['v_nom'] = 380. n.buses["v_nom"] = 380.0
linetype_380, = n.lines.loc[n.lines.v_nom == 380., 'type'].unique() (linetype_380,) = n.lines.loc[n.lines.v_nom == 380.0, "type"].unique()
lines_v_nom_b = n.lines.v_nom != 380. n.lines["type"] = linetype_380
n.lines.loc[lines_v_nom_b, 'num_parallel'] *= (n.lines.loc[lines_v_nom_b, 'v_nom'] / 380.)**2 n.lines["v_nom"] = 380
n.lines.loc[lines_v_nom_b, 'v_nom'] = 380. n.lines["i_nom"] = n.line_types.i_nom[linetype_380]
n.lines.loc[lines_v_nom_b, 'type'] = linetype_380 n.lines["num_parallel"] = n.lines.eval("s_nom / (sqrt(3) * v_nom * i_nom)")
n.lines.loc[lines_v_nom_b, 's_nom'] = (
np.sqrt(3) * n.lines['type'].map(n.line_types.i_nom) *
n.lines.bus0.map(n.buses.v_nom) * n.lines.num_parallel
)
# Replace transformers by lines trafo_map = pd.Series(n.transformers.bus1.values, n.transformers.bus0.values)
trafo_map = pd.Series(n.transformers.bus1.values, index=n.transformers.bus0.values) trafo_map = trafo_map[~trafo_map.index.duplicated(keep="first")]
trafo_map = trafo_map[~trafo_map.index.duplicated(keep='first')]
several_trafo_b = trafo_map.isin(trafo_map.index) several_trafo_b = trafo_map.isin(trafo_map.index)
trafo_map.loc[several_trafo_b] = trafo_map.loc[several_trafo_b].map(trafo_map) trafo_map[several_trafo_b] = trafo_map[several_trafo_b].map(trafo_map)
missing_buses_i = n.buses.index.difference(trafo_map.index) missing_buses_i = n.buses.index.difference(trafo_map.index)
missing = pd.Series(missing_buses_i, missing_buses_i) missing = pd.Series(missing_buses_i, missing_buses_i)
trafo_map = pd.concat([trafo_map, missing]) trafo_map = pd.concat([trafo_map, missing])
for c in n.one_port_components|n.branch_components: for c in n.one_port_components | n.branch_components:
df = n.df(c) df = n.df(c)
for col in df.columns: for col in df.columns:
if col.startswith('bus'): if col.startswith("bus"):
df[col] = df[col].map(trafo_map) df[col] = df[col].map(trafo_map)
n.mremove("Transformer", n.transformers.index) n.mremove("Transformer", n.transformers.index)
@ -140,22 +149,30 @@ def simplify_network_to_380(n):
def _prepare_connection_costs_per_link(n, costs, config): def _prepare_connection_costs_per_link(n, costs, config):
if n.links.empty: return {} if n.links.empty:
return {}
connection_costs_per_link = {} connection_costs_per_link = {}
for tech in config['renewable']: for tech in config["renewable"]:
if tech.startswith('offwind'): if tech.startswith("offwind"):
connection_costs_per_link[tech] = ( connection_costs_per_link[tech] = (
n.links.length * config['lines']['length_factor'] * n.links.length
(n.links.underwater_fraction * costs.at[tech + '-connection-submarine', 'capital_cost'] + * config["lines"]["length_factor"]
(1. - n.links.underwater_fraction) * costs.at[tech + '-connection-underground', 'capital_cost']) * (
n.links.underwater_fraction
* costs.at[tech + "-connection-submarine", "capital_cost"]
+ (1.0 - n.links.underwater_fraction)
* costs.at[tech + "-connection-underground", "capital_cost"]
)
) )
return connection_costs_per_link return connection_costs_per_link
def _compute_connection_costs_to_bus(n, busmap, costs, config, connection_costs_per_link=None, buses=None): def _compute_connection_costs_to_bus(
n, busmap, costs, config, connection_costs_per_link=None, buses=None
):
if connection_costs_per_link is None: if connection_costs_per_link is None:
connection_costs_per_link = _prepare_connection_costs_per_link(n, costs, config) connection_costs_per_link = _prepare_connection_costs_per_link(n, costs, config)
@ -165,12 +182,21 @@ def _compute_connection_costs_to_bus(n, busmap, costs, config, connection_costs_
connection_costs_to_bus = pd.DataFrame(index=buses) connection_costs_to_bus = pd.DataFrame(index=buses)
for tech in connection_costs_per_link: for tech in connection_costs_per_link:
adj = n.adjacency_matrix(weights=pd.concat(dict(Link=connection_costs_per_link[tech].reindex(n.links.index), adj = n.adjacency_matrix(
Line=pd.Series(0., n.lines.index)))) weights=pd.concat(
dict(
Link=connection_costs_per_link[tech].reindex(n.links.index),
Line=pd.Series(0.0, n.lines.index),
)
)
)
costs_between_buses = dijkstra(adj, directed=False, indices=n.buses.index.get_indexer(buses)) costs_between_buses = dijkstra(
connection_costs_to_bus[tech] = costs_between_buses[np.arange(len(buses)), adj, directed=False, indices=n.buses.index.get_indexer(buses)
n.buses.index.get_indexer(busmap.loc[buses])] )
connection_costs_to_bus[tech] = costs_between_buses[
np.arange(len(buses)), n.buses.index.get_indexer(busmap.loc[buses])
]
return connection_costs_to_bus return connection_costs_to_bus
@ -179,17 +205,35 @@ def _adjust_capital_costs_using_connection_costs(n, connection_costs_to_bus, out
connection_costs = {} connection_costs = {}
for tech in connection_costs_to_bus: for tech in connection_costs_to_bus:
tech_b = n.generators.carrier == tech tech_b = n.generators.carrier == tech
costs = n.generators.loc[tech_b, "bus"].map(connection_costs_to_bus[tech]).loc[lambda s: s>0] costs = (
n.generators.loc[tech_b, "bus"]
.map(connection_costs_to_bus[tech])
.loc[lambda s: s > 0]
)
if not costs.empty: if not costs.empty:
n.generators.loc[costs.index, "capital_cost"] += costs n.generators.loc[costs.index, "capital_cost"] += costs
logger.info("Displacing {} generator(s) and adding connection costs to capital_costs: {} " logger.info(
.format(tech, ", ".join("{:.0f} Eur/MW/a for `{}`".format(d, b) for b, d in costs.iteritems()))) "Displacing {} generator(s) and adding connection costs to capital_costs: {} ".format(
tech,
", ".join(
"{:.0f} Eur/MW/a for `{}`".format(d, b)
for b, d in costs.items()
),
)
)
connection_costs[tech] = costs connection_costs[tech] = costs
pd.DataFrame(connection_costs).to_csv(output.connection_costs) pd.DataFrame(connection_costs).to_csv(output.connection_costs)
def _aggregate_and_move_components(
def _aggregate_and_move_components(n, busmap, connection_costs_to_bus, output, aggregate_one_ports={"Load", "StorageUnit"}): n,
busmap,
connection_costs_to_bus,
output,
aggregate_one_ports={"Load", "StorageUnit"},
aggregation_strategies=dict(),
exclude_carriers=None,
):
def replace_components(n, c, df, pnl): def replace_components(n, c, df, pnl):
n.mremove(c, n.df(c).index) n.mremove(c, n.df(c).index)
@ -200,7 +244,13 @@ def _aggregate_and_move_components(n, busmap, connection_costs_to_bus, output, a
_adjust_capital_costs_using_connection_costs(n, connection_costs_to_bus, output) _adjust_capital_costs_using_connection_costs(n, connection_costs_to_bus, output)
generators, generators_pnl = aggregategenerators(n, busmap, custom_strategies={'p_nom_min': np.sum}) _, generator_strategies = get_aggregation_strategies(aggregation_strategies)
carriers = set(n.generators.carrier) - set(exclude_carriers)
generators, generators_pnl = aggregategenerators(
n, busmap, carriers=carriers, custom_strategies=generator_strategies
)
replace_components(n, "Generator", generators, generators_pnl) replace_components(n, "Generator", generators, generators_pnl)
for one_port in aggregate_one_ports: for one_port in aggregate_one_ports:
@ -214,7 +264,7 @@ def _aggregate_and_move_components(n, busmap, connection_costs_to_bus, output, a
n.mremove(c, df.index[df.bus0.isin(buses_to_del) | df.bus1.isin(buses_to_del)]) n.mremove(c, df.index[df.bus0.isin(buses_to_del) | df.bus1.isin(buses_to_del)])
def simplify_links(n, costs, config, output): def simplify_links(n, costs, config, output, aggregation_strategies=dict()):
## Complex multi-node links are folded into end-points ## Complex multi-node links are folded into end-points
logger.info("Simplifying connected link components") logger.info("Simplifying connected link components")
@ -222,8 +272,10 @@ def simplify_links(n, costs, config, output):
return n, n.buses.index.to_series() return n, n.buses.index.to_series()
# Determine connected link components, ignore all links but DC # Determine connected link components, ignore all links but DC
adjacency_matrix = n.adjacency_matrix(branch_components=['Link'], adjacency_matrix = n.adjacency_matrix(
weights=dict(Link=(n.links.carrier == 'DC').astype(float))) branch_components=["Link"],
weights=dict(Link=(n.links.carrier == "DC").astype(float)),
)
_, labels = connected_components(adjacency_matrix, directed=False) _, labels = connected_components(adjacency_matrix, directed=False)
labels = pd.Series(labels, n.buses.index) labels = pd.Series(labels, n.buses.index)
@ -234,22 +286,23 @@ def simplify_links(n, costs, config, output):
nodes = frozenset(nodes) nodes = frozenset(nodes)
seen = set() seen = set()
supernodes = {m for m in nodes supernodes = {m for m in nodes if len(G.adj[m]) > 2 or (set(G.adj[m]) - nodes)}
if len(G.adj[m]) > 2 or (set(G.adj[m]) - nodes)}
for u in supernodes: for u in supernodes:
for m, ls in G.adj[u].items(): for m, ls in G.adj[u].items():
if m not in nodes or m in seen: continue if m not in nodes or m in seen:
continue
buses = [u, m] buses = [u, m]
links = [list(ls)] #[name for name in ls]] links = [list(ls)] # [name for name in ls]]
while m not in (supernodes | seen): while m not in (supernodes | seen):
seen.add(m) seen.add(m)
for m2, ls in G.adj[m].items(): for m2, ls in G.adj[m].items():
if m2 in seen or m2 == u: continue if m2 in seen or m2 == u:
continue
buses.append(m2) buses.append(m2)
links.append(list(ls)) # [name for name in ls]) links.append(list(ls)) # [name for name in ls])
break break
else: else:
# stub # stub
@ -262,81 +315,135 @@ def simplify_links(n, costs, config, output):
busmap = n.buses.index.to_series() busmap = n.buses.index.to_series()
connection_costs_per_link = _prepare_connection_costs_per_link(n, costs, config) connection_costs_per_link = _prepare_connection_costs_per_link(n, costs, config)
connection_costs_to_bus = pd.DataFrame(0., index=n.buses.index, columns=list(connection_costs_per_link)) connection_costs_to_bus = pd.DataFrame(
0.0, index=n.buses.index, columns=list(connection_costs_per_link)
)
for lbl in labels.value_counts().loc[lambda s: s > 2].index: for lbl in labels.value_counts().loc[lambda s: s > 2].index:
for b, buses, links in split_links(labels.index[labels == lbl]): for b, buses, links in split_links(labels.index[labels == lbl]):
if len(buses) <= 2: continue if len(buses) <= 2:
continue
logger.debug('nodes = {}'.format(labels.index[labels == lbl])) logger.debug("nodes = {}".format(labels.index[labels == lbl]))
logger.debug('b = {}\nbuses = {}\nlinks = {}'.format(b, buses, links)) logger.debug("b = {}\nbuses = {}\nlinks = {}".format(b, buses, links))
m = sp.spatial.distance_matrix(n.buses.loc[b, ['x', 'y']], m = sp.spatial.distance_matrix(
n.buses.loc[buses[1:-1], ['x', 'y']]) n.buses.loc[b, ["x", "y"]], n.buses.loc[buses[1:-1], ["x", "y"]]
)
busmap.loc[buses] = b[np.r_[0, m.argmin(axis=0), 1]] busmap.loc[buses] = b[np.r_[0, m.argmin(axis=0), 1]]
connection_costs_to_bus.loc[buses] += _compute_connection_costs_to_bus(n, busmap, costs, config, connection_costs_per_link, buses) connection_costs_to_bus.loc[buses] += _compute_connection_costs_to_bus(
n, busmap, costs, config, connection_costs_per_link, buses
)
all_links = [i for _, i in sum(links, [])] all_links = [i for _, i in sum(links, [])]
p_max_pu = config['links'].get('p_max_pu', 1.) p_max_pu = config["links"].get("p_max_pu", 1.0)
lengths = n.links.loc[all_links, 'length'] lengths = n.links.loc[all_links, "length"]
name = lengths.idxmax() + '+{}'.format(len(links) - 1) name = lengths.idxmax() + "+{}".format(len(links) - 1)
params = dict( params = dict(
carrier='DC', carrier="DC",
bus0=b[0], bus1=b[1], bus0=b[0],
length=sum(n.links.loc[[i for _, i in l], 'length'].mean() for l in links), bus1=b[1],
p_nom=min(n.links.loc[[i for _, i in l], 'p_nom'].sum() for l in links), length=sum(
underwater_fraction=sum(lengths/lengths.sum() * n.links.loc[all_links, 'underwater_fraction']), n.links.loc[[i for _, i in l], "length"].mean() for l in links
),
p_nom=min(n.links.loc[[i for _, i in l], "p_nom"].sum() for l in links),
underwater_fraction=sum(
lengths
/ lengths.sum()
* n.links.loc[all_links, "underwater_fraction"]
),
p_max_pu=p_max_pu, p_max_pu=p_max_pu,
p_min_pu=-p_max_pu, p_min_pu=-p_max_pu,
underground=False, underground=False,
under_construction=False under_construction=False,
) )
logger.info("Joining the links {} connecting the buses {} to simple link {}".format(", ".join(all_links), ", ".join(buses), name)) logger.info(
"Joining the links {} connecting the buses {} to simple link {}".format(
", ".join(all_links), ", ".join(buses), name
)
)
n.mremove("Link", all_links) n.mremove("Link", all_links)
static_attrs = n.components["Link"]["attrs"].loc[lambda df: df.static] static_attrs = n.components["Link"]["attrs"].loc[lambda df: df.static]
for attr, default in static_attrs.default.iteritems(): params.setdefault(attr, default) for attr, default in static_attrs.default.items():
params.setdefault(attr, default)
n.links.loc[name] = pd.Series(params) n.links.loc[name] = pd.Series(params)
# n.add("Link", **params) # n.add("Link", **params)
logger.debug("Collecting all components using the busmap") logger.debug("Collecting all components using the busmap")
_aggregate_and_move_components(n, busmap, connection_costs_to_bus, output) exclude_carriers = config["clustering"]["simplify_network"].get(
"exclude_carriers", []
)
_aggregate_and_move_components(
n,
busmap,
connection_costs_to_bus,
output,
aggregation_strategies=aggregation_strategies,
exclude_carriers=exclude_carriers,
)
return n, busmap return n, busmap
def remove_stubs(n, costs, config, output):
def remove_stubs(n, costs, config, output, aggregation_strategies=dict()):
logger.info("Removing stubs") logger.info("Removing stubs")
busmap = busmap_by_stubs(n) # ['country']) across_borders = config["clustering"]["simplify_network"].get("remove_stubs_across_borders", True)
matching_attrs = [] if across_borders else ['country']
busmap = busmap_by_stubs(n, matching_attrs)
connection_costs_to_bus = _compute_connection_costs_to_bus(n, busmap, costs, config) connection_costs_to_bus = _compute_connection_costs_to_bus(n, busmap, costs, config)
_aggregate_and_move_components(n, busmap, connection_costs_to_bus, output) exclude_carriers = config["clustering"]["simplify_network"].get(
"exclude_carriers", []
)
_aggregate_and_move_components(
n,
busmap,
connection_costs_to_bus,
output,
aggregation_strategies=aggregation_strategies,
exclude_carriers=exclude_carriers,
)
return n, busmap return n, busmap
def aggregate_to_substations(n, buses_i=None):
def aggregate_to_substations(n, aggregation_strategies=dict(), buses_i=None):
# can be used to aggregate a selection of buses to electrically closest neighbors # can be used to aggregate a selection of buses to electrically closest neighbors
# if no buses are given, nodes that are no substations or without offshore connection are aggregated # if no buses are given, nodes that are no substations or without offshore connection are aggregated
if buses_i is None: if buses_i is None:
logger.info("Aggregating buses that are no substations or have no valid offshore connection") logger.info(
buses_i = list(set(n.buses.index)-set(n.generators.bus)-set(n.loads.bus)) "Aggregating buses that are no substations or have no valid offshore connection"
)
buses_i = list(set(n.buses.index) - set(n.generators.bus) - set(n.loads.bus))
weight = pd.concat({'Line': n.lines.length/n.lines.s_nom.clip(1e-3), weight = pd.concat(
'Link': n.links.length/n.links.p_nom.clip(1e-3)}) {
"Line": n.lines.length / n.lines.s_nom.clip(1e-3),
"Link": n.links.length / n.links.p_nom.clip(1e-3),
}
)
adj = n.adjacency_matrix(branch_components=['Line', 'Link'], weights=weight) adj = n.adjacency_matrix(branch_components=["Line", "Link"], weights=weight)
bus_indexer = n.buses.index.get_indexer(buses_i) bus_indexer = n.buses.index.get_indexer(buses_i)
dist = pd.DataFrame(dijkstra(adj, directed=False, indices=bus_indexer), buses_i, n.buses.index) dist = pd.DataFrame(
dijkstra(adj, directed=False, indices=bus_indexer), buses_i, n.buses.index
)
dist[buses_i] = np.inf # bus in buses_i should not be assigned to different bus in buses_i dist[
buses_i
] = np.inf # bus in buses_i should not be assigned to different bus in buses_i
for c in n.buses.country.unique(): for c in n.buses.country.unique():
incountry_b = n.buses.country == c incountry_b = n.buses.country == c
@ -345,77 +452,150 @@ def aggregate_to_substations(n, buses_i=None):
busmap = n.buses.index.to_series() busmap = n.buses.index.to_series()
busmap.loc[buses_i] = dist.idxmin(1) busmap.loc[buses_i] = dist.idxmin(1)
clustering = get_clustering_from_busmap(n, busmap, bus_strategies, generator_strategies = get_aggregation_strategies(
bus_strategies=dict(country=_make_consense("Bus", "country")), aggregation_strategies
aggregate_generators_weighted=True, )
aggregate_generators_carriers=None,
aggregate_one_ports=["Load", "StorageUnit"],
line_length_factor=1.0,
generator_strategies={'p_nom_max': 'sum'},
scale_link_capital_costs=False)
clustering = get_clustering_from_busmap(
n,
busmap,
bus_strategies=bus_strategies,
aggregate_generators_weighted=True,
aggregate_generators_carriers=None,
aggregate_one_ports=["Load", "StorageUnit"],
line_length_factor=1.0,
generator_strategies=generator_strategies,
scale_link_capital_costs=False,
)
return clustering.network, busmap return clustering.network, busmap
def cluster(n, n_clusters, config): def cluster(
n, n_clusters, config, algorithm="hac", feature=None, aggregation_strategies=dict()
):
logger.info(f"Clustering to {n_clusters} buses") logger.info(f"Clustering to {n_clusters} buses")
focus_weights = config.get('focus_weights', None) focus_weights = config.get("focus_weights", None)
renewable_carriers = pd.Index([tech renewable_carriers = pd.Index(
for tech in n.generators.carrier.unique() [
if tech.split('-', 2)[0] in config['renewable']]) tech
def consense(x): for tech in n.generators.carrier.unique()
v = x.iat[0] if tech.split("-", 2)[0] in config["renewable"]
assert ((x == v).all() or x.isnull().all()), ( ]
"The `potential` configuration option must agree for all renewable carriers, for now!" )
)
return v clustering = clustering_for_n_clusters(
potential_mode = (consense(pd.Series([config['renewable'][tech]['potential'] n,
for tech in renewable_carriers])) n_clusters,
if len(renewable_carriers) > 0 else 'conservative') custom_busmap=False,
clustering = clustering_for_n_clusters(n, n_clusters, custom_busmap=False, potential_mode=potential_mode, aggregation_strategies=aggregation_strategies,
solver_name=config['solving']['solver']['name'], solver_name=config["solving"]["solver"]["name"],
focus_weights=focus_weights) algorithm=algorithm,
feature=feature,
focus_weights=focus_weights,
)
return clustering.network, clustering.busmap return clustering.network, clustering.busmap
if __name__ == "__main__": if __name__ == "__main__":
if 'snakemake' not in globals(): if "snakemake" not in globals():
from _helpers import mock_snakemake from _helpers import mock_snakemake
snakemake = mock_snakemake('simplify_network', simpl='', network='elec')
snakemake = mock_snakemake("simplify_network", simpl="")
configure_logging(snakemake) configure_logging(snakemake)
n = pypsa.Network(snakemake.input.network) n = pypsa.Network(snakemake.input.network)
aggregation_strategies = snakemake.config["clustering"].get(
"aggregation_strategies", {}
)
# translate str entries of aggregation_strategies to pd.Series functions:
aggregation_strategies = {
p: {k: getattr(pd.Series, v) for k, v in aggregation_strategies[p].items()}
for p in aggregation_strategies.keys()
}
    n, trafo_map = simplify_network_to_380(n)

    Nyears = n.snapshot_weightings.objective.sum() / 8760

    technology_costs = load_costs(
        snakemake.input.tech_costs,
        snakemake.config["costs"],
        snakemake.config["electricity"],
        Nyears,
    )

    n, simplify_links_map = simplify_links(
        n, technology_costs, snakemake.config, snakemake.output, aggregation_strategies
    )

    busmaps = [trafo_map, simplify_links_map]

    cluster_config = snakemake.config["clustering"]["simplify_network"]
    if cluster_config.get("remove_stubs", True):
        n, stub_map = remove_stubs(
            n,
            technology_costs,
            snakemake.config,
            snakemake.output,
            aggregation_strategies=aggregation_strategies,
        )
        busmaps.append(stub_map)
    if cluster_config.get("to_substations", False):
        n, substation_map = aggregate_to_substations(n, aggregation_strategies)
        busmaps.append(substation_map)

    # treatment of outliers (nodes without a profile for the considered carrier):
    # all nodes that have no profile of the given carrier are aggregated to their closest neighbor
    if (
        snakemake.config.get("clustering", {})
        .get("cluster_network", {})
        .get("algorithm", "hac")
        == "hac"
        or cluster_config.get("algorithm", "hac") == "hac"
    ):
        carriers = (
            cluster_config.get("feature", "solar+onwind-time").split("-")[0].split("+")
        )
        for carrier in carriers:
            buses_i = list(
                set(n.buses.index) - set(n.generators.query("carrier == @carrier").bus)
            )
            logger.info(
                f"clustering preparation (hac): aggregating {len(buses_i)} buses of type {carrier}."
            )
            n, busmap_hac = aggregate_to_substations(n, aggregation_strategies, buses_i)
            busmaps.append(busmap_hac)
    if snakemake.wildcards.simpl:
        n, cluster_map = cluster(
            n,
            int(snakemake.wildcards.simpl),
            snakemake.config,
            cluster_config.get("algorithm", "hac"),
            cluster_config.get("feature", None),
            aggregation_strategies,
        )
        busmaps.append(cluster_map)

    # Some entries in n.buses are not updated in the previous functions and can therefore be wrong.
    # As they are not needed and would be lost when clustering anyway (e.g. with the simpl wildcard),
    # we remove them here for consistency:
    buses_c = {
        "symbol",
        "tags",
        "under_construction",
        "substation_lv",
        "substation_off",
    }.intersection(n.buses.columns)
    n.buses = n.buses.drop(buses_c, axis=1)

    update_p_nom_max(n)

    n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
    n.export_to_netcdf(snakemake.output.network)

    busmap_s = reduce(lambda x, y: x.map(y), busmaps[1:], busmaps[0])
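    # For illustration with hypothetical bus names: if trafo_map sends "bus1_220" to
    # "bus1_380" and simplify_links_map sends "bus1_380" to "busA", the reduce(...) above
    # composes the two maps so that busmap_s sends "bus1_220" straight to "busA", i.e.
    # every original bus is mapped to its final simplified bus in a single lookup.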

View File

@ -1,9 +1,11 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Solves linear optimal power flow for a network iteratively while updating
reactances.

Relevant Settings
-----------------
@ -73,101 +75,123 @@ Details (and errors made through this heuristic) are discussed in the paper
The rule :mod:`solve_all_networks` runs
for all ``scenario`` s in the configuration file
the rule :mod:`solve_network`.
"""
import logging
import re
from pathlib import Path

import numpy as np
import pandas as pd
import pypsa
from _helpers import configure_logging
from pypsa.descriptors import get_switchable_as_dense as get_as_dense
from pypsa.linopf import (
    define_constraints,
    define_variables,
    get_var,
    ilopf,
    join_exprs,
    linexpr,
    network_lopf,
)
from vresutils.benchmark import memory_logger

logger = logging.getLogger(__name__)
def prepare_network(n, solve_opts):
    if "clip_p_max_pu" in solve_opts:
        for df in (n.generators_t.p_max_pu, n.storage_units_t.inflow):
            df.where(df > solve_opts["clip_p_max_pu"], other=0.0, inplace=True)

    load_shedding = solve_opts.get("load_shedding")
    if load_shedding:
        n.add("Carrier", "load", color="#dd2e23", nice_name="Load shedding")
        buses_i = n.buses.query("carrier == 'AC'").index
        if not np.isscalar(load_shedding):
            load_shedding = 1e2  # Eur/kWh
        # intersect between macroeconomic and surveybased
        # willingness to pay
        # http://journal.frontiersin.org/article/10.3389/fenrg.2015.00055/full
        n.madd(
            "Generator",
            buses_i,
            " load",
            bus=buses_i,
            carrier="load",
            sign=1e-3,  # Adjust sign to measure p and p_nom in kW instead of MW
            marginal_cost=load_shedding,
            p_nom=1e9,  # kW
        )
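        # Note on units (a reading aid, not part of the upstream code): sign=1e-3 rescales
        # the shedding generators so that p and p_nom are effectively measured in kW; the
        # marginal cost is therefore interpreted in EUR/kWh and p_nom=1e9 kW (1 TW) acts
        # as a practically unlimited shedding capacity.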
    if solve_opts.get("noisy_costs"):
        for t in n.iterate_components(n.one_port_components):
            # if 'capital_cost' in t.df:
            #     t.df['capital_cost'] += 1e1 + 2.*(np.random.random(len(t.df)) - 0.5)
            if "marginal_cost" in t.df:
                t.df["marginal_cost"] += 1e-2 + 2e-3 * (
                    np.random.random(len(t.df)) - 0.5
                )

        for t in n.iterate_components(["Line", "Link"]):
            t.df["capital_cost"] += (
                1e-1 + 2e-2 * (np.random.random(len(t.df)) - 0.5)
            ) * t.df["length"]

    if solve_opts.get("nhours"):
        nhours = solve_opts["nhours"]
        n.set_snapshots(n.snapshots[:nhours])
        n.snapshot_weightings[:] = 8760.0 / nhours

    return n
def add_CCL_constraints(n, config):
    agg_p_nom_limits = config["electricity"].get("agg_p_nom_limits")

    try:
        agg_p_nom_minmax = pd.read_csv(agg_p_nom_limits, index_col=list(range(2)))
    except IOError:
        logger.exception(
            "Need to specify the path to a .csv file containing "
            "aggregate capacity limits per country in "
            "config['electricity']['agg_p_nom_limit']."
        )
    logger.info(
        "Adding per carrier generation capacity constraints for individual countries"
    )

    gen_country = n.generators.bus.map(n.buses.country)
    # cc means country and carrier
    p_nom_per_cc = (
        pd.DataFrame(
            {
                "p_nom": linexpr((1, get_var(n, "Generator", "p_nom"))),
                "country": gen_country,
                "carrier": n.generators.carrier,
            }
        )
        .dropna(subset=["p_nom"])
        .groupby(["country", "carrier"])
        .p_nom.apply(join_exprs)
    )
    minimum = agg_p_nom_minmax["min"].dropna()
    if not minimum.empty:
        minconstraint = define_constraints(
            n, p_nom_per_cc[minimum.index], ">=", minimum, "agg_p_nom", "min"
        )
    maximum = agg_p_nom_minmax["max"].dropna()
    if not maximum.empty:
        maxconstraint = define_constraints(
            n, p_nom_per_cc[maximum.index], "<=", maximum, "agg_p_nom", "max"
        )
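# A minimal sketch (hypothetical values) of the CSV layout assumed above, indexed by the
# first two columns (country, carrier) and providing the "min"/"max" columns that are read:
#
#   country,carrier,min,max
#   DE,onwind,10000,200000
#   DE,solar,20000,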
def add_EQ_constraints(n, o, scaling=1e-1):
    float_regex = "[0-9]*\.?[0-9]+"
    level = float(re.findall(float_regex, o)[0])
    if o[-1] == "c":
        ggrouper = n.generators.bus.map(n.buses.country)
        lgrouper = n.loads.bus.map(n.buses.country)
        sgrouper = n.storage_units.bus.map(n.buses.country)
@ -175,116 +199,239 @@ def add_EQ_constraints(n, o, scaling=1e-1):
        ggrouper = n.generators.bus
        lgrouper = n.loads.bus
        sgrouper = n.storage_units.bus
    load = (
        n.snapshot_weightings.generators
        @ n.loads_t.p_set.groupby(lgrouper, axis=1).sum()
    )
    inflow = (
        n.snapshot_weightings.stores
        @ n.storage_units_t.inflow.groupby(sgrouper, axis=1).sum()
    )
    inflow = inflow.reindex(load.index).fillna(0.0)
    rhs = scaling * (level * load - inflow)
    lhs_gen = (
        linexpr(
            (n.snapshot_weightings.generators * scaling, get_var(n, "Generator", "p").T)
        )
        .T.groupby(ggrouper, axis=1)
        .apply(join_exprs)
    )
    lhs_spill = (
        linexpr(
            (
                -n.snapshot_weightings.stores * scaling,
                get_var(n, "StorageUnit", "spill").T,
            )
        )
        .T.groupby(sgrouper, axis=1)
        .apply(join_exprs)
    )
    lhs_spill = lhs_spill.reindex(lhs_gen.index).fillna("")
    lhs = lhs_gen + lhs_spill
    define_constraints(n, lhs, ">=", rhs, "equity", "min")
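# Reading aid (a sketch of the intent, not a verbatim restatement of the code): for every
# group n (country with the "c" suffix, otherwise bus), the "equity" constraint enforces
#
#   sum_t w_t * (g_{n,t} - spill_{n,t})  >=  level * load_n - inflow_n
#
# with both sides multiplied by the same numerical scaling factor for better conditioning.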
def add_BAU_constraints(n, config):
    mincaps = pd.Series(config["electricity"]["BAU_mincapacities"])
    lhs = (
        linexpr((1, get_var(n, "Generator", "p_nom")))
        .groupby(n.generators.carrier)
        .apply(join_exprs)
    )
    define_constraints(n, lhs, ">=", mincaps[lhs.index], "Carrier", "bau_mincaps")
def add_SAFE_constraints(n, config):
    peakdemand = (
        1.0 + config["electricity"]["SAFE_reservemargin"]
    ) * n.loads_t.p_set.sum(axis=1).max()
    conv_techs = config["plotting"]["conv_techs"]
    exist_conv_caps = n.generators.query(
        "~p_nom_extendable & carrier in @conv_techs"
    ).p_nom.sum()
    ext_gens_i = n.generators.query("carrier in @conv_techs & p_nom_extendable").index
    lhs = linexpr((1, get_var(n, "Generator", "p_nom")[ext_gens_i])).sum()
    rhs = peakdemand - exist_conv_caps
    define_constraints(n, lhs, ">=", rhs, "Safe", "mintotalcap")
def add_operational_reserve_margin_constraint(n, config):
    reserve_config = config["electricity"]["operational_reserve"]
    EPSILON_LOAD = reserve_config["epsilon_load"]
    EPSILON_VRES = reserve_config["epsilon_vres"]
    CONTINGENCY = reserve_config["contingency"]

    # Reserve Variables
    reserve = get_var(n, "Generator", "r")
    lhs = linexpr((1, reserve)).sum(1)

    # Share of extendable renewable capacities
    ext_i = n.generators.query("p_nom_extendable").index
    vres_i = n.generators_t.p_max_pu.columns
    if not ext_i.empty and not vres_i.empty:
        capacity_factor = n.generators_t.p_max_pu[vres_i.intersection(ext_i)]
        renewable_capacity_variables = get_var(n, "Generator", "p_nom")[
            vres_i.intersection(ext_i)
        ]
        lhs += linexpr(
            (-EPSILON_VRES * capacity_factor, renewable_capacity_variables)
        ).sum(1)

    # Total demand at t
    demand = n.loads_t.p_set.sum(1)

    # VRES potential of non-extendable generators
    capacity_factor = n.generators_t.p_max_pu[vres_i.difference(ext_i)]
    renewable_capacity = n.generators.p_nom[vres_i.difference(ext_i)]
    potential = (capacity_factor * renewable_capacity).sum(1)

    # Right-hand side
    rhs = EPSILON_LOAD * demand + EPSILON_VRES * potential + CONTINGENCY

    define_constraints(n, lhs, ">=", rhs, "Reserve margin")


def update_capacity_constraint(n):
    gen_i = n.generators.index
    ext_i = n.generators.query("p_nom_extendable").index
    fix_i = n.generators.query("not p_nom_extendable").index

    dispatch = get_var(n, "Generator", "p")
    reserve = get_var(n, "Generator", "r")

    capacity_fixed = n.generators.p_nom[fix_i]

    p_max_pu = get_as_dense(n, "Generator", "p_max_pu")

    lhs = linexpr((1, dispatch), (1, reserve))

    if not ext_i.empty:
        capacity_variable = get_var(n, "Generator", "p_nom")
        lhs += linexpr((-p_max_pu[ext_i], capacity_variable)).reindex(
            columns=gen_i, fill_value=""
        )

    rhs = (p_max_pu[fix_i] * capacity_fixed).reindex(columns=gen_i, fill_value=0)

    define_constraints(n, lhs, "<=", rhs, "Generators", "updated_capacity_constraint")


def add_operational_reserve_margin(n, sns, config):
    """
    Build reserve margin constraints based on the formulation given in
    https://genxproject.github.io/GenX/dev/core/#Reserves.
    """
    define_variables(n, 0, np.inf, "Generator", "r", axes=[sns, n.generators.index])

    add_operational_reserve_margin_constraint(n, config)

    update_capacity_constraint(n)
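# Sketch of the configuration block these reserve functions expect (hypothetical values,
# only the keys read above and in extra_functionality are shown):
#
#   electricity:
#     operational_reserve:
#       activate: true
#       epsilon_load: 0.02   # fraction of total demand held as reserve
#       epsilon_vres: 0.02   # fraction of variable renewable potential held as reserve
#       contingency: 4000    # fixed reserve requirement in MW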
def add_battery_constraints(n):
    nodes = n.buses.index[n.buses.carrier == "battery"]
    if nodes.empty or ("Link", "p_nom") not in n.variables.index:
        return
    link_p_nom = get_var(n, "Link", "p_nom")
    lhs = linexpr(
        (1, link_p_nom[nodes + " charger"]),
        (
            -n.links.loc[nodes + " discharger", "efficiency"].values,
            link_p_nom[nodes + " discharger"].values,
        ),
    )
    define_constraints(n, lhs, "=", 0, "Link", "charger_ratio")
def extra_functionality(n, snapshots):
    """
    Collects supplementary constraints which will be passed to
    ``pypsa.linopf.network_lopf``.

    If you want to enforce additional custom constraints, this is a good
    location to add them. The arguments ``opts`` and
    ``snakemake.config`` are expected to be attached to the network.
    """
    opts = n.opts
    config = n.config
    if "BAU" in opts and n.generators.p_nom_extendable.any():
        add_BAU_constraints(n, config)
    if "SAFE" in opts and n.generators.p_nom_extendable.any():
        add_SAFE_constraints(n, config)
    if "CCL" in opts and n.generators.p_nom_extendable.any():
        add_CCL_constraints(n, config)
    reserve = config["electricity"].get("operational_reserve", {})
    if reserve.get("activate"):
        add_operational_reserve_margin(n, snapshots, config)
    for o in opts:
        if "EQ" in o:
            add_EQ_constraints(n, o)
    add_battery_constraints(n)
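# A minimal sketch of a further custom constraint that could be hooked in here, following
# the same linopf pattern as add_SAFE_constraints (hypothetical name and limit):
def add_custom_onwind_cap(n, cap_mw=1e5):
    # cap total extendable onshore wind capacity at cap_mw megawatts
    onwind_i = n.generators.query("carrier == 'onwind' and p_nom_extendable").index
    if onwind_i.empty:
        return
    lhs = linexpr((1, get_var(n, "Generator", "p_nom")[onwind_i])).sum()
    define_constraints(n, lhs, "<=", cap_mw, "Generator", "custom_onwind_cap")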
def solve_network(n, config, opts="", **kwargs):
    solver_options = config["solving"]["solver"].copy()
    solver_name = solver_options.pop("name")
    cf_solving = config["solving"]["options"]
    track_iterations = cf_solving.get("track_iterations", False)
    min_iterations = cf_solving.get("min_iterations", 4)
    max_iterations = cf_solving.get("max_iterations", 6)

    # add to network for extra_functionality
    n.config = config
    n.opts = opts

    skip_iterations = cf_solving.get("skip_iterations", False)
    if not n.lines.s_nom_extendable.any():
        skip_iterations = True
        logger.info("No expandable lines found. Skipping iterative solving.")

    if skip_iterations:
        network_lopf(
            n, solver_name=solver_name, solver_options=solver_options, **kwargs
        )
    else:
        ilopf(
            n,
            solver_name=solver_name,
            solver_options=solver_options,
            track_iterations=track_iterations,
            min_iterations=min_iterations,
            max_iterations=max_iterations,
            **kwargs
        )
    return n
if __name__ == "__main__":
    if "snakemake" not in globals():
        from _helpers import mock_snakemake

        snakemake = mock_snakemake(
            "solve_network", simpl="", clusters="5", ll="copt", opts="Co2L-BAU-CCL-24H"
        )
    configure_logging(snakemake)

    tmpdir = snakemake.config["solving"].get("tmpdir")
    if tmpdir is not None:
        Path(tmpdir).mkdir(parents=True, exist_ok=True)
    opts = snakemake.wildcards.opts.split("-")
    solve_opts = snakemake.config["solving"]["options"]

    fn = getattr(snakemake.log, "memory", None)
    with memory_logger(filename=fn, interval=30.0) as mem:
        n = pypsa.Network(snakemake.input[0])
        n = prepare_network(n, solve_opts)
        n = solve_network(
            n,
            snakemake.config,
            opts,
            extra_functionality=extra_functionality,
            solver_dir=tmpdir,
            solver_logfile=snakemake.log.solver,
        )
        n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
        n.export_to_netcdf(snakemake.output[0])

    logger.info("Maximum memory usage: {}".format(mem.mem_usage))

View File

@ -1,10 +1,11 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Solves linear optimal dispatch in hourly resolution using the capacities of
previous capacity expansion in rule :mod:`solve_network`.

Relevant Settings
-----------------
@ -42,65 +43,80 @@ Outputs
Description
-----------
"""
import logging
from pathlib import Path

import numpy as np
import pypsa
from _helpers import configure_logging
from solve_network import prepare_network, solve_network
from vresutils.benchmark import memory_logger

logger = logging.getLogger(__name__)
def set_parameters_from_optimized(n, n_optim):
    lines_typed_i = n.lines.index[n.lines.type != ""]
    n.lines.loc[lines_typed_i, "num_parallel"] = n_optim.lines["num_parallel"].reindex(
        lines_typed_i, fill_value=0.0
    )
    n.lines.loc[lines_typed_i, "s_nom"] = (
        np.sqrt(3)
        * n.lines["type"].map(n.line_types.i_nom)
        * n.lines.bus0.map(n.buses.v_nom)
        * n.lines.num_parallel
    )

    lines_untyped_i = n.lines.index[n.lines.type == ""]
    for attr in ("s_nom", "r", "x"):
        n.lines.loc[lines_untyped_i, attr] = n_optim.lines[attr].reindex(
            lines_untyped_i, fill_value=0.0
        )
    n.lines["s_nom_extendable"] = False

    links_dc_i = n.links.index[n.links.p_nom_extendable]
    n.links.loc[links_dc_i, "p_nom"] = n_optim.links["p_nom_opt"].reindex(
        links_dc_i, fill_value=0.0
    )
    n.links.loc[links_dc_i, "p_nom_extendable"] = False

    gen_extend_i = n.generators.index[n.generators.p_nom_extendable]
    n.generators.loc[gen_extend_i, "p_nom"] = n_optim.generators["p_nom_opt"].reindex(
        gen_extend_i, fill_value=0.0
    )
    n.generators.loc[gen_extend_i, "p_nom_extendable"] = False

    stor_units_extend_i = n.storage_units.index[n.storage_units.p_nom_extendable]
    n.storage_units.loc[stor_units_extend_i, "p_nom"] = n_optim.storage_units[
        "p_nom_opt"
    ].reindex(stor_units_extend_i, fill_value=0.0)
    n.storage_units.loc[stor_units_extend_i, "p_nom_extendable"] = False

    stor_extend_i = n.stores.index[n.stores.e_nom_extendable]
    n.stores.loc[stor_extend_i, "e_nom"] = n_optim.stores["e_nom_opt"].reindex(
        stor_extend_i, fill_value=0.0
    )
    n.stores.loc[stor_extend_i, "e_nom_extendable"] = False

    return n
if __name__ == "__main__":
    if "snakemake" not in globals():
        from _helpers import mock_snakemake

        snakemake = mock_snakemake(
            "solve_operations_network",
            simpl="",
            clusters="5",
            ll="copt",
            opts="Co2L-BAU-24H",
        )
    configure_logging(snakemake)

    tmpdir = snakemake.config["solving"].get("tmpdir")
    if tmpdir is not None:
        Path(tmpdir).mkdir(parents=True, exist_ok=True)
@ -109,14 +125,20 @@ if __name__ == "__main__":
    n = set_parameters_from_optimized(n, n_optim)
    del n_optim

    opts = snakemake.wildcards.opts.split("-")
    snakemake.config["solving"]["options"]["skip_iterations"] = False

    fn = getattr(snakemake.log, "memory", None)
    with memory_logger(filename=fn, interval=30.0) as mem:
        n = prepare_network(n, snakemake.config["solving"]["options"])
        n = solve_network(
            n,
            snakemake.config,
            opts,
            solver_dir=tmpdir,
            solver_logfile=snakemake.log.solver,
        )
        n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
        n.export_to_netcdf(snakemake.output[0])

    logger.info("Maximum memory usage: {}".format(mem.mem_usage))

View File

@ -1,14 +1,15 @@
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: CC0-1.0

version: 0.6.1
tutorial: true

logging:
  level: INFO
  format: '%(levelname)s:%(name)s:%(message)s'

run:
  name: ""
scenario:
  simpl: ['']
@ -16,11 +17,7 @@ scenario:
  clusters: [5]
  opts: [Co2L-24H]

countries: ['BE']
snapshots:
  start: "2013-03-01"
@ -30,6 +27,7 @@ snapshots:
enable:
  prepare_links_p_nom: false
  retrieve_databundle: true
  retrieve_cost_data: true
  build_cutout: false
  retrieve_cutout: true
  build_natura_raster: false
@ -56,8 +54,9 @@ electricity:
atlite:
  nprocesses: 4
  show_progress: false # false saves time
  cutouts:
    be-03-2013-era5:
      module: era5
      x: [4., 15.]
      y: [46., 56.]
@ -65,7 +64,7 @@ atlite:
renewable:
  onwind:
    cutout: be-03-2013-era5
    resource:
      method: wind
      turbine: Vestas_V112_3MW
@ -74,15 +73,15 @@ renewable:
    corine:
      # Scholz, Y. (2012). Renewable energy based electricity supply at low costs:
      # development of the REMix model and application for Europe. ( p.42 / p.28)
      grid_codes: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 32]
      distance: 1000
      distance_grid_codes: [1, 2, 3, 4, 5, 6]
    natura: true
    excluder_resolution: 200
    potential: simple # or conservative
    clip_p_max_pu: 1.e-2
  offwind-ac:
    cutout: be-03-2013-era5
    resource:
      method: wind
      turbine: NREL_ReferenceTurbine_5MW_offshore
@ -90,11 +89,13 @@ renewable:
    # correction_factor: 0.93
    corine: [44, 255]
    natura: true
    ship_threshold: 400
    max_shore_distance: 30000
    excluder_resolution: 200
    potential: simple # or conservative
    clip_p_max_pu: 1.e-2
  offwind-dc:
    cutout: be-03-2013-era5
    resource:
      method: wind
      turbine: NREL_ReferenceTurbine_5MW_offshore
@ -103,11 +104,13 @@ renewable:
    # correction_factor: 0.93
    corine: [44, 255]
    natura: true
    ship_threshold: 400
    min_shore_distance: 30000
    excluder_resolution: 200
    potential: simple # or conservative
    clip_p_max_pu: 1.e-2
  solar:
    cutout: be-03-2013-era5
    resource:
      method: pv
      panel: CSi
@ -121,9 +124,9 @@ renewable:
    # sector: The economic potential of photovoltaics and concentrating solar
    # power." Applied Energy 135 (2014): 704-720.
    correction_factor: 0.854337
    corine: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 26, 31, 32]
    natura: true
    excluder_resolution: 200
    potential: simple # or conservative
    clip_p_max_pu: 1.e-2
@ -149,7 +152,7 @@ transformers:
  type: ''

load:
  power_statistics: true # only for files from <2019; set false in order to get ENTSOE transparency data
  interpolate_limit: 3 # data gaps up until this size are interpolated linearly
  time_shift_for_large_gaps: 1w # data gaps up until this size are copied by copying from
  manual_adjustments: true # false
@ -157,8 +160,17 @@ load:
costs:
  year: 2030
  version: v0.4.0
  rooftop_share: 0.14
  fill_values:
    FOM: 0
    VOM: 0
    efficiency: 1
    fuel: 0
    investment: 0
    lifetime: 25
    "CO2 intensity": 0
    "discount rate": 0.07
  marginal_cost:
    solar: 0.01
    onwind: 0.015
@ -168,6 +180,26 @@ costs:
  emission_prices: # only used with the option Ep
    co2: 0.

clustering:
  simplify_network:
    to_substations: false # network is simplified to nodes with positive or negative power injection (i.e. substations or offwind connections)
    algorithm: kmeans # choose from: [hac, kmeans]
    feature: solar+onwind-time # only for hac. choose from: [solar+onwind-time, solar+onwind-cap, solar-time, solar-cap, solar+offwind-cap] etc.
  cluster_network:
    algorithm: kmeans
    feature: solar+onwind-time
    exclude_carriers: ["OCGT", "offwind-ac", "coal"]
  aggregation_strategies:
    generators:
      p_nom_max: sum # use "min" for more conservative assumptions
      p_nom_min: sum
      p_min_pu: mean
      marginal_cost: mean
      committable: any
      ramp_limit_up: max
      ramp_limit_down: max
      efficiency: mean
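# Reading aid: the "feature" strings above are parsed by the HAC preparation step in
# simplify_network.py as carriers (before the dash) plus a mode (after it), e.g.
# "solar+onwind-time" -> carriers ["solar", "onwind"], mode "time".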
solving:
  options:
    formulation: kirchhoff
@ -200,7 +232,7 @@ solving:
plotting:
  map:
    figsize: [7, 7]
    boundaries: [-10.2, 29, 35, 72]
    p_nom:
      bus_size_factor: 5.e+4
      linewidth_factor: 3.e+3
@ -219,50 +251,50 @@ plotting:
  AC_carriers: ["AC line", "AC transformer"]
  link_carriers: ["DC line", "Converter AC-DC"]
  tech_colors:
    "onwind": "#235ebc"
    "onshore wind": "#235ebc"
    'offwind': "#6895dd"
    'offwind-ac': "#6895dd"
    'offshore wind': "#6895dd"
    'offshore wind ac': "#6895dd"
    'offwind-dc': "#74c6f2"
    'offshore wind dc': "#74c6f2"
    "hydro": "#08ad97"
    "hydro+PHS": "#08ad97"
    "PHS": "#08ad97"
    "hydro reservoir": "#08ad97"
    'hydroelectricity': '#08ad97'
    "ror": "#4adbc8"
    "run of river": "#4adbc8"
    'solar': "#f9d002"
    'solar PV': "#f9d002"
    'solar thermal': '#ffef60'
    'biomass': '#0c6013'
    'solid biomass': '#06540d'
    'biogas': '#23932d'
    'waste': '#68896b'
    'geothermal': '#ba91b1'
    "OCGT": "#d35050"
    "gas": "#d35050"
    "natural gas": "#d35050"
    "CCGT": "#b20101"
    "nuclear": "#ff9000"
    "coal": "#707070"
    "lignite": "#9e5a01"
    "oil": "#262626"
    "H2": "#ea048a"
    "hydrogen storage": "#ea048a"
    "battery": "#b8ea04"
    "Electric load": "#f9d002"
    "electricity": "#f9d002"
    "lines": "#70af1d"
    "transmission lines": "#70af1d"
    "AC-AC": "#70af1d"
    "AC line": "#70af1d"
    "links": "#8a1caf"
    "HVDC links": "#8a1caf"
    "DC-DC": "#8a1caf"
    "DC link": "#8a1caf"
  nice_names:
    OCGT: "Open-Cycle Gas"
    CCGT: "Combined-Cycle Gas"