diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 00000000..19a99c9b --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,7 @@ +# SPDX-FileCopyrightText: : 2022 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: CC0-1.0 + +# Exclude pre-commit applications +5d1ef8a64055a039aa4a0834d2d26fe7752fe9a0 +92080b1cd2ca5f123158571481722767b99c2b27 diff --git a/.gitattributes b/.gitattributes index 6d21b21b..9e85b38b 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 5b64d242..d8c04382 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,5 +1,5 @@ blank_issues_enabled: false contact_links: - - name: PyPSA Mailing List - url: https://groups.google.com/forum/#!forum/pypsa - about: Please ask and answer general usage questions here. +- name: PyPSA Mailing List + url: https://groups.google.com/forum/#!forum/pypsa + about: Please ask and answer general usage questions here. diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index c753deab..8b888cbe 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -16,7 +16,7 @@ on: branches: - master schedule: - - cron: "0 5 * * TUE" + - cron: "0 5 * * TUE" env: CACHE_NUMBER: 1 # Change this value to manually reset the environment cache @@ -28,63 +28,73 @@ jobs: matrix: include: # Matrix required to handle caching with Mambaforge - - os: ubuntu-latest - label: ubuntu-latest - prefix: /usr/share/miniconda3/envs/pypsa-eur - - os: macos-latest - label: macos-latest - prefix: /Users/runner/miniconda3/envs/pypsa-eur - - os: windows-latest - label: windows-latest - prefix: C:\Miniconda3\envs\pypsa-eur + - os: ubuntu-latest + label: ubuntu-latest + prefix: /usr/share/miniconda3/envs/pypsa-eur + - os: macos-latest + label: macos-latest + prefix: /Users/runner/miniconda3/envs/pypsa-eur + - os: windows-latest + label: windows-latest + prefix: C:\Miniconda3\envs\pypsa-eur name: ${{ matrix.label }} runs-on: ${{ matrix.os }} - defaults: run: shell: bash -l {0} - steps: - - uses: actions/checkout@v2 - - - name: Setup secrets - run: | - echo -ne "url: ${CDSAPI_URL}\nkey: ${CDSAPI_TOKEN}\n" > ~/.cdsapirc + - uses: actions/checkout@v2 - - name: Add solver to environment - run: | - echo -e " - glpk\n - ipopt<3.13.3" >> envs/environment.yaml + - name: Setup secrets + run: | + echo -ne "url: ${CDSAPI_URL}\nkey: ${CDSAPI_TOKEN}\n" > ~/.cdsapirc - - name: Setup Mambaforge - uses: conda-incubator/setup-miniconda@v2 - with: - miniforge-variant: Mambaforge - miniforge-version: latest - activate-environment: pypsa-eur - use-mamba: true - - - name: Set cache date - run: echo "DATE=$(date +'%Y%m%d')" >> $GITHUB_ENV - - name: Create environment cache - uses: actions/cache@v2 - id: cache - with: - path: ${{ matrix.prefix }} - key: ${{ matrix.label }}-conda-${{ hashFiles('envs/environment.yaml') }}-${{ env.DATE }}-${{ env.CACHE_NUMBER }} + - name: Add solver to environment + run: | + echo -e "- glpk\n- ipopt<3.13.3" >> envs/environment.yaml + if: matrix.label == 'windows-latest' - - name: Update environment due to outdated or unavailable cache - run: mamba env update -n pypsa-eur -f envs/environment.yaml - if: 
steps.cache.outputs.cache-hit != 'true' + - name: Add solver to environment + run: | + echo -e "- glpk\n- ipopt" >> envs/environment.yaml + if: matrix.label != 'windows-latest' - - name: Test snakemake workflow - run: | - conda activate pypsa-eur - conda list - cp test/config.test1.yaml config.yaml - snakemake --cores all solve_all_networks - rm -rf resources/*.nc resources/*.geojson resources/*.h5 networks results + - name: Setup Mambaforge + uses: conda-incubator/setup-miniconda@v2 + with: + miniforge-variant: Mambaforge + miniforge-version: latest + activate-environment: pypsa-eur + use-mamba: true + + - name: Set cache date + run: echo "DATE=$(date +'%Y%m%d')" >> $GITHUB_ENV + + - name: Create environment cache + uses: actions/cache@v2 + id: cache + with: + path: ${{ matrix.prefix }} + key: ${{ matrix.label }}-conda-${{ hashFiles('envs/environment.yaml') }}-${{ env.DATE }}-${{ env.CACHE_NUMBER }} + + - name: Update environment due to outdated or unavailable cache + run: mamba env update -n pypsa-eur -f envs/environment.yaml + if: steps.cache.outputs.cache-hit != 'true' + + - name: Test snakemake workflow + run: | + conda activate pypsa-eur + conda list + cp test/config.test1.yaml config.yaml + snakemake --cores all solve_all_networks + rm -rf resources/*.nc resources/*.geojson resources/*.h5 networks results diff --git a/.gitignore b/.gitignore index b4734ab2..80f91408 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 @@ -19,6 +19,7 @@ gurobi.log /data /data/links_p_nom.csv /cutouts +/dask-worker-space doc/_build diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..bb48c2db --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,92 @@ +# SPDX-FileCopyrightText: : 2022 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: CC0-1.0 +exclude: "^LICENSES" + +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: check-merge-conflict + - id: end-of-file-fixer + - id: fix-encoding-pragma + - id: mixed-line-ending + - id: trailing-whitespace + - id: check-added-large-files + args: ["--maxkb=2000"] + + # Sort package imports alphabetically +- repo: https://github.com/PyCQA/isort + rev: 5.11.4 + hooks: + - id: isort + args: ["--profile", "black", "--filter-files"] + + # Convert relative imports to absolute imports +- repo: https://github.com/MarcoGorelli/absolufy-imports + rev: v0.3.1 + hooks: + - id: absolufy-imports + + # Find common spelling mistakes in comments and docstrings +- repo: https://github.com/codespell-project/codespell + rev: v2.2.2 + hooks: + - id: codespell + args: ['--ignore-regex="(\b[A-Z]+\b)"', '--ignore-words-list=fom'] # Ignore capital case words, e.g. 
country codes + types_or: [python, rst, markdown] + files: ^(scripts|doc)/ + + # Make docstrings PEP 257 compliant +- repo: https://github.com/PyCQA/docformatter + rev: v1.5.1 + hooks: + - id: docformatter + args: ["--in-place", "--make-summary-multi-line", "--pre-summary-newline"] + +- repo: https://github.com/keewis/blackdoc + rev: v0.3.8 + hooks: + - id: blackdoc + + # Formatting with "black" coding style +- repo: https://github.com/psf/black + rev: 22.12.0 + hooks: + # Format Python files + - id: black + # Format Jupyter Python notebooks + - id: black-jupyter + + # Remove output from Jupyter notebooks +- repo: https://github.com/aflc/pre-commit-jupyter + rev: v1.2.1 + hooks: + - id: jupyter-notebook-cleanup + exclude: examples/solve-on-remote.ipynb + args: ["--remove-kernel-metadata"] + + # Do YAML formatting (before the linter checks it for misses) +- repo: https://github.com/macisamuele/language-formatters-pre-commit-hooks + rev: v2.5.0 + hooks: + - id: pretty-format-yaml + args: [--autofix, --indent, "2", --preserve-quotes] + + # Format Snakemake rule / workflow files +- repo: https://github.com/snakemake/snakefmt + rev: v0.8.0 + hooks: + - id: snakefmt + +# Check for FSFE REUSE compliance (licensing) +- repo: https://github.com/fsfe/reuse-tool + rev: v1.1.0 + hooks: + - id: reuse diff --git a/.readthedocs.yml b/.readthedocs.yml index d6b81a40..4290ae6f 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 @@ -7,5 +7,5 @@ version: 2 python: version: 3.8 install: - - requirements: doc/requirements.txt + - requirements: doc/requirements.txt system_packages: true diff --git a/.reuse/dep5 b/.reuse/dep5 index eb64a172..8ddff0f7 100644 --- a/.reuse/dep5 +++ b/.reuse/dep5 @@ -4,15 +4,15 @@ Upstream-Contact: Tom Brown Source: https://github.com/pypsa/pypsa-eur Files: doc/img/* -Copyright: 2019 Fabian Neumann (KIT) +Copyright: 2019 Fabian Neumann (TUB, KIT) License: CC-BY-4.0 Files: doc/configtables/* -Copyright: 2019 Fabian Neumann (KIT) +Copyright: 2019 Fabian Neumann (TUB, KIT) License: CC-BY-4.0 Files: data/* -Copyright: 2017-2020 The PyPSA-Eur Authors +Copyright: 2017-2022 The PyPSA-Eur Authors License: CC-BY-4.0 Files: .github/* @@ -20,9 +20,9 @@ Copyright: 2019 The PyPSA-Eur Authors License: CC0-1.0 Files: matplotlibrc -Copyright: : 2017-2020 The PyPSA-Eur Authors +Copyright: : 2017-2022 The PyPSA-Eur Authors License: CC0-1.0 Files: borg-it -Copyright: : 2017-2020 The PyPSA-Eur Authors +Copyright: : 2017-2022 The PyPSA-Eur Authors License: CC0-1.0 diff --git a/.syncignore-receive b/.syncignore-receive index 717245c3..8a9f7d10 100644 --- a/.syncignore-receive +++ b/.syncignore-receive @@ -16,4 +16,4 @@ notebooks doc cutouts data/bundle -*.nc \ No newline at end of file +*.nc diff --git a/CITATION.cff b/CITATION.cff index a28562fd..57cc590f 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -6,7 +6,7 @@ cff-version: 1.1.0 message: "If you use this package, please cite the corresponding manuscript in Energy Strategy Reviews." 
title: "PyPSA-Eur: An open optimisation model of the European transmission system" repository: https://github.com/pypsa/pypsa-eur -version: 0.4.0 +version: 0.6.1 license: MIT journal: Energy Strategy Reviews doi: 10.1016/j.esr.2018.08.012 diff --git a/LICENSES/MIT.txt b/LICENSES/MIT.txt index dc10fd32..ef8a01cb 100644 --- a/LICENSES/MIT.txt +++ b/LICENSES/MIT.txt @@ -1,6 +1,6 @@ MIT License -Copyright 2017-2021 The PyPSA-Eur Authors +Copyright 2017-2022 The PyPSA-Eur Authors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in diff --git a/README.md b/README.md index a603badd..27b9c592 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ @@ -8,7 +8,6 @@ SPDX-License-Identifier: CC-BY-4.0 [![Documentation](https://readthedocs.org/projects/pypsa-eur/badge/?version=latest)](https://pypsa-eur.readthedocs.io/en/latest/?badge=latest) ![Size](https://img.shields.io/github/repo-size/pypsa/pypsa-eur) [![Zenodo](https://zenodo.org/badge/DOI/10.5281/zenodo.3520874.svg)](https://doi.org/10.5281/zenodo.3520874) -[![Gitter](https://badges.gitter.im/PyPSA/community.svg)](https://gitter.im/PyPSA/community?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![Snakemake](https://img.shields.io/badge/snakemake-≥5.0.0-brightgreen.svg?style=flat)](https://snakemake.readthedocs.io) [![REUSE status](https://api.reuse.software/badge/github.com/pypsa/pypsa-eur)](https://api.reuse.software/info/github.com/pypsa/pypsa-eur) @@ -38,9 +37,7 @@ curtailment. We recommend to cluster the network to a couple of hundred nodes to remove these local inconsistencies. See the discussion in Section 3.4 "Model validation" of the paper. -![PyPSA-Eur Grid Model](doc/img/base.png) - -![PyPSA-Eur Grid Model Simplified](doc/img/elec_s_X.png) +![PyPSA-Eur Grid Model](doc/img/elec.png) The model building routines are defined through a snakemake workflow. 
The model is designed to be imported into the open toolbox [PyPSA](https://github.com/PyPSA/PyPSA) for operational studies as diff --git a/Snakefile b/Snakefile index 451be6ef..e9eb7113 100644 --- a/Snakefile +++ b/Snakefile @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -6,65 +6,101 @@ from os.path import normpath, exists from shutil import copyfile, move from snakemake.remote.HTTP import RemoteProvider as HTTPRemoteProvider + HTTP = HTTPRemoteProvider() if not exists("config.yaml"): copyfile("config.default.yaml", "config.yaml") + configfile: "config.yaml" -COSTS="data/costs.csv" -ATLITE_NPROCESSES = config['atlite'].get('nprocesses', 4) + +run = config.get("run", {}) +RDIR = run["name"] + "/" if run.get("name") else "" +CDIR = RDIR if not run.get("shared_cutouts") else "" + +COSTS = "resources/" + RDIR + "costs.csv" +ATLITE_NPROCESSES = config["atlite"].get("nprocesses", 4) wildcard_constraints: simpl="[a-zA-Z0-9]*|all", clusters="[0-9]+m?|all", ll="(v|c)([0-9\.]+|opt|all)|all", - opts="[-+a-zA-Z0-9\.]*" + opts="[-+a-zA-Z0-9\.]*", rule cluster_all_networks: - input: expand("networks/elec_s{simpl}_{clusters}.nc", **config['scenario']) + input: + expand("networks/" + RDIR + "elec_s{simpl}_{clusters}.nc", **config["scenario"]), rule extra_components_all_networks: - input: expand("networks/elec_s{simpl}_{clusters}_ec.nc", **config['scenario']) + input: + expand( + "networks/" + RDIR + "elec_s{simpl}_{clusters}_ec.nc", **config["scenario"] + ), rule prepare_all_networks: - input: expand("networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", **config['scenario']) + input: + expand( + "networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", + **config["scenario"] + ), rule solve_all_networks: - input: expand("results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", **config['scenario']) + input: + expand( + "results/networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", + **config["scenario"] + ), -if config['enable'].get('prepare_links_p_nom', False): +if config["enable"].get("prepare_links_p_nom", False): + rule prepare_links_p_nom: - output: 'data/links_p_nom.csv' - log: 'logs/prepare_links_p_nom.log' + output: + "data/links_p_nom.csv", + log: + "logs/" + RDIR + "prepare_links_p_nom.log", threads: 1 - resources: mem_mb=500 - script: 'scripts/prepare_links_p_nom.py' + resources: + mem_mb=1500, + script: + "scripts/prepare_links_p_nom.py" -datafiles = ['ch_cantons.csv', 'je-e-21.03.02.xls', - 'eez/World_EEZ_v8_2014.shp', 'EIA_hydro_generation_2000_2014.csv', - 'hydro_capacities.csv', 'naturalearth/ne_10m_admin_0_countries.shp', - 'NUTS_2013_60M_SH/data/NUTS_RG_60M_2013.shp', 'nama_10r_3popgdp.tsv.gz', - 'nama_10r_3gdp.tsv.gz', 'corine/g250_clc06_V18_5.tif'] +datafiles = [ + "ch_cantons.csv", + "je-e-21.03.02.xls", + "eez/World_EEZ_v8_2014.shp", + "hydro_capacities.csv", + "naturalearth/ne_10m_admin_0_countries.shp", + "NUTS_2013_60M_SH/data/NUTS_RG_60M_2013.shp", + "nama_10r_3popgdp.tsv.gz", + "nama_10r_3gdp.tsv.gz", + "corine/g250_clc06_V18_5.tif", +] -if not config.get('tutorial', False): +if not config.get("tutorial", False): datafiles.extend(["natura/Natura2000_end2015.shp", "GEBCO_2014_2D.nc"]) -if config['enable'].get('retrieve_databundle', True): +if config["enable"].get("retrieve_databundle", True): + rule retrieve_databundle: - output: expand('data/bundle/{file}', file=datafiles) - log: "logs/retrieve_databundle.log" - script: 
'scripts/retrieve_databundle.py' + output: + expand("data/bundle/{file}", file=datafiles), + log: + "logs/" + RDIR + "retrieve_databundle.log", + resources: + mem_mb=1000, + script: + "scripts/retrieve_databundle.py" # Downloading Copernicus Global Land Cover for land cover and land use: # Website: https://land.copernicus.eu/global/products/lc @@ -104,118 +140,232 @@ rule determine_availability_matrix_MD_UA: "scripts/determine_availability_matrix_MD_UA.py.ipynb" rule retrieve_load_data: - input: HTTP.remote("data.open-power-system-data.org/time_series/2019-06-05/time_series_60min_singleindex.csv", keep_local=True, static=True) - output: "data/load_raw.csv" - run: move(input[0], output[0]) + input: + HTTP.remote( + "data.open-power-system-data.org/time_series/2019-06-05/time_series_60min_singleindex.csv", + keep_local=True, + static=True, + ), + output: + "data/load_raw.csv", + resources: + mem_mb=5000, + run: + move(input[0], output[0]) rule build_load_data: - input: "data/load_raw.csv" - output: "resources/load.csv" - log: "logs/build_load_data.log" - script: 'scripts/build_load_data.py' - + input: + "data/load_raw.csv", + output: + "resources/" + RDIR + "load.csv", + log: + "logs/" + RDIR + "build_load_data.log", + resources: + mem_mb=5000, + script: + "scripts/build_load_data.py" + rule build_powerplants: input: - base_network="networks/base.nc", - custom_powerplants="data/custom_powerplants.csv" - output: "resources/powerplants.csv" - log: "logs/build_powerplants.log" + base_network="networks/" + RDIR + "base.nc", + custom_powerplants="data/custom_powerplants.csv", + output: + "resources/" + RDIR + "powerplants.csv", + log: + "logs/" + RDIR + "build_powerplants.log", threads: 1 - resources: mem_mb=500 - script: "scripts/build_powerplants.py" + resources: + mem_mb=5000, + script: + "scripts/build_powerplants.py" rule base_network: input: - eg_buses='data/entsoegridkit/buses.csv', - eg_lines='data/entsoegridkit/lines.csv', - eg_links='data/entsoegridkit/links.csv', - eg_converters='data/entsoegridkit/converters.csv', - eg_transformers='data/entsoegridkit/transformers.csv', - parameter_corrections='data/parameter_corrections.yaml', - links_p_nom='data/links_p_nom.csv', - links_tyndp='data/links_tyndp.csv', - country_shapes='resources/country_shapes.geojson', - offshore_shapes='resources/offshore_shapes.geojson', - europe_shape='resources/europe_shape.geojson' - output: "networks/base.nc" - log: "logs/base_network.log" - benchmark: "benchmarks/base_network" + eg_buses="data/entsoegridkit/buses.csv", + eg_lines="data/entsoegridkit/lines.csv", + eg_links="data/entsoegridkit/links.csv", + eg_converters="data/entsoegridkit/converters.csv", + eg_transformers="data/entsoegridkit/transformers.csv", + parameter_corrections="data/parameter_corrections.yaml", + links_p_nom="data/links_p_nom.csv", + links_tyndp="data/links_tyndp.csv", + country_shapes="resources/" + RDIR + "country_shapes.geojson", + offshore_shapes="resources/" + RDIR + "offshore_shapes.geojson", + europe_shape="resources/" + RDIR + "europe_shape.geojson", + output: + "networks/" + RDIR + "base.nc", + log: + "logs/" + RDIR + "base_network.log", + benchmark: + "benchmarks/" + RDIR + "base_network" threads: 1 - resources: mem_mb=500 - script: "scripts/base_network.py" + resources: + mem_mb=1500, + script: + "scripts/base_network.py" rule build_shapes: input: - naturalearth='data/bundle/naturalearth/ne_10m_admin_0_countries.shp', - eez='data/bundle/eez/World_EEZ_v8_2014.shp', - 
nuts3='data/bundle/NUTS_2013_60M_SH/data/NUTS_RG_60M_2013.shp', - nuts3pop='data/bundle/nama_10r_3popgdp.tsv.gz', - nuts3gdp='data/bundle/nama_10r_3gdp.tsv.gz', - ch_cantons='data/bundle/ch_cantons.csv', - ch_popgdp='data/bundle/je-e-21.03.02.xls' + naturalearth="data/bundle/naturalearth/ne_10m_admin_0_countries.shp", + eez="data/bundle/eez/World_EEZ_v8_2014.shp", + nuts3="data/bundle/NUTS_2013_60M_SH/data/NUTS_RG_60M_2013.shp", + nuts3pop="data/bundle/nama_10r_3popgdp.tsv.gz", + nuts3gdp="data/bundle/nama_10r_3gdp.tsv.gz", + ch_cantons="data/bundle/ch_cantons.csv", + ch_popgdp="data/bundle/je-e-21.03.02.xls", output: - country_shapes='resources/country_shapes.geojson', - offshore_shapes='resources/offshore_shapes.geojson', - europe_shape='resources/europe_shape.geojson', - nuts3_shapes='resources/nuts3_shapes.geojson' - log: "logs/build_shapes.log" + country_shapes="resources/" + RDIR + "country_shapes.geojson", + offshore_shapes="resources/" + RDIR + "offshore_shapes.geojson", + europe_shape="resources/" + RDIR + "europe_shape.geojson", + nuts3_shapes="resources/" + RDIR + "nuts3_shapes.geojson", + log: + "logs/" + RDIR + "build_shapes.log", threads: 1 - resources: mem_mb=500 - script: "scripts/build_shapes.py" + resources: + mem_mb=1500, + script: + "scripts/build_shapes.py" rule build_bus_regions: input: - country_shapes='resources/country_shapes.geojson', - offshore_shapes='resources/offshore_shapes.geojson', - base_network="networks/base.nc" + country_shapes="resources/" + RDIR + "country_shapes.geojson", + offshore_shapes="resources/" + RDIR + "offshore_shapes.geojson", + base_network="networks/" + RDIR + "base.nc", output: - regions_onshore="resources/regions_onshore.geojson", - regions_offshore="resources/regions_offshore.geojson" - log: "logs/build_bus_regions.log" + regions_onshore="resources/" + RDIR + "regions_onshore.geojson", + regions_offshore="resources/" + RDIR + "regions_offshore.geojson", + log: + "logs/" + RDIR + "build_bus_regions.log", threads: 1 - resources: mem_mb=1000 - script: "scripts/build_bus_regions.py" + resources: + mem_mb=1000, + script: + "scripts/build_bus_regions.py" + + +if config["enable"].get("build_cutout", False): -if config['enable'].get('build_cutout', False): rule build_cutout: - input: - regions_onshore="resources/regions_onshore.geojson", - regions_offshore="resources/regions_offshore.geojson" - output: "cutouts/{cutout}.nc" - log: "logs/build_cutout/{cutout}.log" - benchmark: "benchmarks/build_cutout_{cutout}" + input: + regions_onshore="resources/" + RDIR + "regions_onshore.geojson", + regions_offshore="resources/" + RDIR + "regions_offshore.geojson", + output: + "cutouts/" + CDIR + "{cutout}.nc", + log: + "logs/" + CDIR + "build_cutout/{cutout}.log", + benchmark: + "benchmarks/" + CDIR + "build_cutout_{cutout}" threads: ATLITE_NPROCESSES - resources: mem_mb=ATLITE_NPROCESSES * 1000 - script: "scripts/build_cutout.py" + resources: + mem_mb=ATLITE_NPROCESSES * 1000, + script: + "scripts/build_cutout.py" -if config['enable'].get('retrieve_cutout', True): +if config["enable"].get("retrieve_cutout", True): + rule retrieve_cutout: - input: HTTP.remote("zenodo.org/record/6350001/files/{cutout}.nc", keep_local=True, static=True) - output: "cutouts/{cutout}.nc" - run: move(input[0], output[0]) + input: + HTTP.remote( + "zenodo.org/record/6350001/files/{cutout}.nc", + keep_local=True, + static=True, + ), + output: + "cutouts/" + CDIR + "{cutout}.nc", + log: + "logs/" + CDIR + "retrieve_cutout_{cutout}.log", + resources: + mem_mb=5000, + run: + 
move(input[0], output[0]) -if config['enable'].get('build_natura_raster', False): +if config["enable"].get("retrieve_cost_data", True): + + rule retrieve_cost_data: + input: + HTTP.remote( + f"raw.githubusercontent.com/PyPSA/technology-data/{config['costs']['version']}/outputs/costs_{config['costs']['year']}.csv", + keep_local=True, + ), + output: + COSTS, + log: + "logs/" + RDIR + "retrieve_cost_data.log", + resources: + mem_mb=5000, + run: + move(input[0], output[0]) + + +if config["enable"].get("build_natura_raster", False): + rule build_natura_raster: input: natura="data/bundle/natura/Natura2000_end2015.shp", - cutouts=expand("cutouts/{cutouts}.nc", **config['atlite']) - output: "resources/natura.tiff" - log: "logs/build_natura_raster.log" - script: "scripts/build_natura_raster.py" + cutouts=expand("cutouts/" + CDIR + "{cutouts}.nc", **config["atlite"]), + output: + "resources/" + RDIR + "natura.tiff", + resources: + mem_mb=5000, + log: + "logs/" + RDIR + "build_natura_raster.log", + script: + "scripts/build_natura_raster.py" -if config['enable'].get('retrieve_natura_raster', True): +if config["enable"].get("retrieve_natura_raster", True): + rule retrieve_natura_raster: - input: HTTP.remote("zenodo.org/record/4706686/files/natura.tiff", keep_local=True, static=True) - output: "resources/natura.tiff" - run: move(input[0], output[0]) + input: + HTTP.remote( + "zenodo.org/record/4706686/files/natura.tiff", + keep_local=True, + static=True, + ), + output: + "resources/" + RDIR + "natura.tiff", + resources: + mem_mb=5000, + run: + move(input[0], output[0]) + + +rule retrieve_ship_raster: + input: + HTTP.remote( + "https://zenodo.org/record/6953563/files/shipdensity_global.zip", + keep_local=True, + static=True, + ), + output: + "data/shipdensity_global.zip", + resources: + mem_mb=5000, + run: + move(input[0], output[0]) + + +rule build_ship_raster: + input: + ship_density="data/shipdensity_global.zip", + cutouts=expand("cutouts/" + CDIR + "{cutouts}.nc", **config["atlite"]), + output: + "resources/" + RDIR + "shipdensity_raster.nc", + log: + "logs/" + RDIR + "build_ship_raster.log", + resources: + mem_mb=5000, + benchmark: + "benchmarks/" + RDIR + "build_ship_raster" + script: + "scripts/build_ship_raster.py" # Optional input when having Ukraine (UA) or Moldova (MD) in the countries list @@ -228,136 +378,207 @@ else: rule build_renewable_profiles: input: - base_network="networks/base.nc", + base_network="networks/" + RDIR + "base.nc", corine="data/bundle/corine/g250_clc06_V18_5.tif", - natura="resources/natura.tiff", - gebco=lambda w: ("data/bundle/GEBCO_2014_2D.nc" - if "max_depth" in config["renewable"][w.technology].keys() - else []), - country_shapes='resources/country_shapes.geojson', - offshore_shapes='resources/offshore_shapes.geojson', - regions=lambda w: ("resources/regions_onshore.geojson" - if w.technology in ('onwind', 'solar') - else "resources/regions_offshore.geojson"), - cutout=lambda w: "cutouts/" + config["renewable"][w.technology]['cutout'] + ".nc", - **opt, - output: profile="resources/profile_{technology}.nc", - log: "logs/build_renewable_profile_{technology}.log" - benchmark: "benchmarks/build_renewable_profiles_{technology}" + natura=lambda w: ( + "resources/" + RDIR + "natura.tiff" + if config["renewable"][w.technology]["natura"] + else [] + ), + gebco=lambda w: ( + "data/bundle/GEBCO_2014_2D.nc" + if "max_depth" in config["renewable"][w.technology].keys() + else [] + ), + ship_density=lambda w: ( + "resources/" + RDIR + "shipdensity_raster.nc" + if "ship_threshold" 
in config["renewable"][w.technology].keys() + else [] + ), + country_shapes="resources/" + RDIR + "country_shapes.geojson", + offshore_shapes="resources/" + RDIR + "offshore_shapes.geojson", + regions=lambda w: ( + "resources/" + RDIR + "regions_onshore.geojson" + if w.technology in ("onwind", "solar") + else "resources/" + RDIR + "regions_offshore.geojson" + ), + cutout=lambda w: "cutouts/" + + CDIR + + config["renewable"][w.technology]["cutout"] + + ".nc", + output: + profile="resources/" + RDIR + "profile_{technology}.nc", + log: + "logs/" + RDIR + "build_renewable_profile_{technology}.log", + benchmark: + "benchmarks/" + RDIR + "build_renewable_profiles_{technology}" threads: ATLITE_NPROCESSES - resources: mem_mb=ATLITE_NPROCESSES * 5000 - script: "scripts/build_renewable_profiles.py" + resources: + mem_mb=ATLITE_NPROCESSES * 5000, + wildcard_constraints: + technology="(?!hydro).*", # Any technology other than hydro + script: + "scripts/build_renewable_profiles.py" -if 'hydro' in config['renewable'].keys(): - rule build_hydro_profile: - input: - country_shapes='resources/country_shapes.geojson', - eia_hydro_generation='data/EIA_hydro_generation_2000_2014.csv', - cutout="cutouts/" + config["renewable"]['hydro']['cutout'] + ".nc" - output: 'resources/profile_hydro.nc' - log: "logs/build_hydro_profile.log" - resources: mem_mb=5000 - script: 'scripts/build_hydro_profile.py' +rule build_hydro_profile: + input: + country_shapes="resources/" + RDIR + "country_shapes.geojson", + eia_hydro_generation="data/eia_hydro_annual_generation.csv", + cutout=f"cutouts/" + CDIR + config["renewable"]["hydro"]["cutout"] + ".nc" + if "hydro" in config["renewable"] + else [], + output: + "resources/" + RDIR + "profile_hydro.nc", + log: + "logs/" + RDIR + "build_hydro_profile.log", + resources: + mem_mb=5000, + script: + "scripts/build_hydro_profile.py" rule add_electricity: input: - base_network='networks/base.nc', + **{ + f"profile_{tech}": "resources/" + RDIR + f"profile_{tech}.nc" + for tech in config["renewable"] + }, + **{ + f"conventional_{carrier}_{attr}": fn + for carrier, d in config.get("conventional", {None: {}}).items() + for attr, fn in d.items() + if str(fn).startswith("data/") + }, + base_network="networks/" + RDIR + "base.nc", tech_costs=COSTS, - regions="resources/regions_onshore.geojson", - powerplants='resources/powerplants.csv', - hydro_capacities='data/bundle/hydro_capacities.csv', - geth_hydro_capacities='data/geth2015_hydro_capacities.csv', - load='resources/load.csv', - nuts3_shapes='resources/nuts3_shapes.geojson', + regions="resources/" + RDIR + "regions_onshore.geojson", + powerplants="resources/" + RDIR + "powerplants.csv", + hydro_capacities="data/bundle/hydro_capacities.csv", + geth_hydro_capacities="data/geth2015_hydro_capacities.csv", + load="resources/" + RDIR + "load.csv", + nuts3_shapes="resources/" + RDIR + "nuts3_shapes.geojson", ua_md_gdp='data/GDP_PPP_30arcsec_v3_mapped_default.csv', - **{f"profile_{tech}": f"resources/profile_{tech}.nc" - for tech in config['renewable']} - output: "networks/elec.nc" - log: "logs/add_electricity.log" - benchmark: "benchmarks/add_electricity" + output: + "networks/" + RDIR + "elec.nc", + log: + "logs/" + RDIR + "add_electricity.log", + benchmark: + "benchmarks/" + RDIR + "add_electricity" threads: 1 - resources: mem_mb=5000 - script: "scripts/add_electricity.py" + resources: + mem_mb=5000, + script: + "scripts/add_electricity.py" rule simplify_network: input: - network='networks/elec.nc', + network="networks/" + RDIR + "elec.nc", 
tech_costs=COSTS, - regions_onshore="resources/regions_onshore.geojson", - regions_offshore="resources/regions_offshore.geojson" + regions_onshore="resources/" + RDIR + "regions_onshore.geojson", + regions_offshore="resources/" + RDIR + "regions_offshore.geojson", output: - network='networks/elec_s{simpl}.nc', - regions_onshore="resources/regions_onshore_elec_s{simpl}.geojson", - regions_offshore="resources/regions_offshore_elec_s{simpl}.geojson", - busmap='resources/busmap_elec_s{simpl}.csv', - connection_costs='resources/connection_costs_s{simpl}.csv' - log: "logs/simplify_network/elec_s{simpl}.log" - benchmark: "benchmarks/simplify_network/elec_s{simpl}" + network="networks/" + RDIR + "elec_s{simpl}.nc", + regions_onshore="resources/" + RDIR + "regions_onshore_elec_s{simpl}.geojson", + regions_offshore="resources/" + RDIR + "regions_offshore_elec_s{simpl}.geojson", + busmap="resources/" + RDIR + "busmap_elec_s{simpl}.csv", + connection_costs="resources/" + RDIR + "connection_costs_s{simpl}.csv", + log: + "logs/" + RDIR + "simplify_network/elec_s{simpl}.log", + benchmark: + "benchmarks/" + RDIR + "simplify_network/elec_s{simpl}" threads: 1 - resources: mem_mb=4000 - script: "scripts/simplify_network.py" + resources: + mem_mb=4000, + script: + "scripts/simplify_network.py" rule cluster_network: input: - network='networks/elec_s{simpl}.nc', - regions_onshore="resources/regions_onshore_elec_s{simpl}.geojson", - regions_offshore="resources/regions_offshore_elec_s{simpl}.geojson", - busmap=ancient('resources/busmap_elec_s{simpl}.csv'), - custom_busmap=("data/custom_busmap_elec_s{simpl}_{clusters}.csv" - if config["enable"].get("custom_busmap", False) else []), - tech_costs=COSTS + network="networks/" + RDIR + "elec_s{simpl}.nc", + regions_onshore="resources/" + RDIR + "regions_onshore_elec_s{simpl}.geojson", + regions_offshore="resources/" + RDIR + "regions_offshore_elec_s{simpl}.geojson", + busmap=ancient("resources/" + RDIR + "busmap_elec_s{simpl}.csv"), + custom_busmap=( + "data/custom_busmap_elec_s{simpl}_{clusters}.csv" + if config["enable"].get("custom_busmap", False) + else [] + ), + tech_costs=COSTS, output: - network='networks/elec_s{simpl}_{clusters}.nc', - regions_onshore="resources/regions_onshore_elec_s{simpl}_{clusters}.geojson", - regions_offshore="resources/regions_offshore_elec_s{simpl}_{clusters}.geojson", - busmap="resources/busmap_elec_s{simpl}_{clusters}.csv", - linemap="resources/linemap_elec_s{simpl}_{clusters}.csv" - log: "logs/cluster_network/elec_s{simpl}_{clusters}.log" - benchmark: "benchmarks/cluster_network/elec_s{simpl}_{clusters}" + network="networks/" + RDIR + "elec_s{simpl}_{clusters}.nc", + regions_onshore="resources/" + + RDIR + + "regions_onshore_elec_s{simpl}_{clusters}.geojson", + regions_offshore="resources/" + + RDIR + + "regions_offshore_elec_s{simpl}_{clusters}.geojson", + busmap="resources/" + RDIR + "busmap_elec_s{simpl}_{clusters}.csv", + linemap="resources/" + RDIR + "linemap_elec_s{simpl}_{clusters}.csv", + log: + "logs/" + RDIR + "cluster_network/elec_s{simpl}_{clusters}.log", + benchmark: + "benchmarks/" + RDIR + "cluster_network/elec_s{simpl}_{clusters}" threads: 1 - resources: mem_mb=6000 - script: "scripts/cluster_network.py" + resources: + mem_mb=6000, + script: + "scripts/cluster_network.py" rule add_extra_components: input: - network='networks/elec_s{simpl}_{clusters}.nc', + network="networks/" + RDIR + "elec_s{simpl}_{clusters}.nc", tech_costs=COSTS, - output: 'networks/elec_s{simpl}_{clusters}_ec.nc' - log: 
"logs/add_extra_components/elec_s{simpl}_{clusters}.log" - benchmark: "benchmarks/add_extra_components/elec_s{simpl}_{clusters}_ec" + output: + "networks/" + RDIR + "elec_s{simpl}_{clusters}_ec.nc", + log: + "logs/" + RDIR + "add_extra_components/elec_s{simpl}_{clusters}.log", + benchmark: + "benchmarks/" + RDIR + "add_extra_components/elec_s{simpl}_{clusters}_ec" threads: 1 - resources: mem_mb=3000 - script: "scripts/add_extra_components.py" + resources: + mem_mb=3000, + script: + "scripts/add_extra_components.py" rule prepare_network: - input: 'networks/elec_s{simpl}_{clusters}_ec.nc', tech_costs=COSTS - output: 'networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc' - log: "logs/prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.log" - benchmark: "benchmarks/prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}" + input: + "networks/" + RDIR + "elec_s{simpl}_{clusters}_ec.nc", + tech_costs=COSTS, + output: + "networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", + log: + "logs/" + RDIR + "prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.log", + benchmark: + ( + "benchmarks/" + + RDIR + + "prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}" + ) threads: 1 - resources: mem_mb=4000 - script: "scripts/prepare_network.py" + resources: + mem_mb=4000, + script: + "scripts/prepare_network.py" def memory(w): - factor = 3. - for o in w.opts.split('-'): - m = re.match(r'^(\d+)h$', o, re.IGNORECASE) + factor = 3.0 + for o in w.opts.split("-"): + m = re.match(r"^(\d+)h$", o, re.IGNORECASE) if m is not None: factor /= int(m.group(1)) break - for o in w.opts.split('-'): - m = re.match(r'^(\d+)seg$', o, re.IGNORECASE) + for o in w.opts.split("-"): + m = re.match(r"^(\d+)seg$", o, re.IGNORECASE) if m is not None: factor *= int(m.group(1)) / 8760 break - if w.clusters.endswith('m'): + if w.clusters.endswith("m"): return int(factor * (18000 + 180 * int(w.clusters[:-1]))) elif w.clusters == "all": return int(factor * (18000 + 180 * 4000)) @@ -366,44 +587,87 @@ def memory(w): rule solve_network: - input: "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc" - output: "results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc" + input: + "networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", + output: + "results/networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", log: - solver=normpath("logs/solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_solver.log"), - python="logs/solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_python.log", - memory="logs/solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_memory.log" - benchmark: "benchmarks/solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}" + solver=normpath( + "logs/" + + RDIR + + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_solver.log" + ), + python="logs/" + + RDIR + + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_python.log", + memory="logs/" + + RDIR + + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_memory.log", + benchmark: + "benchmarks/" + RDIR + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}" threads: 4 - resources: mem_mb=memory - shadow: "minimal" - script: "scripts/solve_network.py" + resources: + mem_mb=memory, + shadow: + "minimal" + script: + "scripts/solve_network.py" rule solve_operations_network: input: - unprepared="networks/elec_s{simpl}_{clusters}_ec.nc", - optimized="results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc" - output: "results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op.nc" + 
unprepared="networks/" + RDIR + "elec_s{simpl}_{clusters}_ec.nc", + optimized="results/networks/" + + RDIR + + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", + output: + "results/networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op.nc", log: - solver=normpath("logs/solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_solver.log"), - python="logs/solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_python.log", - memory="logs/solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_memory.log" - benchmark: "benchmarks/solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}" + solver=normpath( + "logs/" + + RDIR + + "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_solver.log" + ), + python="logs/" + + RDIR + + "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_python.log", + memory="logs/" + + RDIR + + "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_memory.log", + benchmark: + ( + "benchmarks/" + + RDIR + + "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}" + ) threads: 4 - resources: mem_mb=(lambda w: 5000 + 372 * int(w.clusters)) - shadow: "minimal" - script: "scripts/solve_operations_network.py" + resources: + mem_mb=(lambda w: 5000 + 372 * int(w.clusters)), + shadow: + "minimal" + script: + "scripts/solve_operations_network.py" rule plot_network: input: - network="results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", - tech_costs=COSTS + network="results/networks/" + + RDIR + + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", + tech_costs=COSTS, output: - only_map="results/plots/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{attr}.{ext}", - ext="results/plots/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{attr}_ext.{ext}" - log: "logs/plot_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{attr}_{ext}.log" - script: "scripts/plot_network.py" + only_map="results/plots/" + + RDIR + + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{attr}.{ext}", + ext="results/plots/" + + RDIR + + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{attr}_ext.{ext}", + log: + "logs/" + + RDIR + + "plot_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{attr}_{ext}.log", + script: + "scripts/plot_network.py" def input_make_summary(w): @@ -414,36 +678,79 @@ def input_make_summary(w): ll = [l for l in ll if l[0] == w.ll[0]] else: ll = w.ll - return ([COSTS] + - expand("results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", - ll=ll, - **{k: config["scenario"][k] if getattr(w, k) == "all" else getattr(w, k) - for k in ["simpl", "clusters", "opts"]})) + return [COSTS] + expand( + "results/networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", + ll=ll, + **{ + k: config["scenario"][k] if getattr(w, k) == "all" else getattr(w, k) + for k in ["simpl", "clusters", "opts"] + } + ) rule make_summary: - input: input_make_summary - output: directory("results/summaries/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}") - log: "logs/make_summary/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}.log", - script: "scripts/make_summary.py" + input: + input_make_summary, + output: + directory( + "results/summaries/" + + RDIR + + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}" + ), + log: + "logs/" + + RDIR + + "make_summary/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}.log", + resources: + mem_mb=1500, + script: + "scripts/make_summary.py" rule plot_summary: - input: "results/summaries/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}" - output: 
"results/plots/summary_{summary}_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}.{ext}" - log: "logs/plot_summary/{summary}_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}_{ext}.log" - script: "scripts/plot_summary.py" + input: + "results/summaries/" + + RDIR + + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}", + output: + "results/plots/" + + RDIR + + "summary_{summary}_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}.{ext}", + log: + "logs/" + + RDIR + + "plot_summary/{summary}_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}_{ext}.log", + resources: + mem_mb=1500, + script: + "scripts/plot_summary.py" def input_plot_p_nom_max(w): - return [("networks/elec_s{simpl}{maybe_cluster}.nc" - .format(maybe_cluster=('' if c == 'full' else ('_' + c)), **w)) - for c in w.clusts.split(",")] + return [ + ( + "results/networks/" + + RDIR + + "elec_s{simpl}{maybe_cluster}.nc".format( + maybe_cluster=("" if c == "full" else ("_" + c)), **w + ) + ) + for c in w.clusts.split(",") + ] rule plot_p_nom_max: - input: input_plot_p_nom_max - output: "results/plots/elec_s{simpl}_cum_p_nom_max_{clusts}_{techs}_{country}.{ext}" - log: "logs/plot_p_nom_max/elec_s{simpl}_{clusts}_{techs}_{country}_{ext}.log" - script: "scripts/plot_p_nom_max.py" - + input: + input_plot_p_nom_max, + output: + "results/plots/" + + RDIR + + "elec_s{simpl}_cum_p_nom_max_{clusts}_{techs}_{country}.{ext}", + log: + "logs/" + + RDIR + + "plot_p_nom_max/elec_s{simpl}_{clusts}_{techs}_{country}_{ext}.log", + resources: + mem_mb=1500, + script: + "scripts/plot_p_nom_max.py" diff --git a/config.default.yaml b/config.default.yaml index a354861a..69128a5b 100755 --- a/config.default.yaml +++ b/config.default.yaml @@ -1,15 +1,18 @@ -# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 -version: 0.4.0 +version: 0.6.1 tutorial: false logging: level: INFO format: '%(levelname)s:%(name)s:%(message)s' -summary_dir: results +run: + name: "" # use this to keep track of runs with different settings + shared_cutouts: false # set to true to share the default cutout(s) across runs + scenario: simpl: [''] @@ -19,10 +22,6 @@ scenario: countries: ['AL', 'AT', 'BA', 'BE', 'BG', 'CH', 'CZ', 'DE', 'DK', 'EE', 'ES', 'FI', 'FR', 'GB', 'GR', 'HR', 'HU', 'IE', 'IT', 'LT', 'LU', 'LV', 'ME', 'MD', 'MK', 'NL', 'NO', 'PL', 'PT', 'RO', 'RS', 'SE', 'SI', 'SK', 'UA'] -clustering: - simplify: - to_substations: false # network is simplified to nodes with positive or negative power injection (i.e. substations or offwind connections) - snapshots: start: "2013-01-01" end: "2014-01-01" @@ -31,6 +30,7 @@ snapshots: enable: prepare_links_p_nom: false retrieve_databundle: true + retrieve_cost_data: true build_cutout: false retrieve_cutout: true build_natura_raster: false @@ -39,36 +39,58 @@ enable: electricity: voltages: [220., 300., 380., 750.] + gaslimit: false # global gas usage limit of X MWh_th co2limit: 9.59e+7 # 0.05 * co2base co2base: 1.918e+9 agg_p_nom_limits: data/agg_p_nom_minmax.csv - extendable_carriers: - Generator: [] - StorageUnit: [] # battery, H2 - Store: [battery, H2] - Link: [] + operational_reserve: # like https://genxproject.github.io/GenX/dev/core/#Reserves + activate: false + epsilon_load: 0.02 # share of total load + epsilon_vres: 0.02 # share of total renewable supply + contingency: 4000 # fixed capacity in MW max_hours: battery: 6 H2: 168 - powerplants_filter: false # use pandas query strings here, e.g. 
Country not in ['Germany'] - custom_powerplants: true # use pandas query strings here, e.g. Country in ['Germany'] - conventional_carriers: [nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass] - renewable_capacities_from_OPSD: [] # onwind, offwind, solar + extendable_carriers: + Generator: [solar, onwind, offwind-ac, offwind-dc, OCGT] + StorageUnit: [] # battery, H2 + Store: [battery, H2] + Link: [] # H2 pipeline - # estimate_renewable_capacities_from_capacity_stats: - # # Wind is the Fueltype in ppm.data.Capacity_stats, onwind, offwind-{ac,dc} the carrier in PyPSA-Eur - # Wind: [onwind, offwind-ac, offwind-dc] - # Solar: [solar] + # use pandas query strings here, e.g. Country not in ['Germany'] + powerplants_filter: (DateOut >= 2022 or DateOut != DateOut) + # use pandas query strings here, e.g. Country in ['Germany'] + custom_powerplants: false + + conventional_carriers: [nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass] + renewable_carriers: [solar, onwind, offwind-ac, offwind-dc, hydro] + + estimate_renewable_capacities: + enable: true + # Add capacities from OPSD data + from_opsd: true + # Renewable capacities are based on existing capacities reported by IRENA + year: 2020 + # Artificially limit maximum capacities to factor * (IRENA capacities), + # i.e. 110% of <year>'s capacities => expansion_limit: 1.1 + # false: Use estimated renewable potentials determined by the workflow + expansion_limit: false + technology_mapping: + # Wind is the Fueltype in powerplantmatching, onwind, offwind-{ac,dc} the carrier in PyPSA-Eur + Offshore: [offwind-ac, offwind-dc] + Onshore: [onwind] + PV: [solar] atlite: nprocesses: 4 + show_progress: false # false saves time cutouts: # use 'base' to determine geographical bounds and time span from config - # base: - # module: era5 + # base: + # module: era5 europe-2013-era5: module: era5 # in priority order dx: 0.3 @@ -78,26 +100,28 @@ atlite: dx: 0.2 dy: 0.2 sarah_interpolate: false - sarah_dir: + sarah_dir: features: [influx, temperature] - + renewable: onwind: cutout: europe-2013-era5 resource: method: wind turbine: Vestas_V112_3MW - capacity_per_sqkm: 3 # ScholzPhd Tab 4.3.1: 10MW/km^2 + capacity_per_sqkm: 3 # ScholzPhd Tab 4.3.1: 10MW/km^2 and assuming 30% fraction of the already restricted + # area is available for installation of wind generators due to competing land use and likely public + # acceptance issues. # correction_factor: 0.93 corine: # Scholz, Y. (2012). Renewable energy based electricity supply at low costs: # development of the REMix model and application for Europe. ( p.42 / p.28) - grid_codes: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, - 24, 25, 26, 27, 28, 29, 31, 32] + grid_codes: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 32] distance: 1000 distance_grid_codes: [1, 2, 3, 4, 5, 6] natura: true + excluder_resolution: 100 potential: simple # or conservative clip_p_max_pu: 1.e-2 offwind-ac: @@ -105,15 +129,19 @@ renewable: resource: method: wind turbine: NREL_ReferenceTurbine_5MW_offshore - capacity_per_sqkm: 2 + capacity_per_sqkm: 2 # ScholzPhd Tab 4.3.1: 10MW/km^2 and assuming 20% fraction of the already restricted + # area is available for installation of wind generators due to competing land use and likely public + # acceptance issues. 
correction_factor: 0.8855 # proxy for wake losses # from 10.1016/j.energy.2018.08.153 # until done more rigorously in #153 corine: [44, 255] natura: true + ship_threshold: 400 max_depth: 50 max_shore_distance: 30000 + excluder_resolution: 200 potential: simple # or conservative clip_p_max_pu: 1.e-2 offwind-dc: @@ -121,16 +149,19 @@ renewable: resource: method: wind turbine: NREL_ReferenceTurbine_5MW_offshore - # ScholzPhd Tab 4.3.1: 10MW/km^2 - capacity_per_sqkm: 2 + capacity_per_sqkm: 2 # ScholzPhd Tab 4.3.1: 10MW/km^2 and assuming 20% fraction of the already restricted + # area is available for installation of wind generators due to competing land use and likely public + # acceptance issues. correction_factor: 0.8855 # proxy for wake losses # from 10.1016/j.energy.2018.08.153 # until done more rigorously in #153 corine: [44, 255] natura: true + ship_threshold: 400 max_depth: 50 min_shore_distance: 30000 + excluder_resolution: 200 potential: simple # or conservative clip_p_max_pu: 1.e-2 solar: @@ -141,7 +172,7 @@ renewable: orientation: slope: 35. azimuth: 180. - capacity_per_sqkm: 1.7 # ScholzPhd Tab 4.3.1: 170 MW/km^2 + capacity_per_sqkm: 1.7 # ScholzPhd Tab 4.3.1: 170 MW/km^2 and assuming 1% of the area can be used for solar PV panels # Correction factor determined by comparing uncorrected area-weighted full-load hours to those # published in Supplementary Data to # Pietzcker, Robert Carl, et al. "Using the sun to decarbonize the power @@ -150,9 +181,9 @@ renewable: # This correction factor of 0.854337 may be in order if using reanalysis data. # for discussion refer to https://github.com/PyPSA/pypsa-eur/pull/304 # correction_factor: 0.854337 - corine: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, - 14, 15, 16, 17, 18, 19, 20, 26, 31, 32] + corine: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 26, 31, 32] natura: true + excluder_resolution: 100 potential: simple # or conservative clip_p_max_pu: 1.e-2 hydro: @@ -162,6 +193,10 @@ renewable: hydro_max_hours: "energy_capacity_totals_by_country" # one of energy_capacity_totals_by_country, estimate_by_large_installations or a float clip_min_inflow: 1.0 +conventional: + nuclear: + p_max_pu: "data/nuclear_p_max_pu.csv" # float of file name + lines: types: 220.: "Al/St 240/40 2-bundle 220.0" @@ -185,17 +220,26 @@ transformers: type: '' load: - power_statistics: True # only for files from <2019; set false in order to get ENTSOE transparency data + power_statistics: true # only for files from <2019; set false in order to get ENTSOE transparency data interpolate_limit: 3 # data gaps up until this size are interpolated linearly - time_shift_for_large_gaps: 1w # data gaps up until this size are copied by copying from + time_shift_for_large_gaps: 1w # data gaps up until this size are copied by copying from manual_adjustments: true # false scaling_factor: 1.0 costs: year: 2030 - discountrate: 0.07 # From a Lion Hirth paper, also reflects average of Noothout et al 2016 - USD2013_to_EUR2013: 0.7532 # [EUR/USD] ECB: https://www.ecb.europa.eu/stats/exchange/eurofxref/html/eurofxref-graph-usd.en.html - marginal_cost: # EUR/MWh + version: v0.4.0 + rooftop_share: 0.14 # based on the potentials, assuming (0.1 kW/m2 and 10 m2/person) + fill_values: + FOM: 0 + VOM: 0 + efficiency: 1 + fuel: 0 + investment: 0 + lifetime: 25 + "CO2 intensity": 0 + "discount rate": 0.07 + marginal_cost: solar: 0.01 onwind: 0.015 offwind: 0.015 @@ -208,6 +252,29 @@ costs: emission_prices: # in currency per tonne emission, only used with the option Ep co2: 0. 
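The new `version` and `fill_values` keys above move cost assumptions from hard-coded YAML entries to the PyPSA/technology-data outputs downloaded by `rule retrieve_cost_data`, with per-parameter defaults for missing entries. A plausible sketch of how such defaults can be applied (the actual reading code lives in the workflow scripts, which this diff does not show; the `technology,parameter,value` columns are the layout of the downloaded costs CSV):

```python
# Plausible sketch (not the workflow's actual code): patch gaps in the
# technology-data cost table with the defaults from costs:fill_values.
import pandas as pd

fill_values = {
    "FOM": 0, "VOM": 0, "efficiency": 1, "fuel": 0,
    "investment": 0, "lifetime": 25, "CO2 intensity": 0, "discount rate": 0.07,
}

# long format: one row per (technology, parameter) pair with a "value" column
costs = pd.read_csv("resources/costs.csv", index_col=[0, 1]).sort_index()
# pivot to technologies x parameters, then fill gaps column-wise with defaults
costs = costs["value"].unstack().fillna(fill_values)
```
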
+clustering: + simplify_network: + to_substations: false # network is simplified to nodes with positive or negative power injection (i.e. substations or offwind connections) + algorithm: kmeans # choose from: [hac, kmeans] + feature: solar+onwind-time # only for hac. choose from: [solar+onwind-time, solar+onwind-cap, solar-time, solar-cap, solar+offwind-cap] etc. + exclude_carriers: [] + remove_stubs: true + remove_stubs_across_borders: true + cluster_network: + algorithm: kmeans + feature: solar+onwind-time + exclude_carriers: [] + aggregation_strategies: + generators: + p_nom_max: sum # use "min" for more conservative assumptions + p_nom_min: sum + p_min_pu: mean + marginal_cost: mean + committable: any + ramp_limit_up: max + ramp_limit_down: max + efficiency: mean + solving: options: formulation: kirchhoff @@ -240,7 +307,7 @@ solving: plotting: map: figsize: [7, 7] - boundaries: [-10.2, 29, 35, 72] + boundaries: [-10.2, 29, 35, 72] p_nom: bus_size_factor: 5.e+4 linewidth_factor: 3.e+3 @@ -259,50 +326,50 @@ plotting: AC_carriers: ["AC line", "AC transformer"] link_carriers: ["DC line", "Converter AC-DC"] tech_colors: - "onwind" : "#235ebc" - "onshore wind" : "#235ebc" - 'offwind' : "#6895dd" - 'offwind-ac' : "#6895dd" - 'offshore wind' : "#6895dd" - 'offshore wind ac' : "#6895dd" - 'offwind-dc' : "#74c6f2" - 'offshore wind dc' : "#74c6f2" - "hydro" : "#08ad97" - "hydro+PHS" : "#08ad97" - "PHS" : "#08ad97" - "hydro reservoir" : "#08ad97" - 'hydroelectricity' : '#08ad97' - "ror" : "#4adbc8" - "run of river" : "#4adbc8" - 'solar' : "#f9d002" - 'solar PV' : "#f9d002" - 'solar thermal' : '#ffef60' - 'biomass' : '#0c6013' - 'solid biomass' : '#06540d' - 'biogas' : '#23932d' - 'waste' : '#68896b' - 'geothermal' : '#ba91b1' - "OCGT" : "#d35050" - "gas" : "#d35050" - "natural gas" : "#d35050" - "CCGT" : "#b20101" - "nuclear" : "#ff9000" - "coal" : "#707070" - "lignite" : "#9e5a01" - "oil" : "#262626" - "H2" : "#ea048a" - "hydrogen storage" : "#ea048a" - "battery" : "#b8ea04" - "Electric load" : "#f9d002" - "electricity" : "#f9d002" - "lines" : "#70af1d" - "transmission lines" : "#70af1d" - "AC-AC" : "#70af1d" - "AC line" : "#70af1d" - "links" : "#8a1caf" - "HVDC links" : "#8a1caf" - "DC-DC" : "#8a1caf" - "DC link" : "#8a1caf" + "onwind": "#235ebc" + "onshore wind": "#235ebc" + 'offwind': "#6895dd" + 'offwind-ac': "#6895dd" + 'offshore wind': "#6895dd" + 'offshore wind ac': "#6895dd" + 'offwind-dc': "#74c6f2" + 'offshore wind dc': "#74c6f2" + "hydro": "#08ad97" + "hydro+PHS": "#08ad97" + "PHS": "#08ad97" + "hydro reservoir": "#08ad97" + 'hydroelectricity': '#08ad97' + "ror": "#4adbc8" + "run of river": "#4adbc8" + 'solar': "#f9d002" + 'solar PV': "#f9d002" + 'solar thermal': '#ffef60' + 'biomass': '#0c6013' + 'solid biomass': '#06540d' + 'biogas': '#23932d' + 'waste': '#68896b' + 'geothermal': '#ba91b1' + "OCGT": "#d35050" + "gas": "#d35050" + "natural gas": "#d35050" + "CCGT": "#b20101" + "nuclear": "#ff9000" + "coal": "#707070" + "lignite": "#9e5a01" + "oil": "#262626" + "H2": "#ea048a" + "hydrogen storage": "#ea048a" + "battery": "#b8ea04" + "Electric load": "#f9d002" + "electricity": "#f9d002" + "lines": "#70af1d" + "transmission lines": "#70af1d" + "AC-AC": "#70af1d" + "AC line": "#70af1d" + "links": "#8a1caf" + "HVDC links": "#8a1caf" + "DC-DC": "#8a1caf" + "DC link": "#8a1caf" "load": "#dd2e23" nice_names: OCGT: "Open-Cycle Gas" diff --git a/config.tutorial.yaml b/config.tutorial.yaml index ea624727..4c093a3c 100755 --- a/config.tutorial.yaml +++ b/config.tutorial.yaml @@ -1,15 +1,17 @@ -# 
SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 -version: 0.4.0 +version: 0.6.1 tutorial: true logging: level: INFO format: '%(levelname)s:%(name)s:%(message)s' -summary_dir: results +run: + name: "" + shared_cutouts: false scenario: simpl: [''] @@ -17,11 +19,7 @@ scenario: clusters: [5] opts: [Co2L-24H] -countries: ['DE'] - -clustering: - simplify: - to_substations: false # network is simplified to nodes with positive or negative power injection (i.e. substations or offwind connections) +countries: ['BE'] snapshots: start: "2013-03-01" @@ -31,6 +29,7 @@ snapshots: enable: prepare_links_p_nom: false retrieve_databundle: true + retrieve_cost_data: true build_cutout: false retrieve_cutout: true build_natura_raster: false @@ -45,7 +44,7 @@ electricity: Generator: [OCGT] StorageUnit: [] #battery, H2 Store: [battery, H2] - Link: [] + Link: [] # H2 pipeline max_hours: battery: 6 @@ -57,8 +56,9 @@ electricity: atlite: nprocesses: 4 + show_progress: false # false saves time cutouts: - europe-2013-era5-tutorial: + be-03-2013-era5: module: era5 x: [4., 15.] y: [46., 56.] @@ -66,7 +66,7 @@ atlite: renewable: onwind: - cutout: europe-2013-era5-tutorial + cutout: be-03-2013-era5 resource: method: wind turbine: Vestas_V112_3MW @@ -75,15 +75,15 @@ renewable: corine: # Scholz, Y. (2012). Renewable energy based electricity supply at low costs: # development of the REMix model and application for Europe. ( p.42 / p.28) - grid_codes: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, - 24, 25, 26, 27, 28, 29, 31, 32] + grid_codes: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 32] distance: 1000 distance_grid_codes: [1, 2, 3, 4, 5, 6] natura: true + excluder_resolution: 200 potential: simple # or conservative clip_p_max_pu: 1.e-2 offwind-ac: - cutout: europe-2013-era5-tutorial + cutout: be-03-2013-era5 resource: method: wind turbine: NREL_ReferenceTurbine_5MW_offshore @@ -91,11 +91,13 @@ renewable: # correction_factor: 0.93 corine: [44, 255] natura: true + ship_threshold: 400 max_shore_distance: 30000 + excluder_resolution: 200 potential: simple # or conservative clip_p_max_pu: 1.e-2 offwind-dc: - cutout: europe-2013-era5-tutorial + cutout: be-03-2013-era5 resource: method: wind turbine: NREL_ReferenceTurbine_5MW_offshore @@ -104,11 +106,13 @@ renewable: # correction_factor: 0.93 corine: [44, 255] natura: true + ship_threshold: 400 min_shore_distance: 30000 + excluder_resolution: 200 potential: simple # or conservative clip_p_max_pu: 1.e-2 solar: - cutout: europe-2013-era5-tutorial + cutout: be-03-2013-era5 resource: method: pv panel: CSi @@ -123,9 +127,9 @@ renewable: # power." Applied Energy 135 (2014): 704-720. # This correction factor of 0.854337 may be in order if using reanalysis data. 
# correction_factor: 0.854337 - corine: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, - 14, 15, 16, 17, 18, 19, 20, 26, 31, 32] + corine: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 26, 31, 32] natura: true + excluder_resolution: 200 potential: simple # or conservative clip_p_max_pu: 1.e-2 @@ -151,16 +155,25 @@ transformers: type: '' load: - power_statistics: True # only for files from <2019; set false in order to get ENTSOE transparency data + power_statistics: true # only for files from <2019; set false in order to get ENTSOE transparency data interpolate_limit: 3 # data gaps up until this size are interpolated linearly - time_shift_for_large_gaps: 1w # data gaps up until this size are copied by copying from + time_shift_for_large_gaps: 1w # data gaps up until this size are copied by copying from manual_adjustments: true # false scaling_factor: 1.0 costs: year: 2030 - discountrate: 0.07 # From a Lion Hirth paper, also reflects average of Noothout et al 2016 - USD2013_to_EUR2013: 0.7532 # [EUR/USD] ECB: https://www.ecb.europa.eu/stats/exchange/eurofxref/html/eurofxref-graph-usd.en.html + version: v0.4.0 + rooftop_share: 0.14 + fill_values: + FOM: 0 + VOM: 0 + efficiency: 1 + fuel: 0 + investment: 0 + lifetime: 25 + "CO2 intensity": 0 + "discount rate": 0.07 marginal_cost: solar: 0.01 onwind: 0.015 @@ -170,6 +183,27 @@ costs: emission_prices: # in currency per tonne emission, only used with the option Ep co2: 0. +clustering: + simplify_network: + to_substations: false # network is simplified to nodes with positive or negative power injection (i.e. substations or offwind connections) + algorithm: kmeans # choose from: [hac, kmeans] + feature: solar+onwind-time # only for hac. choose from: [solar+onwind-time, solar+onwind-cap, solar-time, solar-cap, solar+offwind-cap] etc. 
+ exclude_carriers: [] + cluster_network: + algorithm: kmeans + feature: solar+onwind-time + exclude_carriers: [] + aggregation_strategies: + generators: + p_nom_max: sum # use "min" for more conservative assumptions + p_nom_min: sum + p_min_pu: mean + marginal_cost: mean + committable: any + ramp_limit_up: max + ramp_limit_down: max + efficiency: mean + solving: options: formulation: kirchhoff @@ -186,7 +220,7 @@ solving: plotting: map: figsize: [7, 7] - boundaries: [-10.2, 29, 35, 72] + boundaries: [-10.2, 29, 35, 72] p_nom: bus_size_factor: 5.e+4 linewidth_factor: 3.e+3 @@ -205,50 +239,50 @@ plotting: AC_carriers: ["AC line", "AC transformer"] link_carriers: ["DC line", "Converter AC-DC"] tech_colors: - "onwind" : "#235ebc" - "onshore wind" : "#235ebc" - 'offwind' : "#6895dd" - 'offwind-ac' : "#6895dd" - 'offshore wind' : "#6895dd" - 'offshore wind ac' : "#6895dd" - 'offwind-dc' : "#74c6f2" - 'offshore wind dc' : "#74c6f2" - "hydro" : "#08ad97" - "hydro+PHS" : "#08ad97" - "PHS" : "#08ad97" - "hydro reservoir" : "#08ad97" - 'hydroelectricity' : '#08ad97' - "ror" : "#4adbc8" - "run of river" : "#4adbc8" - 'solar' : "#f9d002" - 'solar PV' : "#f9d002" - 'solar thermal' : '#ffef60' - 'biomass' : '#0c6013' - 'solid biomass' : '#06540d' - 'biogas' : '#23932d' - 'waste' : '#68896b' - 'geothermal' : '#ba91b1' - "OCGT" : "#d35050" - "gas" : "#d35050" - "natural gas" : "#d35050" - "CCGT" : "#b20101" - "nuclear" : "#ff9000" - "coal" : "#707070" - "lignite" : "#9e5a01" - "oil" : "#262626" - "H2" : "#ea048a" - "hydrogen storage" : "#ea048a" - "battery" : "#b8ea04" - "Electric load" : "#f9d002" - "electricity" : "#f9d002" - "lines" : "#70af1d" - "transmission lines" : "#70af1d" - "AC-AC" : "#70af1d" - "AC line" : "#70af1d" - "links" : "#8a1caf" - "HVDC links" : "#8a1caf" - "DC-DC" : "#8a1caf" - "DC link" : "#8a1caf" + "onwind": "#235ebc" + "onshore wind": "#235ebc" + 'offwind': "#6895dd" + 'offwind-ac': "#6895dd" + 'offshore wind': "#6895dd" + 'offshore wind ac': "#6895dd" + 'offwind-dc': "#74c6f2" + 'offshore wind dc': "#74c6f2" + "hydro": "#08ad97" + "hydro+PHS": "#08ad97" + "PHS": "#08ad97" + "hydro reservoir": "#08ad97" + 'hydroelectricity': '#08ad97' + "ror": "#4adbc8" + "run of river": "#4adbc8" + 'solar': "#f9d002" + 'solar PV': "#f9d002" + 'solar thermal': '#ffef60' + 'biomass': '#0c6013' + 'solid biomass': '#06540d' + 'biogas': '#23932d' + 'waste': '#68896b' + 'geothermal': '#ba91b1' + "OCGT": "#d35050" + "gas": "#d35050" + "natural gas": "#d35050" + "CCGT": "#b20101" + "nuclear": "#ff9000" + "coal": "#707070" + "lignite": "#9e5a01" + "oil": "#262626" + "H2": "#ea048a" + "hydrogen storage": "#ea048a" + "battery": "#b8ea04" + "Electric load": "#f9d002" + "electricity": "#f9d002" + "lines": "#70af1d" + "transmission lines": "#70af1d" + "AC-AC": "#70af1d" + "AC line": "#70af1d" + "links": "#8a1caf" + "HVDC links": "#8a1caf" + "DC-DC": "#8a1caf" + "DC link": "#8a1caf" nice_names: OCGT: "Open-Cycle Gas" CCGT: "Combined-Cycle Gas" diff --git a/data/eia_hydro_annual_generation.csv b/data/eia_hydro_annual_generation.csv new file mode 100644 index 00000000..9b781ee3 --- /dev/null +++ b/data/eia_hydro_annual_generation.csv @@ -0,0 +1,50 @@ +https://www.eia.gov/international/data/world/electricity/electricity-generation?pd=2&p=000000000000000000000000000000g&u=1&f=A&v=mapbubble&a=-&i=none&vo=value&t=R&g=000000000000002&l=73-1028i008017kg6368g80a4k000e0ag00gg0004g8g0ho00g000400008&s=315532800000&e=1577836800000&ev=false& +Report generated on: 03-28-2022 11:20:48 
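A practical note on this new data file: the two metadata lines above (the source URL and the report timestamp) precede the actual header row, so it is not a plain CSV. A sketch of reading it, assuming ``pandas``; the clean-up steps are illustrative:

.. code:: python

    import pandas as pd

    df = (
        pd.read_csv(
            "data/eia_hydro_annual_generation.csv",
            skiprows=2,        # skip the URL and "Report generated on" lines
            index_col=1,       # index by the country-name column
            na_values="--",    # missing years are marked with "--"
        )
        .iloc[1:]              # drop the units row below the header
        .drop(columns="API")   # keep only the year columns
        .astype(float)
    )
    df.index = df.index.str.strip()  # country names carry a leading space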
+"API","","1980","1981","1982","1983","1984","1985","1986","1987","1988","1989","1990","1991","1992","1993","1994","1995","1996","1997","1998","1999","2000","2001","2002","2003","2004","2005","2006","2007","2008","2009","2010","2011","2012","2013","2014","2015","2016","2017","2018","2019","2020" +"","hydroelectricity net generation (billion kWh)","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","" +"INTL.33-12-EURO-BKWH.A"," Europe","458.018","464.155","459.881","473.685","481.241","476.739","459.535","491.085","534.517","465.365","474.466","475.47","509.041","526.448","531.815","543.743","529.114164","543.845616","562.441501","569.308453","591.206662","587.371195","541.542535","506.19703","544.536443","545.176179","537.335934","540.934407","567.557921","564.244482","619.96477","543.05273","600.46622","631.86431","619.59229","615.53013","629.98906","562.59258","619.31106","610.62616","670.925" +"INTL.33-12-ALB-BKWH.A"," Albania","2.919","3.018","3.093","3.167","3.241","3.315","3.365","3.979","3.713","3.846","2.82","3.483","3.187","3.281","3.733","4.162","5.669","4.978","4.872","5.231","4.548","3.519","3.477","5.117","5.411","5.319","4.951","2.76","3.759","5.201","7.49133","4.09068","4.67775","6.88941","4.67676","5.83605","7.70418","4.47975","8.46648","5.15394","5.281" +"INTL.33-12-AUT-BKWH.A"," Austria","28.501","30.008","29.893","29.577","28.384","30.288","30.496","25.401","35.151","34.641","31.179","31.112","34.483","36.336","35.349","36.696","33.874","35.744","36.792","40.292","41.418","40.05","39.825","32.883","36.394","36.31","35.48","36.732","37.969","40.487","36.466","32.511","41.862","40.138","39.001","35.255","37.954","36.462","35.73","40.43655","45.344" +"INTL.33-12-BEL-BKWH.A"," Belgium","0.274","0.377","0.325","0.331","0.348","0.282","0.339","0.425","0.354","0.3","0.263","0.226","0.338","0.252","0.342","0.335","0.237","0.30195","0.38511","0.338","0.455","0.437","0.356","0.245","0.314","0.285","0.355","0.385","0.406","0.325","0.298","0.193","0.353","0.376","0.289","0.314","0.367","0.268","0.311","0.108","1.29" +"INTL.33-12-BIH-BKWH.A"," Bosnia and Herzegovina","--","--","--","--","--","--","--","--","--","--","--","--","3.374","2.343","3.424","3.607","5.104","4.608","4.511","5.477","5.043","5.129","5.215","4.456","5.919","5.938","5.798","3.961","4.818","6.177","7.946","4.343","4.173","7.164","5.876","5.495","5.585","3.7521","6.35382","6.02019","6.1" +"INTL.33-12-BGR-BKWH.A"," Bulgaria","3.674","3.58","3.018","3.318","3.226","2.214","2.302","2.512","2.569","2.662","1.859","2.417","2.042","1.923","1.453","2.291","2.89","2.726","3.066","2.725","2.646","1.72","2.172","2.999","3.136","4.294","4.196","2.845","2.796","3.435","4.98168","2.84328","3.14622","3.99564","4.55598","5.59845","3.8412","2.79972","5.09553","3.34917","3.37" +"INTL.33-12-HRV-BKWH.A"," Croatia","--","--","--","--","--","--","--","--","--","--","--","--","4.298","4.302","4.881","5.212","7.156","5.234","5.403","6.524","5.794","6.482","5.311","4.827","6.888","6.27","5.94","4.194","5.164","6.663","9.035","4.983","4.789","8.536","8.917","6.327","6.784","5.255","7.62399","5.87268","3.4" +"INTL.33-12-CYP-BKWH.A"," Cyprus","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0" +"INTL.33-12-CZE-BKWH.A"," Czech 
Republic","--","--","--","--","--","--","--","--","--","--","--","--","--","1.355","1.445","1.982","1.949","1.68201","1.382","1.664","1.7404","2.033","2.467","1.369","1.999","2.356","2.525","2.068","2.004","2.405","2.775","1.95","2.107","2.704","1.909","1.779","1.983","1.852","1.615","1.98792","3.4" +"INTL.33-12-DNK-BKWH.A"," Denmark","0.03","0.031","0.028","0.036","0.028","0.027","0.029","0.029","0.032","0.027","0.027","0.026","0.028","0.027","0.033","0.03","0.019","0.019","0.02673","0.031","0.03","0.028","0.032","0.021","0.027","0.023","0.023","0.028","0.026","0.019","0.021","0.017","0.017","0.013","0.015","0.018","0.019","0.018","0.015","0.01584","0.02" +"INTL.33-12-EST-BKWH.A"," Estonia","--","--","--","--","--","--","--","--","--","--","--","--","0.001","0.001","0.003","0.002","0.002","0.003","0.004","0.004","0.005","0.007","0.006","0.013","0.022","0.022","0.014","0.021","0.028","0.032","0.027","0.03","0.042","0.026","0.027","0.027","0.035","0.026","0.015","0.01881","0.04" +"INTL.33-12-FRO-BKWH.A"," Faroe Islands","0.049","0.049","0.049","0.049","0.049","0.049","0.049","0.049","0.062","0.071","0.074","0.074","0.083","0.073","0.075","0.075","0.069564","0.075066","0.076501","0.069453","0.075262","0.075195","0.095535","0.08483","0.093443","0.097986","0.099934","0.103407","0.094921","0.091482","0.06676","0.092","0.099","0.091","0.121","0.132","0.105","0.11","0.107","0.102","0.11" +"INTL.33-12-FIN-BKWH.A"," Finland","10.115","13.518","12.958","13.445","13.115","12.211","12.266","13.658","13.229","12.9","10.75","13.065","14.956","13.341","11.669","12.796","11.742","12.11958","14.9","12.652","14.513","13.073","10.668","9.495","14.919","13.646","11.379","14.035","16.941","12.559","12.743","12.278","16.667","12.672","13.24","16.584","15.634","14.61","13.137","12.31461","15.56" +"INTL.33-12-CSK-BKWH.A"," Former Czechoslovakia","4.8","4.2","3.7","3.9","3.2","4.3","4","4.853","4.355","4.229","3.919","3.119","3.602","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--" +"INTL.33-12-SCG-BKWH.A"," Former Serbia and Montenegro","--","--","--","--","--","--","--","--","--","--","--","--","11.23","10.395","11.016","12.071","14.266","12.636","12.763","13.243","11.88","12.326","11.633","9.752","11.01","11.912","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--" +"INTL.33-12-YUG-BKWH.A"," Former Yugoslavia","27.868","25.044","23.295","21.623","25.645","24.363","27.474","25.98","25.612","23.256","19.601","18.929","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--" +"INTL.33-12-FRA-BKWH.A"," France","68.253","70.358","68.6","67.515","64.01","60.248","60.953","68.623","73.952","45.744","52.796","56.277","68.313","64.3","78.057","72.196","64.43","63.151","61.479","71.832","66.466","73.888","59.992","58.567","59.276","50.965","55.741","57.029","63.017","56.428","61.945","45.184","59.099","71.042","62.993","54.876","60.094","49.389","64.485","56.98242","64.84" +"INTL.33-12-DEU-BKWH.A"," Germany","--","--","--","--","--","--","--","--","--","--","--","14.742","17.223","17.699","19.731","21.562","21.737","17.18343","17.044","19.451","21.515","22.506","22.893","19.071","20.866","19.442","19.808","20.957","20.239","18.841","20.678","17.323","21.331","22.66","19.31","18.664","20.214","19.985","17.815","19.86039","24.75" +"INTL.33-12-DDR-BKWH.A"," Germany, 
East","1.658","1.718","1.748","1.683","1.748","1.758","1.767","1.726","1.719","1.551","1.389","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--" +"INTL.33-12-DEUW-BKWH.A"," Germany, West","17.125","17.889","17.694","16.713","16.434","15.354","16.526","18.36","18.128","16.482","15.769","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--" +"INTL.33-12-GIB-BKWH.A"," Gibraltar","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0" +"INTL.33-12-GRC-BKWH.A"," Greece","3.396","3.398","3.551","2.331","2.852","2.792","3.222","2.768","2.354","1.888","1.751","3.068","2.181","2.26","2.573","3.494","4.305","3.84318","3.68","4.546","3.656","2.076","2.772","4.718","4.625","4.967","5.806","2.565","3.279","5.32","7.431","3.998","4.387","6.337","4.464","5.782","5.543","3.962","5.035","3.9798","3.43" +"INTL.33-12-HUN-BKWH.A"," Hungary","0.111","0.166","0.158","0.153","0.179","0.153","0.152","0.167","0.167","0.156","0.176","0.192","0.156","0.164","0.159","0.161","0.205","0.21384","0.15345","0.179","0.176","0.184","0.192","0.169","0.203","0.2","0.184","0.208","0.211","0.226","0.184","0.216","0.206","0.208","0.294","0.227","0.253","0.214","0.216","0.21681","0.24" +"INTL.33-12-ISL-BKWH.A"," Iceland","3.053","3.085","3.407","3.588","3.738","3.667","3.846","3.918","4.169","4.217","4.162","4.162","4.267","4.421","4.47","4.635","4.724","5.15493","5.565","5.987","6.292","6.512","6.907","7.017","7.063","6.949","7.22","8.31","12.303","12.156","12.51","12.382","12.214","12.747","12.554","13.541","13.092","13.892","13.679","13.32441","12.46" +"INTL.33-12-IRL-BKWH.A"," Ireland","0.833","0.855","0.792","0.776","0.68","0.824","0.91","0.673","0.862","0.684","0.69","0.738","0.809","0.757","0.911","0.706","0.715","0.67122","0.907","0.838","0.838","0.59","0.903","0.592","0.624","0.625","0.717","0.66","0.959","0.893","0.593","0.699","0.795","0.593","0.701","0.798","0.674","0.685","0.687","0.87813","1.21" +"INTL.33-12-ITA-BKWH.A"," Italy","44.997","42.782","41.216","40.96","41.923","40.616","40.626","39.05","40.205","33.647","31.31","41.817","41.778","41.011","44.212","37.404","41.617","41.18697","40.808","44.911","43.763","46.343","39.125","33.303","41.915","35.706","36.624","32.488","41.207","48.647","50.506","45.36477","41.45625","52.24626","57.95955","45.08163","42.00768","35.83701","48.29913","45.31824","47.72" +"INTL.33-12-XKS-BKWH.A"," Kosovo","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","0.075","0.119","0.154","0.104","0.095","0.142","0.149","0.139","0.243","0.177","0.27027","0.2079","0.26" +"INTL.33-12-LVA-BKWH.A"," Latvia","--","--","--","--","--","--","--","--","--","--","--","--","2.498","2.846","3.272","2.908","1.841","2.922","2.99","2.729","2.791","2.805","2.438","2.243","3.078","3.293","2.671","2.706","3.078","3.422","3.488","2.857","3.677","2.838","1.953","1.841","2.523","4.356","2.417","2.08692","2.59" +"INTL.33-12-LTU-BKWH.A"," 
Lithuania","--","--","--","--","--","--","--","--","--","--","--","--","0.308","0.389","0.447","0.369","0.323","0.291","0.413","0.409","0.336","0.322","0.35","0.323","0.417","0.446193","0.393","0.417","0.398","0.42","0.535","0.475","0.419","0.516","0.395","0.346","0.45","0.597","0.427","0.34254","1.06" +"INTL.33-12-LUX-BKWH.A"," Luxembourg","0.086","0.095","0.084","0.083","0.088","0.071","0.084","0.101","0.097","0.072","0.07","0.083","0.069","0.066","0.117","0.087","0.059","0.082","0.114","0.084","0.119","0.117","0.098","0.078","0.103","0.093","0.11","0.116","0.131","0.105","0.104","0.061","0.095","0.114","0.104","0.095","0.111","0.082","0.089","0.10593","1.09" +"INTL.33-12-MLT-BKWH.A"," Malta","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0" +"INTL.33-12-MNE-BKWH.A"," Montenegro","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","1.733","1.271","1.524","2.05","2.723","1.192","1.462","2.479","1.734","1.476","1.825","1.014","2.09187","1.78","1.8" +"INTL.33-12-NLD-BKWH.A"," Netherlands","0","0","0","0","0","0.003","0.003","0.001","0.002","0.037","0.119","0.079","0.119","0.091","0.1","0.087","0.079","0.09108","0.111","0.089","0.141","0.116","0.109","0.071","0.094","0.087","0.105","0.106","0.101","0.097","0.105","0.057","0.104","0.114","0.112","0.093","0.1","0.061","0.072","0.07326","0.05" +"INTL.33-12-MKD-BKWH.A"," North Macedonia","--","--","--","--","--","--","--","--","--","--","--","--","0.817","0.517","0.696","0.793","0.842","0.891","1.072","1.375","1.158","0.62","0.749","1.36","1.467","1.477","1.634","1","0.832","1.257","2.407","1.419","1.031","1.568","1.195","1.846","1.878","1.099","1.773","1.15236","1.24" +"INTL.33-12-NOR-BKWH.A"," Norway","82.717","91.876","91.507","104.704","104.895","101.464","95.321","102.341","107.919","117.369","119.933","109.032","115.505","118.024","110.398","120.315","102.823","108.677","114.546","120.237","140.4","119.258","128.078","104.425","107.693","134.331","118.175","132.319","137.654","124.03","116.257","119.78","141.189","127.551","134.844","136.662","142.244","141.651","138.202","123.66288","141.69" +"INTL.33-12-POL-BKWH.A"," Poland","2.326","2.116","1.528","1.658","1.394","1.833","1.534","1.644","1.775","1.593","1.403","1.411","1.492","1.473","1.716","1.868","1.912","1.941","2.286","2.133","2.085","2.302","2.256","1.654","2.06","2.179","2.022","2.328","2.13","2.351","2.9","2.313","2.02","2.421","2.165","1.814","2.117","2.552","1.949","1.93842","2.93" +"INTL.33-12-PRT-BKWH.A"," Portugal","7.873","4.934","6.82","7.897","9.609","10.512","8.364","9.005","12.037","5.72","9.065","8.952","4.599","8.453","10.551","8.26","14.613","12.97395","12.853","7.213","11.21","13.894","7.722","15.566","9.77","4.684","10.892","9.991","6.73","8.201","15.954","11.423","5.589","13.652","15.471","8.615","15.608","5.79","12.316","8.6526","13.96" +"INTL.33-12-ROU-BKWH.A"," Romania","12.506","12.605","11.731","9.934","11.208","11.772","10.688","11.084","13.479","12.497","10.87","14.107","11.583","12.64","12.916","16.526","15.597","17.334","18.69","18.107","14.63","14.774","15.886","13.126","16.348","20.005","18.172","15.806","17.023","15.379","19.684","14.581","11.945","14.807","18.618","16.467","17.848","14.349","17.48736","15.65289","15.53" +"INTL.33-12-SRB-BKWH.A"," 
Serbia","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","10.855","9.937","9.468","10.436","11.772","8.58","9.193","10.101","10.893","9.979","10.684","9.061","10.53261","10.07028","9.66" +"INTL.33-12-SVK-BKWH.A"," Slovakia","--","--","--","--","--","--","--","--","--","--","--","--","--","3.432","4.311","4.831","4.185","4.023","4.224","4.429","4.569","4.878","5.215","3.4452","4.059","4.592","4.355","4.406","4","4.324","5.184","3.211","3.687","4.329","3.762","3.701","4.302","4.321","3.506","4.27383","4.67" +"INTL.33-12-SVN-BKWH.A"," Slovenia","--","--","--","--","--","--","--","--","--","--","--","--","3.379","2.974","3.348","3.187","3.616","3.046","3.4","3.684","3.771","3.741","3.265","2.916","4.033","3.426","3.555","3.233","3.978","4.666","4.452","3.506","3.841","4.562","6.011","3.75","4.443","3.814","4.643","4.43421","5.24" +"INTL.33-12-ESP-BKWH.A"," Spain","29.16","21.64","25.99","26.696","31.088","30.895","26.105","27.016","34.76","19.046","25.16","27.01","18.731","24.133","27.898","22.881","39.404","34.43","33.665","22.634","29.274","40.617","22.691","40.643","31.359","18.209","25.699","27.036","23.13","26.147","41.576","30.07","20.192","36.45","38.815","27.656","35.77","18.007","33.743","24.23025","33.34" +"INTL.33-12-SWE-BKWH.A"," Sweden","58.133","59.006","54.369","62.801","67.106","70.095","60.134","70.95","69.016","70.911","71.778","62.603","73.588","73.905","58.508","67.421","51.2226","68.365","74.25","70.974","77.798","78.269","65.696","53.005","59.522","72.075","61.106","65.497","68.378","65.193","66.279","66.047","78.333","60.81","63.227","74.734","61.645","64.651","61.79","64.46583","71.6" +"INTL.33-12-CHE-BKWH.A"," Switzerland","32.481","35.13","35.974","35.069","29.871","31.731","32.576","34.328","35.437","29.477","29.497","31.756","32.373","35.416","38.678","34.817","28.458","33.70257","33.136","39.604","36.466","40.895","34.862","34.471","33.411","30.914","30.649","34.898","35.676","35.366","35.704","32.069","38.218","38.08","37.659","37.879","34.281","33.754","34.637","37.6596","40.62" +"INTL.33-12-TUR-BKWH.A"," Turkey","11.159","12.308","13.81","11.13","13.19","11.822","11.637","18.314","28.447","17.61","22.917","22.456","26.302","33.611","30.28","35.186","40.07","39.41784","41.80671","34.33","30.57","23.77","33.346","34.977","45.623","39.165","43.802","35.492","32.937","35.598","51.423","51.155","56.669","58.225","39.75","65.856","66.686","57.824","59.49","87.99714","77.39" +"INTL.33-12-GBR-BKWH.A"," United Kingdom","3.921","4.369","4.543","4.548","3.992","4.08","4.767","4.13","4.915","4.732","5.119","4.534","5.329","4.237","5.043","4.79","3.359","4.127","5.067","5.283","5.035","4.015","4.74","3.195","4.795","4.873","4.547","5.026","5.094","5.178","3.566","5.655","5.286","4.667","5.832","6.246","5.342","5.836","5.189","5.89941","7.64" diff --git a/data/links_tyndp.csv b/data/links_tyndp.csv index 8079be72..a0603120 100644 --- a/data/links_tyndp.csv +++ b/data/links_tyndp.csv @@ -24,3 +24,5 @@ Gridlink,Kingsnorth (UK),Warande (FR),160,,1400,in permitting,,https://tyndp.ent NeuConnect,Grain (UK),Fedderwarden (DE),680,,1400,in permitting,,https://tyndp.entsoe.eu/tyndp2018/projects/projects/309,0.716666666666667,51.44,8.046524,53.562763 NordBalt,Klaipeda (LT),Nybro (SE),450,,700,built,,https://en.wikipedia.org/wiki/NordBalt,21.256667,55.681667,15.854167,56.767778 Estlink 1,Harku (EE),Espoo (FI),105,,350,built,,https://en.wikipedia.org/wiki/Estlink,24.560278,59.384722,24.551667,60.203889 
+Greenlink,Waterford (IE),Pembroke (UK),,180,500,under construction,,https://tyndp2022-project-platform.azurewebsites.net/projectsheets/transmission/286,-6.987,52.260,-4.986,51.686 +Celtic Interconnector,Aghada (IE),La Martyre (FR),,572,700,under consideration,,https://tyndp2022-project-platform.azurewebsites.net/projectsheets/transmission/107,-8.16642,51.91413,-4.184,48.459 diff --git a/data/nuclear_p_max_pu.csv b/data/nuclear_p_max_pu.csv new file mode 100644 index 00000000..7bc54455 --- /dev/null +++ b/data/nuclear_p_max_pu.csv @@ -0,0 +1,16 @@ +country,factor +BE,0.65 +BG,0.89 +CZ,0.82 +FI,0.92 +FR,0.70 +DE,0.88 +HU,0.90 +NL,0.86 +RO,0.92 +SK,0.89 +SI,0.94 +ES,0.89 +SE,0.82 +CH,0.86 +GB,0.67 diff --git a/data/parameter_corrections.yaml b/data/parameter_corrections.yaml index b50fc03a..df15738a 100644 --- a/data/parameter_corrections.yaml +++ b/data/parameter_corrections.yaml @@ -36,12 +36,20 @@ Link: "5583": "7428" # bus0 == bus1 to remove link in remove_unconnected_components (Sardinia) "13588": "7428" # bus0 == bus1 to remove link in remove_unconnected_components (Sardinia) "T23": "6355" # bus0 == bus1 to remove link in remove_unconnected_components (NordBalt) + "14815": "5939" # Kainachtal + "8706": "6448" bus1: index: "12931": "8152" # BorWin3 "5582": "2382" # combine link 5583 + 5582 in 5582 (Sardinia) "13589": "1349" # combine link 13589 + 13588 in 13589 (Sardinia) "14820": "6354" # NordBalt + "14810": "6365" # Skagerrak + "8708": "6448" + "8394": "6695" + "14813": "7052" + "8009": "5939" + "5601": "7052" # Link Sweden - Lübeck length: index: "5582": 26.39 # new length of combined links (sum) @@ -53,6 +61,7 @@ Line: bus0: index: "14573": "7179" #fix bus-id substation in PT (220/380kV issue) + "14756": "8577" # Deeside connection v_nom: index: "14573": 220 # 220/380kV issue of substation in PT diff --git a/doc/Makefile b/doc/Makefile index 75df2f48..d9bd3d5d 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2017-2020 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: 2017-2022 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT diff --git a/doc/_static/theme_overrides.css b/doc/_static/theme_overrides.css index a4c9818d..e7e21151 100644 --- a/doc/_static/theme_overrides.css +++ b/doc/_static/theme_overrides.css @@ -1,4 +1,4 @@ -/* SPDX-FileCopyrightText: 2017-2020 The PyPSA-Eur Authors +/* SPDX-FileCopyrightText: 2017-2022 The PyPSA-Eur Authors SPDX-License-Identifier: MIT */ @@ -71,4 +71,4 @@ .wy-nav-content { max-width: 910px !important; } -} \ No newline at end of file +} diff --git a/doc/cloudcomputing.rst b/doc/cloudcomputing.rst index f751d624..6df8b2b3 100644 --- a/doc/cloudcomputing.rst +++ b/doc/cloudcomputing.rst @@ -72,7 +72,7 @@ Step 3 - Installation of Cloud SDK - Download Google Cloud SDK `SDK `_. Check that you are logged in to your Google account. The link should lead you to the Windows installation of Google Cloud SDK. - Follow the "Quickstart for Windows - Before you begin" steps. -- After the successfull installation and initialization, close the Google Cloud SDK reopen it again. Type the following command into the "Google Cloud SDK Shell": +- After the successful installation and initialization, close the Google Cloud SDK and reopen it again. Type the following command into the "Google Cloud SDK Shell": .. code:: bash @@ -107,7 +107,7 @@ Make sure that your instance is operating for the next steps. - Click on the advanced setting. SSH -> Authentication. - Option 1.
Click on the Tools button and "Install Public Key into Server..". Somewhere in your folder structure must be a public key. I found it with the following folder syntax on my local windows computer -> :\Users\...\.ssh (there should be a PPK file). - Option 2. Click on the Tools button and "Generate new key pair...". Save the private key at a folder you remember and add it to the "private key file" field in WinSCP. Upload the public key to the metadata of your instance. -- Click ok and save. Then click Login. If successfull WinSCP will open on the left side your local computer folder structure and on the right side the folder strucutre of your VM. (If you followed Option 2 and its not initially working. Stop your instance, refresh the website, reopen the WinSCP field. Afterwards your your Login should be successfull) +- Click ok and save. Then click Login. If successful, WinSCP will open your local computer's folder structure on the left side and the folder structure of your VM on the right side. (If you followed Option 2 and it is not initially working: stop your instance, refresh the website, and reopen WinSCP. Afterwards your login should be successful.) If you struggled with the above steps, you could also try `this video `_. diff --git a/doc/conf.py b/doc/conf.py index 01dd6bc8..0cfb1f9a 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: 2017-2020 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -16,19 +17,19 @@ # All configuration values have a default; values that are commented out # serve to show the default. -import sys import os import shlex +import sys # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath('../scripts')) +sys.path.insert(0, os.path.abspath("../scripts")) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' +# needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom @@ -36,47 +37,47 @@ sys.path.insert(0, os.path.abspath('../scripts')) extensions = [ #'sphinx.ext.autodoc', #'sphinx.ext.autosummary', - 'sphinx.ext.intersphinx', - 'sphinx.ext.todo', - 'sphinx.ext.mathjax', - 'sphinx.ext.napoleon', - 'sphinx.ext.graphviz', + "sphinx.ext.intersphinx", + "sphinx.ext.todo", + "sphinx.ext.mathjax", + "sphinx.ext.napoleon", + "sphinx.ext.graphviz", #'sphinx.ext.pngmath', #'sphinxcontrib.tikz', #'rinoh.frontend.sphinx', - 'sphinx.ext.imgconverter', # for SVG conversion + "sphinx.ext.imgconverter", # for SVG conversion ] -autodoc_default_flags = ['members'] +autodoc_default_flags = ["members"] autosummary_generate = True # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] -source_suffix = '.rst' +source_suffix = ".rst" # The encoding of source files. -#source_encoding = 'utf-8-sig' +# source_encoding = 'utf-8-sig' # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project.
-project = u'PyPSA-Eur' -copyright = u'2017-2020 Jonas Hoersch (KIT, FIAS), Fabian Hofmann (FIAS), David Schlachtberger (FIAS), Tom Brown (KIT, FIAS); 2019-2020 Fabian Neumann (KIT)' -author = u'Jonas Hoersch (KIT, FIAS), Fabian Hofmann (FIAS), David Schlachtberger (FIAS), Tom Brown (KIT, FIAS), Fabian Neumann (KIT)' +project = "PyPSA-Eur" +copyright = "2017-2022 Jonas Hoersch (KIT, FIAS), Fabian Hofmann (TUB, FIAS), David Schlachtberger (FIAS), Tom Brown (TUB, KIT, FIAS); 2019-2022 Fabian Neumann (TUB, KIT)" +author = "Jonas Hoersch (KIT, FIAS), Fabian Hofmann (TUB, FIAS), David Schlachtberger (FIAS), Tom Brown (TUB, KIT, FIAS), Fabian Neumann (TUB, KIT)" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. -version = u'0.3' +version = "0.6" # The full version, including alpha/beta/rc tags. -release = u'0.4.0' +release = "0.6.1" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. @@ -87,37 +88,37 @@ language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: -#today = '' +# today = '' # Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' +# today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. -exclude_patterns = ['_build'] +exclude_patterns = ["_build"] # The reST default role (used for this markup: `text`) to use for all # documents. -#default_role = None +# default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True +# add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). -#add_module_names = True +# add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. -#show_authors = False +# show_authors = False # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] +# modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. -#keep_warnings = False +# keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True @@ -127,35 +128,35 @@ todo_include_todos = True # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -html_theme = 'sphinx_rtd_theme' +html_theme = "sphinx_rtd_theme" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { - 'display_version': True, - 'sticky_navigation': True, + "display_version": True, + "sticky_navigation": True, } # Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] +# html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +# html_title = None # A shorter title for the navigation bar. Default is the same as html_title. 
-#html_short_title = None +# html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. -#html_logo = None +# html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. -#html_favicon = None +# html_favicon = None # These folders are copied to the documentation's HTML output html_static_path = ["_static"] @@ -167,130 +168,127 @@ html_css_files = ["theme_overrides.css"] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. -#html_extra_path = [] +# html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' +# html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. -#html_use_smartypants = True +# html_use_smartypants = True # Custom sidebar templates, maps document names to template names. -#html_sidebars = {} +# html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. -#html_additional_pages = {} +# html_additional_pages = {} # If false, no module index is generated. -#html_domain_indices = True +# html_domain_indices = True # If false, no index is generated. -#html_use_index = True +# html_use_index = True # If true, the index is split into individual pages for each letter. -#html_split_index = False +# html_split_index = False # If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True +# html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True +# html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True +# html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. -#html_use_opensearch = '' +# html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None +# html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' -#html_search_language = 'en' +# html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value -#html_search_options = {'type': 'default'} +# html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. -#html_search_scorer = 'scorer.js' +# html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. -htmlhelp_basename = 'PyPSAEurdoc' +htmlhelp_basename = "PyPSAEurdoc" # -- Options for LaTeX output --------------------------------------------- latex_elements = { -# The paper size ('letterpaper' or 'a4paper'). -#'papersize': 'letterpaper', - -# The font size ('10pt', '11pt' or '12pt'). 
-#'pointsize': '10pt', - -# Additional stuff for the LaTeX preamble. -#'preamble': '', - -# Latex figure (float) alignment -#'figure_align': 'htbp', + # The paper size ('letterpaper' or 'a4paper'). + #'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + #'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + #'preamble': '', + # Latex figure (float) alignment + #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (master_doc, 'PyPSA-Eur.tex', u'PyPSA-Eur Documentation', - u'author', 'manual'), + (master_doc, "PyPSA-Eur.tex", "PyPSA-Eur Documentation", "author", "manual"), ] -#Added for rinoh http://www.mos6581.org/rinohtype/quickstart.html -rinoh_documents = [(master_doc, # top-level file (index.rst) - 'PyPSA-Eur', # output (target.pdf) - 'PyPSA-Eur Documentation', # document title - 'author')] # document author +# Added for rinoh http://www.mos6581.org/rinohtype/quickstart.html +rinoh_documents = [ + ( + master_doc, # top-level file (index.rst) + "PyPSA-Eur", # output (target.pdf) + "PyPSA-Eur Documentation", # document title + "author", + ) +] # document author # The name of an image file (relative to this directory) to place at the top of # the title page. -#latex_logo = None +# latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. -#latex_use_parts = False +# latex_use_parts = False # If true, show page references after internal links. -#latex_show_pagerefs = False +# latex_show_pagerefs = False # If true, show URL addresses after external links. -#latex_show_urls = False +# latex_show_urls = False # Documents to append as an appendix to all manuals. -#latex_appendices = [] +# latex_appendices = [] # If false, no module index is generated. -#latex_domain_indices = True +# latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'pypsa-eur', u'PyPSA-Eur Documentation', - [author], 1) -] +man_pages = [(master_doc, "pypsa-eur", "PyPSA-Eur Documentation", [author], 1)] # If true, show URL addresses after external links. -#man_show_urls = False +# man_show_urls = False # -- Options for Texinfo output ------------------------------------------- @@ -299,23 +297,29 @@ man_pages = [ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'PyPSA-Eur', u'PyPSA-Eur Documentation', - author, 'PyPSA-Eur', 'One line description of project.', - 'Miscellaneous'), + ( + master_doc, + "PyPSA-Eur", + "PyPSA-Eur Documentation", + author, + "PyPSA-Eur", + "One line description of project.", + "Miscellaneous", + ), ] # Documents to append as an appendix to all manuals. -#texinfo_appendices = [] +# texinfo_appendices = [] # If false, no module index is generated. -#texinfo_domain_indices = True +# texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. -#texinfo_show_urls = 'footnote' +# texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. -#texinfo_no_detailmenu = False +# texinfo_no_detailmenu = False # Example configuration for intersphinx: refer to the Python standard library. 
-intersphinx_mapping = {'https://docs.python.org/': None} +intersphinx_mapping = {"https://docs.python.org/": None} diff --git a/doc/configtables/clustering.csv b/doc/configtables/clustering.csv index 2f63f955..bcab264a 100644 --- a/doc/configtables/clustering.csv +++ b/doc/configtables/clustering.csv @@ -1,3 +1,15 @@ ,Unit,Values,Description -simplify,,, +simplify_network,,, -- to_substations,bool,"{'true','false'}","Aggregates all nodes without power injection (positive or negative, i.e. demand or generation) to electrically closest ones" +-- algorithm,str,"One of {‘kmeans’, ‘hac’, ‘modularity’}", +-- feature,str,"Str in the format ‘carrier1+carrier2+...+carrierN-X’, where carrierI can be from {‘solar’, ‘onwind’, ‘offwind’, ‘ror’} and X is one of {‘cap’, ‘time’}.", +-- exclude_carriers,list,"List of str like ['solar', 'onwind'] or empty list []","List of carriers which will not be aggregated. If empty, all carriers will be aggregated." +cluster_network,,, +-- algorithm,str,"One of {‘kmeans’, ‘hac’}", +-- feature,str,"Str in the format ‘carrier1+carrier2+...+carrierN-X’, where carrierI can be from {‘solar’, ‘onwind’, ‘offwind’, ‘ror’} and X is one of {‘cap’, ‘time’}.", +-- exclude_carriers,list,"List of str like ['solar', 'onwind'] or empty list []","List of carriers which will not be aggregated. If empty, all carriers will be aggregated." +aggregation_strategies,,, +-- generators,,, +-- -- {key},str,"{key} can be any attribute of the generator component (str). Its value can be any aggregation strategy accepted by pandas, for example one of {min, max, sum}.","Aggregates the attribute according to the given strategy. For example, if sum, then all values within each cluster are summed to represent the new generator." +-- buses,,, +-- -- {key},str,"{key} can be any attribute of the bus component (str). Its value can be any aggregation strategy accepted by pandas, for example one of {min, max, sum}.","Aggregates the attribute according to the given strategy. For example, if sum, then all values within each cluster are summed to represent the new bus." diff --git a/doc/configtables/costs.csv b/doc/configtables/costs.csv index ed2d56e4..c0870ddd 100644 --- a/doc/configtables/costs.csv +++ b/doc/configtables/costs.csv @@ -1,8 +1,9 @@ ,Unit,Values,Description -year,--,"YYYY; e.g. '2030'","Year for which to retrieve cost assumptions of ``data/costs.csv``." -discountrate,--,float,"Default discount rate if not specified for a technology in ``data/costs.csv``." -USD2013_to_EUR2013,--,float,"Exchange rate from USD :math:`_{2013}` to EUR :math:`_{2013}` from `ECB `_" -capital_cost,EUR/MW,"Keys should be in the 'technology' column of ``data/costs.csv``. Values can be any float.","For the given technologies, assumptions about their capital investment costs are set to the corresponding value. Optional; overwrites cost assumptions from ``data/costs.csv``." -marginal_cost,EUR/MWh,"Keys should be in the 'technology' column of ``data/costs.csv``. Values can be any float.","For the given technologies, assumptions about their marginal operating costs are set to the corresponding value. Optional; overwrites cost assumptions from ``data/costs.csv``." +year,--,"YYYY; e.g. '2030'","Year for which to retrieve cost assumptions of ``resources/costs.csv``." +version,--,"vX.X.X; e.g. 'v0.1.0'","Version of ``technology-data`` repository to use." +rooftop_share,--,float,"Share of rooftop PV when calculating capital cost of solar (joint rooftop and utility-scale PV)."
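The generator ``aggregation_strategies`` documented in the clustering table above map one-to-one onto a ``pandas`` groupby aggregation. A minimal sketch with hypothetical data, not the actual clustering implementation:

.. code:: python

    import pandas as pd

    # Hypothetical generators already assigned to clustered buses.
    generators = pd.DataFrame(
        {
            "bus": ["BE0", "BE0", "BE1"],
            "p_nom_max": [100.0, 50.0, 80.0],
            "efficiency": [0.40, 0.60, 0.50],
            "committable": [True, False, False],
        }
    )

    # Mirrors e.g. p_nom_max: sum, efficiency: mean, committable: any.
    strategies = {"p_nom_max": "sum", "efficiency": "mean", "committable": "any"}
    clustered = generators.groupby("bus").agg(strategies)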
+fill_values,--,float,"Default values if not specified for a technology in ``resources/costs.csv``." +capital_cost,EUR/MW,"Keys should be in the 'technology' column of ``resources/costs.csv``. Values can be any float.","For the given technologies, assumptions about their capital investment costs are set to the corresponding value. Optional; overwrites cost assumptions from ``resources/costs.csv``." +marginal_cost,EUR/MWh,"Keys should be in the 'technology' column of ``resources/costs.csv``. Values can be any float.","For the given technologies, assumptions about their marginal operating costs are set to the corresponding value. Optional; overwrites cost assumptions from ``resources/costs.csv``." emission_prices,,,"Specify exogenous prices for emission types listed in ``network.carriers`` to marginal costs." -- co2,EUR/t,float,"Exogenous price of carbon-dioxide added to the marginal costs of fossil-fuelled generators according to their carbon intensity. Added through the keyword ``Ep`` in the ``{opts}`` wildcard only in the rule :mod:`prepare_network``." diff --git a/doc/configtables/electricity.csv b/doc/configtables/electricity.csv index aef35350..9cf23ebf 100644 --- a/doc/configtables/electricity.csv +++ b/doc/configtables/electricity.csv @@ -1,19 +1,29 @@ ,Unit,Values,Description -voltages,kV,"Any subset of {220., 300., 380.}",Voltage levels to consider when +voltages,kV,"Any subset of {220., 300., 380.}",Voltage levels to consider +gaslimit,MWhth,"float or false",Global gas usage limit co2limit,:math:`t_{CO_2-eq}/a`,float,Cap on total annual system carbon dioxide emissions co2base,:math:`t_{CO_2-eq}/a`,float,Reference value of total annual system carbon dioxide emissions if relative emission reduction target is specified in ``{opts}`` wildcard. agg_p_nom_limits,file,path,Reference to ``.csv`` file specifying per carrier generator nominal capacity constraints for individual countries if ``'CCL'`` is in ``{opts}`` wildcard. Defaults to ``data/agg_p_nom_minmax.csv``. -extendable_carriers,,, --- Generator,--,"Any subset of {'OCGT','CCGT'}",Places extendable conventional power plants (OCGT and/or CCGT) where gas power plants are located today without capacity limits. --- StorageUnit,--,"Any subset of {'battery','H2'}",Adds extendable storage units (battery and/or hydrogen) at every node/bus after clustering without capacity limits and with zero initial capacity. --- Store,--,"Any subset of {'battery','H2'}",Adds extendable storage units (battery and/or hydrogen) at every node/bus after clustering without capacity limits and with zero initial capacity. --- Link,--,Any subset of {'H2 pipeline'},Adds extendable links (H2 pipelines only) at every connection where there are lines or HVDC links without capacity limits and with zero initial capacity. Hydrogen pipelines require hydrogen storage to be modelled as ``Store``. +operational_reserve,,,"Settings for reserve requirements following `GenX `_" +-- activate,bool,"true or false","Whether to take operational reserve requirements into account during optimisation" +-- epsilon_load,--,float,share of total load +-- epsilon_vres,--,float,share of total renewable supply +-- contingency,MW,float,fixed reserve capacity max_hours,,, -- battery,h,float,Maximum state of charge capacity of the battery in terms of hours at full output capacity ``p_nom``. Cf. `PyPSA documentation `_. -- H2,h,float,Maximum state of charge capacity of the hydrogen storage in terms of hours at full output capacity ``p_nom``. Cf. `PyPSA documentation `_.
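Read together, the ``operational_reserve`` rows above imply a reserve requirement of roughly the following form (a paraphrase of the GenX-style formulation the table refers to; the exact implementation may differ):

.. math::

    R_t = \varepsilon_{load} \cdot L_t + \varepsilon_{vres} \cdot \bar{P}^{vres}_t + C

where :math:`L_t` is the total load at time :math:`t`, :math:`\bar{P}^{vres}_t` the potential renewable feed-in, and :math:`C` the fixed contingency reserve capacity in MW.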
+extendable_carriers,,, +-- Generator,--,"Any extendable carrier","Defines existing or non-existing conventional and renewable power plants to be extendable during the optimization. Conventional generators can only be built/expanded where already existent today. If a listed conventional carrier is not included in the ``conventional_carriers`` list, the lower limit of the capacity expansion is set to 0." +-- StorageUnit,--,"Any subset of {'battery','H2'}",Adds extendable storage units (battery and/or hydrogen) at every node/bus after clustering without capacity limits and with zero initial capacity. +-- Store,--,"Any subset of {'battery','H2'}",Adds extendable storage units (battery and/or hydrogen) at every node/bus after clustering without capacity limits and with zero initial capacity. +-- Link,--,Any subset of {'H2 pipeline'},Adds extendable links (H2 pipelines only) at every connection where there are lines or HVDC links without capacity limits and with zero initial capacity. Hydrogen pipelines require hydrogen storage to be modelled as ``Store``. powerplants_filter,--,"use `pandas.query `_ strings here, e.g. Country not in ['Germany']",Filter query for the default powerplant database. custom_powerplants,--,"use `pandas.query `_ strings here, e.g. Country in ['Germany']",Filter query for the custom powerplant database. -conventional_carriers,--,"Any subset of {nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass}",List of conventional power plants to include in the model from ``resources/powerplants.csv``. -renewable_capacities_from_OPSD,,"[solar, onwind, offwind]",List of carriers (offwind-ac and offwind-dc are included in offwind) whose capacities 'p_nom' are aligned to the `OPSD renewable power plant list `_ -estimate_renewable_capacities_from_capacitiy_stats,,, -"-- Fueltype [ppm], e.g. Wind",,"list of fueltypes strings in PyPSA-Eur, e.g. [onwind, offwind-ac, offwind-dc]",converts ppm Fueltype to PyPSA-EUR Fueltype +conventional_carriers,--,"Any subset of {nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass}","List of conventional power plants to include in the model from ``resources/powerplants.csv``. If an included carrier is also listed in `extendable_carriers`, the capacity is taken as a lower bound." +renewable_carriers,--,"Any subset of {solar, onwind, offwind-ac, offwind-dc, hydro}",List of renewable generators to include in the model. +estimate_renewable_capacities,,, +-- enable,,bool,"Activate routine to estimate renewable capacities" +-- from_opsd,--,bool,"Add capacities from OPSD data" +-- year,--,int,"Renewable capacities are based on existing capacities reported by IRENA for the specified year" +-- expansion_limit,--,float or false,"Artificially limit maximum capacities to factor * (IRENA capacities), i.e. ``expansion_limit: 1.1`` limits maximum capacities to 110% of the IRENA capacities for the chosen year; ``false``: use renewable potentials estimated by the workflow" +-- technology_mapping,,,"Mapping between powerplantmatching and PyPSA-Eur technology names" diff --git a/doc/configtables/load.csv b/doc/configtables/load.csv index 66f3b994..d6cfa686 100644 --- a/doc/configtables/load.csv +++ b/doc/configtables/load.csv @@ -1,6 +1,6 @@ ,Unit,Values,Description url,--,string,"Link to open power system data time series data." -power_statistics,bool,"{true, false}",Whether to load the electricity consumption data of the ENTSOE power statistics (only for files from 2019 and before) or from the ENTSOE transparency data (only has load data from 2015 onwards).
+power_statistics,bool,"{true, false}",Whether to load the electricity consumption data of the ENTSOE power statistics (only for files from 2019 and before) or from the ENTSOE transparency data (only has load data from 2015 onwards). interpolate_limit,hours,integer,"Maximum gap size (consecutive nans) which interpolated linearly." time_shift_for_large_gaps,string,string,"Periods which are used for copying time-slices in order to fill large gaps of nans. Have to be valid ``pandas`` period strings." manual_adjustments,bool,"{true, false}","Whether to adjust the load data manually according to the function in :func:`manual_adjustment`." diff --git a/doc/configtables/offwind-ac.csv b/doc/configtables/offwind-ac.csv index e5bbc847..d478011d 100644 --- a/doc/configtables/offwind-ac.csv +++ b/doc/configtables/offwind-ac.csv @@ -6,8 +6,8 @@ resource,,, capacity_per_sqkm,:math:`MW/km^2`,float,"Allowable density of wind turbine placement." corine,--,"Any *realistic* subset of the `CORINE Land Cover code list `_","Specifies areas according to CORINE Land Cover codes which are generally eligible for AC-connected offshore wind turbine placement." natura,bool,"{true, false}","Switch to exclude `Natura 2000 `_ natural protection areas. Area is excluded if ``true``." +ship_threshold,--,float,"Ship density threshold from which areas are excluded." max_depth,m,float,"Maximum sea water depth at which wind turbines can be build. Maritime areas with deeper waters are excluded in the process of calculating the AC-connected offshore wind potential." min_shore_distance,m,float,"Minimum distance to the shore below which wind turbines cannot be build. Such areas close to the shore are excluded in the process of calculating the AC-connected offshore wind potential." potential,--,"One of {'simple', 'conservative'}","Method to compute the maximal installable potential for a node; confer :ref:`renewableprofiles`" clip_p_max_pu,p.u.,float,"To avoid too small values in the renewables` per-unit availability time series values below this threshold are set to zero." -keep_all_available_areas,bool,"{'true', 'false'}","Use all availabe weather cells for renewable profile and potential generation. The default ignores weather cells where only less than 1 MW can be installed." diff --git a/doc/configtables/offwind-dc.csv b/doc/configtables/offwind-dc.csv index 06b82ba0..67bda165 100644 --- a/doc/configtables/offwind-dc.csv +++ b/doc/configtables/offwind-dc.csv @@ -6,8 +6,8 @@ resource,,, capacity_per_sqkm,:math:`MW/km^2`,float,"Allowable density of wind turbine placement." corine,--,"Any *realistic* subset of the `CORINE Land Cover code list `_","Specifies areas according to CORINE Land Cover codes which are generally eligible for AC-connected offshore wind turbine placement." natura,bool,"{true, false}","Switch to exclude `Natura 2000 `_ natural protection areas. Area is excluded if ``true``." +ship_threshold,--,float,"Ship density threshold from which areas are excluded." max_depth,m,float,"Maximum sea water depth at which wind turbines can be build. Maritime areas with deeper waters are excluded in the process of calculating the AC-connected offshore wind potential." min_shore_distance,m,float,"Minimum distance to the shore below which wind turbines cannot be build. Such areas close to the shore are excluded in the process of calculating the AC-connected offshore wind potential." 
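The exclusion settings in these offshore tables (``corine``, ``natura``, ``ship_threshold``) are evaluated on a common raster grid whose cell size is set by the new ``excluder_resolution`` option in the config. A rough sketch of how such exclusions can be assembled with atlite's ``ExclusionContainer`` (assuming atlite v0.2 or later; the file paths are hypothetical and this is not the exact PyPSA-Eur code):

.. code:: python

    from atlite.gis import ExclusionContainer

    # Raster resolution in metres, cf. excluder_resolution: 200.
    excluder = ExclusionContainer(crs=3035, res=200)

    # Keep only CORINE classes eligible for offshore wind (codes 44, 255) ...
    excluder.add_raster("data/corine.tif", codes=[44, 255], invert=True, crs=3035)

    # ... and exclude Natura 2000 protection areas (natura: true).
    excluder.add_raster("resources/natura.tiff", nodata=0, allow_no_overlap=True)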
potential,--,"One of {'simple', 'conservative'}","Method to compute the maximal installable potential for a node; confer :ref:`renewableprofiles`" clip_p_max_pu,p.u.,float,"To avoid too small values in the renewables` per-unit availability time series values below this threshold are set to zero." -keep_all_available_areas,bool,"{'true', 'false'}","Use all availabe weather cells for renewable profile and potential generation. The default ignores weather cells where only less than 1 MW can be installed." diff --git a/doc/configtables/onwind.csv b/doc/configtables/onwind.csv index 31884183..c5f9da20 100644 --- a/doc/configtables/onwind.csv +++ b/doc/configtables/onwind.csv @@ -11,4 +11,3 @@ corine,,, natura,bool,"{true, false}","Switch to exclude `Natura 2000 `_ natural protection areas. Area is excluded if ``true``." potential,--,"One of {'simple', 'conservative'}","Method to compute the maximal installable potential for a node; confer :ref:`renewableprofiles`" clip_p_max_pu,p.u.,float,"To avoid too small values in the renewables` per-unit availability time series values below this threshold are set to zero." -keep_all_available_areas,bool,"{'true', 'false'}","Use all availabe weather cells for renewable profile and potential generation. The default ignores weather cells where only less than 1 MW can be installed." diff --git a/doc/configtables/opts.csv b/doc/configtables/opts.csv index 918d0d17..b468be6e 100644 --- a/doc/configtables/opts.csv +++ b/doc/configtables/opts.csv @@ -8,4 +8,5 @@ Trigger, Description, Definition, Status ``ATK``, "Require each node to be autarkic. Example: ``ATK`` removes all lines and links. ``ATKc`` removes all cross-border lines and links.", ``prepare_network``, In active use ``BAU``, Add a per-``carrier`` minimal overall capacity; i.e. at least ``40GW`` of ``OCGT`` in Europe; configured in ``electricity: BAU_mincapacities``, ``solve_network``: `add_opts_constraints() `__, Untested ``SAFE``, Add a capacity reserve margin of a certain fraction above the peak demand to which renewable generators and storage do *not* contribute. Ignores network., ``solve_network`` `add_opts_constraints() `__, Untested -``carrier+{c|p}factor``, "Alter the capital cost (``c``) or installable potential (``p``) of a carrier by a factor. Example: ``solar+c0.5`` reduces the capital cost of solar to 50\% of original values.", ``prepare_network``, In active use +``carrier+{c|p|m}factor``,"Alter the capital cost (``c``), installable potential (``p``) or marginal costs (``m``) of a carrier by a factor. Example: ``solar+c0.5`` reduces the capital cost of solar to 50\% of original values.", ``prepare_network``, In active use +``CH4L``,"Add an overall absolute gas limit. If configured in ``electricity: gaslimit``, it is given in MWh thermal; if a float is appended, the overall gas limit is assumed to be given in TWh thermal (e.g. ``CH4L200`` limits gas dispatch to 200 TWh thermal)", ``prepare_network``: ``add_gaslimit()``, In active use diff --git a/doc/configtables/snapshots.csv b/doc/configtables/snapshots.csv index 4d917f4d..00297498 100644 --- a/doc/configtables/snapshots.csv +++ b/doc/configtables/snapshots.csv @@ -1,4 +1,4 @@ ,Unit,Values,Description start,--,"str or datetime-like; e.g. YYYY-MM-DD","Left bound of date range" end,--,"str or datetime-like; e.g. YYYY-MM-DD","Right bound of date range" -closed,--,"One of {None, ‘left’, ‘right’}","Make the time interval closed to the ``left``, ``right``, or both sides ``None``."
+closed,--,"One of {None, ‘left’, ‘right’}","Make the time interval closed to the ``left``, ``right``, or open on both sides ``None``." diff --git a/doc/configtables/solar.csv b/doc/configtables/solar.csv index 7be39c04..9aa24268 100644 --- a/doc/configtables/solar.csv +++ b/doc/configtables/solar.csv @@ -12,4 +12,3 @@ corine,--,"Any subset of the `CORINE Land Cover code list `_ natural protection areas. Area is excluded if ``true``." potential,--,"One of {'simple', 'conservative'}","Method to compute the maximal installable potential for a node; confer :ref:`renewableprofiles`" clip_p_max_pu,p.u.,float,"To avoid too small values in the renewables` per-unit availability time series values below this threshold are set to zero." -keep_all_available_areas,bool,"{'true', 'false'}","Use all availabe weather cells for renewable profile and potential generation. The default ignores weather cells where only less than 1 MW can be installed." diff --git a/doc/configuration.rst b/doc/configuration.rst index a448f817..4fdb3719 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -28,13 +28,24 @@ Top-level configuration .. _scenario: -``scenario`` -============ +``run`` +======= It is common practice to analyse energy system optimisation models for **multiple scenarios** for a variety of reasons, e.g. assessing their sensitivity towards changing the temporal and/or geographical resolution or investigating how investment changes as more ambitious greenhouse-gas emission reduction targets are applied. +The ``run`` section is used for running and storing scenarios with different configurations which are not covered by :ref:`wildcards`. It determines the path at which resources, networks and results are stored. Therefore, the user can run different configurations within the same directory. If a run with a non-empty name should use cutouts shared across runs, set ``shared_cutouts`` to ``true``. + +.. literalinclude:: ../config.default.yaml + :language: yaml + :start-at: run: + :end-before: scenario: + + +``scenario`` +============ + The ``scenario`` section is an extraordinary section of the config file that is strongly connected to the :ref:`wildcards` and is designed to facilitate running multiple scenarios through a single command @@ -91,9 +102,6 @@ Specifies the temporal range to build an energy system model for as arguments to :widths: 25,7,22,30 :file: configtables/electricity.csv -.. warning:: - Carriers in ``conventional_carriers`` must not also be in ``extendable_carriers``. - .. _atlite_cf: ``atlite`` @@ -174,7 +182,7 @@ Define and specify the ``atlite.Cutout`` used for calculating renewable potentia .. literalinclude:: ../config.default.yaml :language: yaml :start-at: hydro: - :end-before: lines: + :end-before: conventional: .. csv-table:: :header-rows: 1 @@ -183,6 +191,17 @@ Define and specify the ``atlite.Cutout`` used for calculating renewable potentia .. _lines_cf: +``conventional`` +================ + +Define additional generator attributes for conventional carrier types. If a scalar value is given, it is applied to all generators of the carrier. However, if a string starting with ``data/`` is given, the value is interpreted as a path to a CSV file with country-specific values. The values are then read in and applied to all generators of the given carrier in the given country.
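For example, the new ``data/nuclear_p_max_pu.csv`` added earlier in this diff provides such country-specific values. A hypothetical sketch of the look-up described here, not the actual implementation, and with a made-up stand-in for the generators dataframe mentioned below:

.. code:: python

    import pandas as pd

    # Made-up example generator table with carrier and country columns.
    generators = pd.DataFrame(
        {"carrier": ["nuclear", "nuclear", "OCGT"], "country": ["FR", "BE", "BE"]}
    )

    # e.g. conventional: nuclear: p_max_pu: data/nuclear_p_max_pu.csv
    value = "data/nuclear_p_max_pu.csv"

    idx = generators.carrier == "nuclear"
    if isinstance(value, str) and value.startswith("data/"):
        per_country = pd.read_csv(value, index_col=0)["factor"]
        generators.loc[idx, "p_max_pu"] = generators.loc[idx, "country"].map(per_country)
    else:
        generators.loc[idx, "p_max_pu"] = value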
Note that the value(s) overwrite the existing values in the corresponding section of the ``generators`` dataframe. + +.. literalinclude:: ../config.default.yaml + :language: yaml + :start-at: conventional: + :end-before: lines: + + ``lines`` ============= @@ -233,8 +252,7 @@ Define and specify the ``atlite.Cutout`` used for calculating renewable potentia .. literalinclude:: ../config.default.yaml :language: yaml - :start-at: load: - :end-before: costs: + :lines: 212-217 .. csv-table:: :header-rows: 1 @@ -249,7 +267,7 @@ Define and specify the ``atlite.Cutout`` used for calculating renewable potentia .. literalinclude:: ../config.default.yaml :language: yaml :start-after: scaling_factor: - :end-before: solving: + :end-before: clustering: .. csv-table:: :header-rows: 1 :file: configtables/costs.csv .. note:: - To change cost assumptions in more detail (i.e. other than ``marginal_cost`` and ``capital_cost``), consider modifying cost assumptions directly in ``data/costs.csv`` as this is not yet supported through the config file. - You can also build multiple different cost databases. Make a renamed copy of ``data/costs.csv`` (e.g. ``data/costs-optimistic.csv``) and set the variable ``COSTS=data/costs-optimistic.csv`` in the ``Snakefile``. + To change cost assumptions in more detail (i.e. other than ``marginal_cost`` and ``capital_cost``), consider modifying cost assumptions directly in ``resources/costs.csv`` as this is not yet supported through the config file. + You can also build multiple different cost databases. Make a renamed copy of ``resources/costs.csv`` (e.g. ``data/costs-optimistic.csv``) and set the variable ``COSTS=data/costs-optimistic.csv`` in the ``Snakefile``. + + +.. _clustering_cf: + +``clustering`` +============== + +.. literalinclude:: ../config.default.yaml + :language: yaml + :start-after: co2: + :end-before: solving: + +.. csv-table:: + :header-rows: 1 + :widths: 25,7,22,30 + :file: configtables/clustering.csv + .. _solving_cf: diff --git a/doc/contributing.rst b/doc/contributing.rst index d57f1212..57959124 100644 --- a/doc/contributing.rst +++ b/doc/contributing.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -16,10 +16,20 @@ to our `GitHub repository `_. * If you start working on a feature in the code, let us know by opening an issue or a draft pull request. This helps all of us to keep an overview on what is being done and helps to avoid a situation where we are doing the same work twice in parallel. -* We encourage you to use the `PEP 8 coding style `_. + +For linting, formatting and checking your code contributions +against our guidelines (e.g. we use `Black `_ as code style), +use `pre-commit `_: + +1. Installation: ``conda install -c conda-forge pre-commit`` or ``pip install pre-commit`` +2. Usage: + * To automatically activate ``pre-commit`` on every ``git commit``: Run ``pre-commit install`` + * To manually run it: ``pre-commit run --all`` + +Note that installing `pre-commit` locally is not strictly necessary. If you create a Pull Request, the `pre-commit CI` will be triggered automatically and take care of the checks. For all code contributions we follow the four eyes principle (two person principle), i.e. all suggested code -including our own are reviewed by a second person before they are incoporated into our repository.
+including our own are reviewed by a second person before they are incorporated into our repository. If you are unfamiliar with pull requests, the GitHub help pages have a nice `guide `_. diff --git a/doc/costs.rst b/doc/costs.rst index 5ced95dc..10b91e06 100644 --- a/doc/costs.rst +++ b/doc/costs.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -7,7 +7,13 @@ Cost Assumptions ################## -The database of cost assumptions is stored in ``data/costs.csv``. +The database of cost assumptions is retrieved from the repository +`PyPSA/technology-data `_ and then +saved to ``resources/costs.csv``. Cost assumptions of previous PyPSA-Eur +versions can be restored by setting ``COSTS="data/costs.csv"`` in the +``Snakefile``. + +The ``config.yaml`` provides options to choose a reference year (``costs: year:``) and a specific version of the repository (``costs: version:``). It includes cost assumptions for all included technologies for specific years from various sources, namely for @@ -30,24 +36,12 @@ with a discount rate of :math:`r` over the economic lifetime :math:`n` using the Based on the parameters above the ``marginal_cost`` and ``capital_cost`` of the system components are calculated. -.. note:: - - Another great resource for cost assumptions is the `cost database from the Danish Energy Agency `_. Modifying Cost Assumptions ========================== Some cost assumptions (e.g. marginal cost and capital cost) can be directly overwritten in the ``config.yaml`` (cf. Section :ref:`costs_cf` in :ref:`config`). -To change cost assumptions in more detail, modify cost assumptions directly in ``data/costs.csv`` as this is not yet supported through the config file. +To change cost assumptions in more detail, modify cost assumptions directly in ``resources/costs.csv`` as this is not yet supported through the config file. -You can also build multiple different cost databases. Make a renamed copy of ``data/costs.csv`` (e.g. ``data/costs-optimistic.csv``) and set the variable ``COSTS=data/costs-optimistic.csv`` in the ``Snakefile``. - - -Default Cost Assumptions -======================== - -.. csv-table:: - :header-rows: 1 - :widths: 10,3,5,4,6,8 - :file: ../data/costs.csv +You can also build multiple different cost databases. Make a renamed copy of ``resources/costs.csv`` (e.g. ``data/costs-optimistic.csv``) and set the variable ``COSTS=data/costs-optimistic.csv`` in the ``Snakefile``. diff --git a/doc/img/elec.png b/doc/img/elec.png index 52d4f772..2f688671 100644 Binary files a/doc/img/elec.png and b/doc/img/elec.png differ diff --git a/doc/index.rst b/doc/index.rst index 5ee1db5b..a5d6092d 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -22,10 +22,6 @@ PyPSA-Eur: An Open Optimisation Model of the European Transmission System .. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.3520874.svg :target: https://doi.org/10.5281/zenodo.3520874 -.. image:: https://badges.gitter.im/PyPSA/community.svg - :target: https://gitter.im/PyPSA/community?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge - :alt: Chat on Gitter - .. 
image:: https://img.shields.io/badge/snakemake-≥5.0.0-brightgreen.svg?style=flat :target: https://snakemake.readthedocs.io :alt: Snakemake @@ -41,7 +37,7 @@ It contains alternating current lines at and above 220 kV voltage level and all The model is suitable both for operational studies and generation and transmission expansion planning studies. The continental scope and highly resolved spatial scale enables a proper description of the long-range smoothing effects for renewable power generation and their varying resource availability. -.. image:: img/base.png +.. image:: img/elec.png :width: 50% :align: center @@ -199,7 +195,7 @@ The included ``.nc`` files are PyPSA network files which can be imported with Py import pypsa - filename = "elec_s_1024_ec.nc" # example + filename = "elec_s_1024_ec.nc" # example n = pypsa.Network(filename) Licence diff --git a/doc/installation.rst b/doc/installation.rst index 16fdf766..da1497e6 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -71,6 +71,7 @@ PyPSA is known to work with the free software - `Ipopt `_ - `Cbc `_ - `GLPK `_ (`WinGLKP `_) +- `HiGHS `_ and the non-free, commercial software (for some of which free academic licenses are available) @@ -102,6 +103,8 @@ It might be the case that you can only retrieve solutions by using a commercial conda activate pypsa-eur conda install -c conda-forge ipopt glpk +.. warning:: + On Windows, new versions of ``ipopt`` have caused problems. Consider downgrading to version 3.11.1. .. _defaultconfig: diff --git a/doc/introduction.rst b/doc/introduction.rst index bc4f267f..8e64b682 100644 --- a/doc/introduction.rst +++ b/doc/introduction.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 diff --git a/doc/limitations.rst b/doc/limitations.rst index 3b6c03d7..99c22aed 100644 --- a/doc/limitations.rst +++ b/doc/limitations.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 diff --git a/doc/make.bat b/doc/make.bat index 35dcecc2..2d7118ab 100644 --- a/doc/make.bat +++ b/doc/make.bat @@ -1,4 +1,4 @@ -REM SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors +REM SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors REM SPDX-License-Identifier: MIT @ECHO OFF diff --git a/doc/plotting.rst b/doc/plotting.rst index 6b0ce392..e31cb68f 100644 --- a/doc/plotting.rst +++ b/doc/plotting.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 diff --git a/doc/preparation.rst b/doc/preparation.rst index dba5e981..74e53b05 100644 --- a/doc/preparation.rst +++ b/doc/preparation.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 diff --git a/doc/preparation/add_electricity.rst b/doc/preparation/add_electricity.rst index 0f3d431f..cec1228c 100644 --- a/doc/preparation/add_electricity.rst +++ b/doc/preparation/add_electricity.rst @@ -1,5 +1,5 @@ .. 
- SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 diff --git a/doc/preparation/base_network.rst b/doc/preparation/base_network.rst index 1afc4e05..85e19707 100644 --- a/doc/preparation/base_network.rst +++ b/doc/preparation/base_network.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 diff --git a/doc/preparation/build_bus_regions.rst b/doc/preparation/build_bus_regions.rst index 16aab725..7bb761e8 100644 --- a/doc/preparation/build_bus_regions.rst +++ b/doc/preparation/build_bus_regions.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 diff --git a/doc/preparation/build_cutout.rst b/doc/preparation/build_cutout.rst index da2c04d1..2e6cb4a3 100644 --- a/doc/preparation/build_cutout.rst +++ b/doc/preparation/build_cutout.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 diff --git a/doc/preparation/build_hydro_profile.rst b/doc/preparation/build_hydro_profile.rst index 77b12915..b940c39f 100644 --- a/doc/preparation/build_hydro_profile.rst +++ b/doc/preparation/build_hydro_profile.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 diff --git a/doc/preparation/build_load_data.rst b/doc/preparation/build_load_data.rst index 03535981..3a42410e 100644 --- a/doc/preparation/build_load_data.rst +++ b/doc/preparation/build_load_data.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2020-2021 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2020-2022 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 diff --git a/doc/preparation/build_natura_raster.rst b/doc/preparation/build_natura_raster.rst index e3ec4364..b60be2f4 100644 --- a/doc/preparation/build_natura_raster.rst +++ b/doc/preparation/build_natura_raster.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 diff --git a/doc/preparation/build_powerplants.rst b/doc/preparation/build_powerplants.rst index 19cce03b..e3d5dcef 100644 --- a/doc/preparation/build_powerplants.rst +++ b/doc/preparation/build_powerplants.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 diff --git a/doc/preparation/build_renewable_profiles.rst b/doc/preparation/build_renewable_profiles.rst index 27e61583..82ac1312 100644 --- a/doc/preparation/build_renewable_profiles.rst +++ b/doc/preparation/build_renewable_profiles.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 diff --git a/doc/preparation/build_shapes.rst b/doc/preparation/build_shapes.rst index 6bed0109..fc142879 100644 --- a/doc/preparation/build_shapes.rst +++ b/doc/preparation/build_shapes.rst @@ -1,5 +1,5 @@ .. 
- SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 diff --git a/doc/preparation/prepare_links_p_nom.rst b/doc/preparation/prepare_links_p_nom.rst index 7ae9c3b4..78e7324d 100644 --- a/doc/preparation/prepare_links_p_nom.rst +++ b/doc/preparation/prepare_links_p_nom.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 diff --git a/doc/preparation/retrieve.rst b/doc/preparation/retrieve.rst index 81f279de..bdd97c80 100644 --- a/doc/preparation/retrieve.rst +++ b/doc/preparation/retrieve.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -30,7 +30,7 @@ The :ref:`tutorial` uses a smaller cutout than required for the full model (30 M .. note:: To download cutouts yourself from the `ECMWF ERA5 `_ you need to `set up the CDS API `_. - + **Relevant Settings** diff --git a/doc/release_notes.rst b/doc/release_notes.rst index ec8009d8..b1b56416 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2021 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -7,6 +7,222 @@ Release Notes ########################################## +Upcoming Release +================ + +* Carriers of generators can now be excluded from aggregation in :mod:`cluster_network` and :mod:`simplify_network`. + +* A bugfix in the reserve constraint will increase demand-related reserve requirements. + +PyPSA-Eur 0.6.1 (20th September 2022) +===================================== + +* Individual commits are now tested against pre-commit hooks. This includes + black style formatting, sorting of package imports, Snakefile formatting and + others. Installation instructions for ``pre-commit`` can be found `here + `_. + +* Pre-commit CI is now part of the repository's CI. + +* The software now supports running the workflow with different settings within + the same directory. A new config section ``run`` was created that specifies + under which scenario ``name`` the created resources, networks and results + should be stored. If ``name`` is not specified, the workflow uses the default + paths. The entry ``shared_cutouts`` specifies whether the run should use + cutouts from the default root directory or use run-specific cutouts. + +* The heuristic distribution of today's renewable capacity installations is now + enabled by default. + +* The marginal costs of conventional generators now take the plant-specific + efficiency into account where available (see the sketch below). + +PyPSA-Eur 0.6.0 (10th September 2022) +===================================== + +* Functionality to consider shipping routes when calculating the available area + for offshore technologies was added. Data for the shipping density comes from + the `Global Shipping Traffic Density dataset + `_. + +* When transforming all transmission lines to a unified voltage level of 380 kV, + the workflow now preserves the transmission capacity rather than electrical + impedance and reactance. + +* Memory resources are now specified for all rules. + +* Filtering of power plant data was adjusted to new versions of + ``powerplantmatching``. + +* The resolution of land exclusion calculation is now a configurable option. See setting ``excluder_resolution``.
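+
+As a rough sketch of the efficiency-aware marginal costs mentioned above (toy
+numbers and a hypothetical fallback; the actual calculation in the workflow may
+differ in detail):
+
+.. code:: python
+
+    import pandas as pd
+
+    fuel_cost = 20.0  # EUR/MWh_th, toy value
+    vom = 3.0  # EUR/MWh_el, toy value
+    default_efficiency = 0.39
+
+    # plant-specific efficiencies where reported, NaN otherwise
+    efficiency = pd.Series({"plant_a": 0.55, "plant_b": float("nan")})
+
+    # fall back to the carrier default where no unit-specific value exists
+    marginal_cost = vom + fuel_cost / efficiency.fillna(default_efficiency)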
+ + +PyPSA-Eur 0.5.0 (27th July 2022) +===================================== + +**New Features** + +* New network topology extracted from the ENTSO-E interactive map. + +* Added existing renewable capacities for all countries based on IRENA + statistics (IRENASTAT) using new ``powerplantmatching`` version: + * The corresponding ``config`` entries changed, cf. ``config.default.yaml``: + * old: ``estimate_renewable_capacities_from_capacity_stats`` + * new: ``estimate_renewable_capacities`` + * The estimation is enabled by setting the subkey ``enable`` to ``True``. + * The reference year for capacities can be configured (default: + ``2020``). + * The list of renewables provided by the OPSD database can be used as a basis, + using the tag ``from_opsd: True``. This adds the renewables from the + database and fills up the missing capacities with the heuristic + distribution. + * A uniform expansion limit of renewable build-up based on existing capacities + can be configured using the ``expansion_limit`` option (default: ``false``; + limited to determined renewable potentials). + * Distribution of country-level capacities proportional to maximum annual + energy yield for each bus region. + * The config key ``renewable_capacities_from_OPSD`` is deprecated and was moved + under the section ``estimate_renewable_capacities``. To enable it, set + ``from_opsd`` to ``True``. + +* Add operational reserve margin constraint analogous to `GenX implementation + `_. Can be activated + with config setting ``electricity: operational_reserve:``. + +* Implement country-specific Energy Availability Factors (EAFs) for nuclear + power plants based on IAEA 2018-2020 reported country averages. These are + specified in ``data/nuclear_p_max_pu.csv`` and translate to static ``p_max_pu`` + values. + +* Add a function to add a global constraint on the use of gas in :mod:`prepare_network`. + This can be activated by including the keyword ``CH4L`` in the ``{opts}`` + wildcard which enforces the limit set in ``electricity: gaslimit:`` given in + MWh thermal. Alternatively, it is possible to append a number in the ``{opts}`` + wildcard, e.g. ``CH4L200`` which limits the gas use to 200 TWh thermal. + +* Add option to alter marginal costs of a carrier through ``{opts}`` wildcard: + ``+m``, e.g. ``gas+m2.5``, will multiply the default marginal + cost for gas by a factor of 2.5. + +* Hierarchical clustering was introduced. The distance metric is calculated from + renewable potentials on hourly (feature entry ends with ``-time``) or annual + (feature entry in config ends with ``-cap``) values. + +* Greedy modularity clustering was introduced. The distance metric is based on electrical distance taking into account the impedance of all transmission lines of the network. + +* Techno-economic parameters of technologies (e.g. costs and efficiencies) will + now be retrieved from a separate repository `PyPSA/technology-data + `_ that collects assumptions from a + variety of sources. It is activated by default with ``enable: + retrieve_cost_data: true`` and controlled with ``costs: year:`` and ``costs: + version:``. The location of this data changed from ``data/costs.csv`` to + ``resources/costs.csv`` [`#184 + `_]. + +* A new section ``conventional`` was added to the config file. This section + contains configurations for conventional carriers. + +* Add configuration option to implement arbitrary generator attributes for + conventional generation technologies.
+ +* Add option to set CO2 emission prices through the ``{opts}`` wildcard: ``Ep``; + e.g. ``Ep180`` sets the CO2 price to 180 EUR/tCO2. + +**Changes** + +* Add an efficiency factor of 88.55% to offshore wind capacity factors as a + proxy for wake losses. More rigorous modelling is `planned + `_ [`#277 + `_]. + +* Following discussion in `#285 + `_ we have disabled the + correction factor for solar PV capacity factors by default while satellite + data is used. A correction factor of 0.854337 is recommended if reanalysis + data like ERA5 is used. + +* The default deployment density of AC- and DC-connected offshore wind capacity + is reduced from 3 MW/sqkm to a more conservative estimate of 2 MW/sqkm [`#280 + `_]. + +* The inclusion of renewable carriers is now specified in the config entry + ``renewable_carriers``. Before this was done by commenting/uncommenting + sub-sections in the ``renewable`` config section. + +* Now, all carriers that should be extendable have to be listed in the config + entry ``extendable_carriers``. Before, renewable carriers were always set to + be extendable. For backwards compatibility, the workflow is still looking at + the listed carriers under the ``renewable`` key. In the future, all of them + have to be listed under ``extendable_carriers``. + +* It is now possible to set conventional power plants as extendable by adding + them to the list of extendable ``Generator`` carriers in the config. + +* Listing conventional carriers in ``extendable_carriers`` but not in + ``conventional_carriers`` sets the corresponding conventional power plants as + extendable without a lower capacity bound of today's capacities. + +* Now, conventional carriers have an assigned capital cost by default. + +* The ``build_year`` and ``lifetime`` columns are now defined for conventional + power plants. + +* Use updated SARAH-2 and ERA5 cutouts with slightly wider scope to the east and + additional variables. + +* Resource definitions for memory usage now follow `Snakemake standard resource + definition + `_ + ``mem_mb`` rather than ``mem``. + +* Power plants that have been shut down by 2021 are filtered out. + +* Updated historical `EIA hydro generation data `_. + +* Network building is made deterministic by supplying a fixed random state to + network clustering routines. + +* Clustering strategies for generator and bus attributes can now be specified directly in the ``config.yaml``. + +* Iterative solving with impedance updates is skipped if there are no expandable + lines. + +* The unused argument ``simple_hvdc_costs`` in :mod:`add_electricity` was + removed. + +* Switch from Germany to Belgium for continuous integration and tutorial to save + resources. + +* It is now possible to skip the progressbar for land eligibility calculations for additional speedup. + +**Bugs and Compatibility** + +* Fix CRS bug: change CRS 4236 to 4326. + +* ``powerplantmatching>=0.5.1`` is now required for ``IRENASTATS``. + +* Update rasterio version to correctly calculate exclusion raster. + +* It is now possible to run the workflow with only landlocked countries. + +* Bugfixes for manual load adjustments across years. + +* Enable parallel computing with new dask version. + +* Restore compatibility of ``mock_snakemake`` with latest Snakemake versions. + +* Script ``build_bus_regions``: move the Voronoi partition from ``vresutils`` into the script. + +* Script ``add_electricity``: remove ``vresutils.costdata.annuity`` dependency. + +* Fix the ``plot_network`` snakemake rule. + +* Compatibility with pandas 1.4. Address deprecations (see the example below).
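+
+  One representative pandas 1.4 deprecation (an illustrative example, not
+  necessarily the exact change made in this codebase) is ``DataFrame.append``,
+  which is superseded by ``pd.concat``:
+
+  .. code:: python
+
+      import pandas as pd
+
+      a = pd.DataFrame({"x": [1]})
+      b = pd.DataFrame({"x": [2]})
+
+      # deprecated since pandas 1.4: a.append(b)
+      combined = pd.concat([a, b], ignore_index=True)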
+ +* Restore Windows compatibility by using ``shutil.move`` rather than ``mv``. + + Synchronisation Release - Ukraine and Moldova (17th March 2022) =============================================================== @@ -42,59 +258,6 @@ This release is not on the ``master`` branch. It can be used with git checkout synchronisation-release -On March 16, 2022, the transmission networks of Ukraine and Moldova have -successfully been `synchronised with the continental European grid `_. We have taken -this as an opportunity to add the power systems of Ukraine and Moldova to -PyPSA-Eur. This includes: - -.. image:: img/synchronisation.png - :width: 500 - -* the transmission network topology from the `ENTSO-E interactive map `_. - -* existing power plants (incl. nuclear, coal, gas and hydro) from the `powerplantmatching `_ tool - -* country-level load time series from ENTSO-E through the `OPSD platform `_, which are then distributed heuristically to substations by GDP and population density. - -* wind and solar profiles based on ERA5 and SARAH-2 weather data - -* hydro profiles based on historical `EIA generation data `_ - -* a simplified calculation of wind and solar potentials based on the `Copernicus Land Cover dataset `_. - -* electrical characteristics of 750 kV transmission lines - -The Crimean power system is currently disconnected from the main Ukrainian grid and, hence, not included. - -This release is not on the ``master`` branch. It can be used with - -.. code-block:: bash - - git clone https://github.com/pypsa/pypsa-eur - git checkout synchronisation-release - - -Upcoming Regular Release -======================== - -* Add an efficiency factor of 88.55% to offshore wind capacity factors - as a proxy for wake losses. More rigorous modelling is `planned `_ - [`#277 `_]. - -* The default deployment density of AC- and DC-connected offshore wind capacity is reduced from 3 MW/sqkm - to a more conservative estimate of 2 MW/sqkm [`#280 `_]. - -* Following discussion in `#285 `_ we have disabled the - correction factor for solar PV capacity factors by default while satellite data is used. - A correction factor of 0.854337 is recommended if reanalysis data like ERA5 is used. - -* Resource definitions for memory usage now follow [Snakemake standard resource definition](https://snakemake.readthedocs.io/en/stable/snakefiles/rules.html#standard-resources) ```mem_mb`` rather than ``mem``. - -* Network building is made deterministic by supplying a fixed random state to network clustering routines. - -* New network topology extracted from the ENTSO-E interactive map. - - PyPSA-Eur 0.4.0 (22th September 2021) ===================================== @@ -109,7 +272,7 @@ PyPSA-Eur 0.4.0 (22th September 2021) (~factor 2). A lot of the code which calculated the land-use availability is now outsourced and does not rely on ``glaes``, ``geokit`` anymore. This facilitates the environment building and version compatibility of ``gdal``, ``libgdal`` with - other packages [`#224 `_]. + other packages [`#224 `_]. * Implemented changes to ``n.snapshot_weightings`` in new PyPSA version v0.18 (cf. `PyPSA/PyPSA/#227 `_) @@ -132,17 +295,17 @@ PyPSA-Eur 0.4.0 (22th September 2021) used or maintained. * The connection cost of generators in :mod:`simplify_network` are now reported - in ``resources/connection_costs_s{simpl}.csv`` + in ``resources/connection_costs_s{simpl}.csv`` [`#261 `_]. 
* The tutorial cutout was renamed from ``cutouts/europe-2013-era5.nc`` to - ``cutouts/europe-2013-era5-tutorial.nc`` to accomodate tutorial and productive + ``cutouts/be-03-2013-era5.nc`` to accommodate tutorial and productive cutouts side-by-side. * The flag ``keep_all_available_areas`` in the configuration for renewable - potentials was deprecated and now defaults to ``True``. + potentials was deprecated and now defaults to ``True``. -* Update dependencies in ``envs/environment.yaml`` +* Update dependencies in ``envs/environment.yaml`` [`#257 `_] * Continuous integration testing switches to Github Actions from Travis CI @@ -171,7 +334,7 @@ PyPSA-Eur 0.4.0 (22th September 2021) * Value for ``co2base`` in ``config.yaml`` adjusted to 1.487e9 t CO2-eq (from 3.1e9 t CO2-eq). The new value represents emissions related to the electricity sector for EU+UK+Balkan. The old value was too high and used when - the emissions wildcard in ``{opts}`` was used + the emissions wildcard in ``{opts}`` was used [`#233 `_]. * Add escape in :mod:`base_network` if all TYNDP links are already @@ -179,11 +342,11 @@ PyPSA-Eur 0.4.0 (22th September 2021) [`#246 `_]. * In :mod:`solve_operations_network` the optimised capacities are now - fixed for all extendable links, not only HVDC links + fixed for all extendable links, not only HVDC links [`#244 `_]. * The ``focus_weights`` are now also considered when pre-clustering in - the :mod:`simplify_network` rule + the :mod:`simplify_network` rule [`#241 `_]. * in :mod:`build_renewable_profile` where offshore wind profiles could @@ -203,14 +366,13 @@ PyPSA-Eur 0.4.0 (22th September 2021) load shedding generators are only added at the AC buses, excluding buses for H2 and battery stores [`#269 `_]. -* Delete duplicated capital costs at battery discharge link +* Delete duplicated capital costs at battery discharge link [`#240 `_]. * Propagate the solver log file name to the solver. Previously, the PyPSA network solving functions were not told about the solver logfile specified in the Snakemake file [`#247 `_] - PyPSA-Eur 0.3.0 (7th December 2020) =================================== @@ -221,7 +383,7 @@ Using the ``{opts}`` wildcard for scenarios: * An option is introduced which adds constraints such that each country or node produces on average a minimal share of its total consumption itself. For example ``EQ0.5c`` set in the ``{opts}`` wildcard requires each country to produce on average at least 50% of its consumption. Additionally, the option ``ATK`` requires autarky at each node and removes all means of power transmission through lines and links. ``ATKc`` only removes - cross-border transfer capacities. + cross-border transfer capacities. [`#166 `_]. * Added an option to alter the capital cost (``c``) or installable potentials (``p``) of carriers by a factor via ``carrier+{c,p}factor`` in the ``{opts}`` wildcard. @@ -308,7 +470,7 @@ Other: [`#191 `_]. * Raise a warning if ``tech_colors`` in the config are not defined for all carriers - [`#178 `_]. + [`#178 `_]. PyPSA-Eur 0.2.0 (8th June 2020) @@ -330,7 +492,7 @@ PyPSA-Eur 0.2.0 (8th June 2020) * Removed the ``id`` column for custom power plants in ``data/custom_powerplants.csv`` to avoid custom power plants with conflicting ids getting attached to the wrong bus [`#131 `_]. -* Add option ``renewables: {carrier}: keep_all_available_areas:`` to use all availabe weather cells for renewable profile and potential generation. The default ignores weather cells where only less than 1 MW can be installed [`#150 `_]. 
+* Add option ``renewables: {carrier}: keep_all_available_areas:`` to use all available weather cells for renewable profile and potential generation. The default ignores weather cells where only less than 1 MW can be installed [`#150 `_]. * Added a function ``_helpers.load_network()`` which loads a network with overridden components specified in ``snakemake.config['override_components']`` [`#128 `_]. @@ -386,7 +548,7 @@ Release Process ``conda env export -n pypsa-eur -f envs/environment.fixed.yaml --no-builds`` from an up-to-date `pypsa-eur` environment. -* Update version number in ``doc/conf.py`` and ``*config.*.yaml``. +* Update version number in ``doc/conf.py``, ``CITATION.cff`` and ``*config.*.yaml``. * Open, review and merge pull request for branch ``release-v0.x.x``. Make sure to close issues and PRs or the release milestone with it (e.g. closes #X). diff --git a/doc/requirements.txt b/doc/requirements.txt index 2b461718..e048b4af 100644 --- a/doc/requirements.txt +++ b/doc/requirements.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2019-2021 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2019-2022 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 @@ -18,4 +18,4 @@ pyyaml seaborn memory_profiler tables -descartes \ No newline at end of file +descartes diff --git a/doc/simplification.rst b/doc/simplification.rst index 280b1da0..3ac12d4f 100644 --- a/doc/simplification.rst +++ b/doc/simplification.rst @@ -1,7 +1,7 @@ .. - SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 diff --git a/doc/simplification/add_extra_components.rst b/doc/simplification/add_extra_components.rst index c1337b44..8ca30574 100644 --- a/doc/simplification/add_extra_components.rst +++ b/doc/simplification/add_extra_components.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 diff --git a/doc/simplification/cluster_network.rst b/doc/simplification/cluster_network.rst index 52fc5840..80c9068d 100644 --- a/doc/simplification/cluster_network.rst +++ b/doc/simplification/cluster_network.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 diff --git a/doc/simplification/prepare_network.rst b/doc/simplification/prepare_network.rst index d7e22e03..037f8190 100644 --- a/doc/simplification/prepare_network.rst +++ b/doc/simplification/prepare_network.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 diff --git a/doc/simplification/simplify_network.rst b/doc/simplification/simplify_network.rst index 128a697f..9bbd66c6 100644 --- a/doc/simplification/simplify_network.rst +++ b/doc/simplification/simplify_network.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 diff --git a/doc/solving.rst b/doc/solving.rst index 87fdc040..55707d79 100644 --- a/doc/solving.rst +++ b/doc/solving.rst @@ -1,5 +1,5 @@ .. 
- SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 diff --git a/doc/solving/solve_network.rst b/doc/solving/solve_network.rst index 4b0b9861..4fe18368 100644 --- a/doc/solving/solve_network.rst +++ b/doc/solving/solve_network.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 diff --git a/doc/solving/solve_operations_network.rst b/doc/solving/solve_operations_network.rst index b5bbc89f..d51fd5ab 100644 --- a/doc/solving/solve_operations_network.rst +++ b/doc/solving/solve_operations_network.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 diff --git a/doc/tutorial.rst b/doc/tutorial.rst index 17d4e3c1..297c2c10 100644 --- a/doc/tutorial.rst +++ b/doc/tutorial.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -43,51 +43,63 @@ For more information on the data dependencies of PyPSA-Eur, continue reading :re How to customise PyPSA-Eur? =========================== -The model can be adapted to only include selected countries (e.g. Germany) instead of all European countries to limit the spatial scope. +The model can be adapted to only include selected countries (e.g. Belgium) instead of all European countries to limit the spatial scope. .. literalinclude:: ../config.tutorial.yaml :language: yaml - :lines: 20 + :start-at: countries: + :end-before: snapshots: Likewise, the example's temporal scope can be restricted (e.g. to a single month). .. literalinclude:: ../config.tutorial.yaml :language: yaml - :lines: 24-27 + :start-at: snapshots: + :end-before: enable: It is also possible to allow less or more carbon-dioxide emissions. Here, we limit the emissions of Germany 100 Megatonnes per year. .. literalinclude:: ../config.tutorial.yaml :language: yaml - :lines: 38,40 + :start-at: electricity: + :end-before: extendable_carriers: PyPSA-Eur also includes a database of existing conventional powerplants. -We can select which types of powerplants we like to be included with fixed capacities: +We can select which types of powerplants we would like to include: .. literalinclude:: ../config.tutorial.yaml :language: yaml - :lines: 38,54 + :start-at: extendable_carriers: + :end-before: max_hours: To accurately model the temporal and spatial availability of renewables such as wind and solar energy, we rely on historical weather data. It is advisable to adapt the required range of coordinates to the selection of countries. .. literalinclude:: ../config.tutorial.yaml :language: yaml - :lines: 56-63 + :start-at: atlite: + :end-before: renewable: We can also decide which weather data source should be used to calculate potentials and capacity factor time-series for each carrier. For example, we may want to use the ERA-5 dataset for solar and not the default SARAH-2 dataset. .. literalinclude:: ../config.tutorial.yaml :language: yaml - :lines: 65,108-109 + :start-at: be-03-2013-era5: + :end-at: module: + +.. literalinclude:: ../config.tutorial.yaml + :language: yaml + :start-at: solar: + :end-at: cutout: Finally, it is possible to pick a solver. 
For instance, this tutorial uses the open-source solvers CBC and Ipopt and does not rely on the commercial solvers Gurobi or CPLEX (for which free academic licenses are available). .. literalinclude:: ../config.tutorial.yaml :language: yaml - :lines: 171,181-182 + :start-at: solver: + :end-before: plotting: .. note:: @@ -116,21 +128,12 @@ clustered down to 6 buses and every 24 hours aggregated to one snapshot. The com orders ``snakemake`` to run the script ``solve_network`` that produces the solved network and stores it in ``.../pypsa-eur/results/networks`` with the name ``elec_s_6_ec_lcopt_Co2L-24H.nc``: -.. code:: - - rule solve_network: - input: "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc" - output: "results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc" - [...] - script: "scripts/solve_network.py" +.. literalinclude:: ../Snakefile + :start-at: rule solve_network: + :end-before: rule solve_operations_network: .. until https://github.com/snakemake/snakemake/issues/46 closed -.. warning:: - On Windows the previous command may currently cause a ``MissingRuleException`` due to problems with output files in subfolders. - This is an `open issue `_ at `snakemake `_. - Windows users should add the option ``--keep-target-files`` to the command or instead run ``snakemake -j 1 solve_all_networks``. - This triggers a workflow of multiple preceding jobs that depend on each rule's inputs and outputs: .. graphviz:: @@ -218,7 +221,7 @@ A job (here ``simplify_network``) will display its attributes and normally some [] rule simplify_network: - input: networks/elec.nc, data/costs.csv, resources/regions_onshore.geojson, resources/regions_offshore.geojson + input: networks/elec.nc, resources/costs.csv, resources/regions_onshore.geojson, resources/regions_offshore.geojson output: networks/elec_s.nc, resources/regions_onshore_elec_s.geojson, resources/regions_offshore_elec_s.geojson, resources/clustermaps_elec_s.h5 jobid: 3 benchmark: benchmarks/simplify_network/elec_s @@ -247,7 +250,7 @@ Once the whole worktree is finished, it should show state so in the terminal: You will notice that many intermediate stages are saved, namely the outputs of each individual ``snakemake`` rule. -You can produce any output file occuring in the ``Snakefile`` by running +You can produce any output file occurring in the ``Snakefile`` by running .. code:: bash @@ -271,9 +274,8 @@ the wildcards given in ``scenario`` in the configuration file ``config.yaml`` ar .. literalinclude:: ../config.tutorial.yaml :language: yaml - :lines: 14-18 - -In this example we would not only solve a 6-node model of Germany but also a 2-node model. + :start-at: scenario: + :end-before: countries: How to analyse solved networks? =============================== @@ -286,4 +288,4 @@ The solved networks can be analysed just like any other PyPSA network (e.g. in J network = pypsa.Network("results/networks/elec_s_6_ec_lcopt_Co2L-24H.nc") -For inspiration, read the `examples section in the PyPSA documentation `_. +For inspiration, read the `examples section in the PyPSA documentation `_. diff --git a/doc/wildcards.rst b/doc/wildcards.rst index 2290de67..14b71c09 100644 --- a/doc/wildcards.rst +++ b/doc/wildcards.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2022 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -123,7 +123,7 @@ These cutouts will be stored in a folder specified by ``{cutout}``. 
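+For instance, a prepared cutout can be opened and inspected with ``atlite``
+(a small illustrative snippet; the file name matches the tutorial cutout
+mentioned earlier):
+
+.. code:: python
+
+    import atlite
+
+    # open the tutorial cutout produced by the build_cutout rule
+    cutout = atlite.Cutout("cutouts/be-03-2013-era5.nc")
+    print(cutout.prepared_features)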
The ``{technology}`` wildcard ============================= -The ``{technology}`` wildcard specifies for which renewable energy technology to produce availablity time +The ``{technology}`` wildcard specifies for which renewable energy technology to produce availability time series and potentials using the rule :mod:`build_renewable_profiles`. It can take the values ``onwind``, ``offwind-ac``, ``offwind-dc``, and ``solar`` but **not** ``hydro`` (since hydroelectric plant profiles are created by a different rule). @@ -155,4 +155,5 @@ formats depends on the used backend. To query the supported file types on your s .. code:: python import matplotlib.pyplot as plt + plt.gcf().canvas.get_supported_filetypes() diff --git a/envs/environment.fixed.yaml b/envs/environment.fixed.yaml index 3fe3d51a..1c7aeaad 100644 --- a/envs/environment.fixed.yaml +++ b/envs/environment.fixed.yaml @@ -1,311 +1,430 @@ -# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 name: pypsa-eur channels: - - bioconda - - conda-forge - - defaults +- bioconda +- http://conda.anaconda.org/gurobi +- conda-forge +- defaults dependencies: - - _libgcc_mutex=0.1 - - _openmp_mutex=4.5 - - affine=2.3.0 - - alsa-lib=1.2.3 - - amply=0.1.4 - - appdirs=1.4.4 - - atlite=0.2.5 - - attrs=21.2.0 - - backcall=0.2.0 - - backports=1.0 - - backports.functools_lru_cache=1.6.4 - - beautifulsoup4=4.10.0 - - blosc=1.21.0 - - bokeh=2.3.3 - - boost-cpp=1.74.0 - - bottleneck=1.3.2 - - brotlipy=0.7.0 - - bzip2=1.0.8 - - c-ares=1.17.2 - - ca-certificates=2021.5.30 - - cairo=1.16.0 - - cartopy=0.19.0.post1 - - cdsapi=0.5.1 - - certifi=2021.5.30 - - cffi=1.14.6 - - cfitsio=3.470 - - cftime=1.5.0 - - chardet=4.0.0 - - charset-normalizer=2.0.0 - - click=7.1.2 - - click-plugins=1.1.1 - - cligj=0.7.2 - - cloudpickle=2.0.0 - - coincbc=2.10.5 - - colorama=0.4.4 - - conda=4.10.3 - - conda-package-handling=1.7.3 - - configargparse=1.5.2 - - connection_pool=0.0.3 - - country_converter=0.7.3 - - cryptography=3.4.7 - - curl=7.79.0 - - cycler=0.10.0 - - cytoolz=0.11.0 - - dask=2021.3.1 - - dask-core=2021.3.1 - - datrie=0.8.2 - - dbus=1.13.6 - - decorator=4.4.2 - - deprecation=2.1.0 - - descartes=1.1.0 - - distributed=2021.4.1 - - distro=1.5.0 - - docutils=0.17.1 - - entsoe-py=0.3.7 - - et_xmlfile=1.0.1 - - expat=2.4.1 - - filelock=3.0.12 - - fiona=1.8.18 - - fontconfig=2.13.1 - - freetype=2.10.4 - - freexl=1.0.6 - - fsspec=2021.8.1 - - gdal=3.2.1 - - geographiclib=1.52 - - geopandas=0.9.0 - - geopandas-base=0.9.0 - - geopy=2.2.0 - - geos=3.9.1 - - geotiff=1.6.0 - - gettext=0.19.8.1 - - giflib=5.2.1 - - gitdb=4.0.7 - - gitpython=3.1.23 - - glib=2.68.4 - - glib-tools=2.68.4 - - graphite2=1.3.13 - - gst-plugins-base=1.18.5 - - gstreamer=1.18.5 - - harfbuzz=2.9.1 - - hdf4=4.2.15 - - hdf5=1.10.6 - - heapdict=1.0.1 - - icu=68.1 - - idna=3.1 - - importlib-metadata=4.8.1 - - iniconfig=1.1.1 - - ipython=7.27.0 - - ipython_genutils=0.2.0 - - jdcal=1.4.1 - - jedi=0.18.0 - - jinja2=3.0.1 - - joblib=1.0.1 - - jpeg=9d - - json-c=0.15 - - jsonschema=3.2.0 - - jupyter_core=4.8.1 - - kealib=1.4.14 - - kiwisolver=1.3.2 - - krb5=1.19.2 - - lcms2=2.12 - - ld_impl_linux-64=2.36.1 - - libarchive=3.5.1 - - libblas=3.9.0 - - libcblas=3.9.0 - - libclang=11.1.0 - - libcurl=7.79.0 - - libdap4=3.20.6 - - libedit=3.1.20191231 - - libev=4.33 - - libevent=2.1.10 - - libffi=3.4.2 - - libgcc-ng=11.2.0 - - libgdal=3.2.1 - - libgfortran-ng=11.2.0 - - libgfortran5=11.2.0 - - libglib=2.68.4 - - 
libgomp=11.2.0 - - libiconv=1.16 - - libkml=1.3.0 - - liblapack=3.9.0 - - libllvm11=11.1.0 - - libnetcdf=4.7.4 - - libnghttp2=1.43.0 - - libogg=1.3.4 - - libopenblas=0.3.17 - - libopus=1.3.1 - - libpng=1.6.37 - - libpq=13.3 - - librttopo=1.1.0 - - libsolv=0.7.19 - - libspatialindex=1.9.3 - - libspatialite=5.0.1 - - libssh2=1.10.0 - - libstdcxx-ng=11.2.0 - - libtiff=4.2.0 - - libuuid=2.32.1 - - libvorbis=1.3.7 - - libwebp-base=1.2.1 - - libxcb=1.13 - - libxkbcommon=1.0.3 - - libxml2=2.9.12 - - libxslt=1.1.33 - - locket=0.2.0 - - lxml=4.6.3 - - lz4-c=1.9.3 - - lzo=2.10 - - mamba=0.15.3 - - mapclassify=2.4.3 - - markupsafe=2.0.1 - - matplotlib=3.4.3 - - matplotlib-base=3.4.3 - - matplotlib-inline=0.1.3 - - memory_profiler=0.58.0 - - mock=4.0.3 - - more-itertools=8.10.0 - - msgpack-python=1.0.2 - - munch=2.5.0 - - mysql-common=8.0.25 - - mysql-libs=8.0.25 - - nbformat=5.1.3 - - ncurses=6.2 - - netcdf4=1.5.6 - - networkx=2.6.3 - - nspr=4.30 - - nss=3.69 - - numexpr=2.7.3 - - numpy=1.21.2 - - olefile=0.46 - - openjdk=11.0.9.1 - - openjpeg=2.4.0 - - openpyxl=3.0.8 - - openssl=1.1.1l - - packaging=21.0 - - pandas=1.2.5 - - parso=0.8.2 - - partd=1.2.0 - - patsy=0.5.1 - - pcre=8.45 - - pexpect=4.8.0 - - pickleshare=0.7.5 - - pillow=8.2.0 - - pip=21.2.4 - - pixman=0.40.0 - - pluggy=1.0.0 - - ply=3.11 - - poppler=0.89.0 - - poppler-data=0.4.11 - - postgresql=13.3 - - powerplantmatching=0.4.8 - - progressbar2=3.53.1 - - proj=7.2.0 - - prompt-toolkit=3.0.20 - - psutil=5.8.0 - - pthread-stubs=0.4 - - ptyprocess=0.7.0 - - pulp=2.5.0 - - py=1.10.0 - - pycosat=0.6.3 - - pycountry=20.7.3 - - pycparser=2.20 - - pygments=2.10.0 - - pyomo=6.1.2 - - pyopenssl=20.0.1 - - pyparsing=2.4.7 - - pyproj=3.1.0 - - pypsa=0.18.0 - - pyqt=5.12.3 - - pyqt-impl=5.12.3 - - pyqt5-sip=4.19.18 - - pyqtchart=5.12 - - pyqtwebengine=5.12.1 - - pyrsistent=0.17.3 - - pyshp=2.1.3 - - pysocks=1.7.1 - - pytables=3.6.1 - - pytest=6.2.5 - - python=3.9.7 - - python-dateutil=2.8.2 - - python-utils=2.5.6 - - python_abi=3.9 - - pytz=2021.1 - - pyyaml=5.4.1 - - qt=5.12.9 - - rasterio=1.2.6 - - ratelimiter=1.2.0 - - readline=8.1 - - reproc=14.2.3 - - reproc-cpp=14.2.3 - - requests=2.26.0 - - rtree=0.9.7 - - ruamel_yaml=0.15.80 - - scikit-learn=0.24.2 - - scipy=1.7.1 - - seaborn=0.11.2 - - seaborn-base=0.11.2 - - setuptools=58.0.4 - - setuptools-scm=6.3.2 - - setuptools_scm=6.3.2 - - shapely=1.7.1 - - six=1.16.0 - - smart_open=5.2.1 - - smmap=3.0.5 - - snakemake-minimal=6.8.0 - - snuggs=1.4.7 - - sortedcontainers=2.4.0 - - soupsieve=2.0.1 - - sqlite=3.36.0 - - statsmodels=0.12.2 - - stopit=1.1.2 - - tabula-py=2.2.0 - - tabulate=0.8.9 - - tblib=1.7.0 - - threadpoolctl=2.2.0 - - tiledb=2.2.9 - - tk=8.6.11 - - toml=0.10.2 - - tomli=1.2.1 - - toolz=0.11.1 - - toposort=1.6 - - tornado=6.1 - - tqdm=4.62.3 - - traitlets=5.1.0 - - typing_extensions=3.10.0.2 - - tzcode=2021a - - tzdata=2021a - - urllib3=1.26.6 - - wcwidth=0.2.5 - - wheel=0.37.0 - - wrapt=1.12.1 - - xarray=0.19.0 - - xerces-c=3.2.3 - - xlrd=2.0.1 - - xorg-fixesproto=5.0 - - xorg-inputproto=2.3.2 - - xorg-kbproto=1.0.7 - - xorg-libice=1.0.10 - - xorg-libsm=1.2.3 - - xorg-libx11=1.7.2 - - xorg-libxau=1.0.9 - - xorg-libxdmcp=1.1.3 - - xorg-libxext=1.3.4 - - xorg-libxfixes=5.0.3 - - xorg-libxi=1.7.10 - - xorg-libxrender=0.9.10 - - xorg-libxtst=1.2.3 - - xorg-recordproto=1.14.2 - - xorg-renderproto=0.11.1 - - xorg-xextproto=7.3.0 - - xorg-xproto=7.0.31 - - xz=5.2.5 - - yaml=0.2.5 - - zict=2.0.0 - - zipp=3.5.0 - - zlib=1.2.11 - - zstd=1.4.9 - - pip: - - countrycode==0.2 - - sklearn==0.0 - - 
tsam==1.1.1 - - vresutils==0.3.1 +- _libgcc_mutex=0.1 +- _openmp_mutex=4.5 +- abseil-cpp=20210324.2 +- affine=2.3.1 +- alsa-lib=1.2.3.2 +- altair=4.2.0 +- ampl-mp=3.1.0 +- amply=0.1.5 +- anyio=3.6.1 +- appdirs=1.4.4 +- argon2-cffi=21.3.0 +- argon2-cffi-bindings=21.2.0 +- arrow-cpp=8.0.0 +- asttokens=2.0.5 +- atlite=0.2.9 +- attrs=21.4.0 +- aws-c-cal=0.5.11 +- aws-c-common=0.6.2 +- aws-c-event-stream=0.2.7 +- aws-c-io=0.10.5 +- aws-checksums=0.1.11 +- aws-sdk-cpp=1.8.186 +- babel=2.10.3 +- backcall=0.2.0 +- backports=1.0 +- backports.functools_lru_cache=1.6.4 +- beautifulsoup4=4.11.1 +- bleach=5.0.1 +- blinker=1.4 +- blosc=1.21.1 +- bokeh=2.4.3 +- boost-cpp=1.74.0 +- bottleneck=1.3.5 +- branca=0.5.0 +- brotli=1.0.9 +- brotli-bin=1.0.9 +- brotlipy=0.7.0 +- bzip2=1.0.8 +- c-ares=1.18.1 +- ca-certificates=2022.6.15.1 +- cachetools=5.0.0 +- cairo=1.16.0 +- cartopy=0.20.1 +- cdsapi=0.5.1 +- certifi=2022.6.15.1 +- cffi=1.15.1 +- cfitsio=4.0.0 +- cftime=1.6.1 +- charset-normalizer=2.1.0 +- click=8.0.4 +- click-plugins=1.1.1 +- cligj=0.7.2 +- cloudpickle=2.1.0 +- coin-or-cbc=2.10.8 +- coin-or-cgl=0.60.6 +- coin-or-clp=1.17.7 +- coin-or-osi=0.108.7 +- coin-or-utils=2.11.6 +- coincbc=2.10.8 +- colorama=0.4.5 +- colorcet=3.0.0 +- commonmark=0.9.1 +- configargparse=1.5.3 +- connection_pool=0.0.3 +- country_converter=0.7.4 +- cryptography=37.0.4 +- curl=7.83.1 +- cycler=0.11.0 +- cytoolz=0.12.0 +- dask=2022.7.0 +- dask-core=2022.7.0 +- dataclasses=0.8 +- datrie=0.8.2 +- dbus=1.13.6 +- debugpy=1.6.0 +- decorator=5.1.1 +- defusedxml=0.7.1 +- deprecation=2.1.0 +- descartes=1.1.0 +- distributed=2022.7.0 +- distro=1.6.0 +- docutils=0.19 +- dpath=2.0.6 +- entrypoints=0.4 +- entsoe-py=0.5.4 +- et_xmlfile=1.0.1 +- executing=0.8.3 +- expat=2.4.8 +- filelock=3.7.1 +- fiona=1.8.20 +- flit-core=3.7.1 +- folium=0.12.1.post1 +- font-ttf-dejavu-sans-mono=2.37 +- font-ttf-inconsolata=3.000 +- font-ttf-source-code-pro=2.038 +- font-ttf-ubuntu=0.83 +- fontconfig=2.14.0 +- fonts-conda-ecosystem=1 +- fonts-conda-forge=1 +- fonttools=4.34.4 +- freetype=2.10.4 +- freexl=1.0.6 +- fsspec=2022.5.0 +- future=0.18.2 +- gdal=3.3.3 +- geographiclib=1.52 +- geojson-rewind=1.0.2 +- geopandas=0.11.0 +- geopandas-base=0.11.0 +- geopy=2.2.0 +- geos=3.10.0 +- geotiff=1.7.0 +- gettext=0.19.8.1 +- gflags=2.2.2 +- giflib=5.2.1 +- gitdb=4.0.9 +- gitpython=3.1.27 +- glog=0.6.0 +- gmp=6.2.1 +- graphite2=1.3.13 +- grpc-cpp=1.45.2 +- gst-plugins-base=1.18.5 +- gstreamer=1.18.5 +- harfbuzz=2.9.1 +- hdf4=4.2.15 +- hdf5=1.12.1 +- heapdict=1.0.1 +- icu=68.2 +- idna=3.3 +- importlib-metadata=4.11.4 +- importlib_metadata=4.11.4 +- importlib_resources=5.8.0 +- iniconfig=1.1.1 +- ipykernel=6.15.1 +- ipython=8.4.0 +- ipython_genutils=0.2.0 +- ipywidgets=7.7.1 +- jedi=0.18.1 +- jinja2=3.1.2 +- joblib=1.1.0 +- jpeg=9e +- json-c=0.15 +- json5=0.9.5 +- jsonschema=4.7.2 +- jupyter_client=7.3.4 +- jupyter_core=4.10.0 +- jupyter_server=1.18.1 +- kealib=1.4.15 +- keyutils=1.6.1 +- kiwisolver=1.4.4 +- krb5=1.19.3 +- lcms2=2.12 +- ld_impl_linux-64=2.36.1 +- lerc=3.0 +- libblas=3.9.0 +- libbrotlicommon=1.0.9 +- libbrotlidec=1.0.9 +- libbrotlienc=1.0.9 +- libcblas=3.9.0 +- libclang=11.1.0 +- libcrc32c=1.1.2 +- libcurl=7.83.1 +- libdap4=3.20.6 +- libdeflate=1.12 +- libedit=3.1.20191231 +- libev=4.33 +- libevent=2.1.10 +- libffi=3.4.2 +- libgcc-ng=12.1.0 +- libgdal=3.3.3 +- libgfortran-ng=12.1.0 +- libgfortran5=12.1.0 +- libglib=2.72.1 +- libgomp=12.1.0 +- libgoogle-cloud=1.40.2 +- libiconv=1.16 +- libkml=1.3.0 +- liblapack=3.9.0 +- liblapacke=3.9.0 +- 
libllvm11=11.1.0 +- libnetcdf=4.8.1 +- libnghttp2=1.47.0 +- libnsl=2.0.0 +- libogg=1.3.4 +- libopenblas=0.3.20 +- libopus=1.3.1 +- libpng=1.6.37 +- libpq=13.5 +- libprotobuf=3.20.1 +- librttopo=1.1.0 +- libsodium=1.0.18 +- libspatialindex=1.9.3 +- libspatialite=5.0.1 +- libssh2=1.10.0 +- libstdcxx-ng=12.1.0 +- libthrift=0.16.0 +- libtiff=4.4.0 +- libutf8proc=2.7.0 +- libuuid=2.32.1 +- libvorbis=1.3.7 +- libwebp=1.2.2 +- libwebp-base=1.2.2 +- libxcb=1.13 +- libxkbcommon=1.0.3 +- libxml2=2.9.12 +- libxslt=1.1.33 +- libzip=1.9.2 +- libzlib=1.2.12 +- locket=1.0.0 +- lxml=4.8.0 +- lz4=4.0.0 +- lz4-c=1.9.3 +- lzo=2.10 +- mapclassify=2.4.3 +- markdown=3.4.1 +- markupsafe=2.1.1 +- matplotlib=3.5.2 +- matplotlib-base=3.5.2 +- matplotlib-inline=0.1.3 +- memory_profiler=0.60.0 +- metis=5.1.0 +- mistune=0.8.4 +- msgpack-python=1.0.4 +- mumps-include=5.2.1 +- mumps-seq=5.2.1 +- munch=2.5.0 +- munkres=1.1.4 +- mysql-common=8.0.29 +- mysql-libs=8.0.29 +- nbclassic=0.4.3 +- nbclient=0.6.6 +- nbconvert=6.5.0 +- nbconvert-core=6.5.0 +- nbconvert-pandoc=6.5.0 +- nbformat=5.4.0 +- ncurses=6.3 +- nest-asyncio=1.5.5 +- netcdf4=1.6.0 +- networkx=2.8.4 +- nomkl=1.0 +- notebook=6.4.12 +- notebook-shim=0.1.0 +- nspr=4.32 +- nss=3.78 +- numexpr=2.8.3 +- numpy=1.23.1 +- openjdk=11.0.9.1 +- openjpeg=2.4.0 +- openpyxl=3.0.9 +- openssl=1.1.1q +- orc=1.7.5 +- packaging=21.3 +- pandas=1.4.3 +- pandoc=2.18 +- pandocfilters=1.5.0 +- parquet-cpp=1.5.1 +- parso=0.8.3 +- partd=1.2.0 +- patsy=0.5.2 +- pcre=8.45 +- pexpect=4.8.0 +- pickleshare=0.7.5 +- pillow=9.2.0 +- pip=22.1.2 +- pixman=0.40.0 +- plac=1.3.5 +- pluggy=1.0.0 +- ply=3.11 +- poppler=21.09.0 +- poppler-data=0.4.11 +- postgresql=13.5 +- powerplantmatching=0.5.4 +- progressbar2=4.0.0 +- proj=8.1.1 +- prometheus_client=0.14.1 +- prompt-toolkit=3.0.30 +- protobuf=3.20.1 +- psutil=5.9.1 +- pthread-stubs=0.4 +- ptyprocess=0.7.0 +- pulp=2.6.0 +- pure_eval=0.2.2 +- py=1.11.0 +- pyarrow=8.0.0 +- pycountry=20.7.3 +- pycparser=2.21 +- pyct=0.4.6 +- pyct-core=0.4.6 +- pydeck=0.7.1 +- pygments=2.12.0 +- pympler=0.9 +- pyomo=6.4.1 +- pyopenssl=22.0.0 +- pyparsing=3.0.9 +- pyproj=3.2.1 +- pypsa=0.20.0 +- pyqt=5.12.3 +- pyqt-impl=5.12.3 +- pyqt5-sip=4.19.18 +- pyqtchart=5.12 +- pyqtwebengine=5.12.1 +- pyrsistent=0.18.1 +- pyshp=2.3.0 +- pysocks=1.7.1 +- pytables=3.7.0 +- pytest=7.1.2 +- python=3.9.13 +- python-dateutil=2.8.2 +- python-fastjsonschema=2.16.1 +- python-tzdata=2022.1 +- python-utils=3.3.3 +- python_abi=3.9 +- pytz=2022.1 +- pytz-deprecation-shim=0.1.0.post0 +- pyviz_comms=2.2.0 +- pyxlsb=1.0.9 +- pyyaml=6.0 +- pyzmq=23.2.0 +- qt=5.12.9 +- rasterio=1.2.9 +- ratelimiter=1.2.0 +- re2=2022.06.01 +- readline=8.1.2 +- requests=2.28.1 +- retry=0.9.2 +- rich=12.5.1 +- rtree=1.0.0 +- s2n=1.0.10 +- scikit-learn=1.1.1 +- scipy=1.8.1 +- scotch=6.0.9 +- seaborn=0.11.2 +- seaborn-base=0.11.2 +- semver=2.13.0 +- send2trash=1.8.0 +- setuptools=63.2.0 +- setuptools-scm=7.0.5 +- setuptools_scm=7.0.5 +- shapely=1.8.0 +- six=1.16.0 +- smart_open=6.0.0 +- smmap=3.0.5 +- snakemake-minimal=7.8.5 +- snappy=1.1.9 +- sniffio=1.2.0 +- snuggs=1.4.7 +- sortedcontainers=2.4.0 +- soupsieve=2.3.1 +- sqlite=3.39.1 +- stack_data=0.3.0 +- statsmodels=0.13.2 +- stopit=1.1.2 +- streamlit=1.10.0 +- tabula-py=2.2.0 +- tabulate=0.8.10 +- tblib=1.7.0 +- tenacity=8.0.1 +- terminado=0.15.0 +- threadpoolctl=3.1.0 +- tiledb=2.3.4 +- tinycss2=1.1.1 +- tk=8.6.12 +- toml=0.10.2 +- tomli=2.0.1 +- toolz=0.12.0 +- toposort=1.7 +- tornado=6.1 +- tqdm=4.64.0 +- traitlets=5.3.0 +- typing-extensions=4.3.0 +- 
typing_extensions=4.3.0 +- tzcode=2022a +- tzdata=2022a +- tzlocal=4.2 +- unicodedata2=14.0.0 +- unidecode=1.3.4 +- unixodbc=2.3.10 +- urllib3=1.26.10 +- validators=0.18.2 +- watchdog=2.1.9 +- wcwidth=0.2.5 +- webencodings=0.5.1 +- websocket-client=1.3.3 +- wheel=0.37.1 +- widgetsnbextension=3.6.1 +- wrapt=1.14.1 +- xarray=2022.3.0 +- xerces-c=3.2.3 +- xlrd=2.0.1 +- xorg-fixesproto=5.0 +- xorg-inputproto=2.3.2 +- xorg-kbproto=1.0.7 +- xorg-libice=1.0.10 +- xorg-libsm=1.2.3 +- xorg-libx11=1.7.2 +- xorg-libxau=1.0.9 +- xorg-libxdmcp=1.1.3 +- xorg-libxext=1.3.4 +- xorg-libxfixes=5.0.3 +- xorg-libxi=1.7.10 +- xorg-libxrender=0.9.10 +- xorg-libxtst=1.2.3 +- xorg-recordproto=1.14.2 +- xorg-renderproto=0.11.1 +- xorg-xextproto=7.3.0 +- xorg-xproto=7.0.31 +- xyzservices=2022.6.0 +- xz=5.2.5 +- yaml=0.2.5 +- yte=1.5.1 +- zeromq=4.3.4 +- zict=2.2.0 +- zipp=3.8.0 +- zlib=1.2.12 +- zstd=1.5.2 +- pip: + - countrycode==0.2 + - tsam==2.1.0 + - vresutils==0.3.1 diff --git a/envs/environment.yaml b/envs/environment.yaml index 7cb6b519..bc28faeb 100644 --- a/envs/environment.yaml +++ b/envs/environment.yaml @@ -1,60 +1,62 @@ -# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT name: pypsa-eur channels: - - conda-forge - - bioconda +- conda-forge +- bioconda dependencies: - - python>=3.8 - - pip +- python>=3.8 +- pip - - pypsa>=0.19.2 - - atlite>=0.2.5 - - dask - - jupyter - - nbconvert +- pypsa>=0.21.3 +- atlite>=0.2.9 +- dask +- jupyter +- nbconvert # Dependencies of the workflow itself - - xlrd - - openpyxl - - pycountry - - seaborn - - snakemake-minimal - - memory_profiler - - yaml - - pytables - - lxml - - powerplantmatching>=0.4.8 - - numpy - - pandas - - geopandas - - xarray - - netcdf4 - - networkx - - scipy - - shapely - - progressbar2 - - pyomo - - matplotlib - - proj +- xlrd +- openpyxl +- pycountry +- seaborn +- snakemake-minimal +- memory_profiler +- yaml +- pytables +- lxml +- powerplantmatching>=0.5.5 +- numpy<1.24 +- pandas +- geopandas>=0.11.0 +- xarray +- netcdf4 +- networkx +- scipy +- shapely<2.0 +- progressbar2 +- pyomo +- matplotlib<3.6 +- proj +- fiona +- country_converter # Keep in conda environment when calling ipython - - ipython +- ipython # GIS dependencies: - - cartopy - - descartes - - rasterio +- cartopy +- descartes +- rasterio!=1.2.10 # PyPSA-Eur-Sec Dependencies - - geopy - - tqdm - - pytz - - country_converter - - tabula-py +- geopy +- tqdm +- pytz +- tabula-py +- pyxlsb - - pip: - - vresutils>=0.3.1 - - tsam>=1.1.0 +- pip: + - vresutils>=0.3.1 + - tsam>=1.1.0 diff --git a/scripts/_helpers.py b/scripts/_helpers.py index f1e5e887..ba88efb2 100644 --- a/scripts/_helpers.py +++ b/scripts/_helpers.py @@ -1,10 +1,14 @@ -# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT -import pandas as pd from pathlib import Path +import pandas as pd + +REGION_COLS = ["geometry", "name", "x", "y", "country"] + def configure_logging(snakemake, skip_handlers=False): """ @@ -27,21 +31,26 @@ def configure_logging(snakemake, skip_handlers=False): import logging - kwargs = snakemake.config.get('logging', dict()) + kwargs = snakemake.config.get("logging", dict()).copy() kwargs.setdefault("level", "INFO") if skip_handlers is False: - fallback_path = Path(__file__).parent.joinpath('..', 'logs', f"{snakemake.rule}.log") - logfile = snakemake.log.get('python', snakemake.log[0] 
if snakemake.log - else fallback_path) + fallback_path = Path(__file__).parent.joinpath( + "..", "logs", f"{snakemake.rule}.log" + ) + logfile = snakemake.log.get( + "python", snakemake.log[0] if snakemake.log else fallback_path + ) kwargs.update( - {'handlers': [ - # Prefer the 'python' log, otherwise take the first log for each - # Snakemake rule - logging.FileHandler(logfile), - logging.StreamHandler() + { + "handlers": [ + # Prefer the 'python' log, otherwise take the first log for each + # Snakemake rule + logging.FileHandler(logfile), + logging.StreamHandler(), ] - }) + } + ) logging.basicConfig(**kwargs) @@ -79,138 +88,199 @@ def load_network(import_name=None, custom_components=None): if custom_components is not None: override_components = pypsa.components.components.copy() - override_component_attrs = Dict({k : v.copy() for k,v in pypsa.components.component_attrs.items()}) + override_component_attrs = Dict( + {k: v.copy() for k, v in pypsa.components.component_attrs.items()} + ) for k, v in custom_components.items(): - override_components.loc[k] = v['component'] - override_component_attrs[k] = pd.DataFrame(columns = ["type","unit","default","description","status"]) - for attr, val in v['attributes'].items(): + override_components.loc[k] = v["component"] + override_component_attrs[k] = pd.DataFrame( + columns=["type", "unit", "default", "description", "status"] + ) + for attr, val in v["attributes"].items(): override_component_attrs[k].loc[attr] = val - return pypsa.Network(import_name=import_name, - override_components=override_components, - override_component_attrs=override_component_attrs) + return pypsa.Network( + import_name=import_name, + override_components=override_components, + override_component_attrs=override_component_attrs, + ) def pdbcast(v, h): - return pd.DataFrame(v.values.reshape((-1, 1)) * h.values, - index=v.index, columns=h.index) + return pd.DataFrame( + v.values.reshape((-1, 1)) * h.values, index=v.index, columns=h.index + ) def load_network_for_plots(fn, tech_costs, config, combine_hydro_ps=True): import pypsa - from add_electricity import update_transmission_costs, load_costs + from add_electricity import load_costs, update_transmission_costs n = pypsa.Network(fn) n.loads["carrier"] = n.loads.bus.map(n.buses.carrier) + " load" n.stores["carrier"] = n.stores.bus.map(n.buses.carrier) - n.links["carrier"] = (n.links.bus0.map(n.buses.carrier) + "-" + n.links.bus1.map(n.buses.carrier)) + n.links["carrier"] = ( + n.links.bus0.map(n.buses.carrier) + "-" + n.links.bus1.map(n.buses.carrier) + ) n.lines["carrier"] = "AC line" n.transformers["carrier"] = "AC transformer" - n.lines['s_nom'] = n.lines['s_nom_min'] - n.links['p_nom'] = n.links['p_nom_min'] + n.lines["s_nom"] = n.lines["s_nom_min"] + n.links["p_nom"] = n.links["p_nom_min"] if combine_hydro_ps: - n.storage_units.loc[n.storage_units.carrier.isin({'PHS', 'hydro'}), 'carrier'] = 'hydro+PHS' + n.storage_units.loc[ + n.storage_units.carrier.isin({"PHS", "hydro"}), "carrier" + ] = "hydro+PHS" # if the carrier was not set on the heat storage units # bus_carrier = n.storage_units.bus.map(n.buses.carrier) # n.storage_units.loc[bus_carrier == "heat","carrier"] = "water tanks" - Nyears = n.snapshot_weightings.objective.sum() / 8760. 
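A quick note on the `Nyears` convention that recurs throughout these scripts: the objective snapshot weightings sum to the number of represented hours, so a network covering a full hourly year gives Nyears = 1.0, and annualised costs (EUR/MW/a) are later scaled by it. A minimal sketch, assuming nothing beyond an empty PyPSA network:

import pandas as pd
import pypsa

n = pypsa.Network()
n.set_snapshots(pd.date_range("2013-01-01", periods=8760, freq="H"))

# objective weightings default to 1.0 per snapshot, so the sum is 8760
Nyears = n.snapshot_weightings.objective.sum() / 8760.0
print(Nyears)  # 1.0; a three-month slice would give roughly 0.25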
- costs = load_costs(Nyears, tech_costs, config['costs'], config['electricity']) + Nyears = n.snapshot_weightings.objective.sum() / 8760.0 + costs = load_costs(tech_costs, config["costs"], config["electricity"], Nyears) update_transmission_costs(n, costs) return n + def update_p_nom_max(n): # if extendable carriers (solar/onwind/...) have capacity >= 0, # e.g. existing assets from the OPSD project are included to the network, # the installed capacity might exceed the expansion limit. # Hence, we update the assumptions. - - n.generators.p_nom_max = n.generators[['p_nom_min', 'p_nom_max']].max(1) + + n.generators.p_nom_max = n.generators[["p_nom_min", "p_nom_max"]].max(1) + def aggregate_p_nom(n): - return pd.concat([ - n.generators.groupby("carrier").p_nom_opt.sum(), - n.storage_units.groupby("carrier").p_nom_opt.sum(), - n.links.groupby("carrier").p_nom_opt.sum(), - n.loads_t.p.groupby(n.loads.carrier,axis=1).sum().mean() - ]) + return pd.concat( + [ + n.generators.groupby("carrier").p_nom_opt.sum(), + n.storage_units.groupby("carrier").p_nom_opt.sum(), + n.links.groupby("carrier").p_nom_opt.sum(), + n.loads_t.p.groupby(n.loads.carrier, axis=1).sum().mean(), + ] + ) + def aggregate_p(n): - return pd.concat([ - n.generators_t.p.sum().groupby(n.generators.carrier).sum(), - n.storage_units_t.p.sum().groupby(n.storage_units.carrier).sum(), - n.stores_t.p.sum().groupby(n.stores.carrier).sum(), - -n.loads_t.p.sum().groupby(n.loads.carrier).sum() - ]) + return pd.concat( + [ + n.generators_t.p.sum().groupby(n.generators.carrier).sum(), + n.storage_units_t.p.sum().groupby(n.storage_units.carrier).sum(), + n.stores_t.p.sum().groupby(n.stores.carrier).sum(), + -n.loads_t.p.sum().groupby(n.loads.carrier).sum(), + ] + ) + def aggregate_e_nom(n): - return pd.concat([ - (n.storage_units["p_nom_opt"]*n.storage_units["max_hours"]).groupby(n.storage_units["carrier"]).sum(), - n.stores["e_nom_opt"].groupby(n.stores.carrier).sum() - ]) + return pd.concat( + [ + (n.storage_units["p_nom_opt"] * n.storage_units["max_hours"]) + .groupby(n.storage_units["carrier"]) + .sum(), + n.stores["e_nom_opt"].groupby(n.stores.carrier).sum(), + ] + ) + def aggregate_p_curtailed(n): - return pd.concat([ - ((n.generators_t.p_max_pu.sum().multiply(n.generators.p_nom_opt) - n.generators_t.p.sum()) - .groupby(n.generators.carrier).sum()), - ((n.storage_units_t.inflow.sum() - n.storage_units_t.p.sum()) - .groupby(n.storage_units.carrier).sum()) - ]) + return pd.concat( + [ + ( + ( + n.generators_t.p_max_pu.sum().multiply(n.generators.p_nom_opt) + - n.generators_t.p.sum() + ) + .groupby(n.generators.carrier) + .sum() + ), + ( + (n.storage_units_t.inflow.sum() - n.storage_units_t.p.sum()) + .groupby(n.storage_units.carrier) + .sum() + ), + ] + ) + def aggregate_costs(n, flatten=False, opts=None, existing_only=False): - - components = dict(Link=("p_nom", "p0"), - Generator=("p_nom", "p"), - StorageUnit=("p_nom", "p"), - Store=("e_nom", "p"), - Line=("s_nom", None), - Transformer=("s_nom", None)) + components = dict( + Link=("p_nom", "p0"), + Generator=("p_nom", "p"), + StorageUnit=("p_nom", "p"), + Store=("e_nom", "p"), + Line=("s_nom", None), + Transformer=("s_nom", None), + ) costs = {} for c, (p_nom, p_attr) in zip( - n.iterate_components(components.keys(), skip_empty=False), - components.values() + n.iterate_components(components.keys(), skip_empty=False), components.values() ): - if c.df.empty: continue - if not existing_only: p_nom += "_opt" - costs[(c.list_name, 'capital')] = (c.df[p_nom] * 
c.df.capital_cost).groupby(c.df.carrier).sum() + if c.df.empty: + continue + if not existing_only: + p_nom += "_opt" + costs[(c.list_name, "capital")] = ( + (c.df[p_nom] * c.df.capital_cost).groupby(c.df.carrier).sum() + ) if p_attr is not None: p = c.pnl[p_attr].sum() - if c.name == 'StorageUnit': + if c.name == "StorageUnit": p = p.loc[p > 0] - costs[(c.list_name, 'marginal')] = (p*c.df.marginal_cost).groupby(c.df.carrier).sum() + costs[(c.list_name, "marginal")] = ( + (p * c.df.marginal_cost).groupby(c.df.carrier).sum() + ) costs = pd.concat(costs) if flatten: assert opts is not None - conv_techs = opts['conv_techs'] + conv_techs = opts["conv_techs"] costs = costs.reset_index(level=0, drop=True) - costs = costs['capital'].add( - costs['marginal'].rename({t: t + ' marginal' for t in conv_techs}), - fill_value=0. + costs = costs["capital"].add( + costs["marginal"].rename({t: t + " marginal" for t in conv_techs}), + fill_value=0.0, ) return costs + def progress_retrieve(url, file): import urllib + from progressbar import ProgressBar pbar = ProgressBar(0, 100) def dlProgress(count, blockSize, totalSize): - pbar.update( int(count * blockSize * 100 / totalSize) ) + pbar.update(int(count * blockSize * 100 / totalSize)) urllib.request.urlretrieve(url, file, reporthook=dlProgress) +def get_aggregation_strategies(aggregation_strategies): + # default aggregation strategies that cannot be defined in .yaml format must be specified within + # the function, otherwise (when defaults are passed in the function's definition) they get lost + # when custom values are specified in the config. + + import numpy as np + from pypsa.networkclustering import _make_consense + + bus_strategies = dict(country=_make_consense("Bus", "country")) + bus_strategies.update(aggregation_strategies.get("buses", {})) + + generator_strategies = {"build_year": lambda x: 0, "lifetime": lambda x: np.inf} + generator_strategies.update(aggregation_strategies.get("generators", {})) + + return bus_strategies, generator_strategies + + def mock_snakemake(rulename, **wildcards): """ This function is expected to be executed from the 'scripts'-directory of ' @@ -227,20 +297,24 @@ def mock_snakemake(rulename, **wildcards): keyword arguments fixing the wildcards. Only necessary if wildcards are needed. 
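For orientation, this mirrors how the scripts below actually use the helper when run interactively from the scripts/ directory; both rule names appear verbatim in the `__main__` blocks further down:

from _helpers import mock_snakemake

# without wildcards
snakemake = mock_snakemake("add_electricity")

# with wildcards, as in add_extra_components.py below
snakemake = mock_snakemake("add_extra_components", simpl="", clusters=5)
print(snakemake.input, snakemake.output)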
""" - import snakemake as sm import os + + import snakemake as sm + from packaging.version import Version, parse from pypsa.descriptors import Dict from snakemake.script import Snakemake script_dir = Path(__file__).parent.resolve() - assert Path.cwd().resolve() == script_dir, \ - f'mock_snakemake has to be run from the repository scripts directory {script_dir}' + assert ( + Path.cwd().resolve() == script_dir + ), f"mock_snakemake has to be run from the repository scripts directory {script_dir}" os.chdir(script_dir.parent) for p in sm.SNAKEFILE_CHOICES: if os.path.exists(p): snakefile = p break - workflow = sm.Workflow(snakefile, overwrite_configfiles=[]) + kwargs = dict(rerun_triggers=[]) if parse(sm.__version__) > Version("7.7.0") else {} + workflow = sm.Workflow(snakefile, overwrite_configfiles=[], **kwargs) workflow.include(snakefile) workflow.global_resources = {} rule = workflow.get_rule(rulename) @@ -254,9 +328,18 @@ def mock_snakemake(rulename, **wildcards): io[i] = os.path.abspath(io[i]) make_accessable(job.input, job.output, job.log) - snakemake = Snakemake(job.input, job.output, job.params, job.wildcards, - job.threads, job.resources, job.log, - job.dag.workflow.config, job.rule.name, None,) + snakemake = Snakemake( + job.input, + job.output, + job.params, + job.wildcards, + job.threads, + job.resources, + job.log, + job.dag.workflow.config, + job.rule.name, + None, + ) # create log and output dir if not existent for path in list(snakemake.log) + list(snakemake.output): Path(path).parent.mkdir(parents=True, exist_ok=True) diff --git a/scripts/add_electricity.py b/scripts/add_electricity.py index 73a1935a..6734985e 100755 --- a/scripts/add_electricity.py +++ b/scripts/add_electricity.py @@ -1,4 +1,5 @@ -# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -13,7 +14,7 @@ Relevant Settings costs: year: - USD2013_to_EUR2013: + version: dicountrate: emission_prices: @@ -24,8 +25,8 @@ Relevant Settings conventional_carriers: co2limit: extendable_carriers: - include_renewable_capacities_from_OPSD: - estimate_renewable_capacities_from_capacity_stats: + estimate_renewable_capacities: + load: scaling_factor: @@ -46,14 +47,14 @@ Relevant Settings Inputs ------ -- ``data/costs.csv``: The database of cost assumptions for all included technologies for specific years from various sources; e.g. discount rate, lifetime, investment (CAPEX), fixed operation and maintenance (FOM), variable operation and maintenance (VOM), fuel costs, efficiency, carbon-dioxide intensity. +- ``resources/costs.csv``: The database of cost assumptions for all included technologies for specific years from various sources; e.g. discount rate, lifetime, investment (CAPEX), fixed operation and maintenance (FOM), variable operation and maintenance (VOM), fuel costs, efficiency, carbon-dioxide intensity. - ``data/bundle/hydro_capacities.csv``: Hydropower plant store/discharge power capacities, energy storage capacity, and average hourly inflow by country. .. image:: ../img/hydrocapacities.png :scale: 34 % - ``data/geth2015_hydro_capacities.csv``: alternative to capacities above; not currently used! -- ``resources/opsd_load.csv`` Hourly per-country load profiles. +- ``resources/load.csv`` Hourly per-country load profiles. 
- ``resources/regions_onshore.geojson``: confer :ref:`busregions`
- ``resources/nuts3_shapes.geojson``: confer :ref:`shapes`
- ``resources/powerplants.csv``: confer :ref:`powerplants`
@@ -84,17 +85,15 @@ It further adds extendable ``generators`` with **zero** capacity for
 """

 import logging

-from _helpers import configure_logging, update_p_nom_max
-import pypsa
-import pandas as pd
-import numpy as np
-import xarray as xr
 import geopandas as gpd
+import numpy as np
+import pandas as pd
 import powerplantmatching as pm
+import pypsa
+import xarray as xr
+from _helpers import configure_logging, update_p_nom_max
 from powerplantmatching.export import map_country_bus
-
-from vresutils.costdata import annuity
 from vresutils import transfer as vtransfer

 idx = pd.IndexSlice

@@ -102,76 +101,98 @@ idx = pd.IndexSlice
 logger = logging.getLogger(__name__)


-def normed(s): return s/s.sum()
+def normed(s):
+    return s / s.sum()
+
+
+def calculate_annuity(n, r):
+    """
+    Calculate the annuity factor for an asset with lifetime n years and
+    discount rate of r, e.g. annuity(20, 0.05) * 20 = 1.6.
+    """
+
+    if isinstance(r, pd.Series):
+        return pd.Series(1 / n, index=r.index).where(
+            r == 0, r / (1.0 - 1.0 / (1.0 + r) ** n)
+        )
+    elif r > 0:
+        return r / (1.0 - 1.0 / (1.0 + r) ** n)
+    else:
+        return 1 / n
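To make the docstring's example concrete: for a 20-year asset at a 5% discount rate the annuity factor is about 0.0802 per year, so multiplying by the lifetime gives roughly 1.6. A self-contained check that restates only the scalar branch of calculate_annuity above:

def annuity_scalar(n, r):
    # scalar branch of calculate_annuity: r > 0, else straight-line 1/n
    return r / (1.0 - 1.0 / (1.0 + r) ** n) if r > 0 else 1 / n

print(round(annuity_scalar(20, 0.05), 4))       # 0.0802
print(round(annuity_scalar(20, 0.05) * 20, 2))  # 1.6, as quoted in the docstring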
 def _add_missing_carriers_from_costs(n, costs, carriers):
     missing_carriers = pd.Index(carriers).difference(n.carriers.index)
-    if missing_carriers.empty: return
+    if missing_carriers.empty:
+        return

-    emissions_cols = costs.columns.to_series()\
-                          .loc[lambda s: s.str.endswith('_emissions')].values
-    suptechs = missing_carriers.str.split('-').str[0]
-    emissions = costs.loc[suptechs, emissions_cols].fillna(0.)
+    emissions_cols = (
+        costs.columns.to_series().loc[lambda s: s.str.endswith("_emissions")].values
+    )
+    suptechs = missing_carriers.str.split("-").str[0]
+    emissions = costs.loc[suptechs, emissions_cols].fillna(0.0)
     emissions.index = missing_carriers
-    n.import_components_from_dataframe(emissions, 'Carrier')
+    n.import_components_from_dataframe(emissions, "Carrier")


-def load_costs(tech_costs, config, elec_config, Nyears=1.):
-
+def load_costs(tech_costs, config, elec_config, Nyears=1.0):
     # set all asset costs and other parameters
-    costs = pd.read_csv(tech_costs, index_col=list(range(3))).sort_index()
+    costs = pd.read_csv(tech_costs, index_col=[0, 1]).sort_index()

-    # correct units to MW and EUR
-    costs.loc[costs.unit.str.contains("/kW"),"value"] *= 1e3
-    costs.loc[costs.unit.str.contains("USD"),"value"] *= config['USD2013_to_EUR2013']
+    # correct units to MW
+    costs.loc[costs.unit.str.contains("/kW"), "value"] *= 1e3
+    costs.unit = costs.unit.str.replace("/kW", "/MW")

-    costs = (costs.loc[idx[:,config['year'],:], "value"]
-             .unstack(level=2).groupby("technology").sum(min_count=1))
+    fill_values = config["fill_values"]
+    costs = costs.value.unstack().fillna(fill_values)

-    costs = costs.fillna({"CO2 intensity" : 0,
-                          "FOM" : 0,
-                          "VOM" : 0,
-                          "discount rate" : config['discountrate'],
-                          "efficiency" : 1,
-                          "fuel" : 0,
-                          "investment" : 0,
-                          "lifetime" : 25})
+    costs["capital_cost"] = (
+        (
+            calculate_annuity(costs["lifetime"], costs["discount rate"])
+            + costs["FOM"] / 100.0
+        )
+        * costs["investment"]
+        * Nyears
+    )

-    costs["capital_cost"] = ((annuity(costs["lifetime"], costs["discount rate"]) +
-                              costs["FOM"]/100.) *
-                             costs["investment"] * Nyears)
+    costs.at["OCGT", "fuel"] = costs.at["gas", "fuel"]
+    costs.at["CCGT", "fuel"] = costs.at["gas", "fuel"]

-    costs.at['OCGT', 'fuel'] = costs.at['gas', 'fuel']
-    costs.at['CCGT', 'fuel'] = costs.at['gas', 'fuel']
-
-    costs['marginal_cost'] = costs['VOM'] + costs['fuel'] / costs['efficiency']
+    costs["marginal_cost"] = costs["VOM"] + costs["fuel"] / costs["efficiency"]

     costs = costs.rename(columns={"CO2 intensity": "co2_emissions"})

-    costs.at['OCGT', 'co2_emissions'] = costs.at['gas', 'co2_emissions']
-    costs.at['CCGT', 'co2_emissions'] = costs.at['gas', 'co2_emissions']
+    costs.at["OCGT", "co2_emissions"] = costs.at["gas", "co2_emissions"]
+    costs.at["CCGT", "co2_emissions"] = costs.at["gas", "co2_emissions"]

-    costs.at['solar', 'capital_cost'] = 0.5*(costs.at['solar-rooftop', 'capital_cost'] +
-                                             costs.at['solar-utility', 'capital_cost'])
+    costs.at["solar", "capital_cost"] = (
+        config["rooftop_share"] * costs.at["solar-rooftop", "capital_cost"]
+        + (1 - config["rooftop_share"]) * costs.at["solar-utility", "capital_cost"]
+    )

-    def costs_for_storage(store, link1, link2=None, max_hours=1.):
-        capital_cost = link1['capital_cost'] + max_hours * store['capital_cost']
+    def costs_for_storage(store, link1, link2=None, max_hours=1.0):
+        capital_cost = link1["capital_cost"] + max_hours * store["capital_cost"]
         if link2 is not None:
-            capital_cost += link2['capital_cost']
-        return pd.Series(dict(capital_cost=capital_cost,
-                              marginal_cost=0.,
-                              co2_emissions=0.))
+            capital_cost += link2["capital_cost"]
+        return pd.Series(
+            dict(capital_cost=capital_cost, marginal_cost=0.0, co2_emissions=0.0)
+        )

-    max_hours = elec_config['max_hours']
-    costs.loc["battery"] = \
-        costs_for_storage(costs.loc["battery storage"], costs.loc["battery inverter"],
-                          max_hours=max_hours['battery'])
-    costs.loc["H2"] = \
-        costs_for_storage(costs.loc["hydrogen storage"], costs.loc["fuel cell"],
-                          costs.loc["electrolysis"], max_hours=max_hours['H2'])
+    max_hours = elec_config["max_hours"]
+    costs.loc["battery"] = costs_for_storage(
+        costs.loc["battery storage"],
+        costs.loc["battery inverter"],
+        max_hours=max_hours["battery"],
+    )
+    costs.loc["H2"] = costs_for_storage(
+        costs.loc["hydrogen storage underground"],
+        costs.loc["fuel cell"],
+        costs.loc["electrolysis"],
+        max_hours=max_hours["H2"],
+    )

-    for attr in ('marginal_cost', 'capital_cost'):
+    for attr in ("marginal_cost", "capital_cost"):
         overwrites = config.get(attr)
         if overwrites is not None:
             overwrites = pd.Series(overwrites)
@@ -181,28 +202,33 @@ def load_costs(tech_costs, config, elec_config, Nyears=1.):


 def load_powerplants(ppl_fn):
-    carrier_dict = {'ocgt': 'OCGT', 'ccgt': 'CCGT', 'bioenergy': 'biomass',
-                    'ccgt, thermal': 'CCGT', 'hard coal': 'coal', "natural gas": "OCGT"}
-    return (pd.read_csv(ppl_fn, index_col=0, dtype={'bus': 'str'})
-            .powerplant.to_pypsa_names()
-            .rename(columns=str.lower).drop(columns=['efficiency'])
-            .replace({'carrier': carrier_dict}))
+    carrier_dict = {
+        "ocgt": "OCGT",
+        "ccgt": "CCGT",
+        "bioenergy": "biomass",
+        "ccgt, thermal": "CCGT",
+        "hard coal": "coal",
+        "natural gas": "OCGT",
+    }
+    return (
+        pd.read_csv(ppl_fn, index_col=0, dtype={"bus": "str"})
+        .powerplant.to_pypsa_names()
+        .rename(columns=str.lower)
+        .replace({"carrier": carrier_dict})
+    )
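The function that follows disaggregates national load profiles onto substation regions. Its comment cites regression factors 0.6 and 0.4 on GDP and population; the exact combination line is elided between hunks here, but a plausible toy version of the weighting (all numbers made up) behaves like this:

import pandas as pd

def normed(s):
    return s / s.sum()

gdp_n = pd.Series({"r1": 300.0, "r2": 100.0})  # regional GDP proxy, assumed
pop_n = pd.Series({"r1": 1.0, "r2": 3.0})      # regional population, assumed
factors = normed(0.6 * normed(gdp_n) + 0.4 * normed(pop_n))

national = pd.Series([10.0, 20.0])  # two hours of national load, MW
regional = pd.DataFrame(national.values[:, None] * factors.values, columns=factors.index)
print(regional.sum(axis=1))  # sums back to the national series exactly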
-def attach_load(n, regions, load, nuts3_shapes, ua_md_gdp, countries, scaling=1.):
-
-    substation_lv_i = n.buses.index[n.buses['substation_lv']]
-    regions = (gpd.read_file(regions).set_index('name')
-               .reindex(substation_lv_i))
-    opsd_load = (pd.read_csv(load, index_col=0, parse_dates=True)
-                 .filter(items=countries))
+def attach_load(n, regions, load, nuts3_shapes, ua_md_gdp, countries, scaling=1.0):
+    substation_lv_i = n.buses.index[n.buses["substation_lv"]]
+    regions = gpd.read_file(regions).set_index("name").reindex(substation_lv_i)
+    opsd_load = pd.read_csv(load, index_col=0, parse_dates=True).filter(items=countries)

     ua_md_gdp = pd.read_csv(ua_md_gdp, dtype={'name': 'str'}).set_index('name')

     logger.info(f"Load data scaled with scaling factor {scaling}.")
     opsd_load *= scaling

-    nuts3 = gpd.read_file(nuts3_shapes).set_index('index')
+    nuts3 = gpd.read_file(nuts3_shapes).set_index("index")

     def upsample(cntry, group):
         l = opsd_load[cntry]
@@ -211,12 +237,15 @@ def attach_load(n, regions, load, nuts3_shapes, ua_md_gdp, countries, scaling=1.
             return pd.DataFrame({group.index[0]: l})
         else:
             nuts3_cntry = nuts3.loc[nuts3.country == cntry]
-            transfer = vtransfer.Shapes2Shapes(group, nuts3_cntry.geometry,
-                                               normed=False).T.tocsr()
-            gdp_n = pd.Series(transfer.dot(nuts3_cntry['gdp'].fillna(1.).values),
-                              index=group.index)
-            pop_n = pd.Series(transfer.dot(nuts3_cntry['pop'].fillna(1.).values),
-                              index=group.index)
+            transfer = vtransfer.Shapes2Shapes(
+                group, nuts3_cntry.geometry, normed=False
+            ).T.tocsr()
+            gdp_n = pd.Series(
+                transfer.dot(nuts3_cntry["gdp"].fillna(1.0).values), index=group.index
+            )
+            pop_n = pd.Series(
+                transfer.dot(nuts3_cntry["pop"].fillna(1.0).values), index=group.index
+            )

             # relative factors 0.6 and 0.4 have been determined from a linear
             # regression on the country to continent load data
@@ -225,364 +254,600 @@ def attach_load(n, regions, load, nuts3_shapes, ua_md_gdp, countries, scaling=1.
             # overwrite factor because nuts3 provides no data for UA+MD
             factors = normed(ua_md_gdp.loc[group.index, "GDP_PPP"].squeeze())
-            return pd.DataFrame(factors.values * l.values[:,np.newaxis],
-                                index=l.index, columns=factors.index)
+            return pd.DataFrame(
+                factors.values * l.values[:, np.newaxis],
+                index=l.index,
+                columns=factors.index,
+            )

-    load = pd.concat([upsample(cntry, group) for cntry, group
-                      in regions.geometry.groupby(regions.country)], axis=1)
+    load = pd.concat(
+        [
+            upsample(cntry, group)
+            for cntry, group in regions.geometry.groupby(regions.country)
+        ],
+        axis=1,
+    )

     n.madd("Load", substation_lv_i, bus=substation_lv_i, p_set=load)


-def update_transmission_costs(n, costs, length_factor=1.0, simple_hvdc_costs=False):
+def update_transmission_costs(n, costs, length_factor=1.0):
     # TODO: line length factor of lines is applied to lines and links.
     # Separate the function to distinguish.

-    n.lines['capital_cost'] = (n.lines['length'] * length_factor *
-                               costs.at['HVAC overhead', 'capital_cost'])
+    n.lines["capital_cost"] = (
+        n.lines["length"] * length_factor * costs.at["HVAC overhead", "capital_cost"]
+    )

-    if n.links.empty: return
+    if n.links.empty:
+        return

-    dc_b = n.links.carrier == 'DC'
+    dc_b = n.links.carrier == "DC"

     # If there are no dc links, then the 'underwater_fraction' column
     # may be missing. Therefore we have to return here.
-    if n.links.loc[dc_b].empty: return
+    if n.links.loc[dc_b].empty:
+        return

-    if simple_hvdc_costs:
-        costs = (n.links.loc[dc_b, 'length'] * length_factor *
-                 costs.at['HVDC overhead', 'capital_cost'])
-    else:
-        costs = (n.links.loc[dc_b, 'length'] * length_factor *
-                 ((1.
- n.links.loc[dc_b, 'underwater_fraction']) * - costs.at['HVDC overhead', 'capital_cost'] + - n.links.loc[dc_b, 'underwater_fraction'] * - costs.at['HVDC submarine', 'capital_cost']) + - costs.at['HVDC inverter pair', 'capital_cost']) - n.links.loc[dc_b, 'capital_cost'] = costs + costs = ( + n.links.loc[dc_b, "length"] + * length_factor + * ( + (1.0 - n.links.loc[dc_b, "underwater_fraction"]) + * costs.at["HVDC overhead", "capital_cost"] + + n.links.loc[dc_b, "underwater_fraction"] + * costs.at["HVDC submarine", "capital_cost"] + ) + + costs.at["HVDC inverter pair", "capital_cost"] + ) + n.links.loc[dc_b, "capital_cost"] = costs -def attach_wind_and_solar(n, costs, input_profiles, technologies, line_length_factor=1): +def attach_wind_and_solar( + n, costs, input_profiles, technologies, extendable_carriers, line_length_factor=1 +): # TODO: rename tech -> carrier, technologies -> carriers + _add_missing_carriers_from_costs(n, costs, technologies) for tech in technologies: - if tech == 'hydro': continue + if tech == "hydro": + continue - n.add("Carrier", name=tech) - with xr.open_dataset(getattr(input_profiles, 'profile_' + tech)) as ds: - if ds.indexes['bus'].empty: continue + with xr.open_dataset(getattr(input_profiles, "profile_" + tech)) as ds: + if ds.indexes["bus"].empty: + continue - suptech = tech.split('-', 2)[0] - if suptech == 'offwind': - underwater_fraction = ds['underwater_fraction'].to_pandas() - connection_cost = (line_length_factor * - ds['average_distance'].to_pandas() * - (underwater_fraction * - costs.at[tech + '-connection-submarine', 'capital_cost'] + - (1. - underwater_fraction) * - costs.at[tech + '-connection-underground', 'capital_cost'])) - capital_cost = (costs.at['offwind', 'capital_cost'] + - costs.at[tech + '-station', 'capital_cost'] + - connection_cost) - logger.info("Added connection cost of {:0.0f}-{:0.0f} Eur/MW/a to {}" - .format(connection_cost.min(), connection_cost.max(), tech)) + suptech = tech.split("-", 2)[0] + if suptech == "offwind": + underwater_fraction = ds["underwater_fraction"].to_pandas() + connection_cost = ( + line_length_factor + * ds["average_distance"].to_pandas() + * ( + underwater_fraction + * costs.at[tech + "-connection-submarine", "capital_cost"] + + (1.0 - underwater_fraction) + * costs.at[tech + "-connection-underground", "capital_cost"] + ) + ) + capital_cost = ( + costs.at["offwind", "capital_cost"] + + costs.at[tech + "-station", "capital_cost"] + + connection_cost + ) + logger.info( + "Added connection cost of {:0.0f}-{:0.0f} Eur/MW/a to {}".format( + connection_cost.min(), connection_cost.max(), tech + ) + ) else: - capital_cost = costs.at[tech, 'capital_cost'] + capital_cost = costs.at[tech, "capital_cost"] - n.madd("Generator", ds.indexes['bus'], ' ' + tech, - bus=ds.indexes['bus'], - carrier=tech, - p_nom_extendable=True, - p_nom_max=ds['p_nom_max'].to_pandas(), - weight=ds['weight'].to_pandas(), - marginal_cost=costs.at[suptech, 'marginal_cost'], - capital_cost=capital_cost, - efficiency=costs.at[suptech, 'efficiency'], - p_max_pu=ds['profile'].transpose('time', 'bus').to_pandas()) + n.madd( + "Generator", + ds.indexes["bus"], + " " + tech, + bus=ds.indexes["bus"], + carrier=tech, + p_nom_extendable=tech in extendable_carriers["Generator"], + p_nom_max=ds["p_nom_max"].to_pandas(), + weight=ds["weight"].to_pandas(), + marginal_cost=costs.at[suptech, "marginal_cost"], + capital_cost=capital_cost, + efficiency=costs.at[suptech, "efficiency"], + p_max_pu=ds["profile"].transpose("time", "bus").to_pandas(), + ) -def 
attach_conventional_generators(n, costs, ppl, carriers):
-
+def attach_conventional_generators(
+    n,
+    costs,
+    ppl,
+    conventional_carriers,
+    extendable_carriers,
+    conventional_config,
+    conventional_inputs,
+):
+    carriers = set(conventional_carriers) | set(extendable_carriers["Generator"])
     _add_missing_carriers_from_costs(n, costs, carriers)

-    ppl = (ppl.query('carrier in @carriers').join(costs, on='carrier')
-           .rename(index=lambda s: 'C' + str(s)))
+    ppl = (
+        ppl.query("carrier in @carriers")
+        .join(costs, on="carrier", rsuffix="_r")
+        .rename(index=lambda s: "C" + str(s))
+    )
+    ppl["efficiency"] = ppl.efficiency.fillna(ppl.efficiency_r)
+    ppl["marginal_cost"] = (
+        ppl.carrier.map(costs.VOM) + ppl.carrier.map(costs.fuel) / ppl.efficiency
+    )

-    logger.info('Adding {} generators with capacities [MW] \n{}'
-                .format(len(ppl), ppl.groupby('carrier').p_nom.sum()))
+    logger.info(
+        "Adding {} generators with capacities [GW] \n{}".format(
+            len(ppl), ppl.groupby("carrier").p_nom.sum().div(1e3).round(2)
+        )
+    )

-    n.madd("Generator", ppl.index,
-           carrier=ppl.carrier,
-           bus=ppl.bus,
-           p_nom=ppl.p_nom,
-           efficiency=ppl.efficiency,
-           marginal_cost=ppl.marginal_cost,
-           capital_cost=0)
+    n.madd(
+        "Generator",
+        ppl.index,
+        carrier=ppl.carrier,
+        bus=ppl.bus,
+        p_nom_min=ppl.p_nom.where(ppl.carrier.isin(conventional_carriers), 0),
+        p_nom=ppl.p_nom.where(ppl.carrier.isin(conventional_carriers), 0),
+        p_nom_extendable=ppl.carrier.isin(extendable_carriers["Generator"]),
+        efficiency=ppl.efficiency,
+        marginal_cost=ppl.marginal_cost,
+        capital_cost=ppl.capital_cost,
+        build_year=ppl.datein.fillna(0).astype(int),
+        lifetime=(ppl.dateout - ppl.datein).fillna(np.inf),
+    )

-    logger.warning(f'Capital costs for conventional generators put to 0 EUR/MW.')
+    for carrier in conventional_config:
+
+        # Generators of the affected technology
+        idx = n.generators.query("carrier == @carrier").index
+
+        for attr in list(set(conventional_config[carrier]) & set(n.generators)):
+
+            values = conventional_config[carrier][attr]
+
+            if f"conventional_{carrier}_{attr}" in conventional_inputs:
+                # Values are country-specific: first map generator buses to
+                # countries, then map countries to the attribute values
+                values = pd.read_csv(values, index_col=0).iloc[:, 0]
+                bus_values = n.buses.country.map(values)
+                n.generators[attr].update(
+                    n.generators.loc[idx].bus.map(bus_values).dropna()
+                )
+            else:
+                # A single value applies to all generators of the technology,
+                # regardless of country
+                n.generators.loc[idx, attr] = values
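The country-specific override mechanic above is worth isolating: a one-column CSV indexed by country is mapped onto generators through their bus's country, and generators whose country has no entry keep their previous value because the NaNs are dropped before the update. A sketch with hypothetical data, outside PyPSA:

import pandas as pd

values = pd.Series({"DE": 0.9, "FR": 0.8})  # e.g. p_max_pu per country, assumed
bus_country = pd.Series({"bus1": "DE", "bus2": "FR", "bus3": "PL"})
gen_bus = pd.Series({"gen1": "bus1", "gen2": "bus3"})

bus_values = bus_country.map(values)     # bus -> country value (bus3 -> NaN)
print(gen_bus.map(bus_values).dropna())  # only gen1 receives a value; PL has no entry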
def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **config):
     _add_missing_carriers_from_costs(n, costs, carriers)

-    ppl = ppl.query('carrier == "hydro"').reset_index(drop=True)\
-             .rename(index=lambda s: str(s) + ' hydro')
+    ppl = (
+        ppl.query('carrier == "hydro"')
+        .reset_index(drop=True)
+        .rename(index=lambda s: str(s) + " hydro")
+    )
     ror = ppl.query('technology == "Run-Of-River"')
     phs = ppl.query('technology == "Pumped Storage"')
     hydro = ppl.query('technology == "Reservoir"')

-    country = ppl['bus'].map(n.buses.country).rename("country")
+    country = ppl["bus"].map(n.buses.country).rename("country")

     inflow_idx = ror.index.union(hydro.index)
     if not inflow_idx.empty:
-        dist_key = ppl.loc[inflow_idx, 'p_nom'].groupby(country).transform(normed)
+        dist_key = ppl.loc[inflow_idx, "p_nom"].groupby(country).transform(normed)

         with xr.open_dataarray(profile_hydro) as inflow:
             inflow_countries = pd.Index(country[inflow_idx])
-            missing_c = (inflow_countries.unique()
-                         .difference(inflow.indexes['countries']))
-            assert missing_c.empty, (f"'{profile_hydro}' is missing "
-                f"inflow time-series for at least one country: {', '.join(missing_c)}")
+            missing_c = inflow_countries.unique().difference(
+                inflow.indexes["countries"]
+            )
+            assert missing_c.empty, (
+                f"'{profile_hydro}' is missing "
+                f"inflow time-series for at least one country: {', '.join(missing_c)}"
+            )

-            inflow_t = (inflow.sel(countries=inflow_countries)
-                        .rename({'countries': 'name'})
-                        .assign_coords(name=inflow_idx)
-                        .transpose('time', 'name')
-                        .to_pandas()
-                        .multiply(dist_key, axis=1))
+            inflow_t = (
+                inflow.sel(countries=inflow_countries)
+                .rename({"countries": "name"})
+                .assign_coords(name=inflow_idx)
+                .transpose("time", "name")
+                .to_pandas()
+                .multiply(dist_key, axis=1)
+            )

-    if 'ror' in carriers and not ror.empty:
-        n.madd("Generator", ror.index,
-               carrier='ror',
-               bus=ror['bus'],
-               p_nom=ror['p_nom'],
-               efficiency=costs.at['ror', 'efficiency'],
-               capital_cost=costs.at['ror', 'capital_cost'],
-               weight=ror['p_nom'],
-               p_max_pu=(inflow_t[ror.index]
-                         .divide(ror['p_nom'], axis=1)
-                         .where(lambda df: df<=1., other=1.)))
+    if "ror" in carriers and not ror.empty:
+        n.madd(
+            "Generator",
+            ror.index,
+            carrier="ror",
+            bus=ror["bus"],
+            p_nom=ror["p_nom"],
+            efficiency=costs.at["ror", "efficiency"],
+            capital_cost=costs.at["ror", "capital_cost"],
+            weight=ror["p_nom"],
+            p_max_pu=(
+                inflow_t[ror.index]
+                .divide(ror["p_nom"], axis=1)
+                .where(lambda df: df <= 1.0, other=1.0)
+            ),
+        )

-    if 'PHS' in carriers and not phs.empty:
+    if "PHS" in carriers and not phs.empty:
         # fill missing max hours to config value and
         # assume no natural inflow due to lack of data
-        max_hours = config.get('PHS_max_hours', 6)
-        phs = phs.replace({'max_hours': {0: max_hours}})
-        n.madd('StorageUnit', phs.index,
-               carrier='PHS',
-               bus=phs['bus'],
-               p_nom=phs['p_nom'],
-               capital_cost=costs.at['PHS', 'capital_cost'],
-               max_hours=phs['max_hours'],
-               efficiency_store=np.sqrt(costs.at['PHS','efficiency']),
-               efficiency_dispatch=np.sqrt(costs.at['PHS','efficiency']),
-               cyclic_state_of_charge=True)
+        max_hours = config.get("PHS_max_hours", 6)
+        phs = phs.replace({"max_hours": {0: max_hours}})
+        n.madd(
+            "StorageUnit",
+            phs.index,
+            carrier="PHS",
+            bus=phs["bus"],
+            p_nom=phs["p_nom"],
+            capital_cost=costs.at["PHS", "capital_cost"],
+            max_hours=phs["max_hours"],
+            efficiency_store=np.sqrt(costs.at["PHS", "efficiency"]),
+            efficiency_dispatch=np.sqrt(costs.at["PHS", "efficiency"]),
+            cyclic_state_of_charge=True,
+        )

-    if 'hydro' in carriers and not hydro.empty:
-        hydro_max_hours = config.get('hydro_max_hours')
+    if "hydro" in carriers and not hydro.empty:
+        hydro_max_hours = config.get("hydro_max_hours")

         assert hydro_max_hours is not None, "No path for hydro capacities given."
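A side note on the PHS efficiencies just above: store and dispatch are each assigned sqrt(eta), so the round trip multiplies out to the tabulated efficiency. With an assumed eta of 0.75:

import numpy as np

eta = 0.75  # assumed round-trip efficiency
efficiency_store = np.sqrt(eta)
efficiency_dispatch = np.sqrt(eta)
print(round(efficiency_store * efficiency_dispatch, 2))  # 0.75 round trip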
- hydro_stats = pd.read_csv(hydro_capacities, - comment="#", na_values='-', index_col=0) + hydro_stats = pd.read_csv( + hydro_capacities, comment="#", na_values="-", index_col=0 + ) e_target = hydro_stats["E_store[TWh]"].clip(lower=0.2) * 1e6 - e_installed = hydro.eval('p_nom * max_hours').groupby(hydro.country).sum() + e_installed = hydro.eval("p_nom * max_hours").groupby(hydro.country).sum() e_missing = e_target - e_installed - missing_mh_i = hydro.query('max_hours == 0').index + missing_mh_i = hydro.query("max_hours == 0").index - if hydro_max_hours == 'energy_capacity_totals_by_country': + if hydro_max_hours == "energy_capacity_totals_by_country": # watch out some p_nom values like IE's are totally underrepresented - max_hours_country = e_missing / \ - hydro.loc[missing_mh_i].groupby('country').p_nom.sum() + max_hours_country = ( + e_missing / hydro.loc[missing_mh_i].groupby("country").p_nom.sum() + ) - elif hydro_max_hours == 'estimate_by_large_installations': - max_hours_country = hydro_stats['E_store[TWh]'] * 1e3 / \ - hydro_stats['p_nom_discharge[GW]'] + elif hydro_max_hours == "estimate_by_large_installations": + max_hours_country = ( + hydro_stats["E_store[TWh]"] * 1e3 / hydro_stats["p_nom_discharge[GW]"] + ) - missing_countries = (pd.Index(hydro['country'].unique()) - .difference(max_hours_country.dropna().index)) + missing_countries = pd.Index(hydro["country"].unique()).difference( + max_hours_country.dropna().index + ) if not missing_countries.empty: - logger.warning("Assuming max_hours=6 for hydro reservoirs in the countries: {}" - .format(", ".join(missing_countries))) - hydro_max_hours = hydro.max_hours.where(hydro.max_hours > 0, - hydro.country.map(max_hours_country)).fillna(6) + logger.warning( + "Assuming max_hours=6 for hydro reservoirs in the countries: {}".format( + ", ".join(missing_countries) + ) + ) + hydro_max_hours = hydro.max_hours.where( + hydro.max_hours > 0, hydro.country.map(max_hours_country) + ).fillna(6) - n.madd('StorageUnit', hydro.index, carrier='hydro', - bus=hydro['bus'], - p_nom=hydro['p_nom'], - max_hours=hydro_max_hours, - capital_cost=costs.at['hydro', 'capital_cost'], - marginal_cost=costs.at['hydro', 'marginal_cost'], - p_max_pu=1., # dispatch - p_min_pu=0., # store - efficiency_dispatch=costs.at['hydro', 'efficiency'], - efficiency_store=0., - cyclic_state_of_charge=True, - inflow=inflow_t.loc[:, hydro.index]) + n.madd( + "StorageUnit", + hydro.index, + carrier="hydro", + bus=hydro["bus"], + p_nom=hydro["p_nom"], + max_hours=hydro_max_hours, + capital_cost=costs.at["hydro", "capital_cost"], + marginal_cost=costs.at["hydro", "marginal_cost"], + p_max_pu=1.0, # dispatch + p_min_pu=0.0, # store + efficiency_dispatch=costs.at["hydro", "efficiency"], + efficiency_store=0.0, + cyclic_state_of_charge=True, + inflow=inflow_t.loc[:, hydro.index], + ) def attach_extendable_generators(n, costs, ppl, carriers): - + logger.warning( + "The function `attach_extendable_generators` is deprecated in v0.5.0." 
+    )
     _add_missing_carriers_from_costs(n, costs, carriers)

     for tech in carriers:
-        if tech.startswith('OCGT'):
-            ocgt = ppl.query("carrier in ['OCGT', 'CCGT']").groupby('bus', as_index=False).first()
-            n.madd('Generator', ocgt.index,
-                   suffix=' OCGT',
-                   bus=ocgt['bus'],
-                   carrier=tech,
-                   p_nom_extendable=True,
-                   p_nom=0.,
-                   capital_cost=costs.at['OCGT', 'capital_cost'],
-                   marginal_cost=costs.at['OCGT', 'marginal_cost'],
-                   efficiency=costs.at['OCGT', 'efficiency'])
-
-        elif tech.startswith('CCGT'):
-            ccgt = ppl.query("carrier in ['OCGT', 'CCGT']").groupby('bus', as_index=False).first()
-            n.madd('Generator', ccgt.index,
-                   suffix=' CCGT',
-                   bus=ccgt['bus'],
-                   carrier=tech,
-                   p_nom_extendable=True,
-                   p_nom=0.,
-                   capital_cost=costs.at['CCGT', 'capital_cost'],
-                   marginal_cost=costs.at['CCGT', 'marginal_cost'],
-                   efficiency=costs.at['CCGT', 'efficiency'])
-
-        elif tech.startswith('nuclear'):
-            nuclear = ppl.query("carrier == 'nuclear'").groupby('bus', as_index=False).first()
-            n.madd('Generator', nuclear.index,
-                   suffix=' nuclear',
-                   bus=nuclear['bus'],
+        if tech.startswith("OCGT"):
+            ocgt = (
+                ppl.query("carrier in ['OCGT', 'CCGT']")
+                .groupby("bus", as_index=False)
+                .first()
+            )
+            n.madd(
+                "Generator",
+                ocgt.index,
+                suffix=" OCGT",
+                bus=ocgt["bus"],
                 carrier=tech,
                 p_nom_extendable=True,
-                p_nom=0.,
-                capital_cost=costs.at['nuclear', 'capital_cost'],
-                marginal_cost=costs.at['nuclear', 'marginal_cost'],
-                efficiency=costs.at['nuclear', 'efficiency'])
+                p_nom=0.0,
+                capital_cost=costs.at["OCGT", "capital_cost"],
+                marginal_cost=costs.at["OCGT", "marginal_cost"],
+                efficiency=costs.at["OCGT", "efficiency"],
+            )
+
+        elif tech.startswith("CCGT"):
+            ccgt = (
+                ppl.query("carrier in ['OCGT', 'CCGT']")
+                .groupby("bus", as_index=False)
+                .first()
+            )
+            n.madd(
+                "Generator",
+                ccgt.index,
+                suffix=" CCGT",
+                bus=ccgt["bus"],
+                carrier=tech,
+                p_nom_extendable=True,
+                p_nom=0.0,
+                capital_cost=costs.at["CCGT", "capital_cost"],
+                marginal_cost=costs.at["CCGT", "marginal_cost"],
+                efficiency=costs.at["CCGT", "efficiency"],
+            )
+
+        elif tech.startswith("nuclear"):
+            nuclear = (
+                ppl.query("carrier == 'nuclear'").groupby("bus", as_index=False).first()
+            )
+            n.madd(
+                "Generator",
+                nuclear.index,
+                suffix=" nuclear",
+                bus=nuclear["bus"],
+                carrier=tech,
+                p_nom_extendable=True,
+                p_nom=0.0,
+                capital_cost=costs.at["nuclear", "capital_cost"],
+                marginal_cost=costs.at["nuclear", "marginal_cost"],
+                efficiency=costs.at["nuclear", "efficiency"],
+            )

         else:
-            raise NotImplementedError(f"Adding extendable generators for carrier "
-                                      "'{tech}' is not implemented, yet. "
-                                      "Only OCGT, CCGT and nuclear are allowed at the moment.")
+            raise NotImplementedError(
+                "Adding extendable generators for carrier "
+                f"'{tech}' is not implemented, yet. "
+                "Only OCGT, CCGT and nuclear are allowed at the moment."
+ ) +def attach_OPSD_renewables(n, tech_map): + tech_string = ", ".join(sum(tech_map.values(), [])) + logger.info(f"Using OPSD renewable capacities for carriers {tech_string}.") -def attach_OPSD_renewables(n, techs): + df = pm.data.OPSD_VRE().powerplant.convert_country_to_alpha2() + technology_b = ~df.Technology.isin(["Onshore", "Offshore"]) + df["Fueltype"] = df.Fueltype.where(technology_b, df.Technology).replace( + {"Solar": "PV"} + ) + df = df.query("Fueltype in @tech_map").powerplant.convert_country_to_alpha2() - available = ['DE', 'FR', 'PL', 'CH', 'DK', 'CZ', 'SE', 'GB'] - tech_map = {'Onshore': 'onwind', 'Offshore': 'offwind', 'Solar': 'solar'} - countries = set(available) & set(n.buses.country) - tech_map = {k: v for k, v in tech_map.items() if v in techs} - - if not tech_map: - return - - logger.info(f'Using OPSD renewable capacities in {", ".join(countries)} ' - f'for technologies {", ".join(tech_map.values())}.') - - df = pd.concat([pm.data.OPSD_VRE_country(c) for c in countries]) - technology_b = ~df.Technology.isin(['Onshore', 'Offshore']) - df['Fueltype'] = df.Fueltype.where(technology_b, df.Technology) - df = df.query('Fueltype in @tech_map').powerplant.convert_country_to_alpha2() - - for fueltype, carrier_like in tech_map.items(): - gens = n.generators[lambda df: df.carrier.str.contains(carrier_like)] + for fueltype, carriers in tech_map.items(): + gens = n.generators[lambda df: df.carrier.isin(carriers)] buses = n.buses.loc[gens.bus.unique()] - gens_per_bus = gens.groupby('bus').p_nom.count() + gens_per_bus = gens.groupby("bus").p_nom.count() - caps = map_country_bus(df.query('Fueltype == @fueltype'), buses) - caps = caps.groupby(['bus']).Capacity.sum() + caps = map_country_bus(df.query("Fueltype == @fueltype"), buses) + caps = caps.groupby(["bus"]).Capacity.sum() caps = caps / gens_per_bus.reindex(caps.index, fill_value=1) n.generators.p_nom.update(gens.bus.map(caps).dropna()) n.generators.p_nom_min.update(gens.bus.map(caps).dropna()) +def estimate_renewable_capacities(n, config): + year = config["electricity"]["estimate_renewable_capacities"]["year"] + tech_map = config["electricity"]["estimate_renewable_capacities"][ + "technology_mapping" + ] + countries = config["countries"] + expansion_limit = config["electricity"]["estimate_renewable_capacities"][ + "expansion_limit" + ] -def estimate_renewable_capacities(n, tech_map): + if not len(countries) or not len(tech_map): + return - if len(tech_map) == 0: return + capacities = pm.data.IRENASTAT().powerplant.convert_country_to_alpha2() + capacities = capacities.query( + "Year == @year and Technology in @tech_map and Country in @countries" + ) + capacities = capacities.groupby(["Technology", "Country"]).Capacity.sum() - capacities = (pm.data.Capacity_stats().powerplant.convert_country_to_alpha2() - [lambda df: df.Energy_Source_Level_2] - .set_index(['Fueltype', 'Country']).sort_index()) + logger.info( + f"Heuristics applied to distribute renewable capacities [GW]: " + f"\n{capacities.groupby('Technology').sum().div(1e3).round(2)}" + ) - countries = n.buses.country.unique() + for ppm_technology, techs in tech_map.items(): + tech_i = n.generators.query("carrier in @techs").index + stats = capacities.loc[ppm_technology].reindex(countries, fill_value=0.0) + country = n.generators.bus[tech_i].map(n.buses.country) + existent = n.generators.p_nom[tech_i].groupby(country).sum() + missing = stats - existent + dist = n.generators_t.p_max_pu.mean() * n.generators.p_nom_max - if len(countries) == 0: return + n.generators.loc[tech_i, 
"p_nom"] += ( + dist[tech_i] + .groupby(country) + .transform(lambda s: normed(s) * missing[s.name]) + .where(lambda s: s > 0.1, 0.0) # only capacities above 100kW + ) + n.generators.loc[tech_i, "p_nom_min"] = n.generators.loc[tech_i, "p_nom"] - logger.info('heuristics applied to distribute renewable capacities [MW] \n{}' - .format(capacities.query('Fueltype in @tech_map.keys() and Capacity >= 0.1') - .groupby('Country').agg({'Capacity': 'sum'}))) - - for ppm_fueltype, techs in tech_map.items(): - tech_capacities = capacities.loc[ppm_fueltype, 'Capacity']\ - .reindex(countries, fill_value=0.) - #tech_i = n.generators.query('carrier in @techs').index - tech_i = (n.generators.query('carrier in @techs') - [n.generators.query('carrier in @techs') - .bus.map(n.buses.country).isin(countries)].index) - n.generators.loc[tech_i, 'p_nom'] = ( - (n.generators_t.p_max_pu[tech_i].mean() * - n.generators.loc[tech_i, 'p_nom_max']) # maximal yearly generation - .groupby(n.generators.bus.map(n.buses.country)) - .transform(lambda s: normed(s) * tech_capacities.at[s.name]) - .where(lambda s: s>0.1, 0.)) # only capacities above 100kW - n.generators.loc[tech_i, 'p_nom_min'] = n.generators.loc[tech_i, 'p_nom'] + if expansion_limit: + assert np.isscalar(expansion_limit) + logger.info( + f"Reducing capacity expansion limit to {expansion_limit*100:.2f}% of installed capacity." + ) + n.generators.loc[tech_i, "p_nom_max"] = ( + expansion_limit * n.generators.loc[tech_i, "p_nom_min"] + ) def add_nice_carrier_names(n, config): carrier_i = n.carriers.index - nice_names = (pd.Series(config['plotting']['nice_names']) - .reindex(carrier_i).fillna(carrier_i.to_series().str.title())) - n.carriers['nice_name'] = nice_names - colors = pd.Series(config['plotting']['tech_colors']).reindex(carrier_i) + nice_names = ( + pd.Series(config["plotting"]["nice_names"]) + .reindex(carrier_i) + .fillna(carrier_i.to_series().str.title()) + ) + n.carriers["nice_name"] = nice_names + colors = pd.Series(config["plotting"]["tech_colors"]).reindex(carrier_i) if colors.isna().any(): missing_i = list(colors.index[colors.isna()]) - logger.warning(f'tech_colors for carriers {missing_i} not defined in config.') - n.carriers['color'] = colors + logger.warning(f"tech_colors for carriers {missing_i} not defined in config.") + n.carriers["color"] = colors + if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('add_electricity') + + snakemake = mock_snakemake("add_electricity") configure_logging(snakemake) n = pypsa.Network(snakemake.input.base_network) - Nyears = n.snapshot_weightings.objective.sum() / 8760. + Nyears = n.snapshot_weightings.objective.sum() / 8760.0 - costs = load_costs(snakemake.input.tech_costs, snakemake.config['costs'], snakemake.config['electricity'], Nyears) + costs = load_costs( + snakemake.input.tech_costs, + snakemake.config["costs"], + snakemake.config["electricity"], + Nyears, + ) ppl = load_powerplants(snakemake.input.powerplants) +<<<<<<< HEAD attach_load(n, snakemake.input.regions, snakemake.input.load, snakemake.input.nuts3_shapes, snakemake.input.ua_md_gdp, snakemake.config['countries'], snakemake.config['load']['scaling_factor']) +======= + if "renewable_carriers" in snakemake.config["electricity"]: + renewable_carriers = set(snakemake.config["renewable"]) + else: + logger.warning( + "Missing key `renewable_carriers` under config entry `electricity`. " + "In future versions, this will raise an error. 
" + "Falling back to carriers listed under `renewable`." + ) + renewable_carriers = snakemake.config["renewable"] +>>>>>>> master - update_transmission_costs(n, costs, snakemake.config['lines']['length_factor']) + extendable_carriers = snakemake.config["electricity"]["extendable_carriers"] + if not (set(renewable_carriers) & set(extendable_carriers["Generator"])): + logger.warning( + "No renewables found in config entry `extendable_carriers`. " + "In future versions, these have to be explicitly listed. " + "Falling back to all renewables." + ) - carriers = snakemake.config['electricity']['conventional_carriers'] - attach_conventional_generators(n, costs, ppl, carriers) + conventional_carriers = snakemake.config["electricity"]["conventional_carriers"] - carriers = snakemake.config['renewable'] - attach_wind_and_solar(n, costs, snakemake.input, carriers, snakemake.config['lines']['length_factor']) + attach_load( + n, + snakemake.input.regions, + snakemake.input.load, + snakemake.input.nuts3_shapes, + snakemake.config["countries"], + snakemake.config["load"]["scaling_factor"], + ) - if 'hydro' in snakemake.config['renewable']: - carriers = snakemake.config['renewable']['hydro'].pop('carriers', []) - attach_hydro(n, costs, ppl, snakemake.input.profile_hydro, snakemake.input.hydro_capacities, - carriers, **snakemake.config['renewable']['hydro']) + update_transmission_costs(n, costs, snakemake.config["lines"]["length_factor"]) - carriers = snakemake.config['electricity']['extendable_carriers']['Generator'] - attach_extendable_generators(n, costs, ppl, carriers) + conventional_inputs = { + k: v for k, v in snakemake.input.items() if k.startswith("conventional_") + } + attach_conventional_generators( + n, + costs, + ppl, + conventional_carriers, + extendable_carriers, + snakemake.config.get("conventional", {}), + conventional_inputs, + ) - tech_map = snakemake.config['electricity'].get('estimate_renewable_capacities_from_capacity_stats', {}) - estimate_renewable_capacities(n, tech_map) - techs = snakemake.config['electricity'].get('renewable_capacities_from_OPSD', []) - attach_OPSD_renewables(n, techs) + attach_wind_and_solar( + n, + costs, + snakemake.input, + renewable_carriers, + extendable_carriers, + snakemake.config["lines"]["length_factor"], + ) + + if "hydro" in renewable_carriers: + conf = snakemake.config["renewable"]["hydro"] + attach_hydro( + n, + costs, + ppl, + snakemake.input.profile_hydro, + snakemake.input.hydro_capacities, + conf.pop("carriers", []), + **conf, + ) + + if "estimate_renewable_capacities" not in snakemake.config["electricity"]: + logger.warning( + "Missing key `estimate_renewable_capacities` under config entry `electricity`. " + "In future versions, this will raise an error. " + "Falling back to whether ``estimate_renewable_capacities_from_capacity_stats`` is in the config." + ) + if ( + "estimate_renewable_capacities_from_capacity_stats" + in snakemake.config["electricity"] + ): + estimate_renewable_caps = { + "enable": True, + **snakemake.config["electricity"][ + "estimate_renewable_capacities_from_capacity_stats" + ], + } + else: + estimate_renewable_caps = {"enable": False} + else: + estimate_renewable_caps = snakemake.config["electricity"][ + "estimate_renewable_capacities" + ] + if "enable" not in estimate_renewable_caps: + logger.warning( + "Missing key `enable` under config entry `estimate_renewable_capacities`. " + "In future versions, this will raise an error. Falling back to False." 
+    if "from_opsd" not in estimate_renewable_caps:
+        logger.warning(
+            "Missing key `from_opsd` under config entry `estimate_renewable_capacities`. "
+            "In future versions, this will raise an error. "
+            "Falling back to whether `renewable_capacities_from_opsd` is non-empty."
+        )
+        from_opsd = bool(
+            snakemake.config["electricity"].get("renewable_capacities_from_opsd", False)
+        )
+        estimate_renewable_caps["from_opsd"] = from_opsd
+
+    if estimate_renewable_caps["enable"]:
+        if estimate_renewable_caps["from_opsd"]:
+            tech_map = snakemake.config["electricity"]["estimate_renewable_capacities"][
+                "technology_mapping"
+            ]
+            attach_OPSD_renewables(n, tech_map)
+        estimate_renewable_capacities(n, snakemake.config)
     update_p_nom_max(n)

     add_nice_carrier_names(n, snakemake.config)

+    n.meta = snakemake.config
     n.export_to_netcdf(snakemake.output[0])
diff --git a/scripts/add_extra_components.py b/scripts/add_extra_components.py
index 287dd66e..ba784096 100644
--- a/scripts/add_extra_components.py
+++ b/scripts/add_extra_components.py
@@ -1,4 +1,5 @@
-# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors
+# -*- coding: utf-8 -*-
+# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
 #
 # SPDX-License-Identifier: MIT
@@ -13,7 +14,7 @@ Relevant Settings
    costs:
        year:
-        USD2013_to_EUR2013:
+        version:
        discountrate:
        emission_prices:
@@ -32,7 +33,7 @@ Relevant Settings
Inputs
------
-- ``data/costs.csv``: The database of cost assumptions for all included technologies for specific years from various sources; e.g. discount rate, lifetime, investment (CAPEX), fixed operation and maintenance (FOM), variable operation and maintenance (VOM), fuel costs, efficiency, carbon-dioxide intensity.
+- ``resources/costs.csv``: The database of cost assumptions for all included technologies for specific years from various sources; e.g. discount rate, lifetime, investment (CAPEX), fixed operation and maintenance (FOM), variable operation and maintenance (VOM), fuel costs, efficiency, carbon-dioxide intensity.

Outputs
-------
@@ -50,14 +51,16 @@ The rule :mod:`add_extra_components` attaches additional extendable components t
- ``Stores`` of carrier 'H2' and/or 'battery' in combination with ``Links``. If this option is chosen, the script adds extra buses with corresponding carrier where energy ``Stores`` are attached and which are connected to the corresponding power buses via two links, one each for charging and discharging. This leads to three investment variables for the energy capacity, charging and discharging capacity of the storage unit.
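A minimal sketch of that Store-Link-Bus pattern for a single power bus, with efficiencies assumed and costs omitted; it shows where the three investment variables live:

import pypsa

n = pypsa.Network()
n.add("Bus", "elec")
n.add("Bus", "elec H2", carrier="H2")

# energy capacity: e_nom of the Store
n.add("Store", "elec H2 store", bus="elec H2", carrier="H2",
      e_nom_extendable=True, e_cyclic=True)

# charging capacity: p_nom of the electrolysis Link (efficiency assumed)
n.add("Link", "elec Electrolysis", bus0="elec", bus1="elec H2",
      p_nom_extendable=True, efficiency=0.8)

# discharging capacity: p_nom of the fuel cell Link (efficiency assumed)
n.add("Link", "elec Fuel Cell", bus0="elec H2", bus1="elec",
      p_nom_extendable=True, efficiency=0.58)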
""" import logging -from _helpers import configure_logging -import pypsa -import pandas as pd import numpy as np - -from add_electricity import (load_costs, add_nice_carrier_names, - _add_missing_carriers_from_costs) +import pandas as pd +import pypsa +from _helpers import configure_logging +from add_electricity import ( + _add_missing_carriers_from_costs, + add_nice_carrier_names, + load_costs, +) idx = pd.IndexSlice @@ -65,8 +68,8 @@ logger = logging.getLogger(__name__) def attach_storageunits(n, costs, elec_opts): - carriers = elec_opts['extendable_carriers']['StorageUnit'] - max_hours = elec_opts['max_hours'] + carriers = elec_opts["extendable_carriers"]["StorageUnit"] + max_hours = elec_opts["max_hours"] _add_missing_carriers_from_costs(n, costs, carriers) @@ -76,128 +79,168 @@ def attach_storageunits(n, costs, elec_opts): lookup_dispatch = {"H2": "fuel cell", "battery": "battery inverter"} for carrier in carriers: - n.madd("StorageUnit", buses_i, ' ' + carrier, - bus=buses_i, - carrier=carrier, - p_nom_extendable=True, - capital_cost=costs.at[carrier, 'capital_cost'], - marginal_cost=costs.at[carrier, 'marginal_cost'], - efficiency_store=costs.at[lookup_store[carrier], 'efficiency'], - efficiency_dispatch=costs.at[lookup_dispatch[carrier], 'efficiency'], - max_hours=max_hours[carrier], - cyclic_state_of_charge=True) + roundtrip_correction = 0.5 if carrier == "battery" else 1 + + n.madd( + "StorageUnit", + buses_i, + " " + carrier, + bus=buses_i, + carrier=carrier, + p_nom_extendable=True, + capital_cost=costs.at[carrier, "capital_cost"], + marginal_cost=costs.at[carrier, "marginal_cost"], + efficiency_store=costs.at[lookup_store[carrier], "efficiency"] + ** roundtrip_correction, + efficiency_dispatch=costs.at[lookup_dispatch[carrier], "efficiency"] + ** roundtrip_correction, + max_hours=max_hours[carrier], + cyclic_state_of_charge=True, + ) def attach_stores(n, costs, elec_opts): - carriers = elec_opts['extendable_carriers']['Store'] + carriers = elec_opts["extendable_carriers"]["Store"] _add_missing_carriers_from_costs(n, costs, carriers) buses_i = n.buses.index - bus_sub_dict = {k: n.buses[k].values for k in ['x', 'y', 'country']} + bus_sub_dict = {k: n.buses[k].values for k in ["x", "y", "country"]} - if 'H2' in carriers: + if "H2" in carriers: h2_buses_i = n.madd("Bus", buses_i + " H2", carrier="H2", **bus_sub_dict) - n.madd("Store", h2_buses_i, - bus=h2_buses_i, - carrier='H2', - e_nom_extendable=True, - e_cyclic=True, - capital_cost=costs.at["hydrogen storage", "capital_cost"]) + n.madd( + "Store", + h2_buses_i, + bus=h2_buses_i, + carrier="H2", + e_nom_extendable=True, + e_cyclic=True, + capital_cost=costs.at["hydrogen storage underground", "capital_cost"], + ) - n.madd("Link", h2_buses_i + " Electrolysis", - bus0=buses_i, - bus1=h2_buses_i, - carrier='H2 electrolysis', - p_nom_extendable=True, - efficiency=costs.at["electrolysis", "efficiency"], - capital_cost=costs.at["electrolysis", "capital_cost"], - marginal_cost=costs.at["electrolysis", "marginal_cost"]) + n.madd( + "Link", + h2_buses_i + " Electrolysis", + bus0=buses_i, + bus1=h2_buses_i, + carrier="H2 electrolysis", + p_nom_extendable=True, + efficiency=costs.at["electrolysis", "efficiency"], + capital_cost=costs.at["electrolysis", "capital_cost"], + marginal_cost=costs.at["electrolysis", "marginal_cost"], + ) - n.madd("Link", h2_buses_i + " Fuel Cell", - bus0=h2_buses_i, - bus1=buses_i, - carrier='H2 fuel cell', - p_nom_extendable=True, - efficiency=costs.at["fuel cell", "efficiency"], - #NB: fixed cost is per MWel - 
capital_cost=costs.at["fuel cell", "capital_cost"] * costs.at["fuel cell", "efficiency"], - marginal_cost=costs.at["fuel cell", "marginal_cost"]) + n.madd( + "Link", + h2_buses_i + " Fuel Cell", + bus0=h2_buses_i, + bus1=buses_i, + carrier="H2 fuel cell", + p_nom_extendable=True, + efficiency=costs.at["fuel cell", "efficiency"], + # NB: fixed cost is per MWel + capital_cost=costs.at["fuel cell", "capital_cost"] + * costs.at["fuel cell", "efficiency"], + marginal_cost=costs.at["fuel cell", "marginal_cost"], + ) - if 'battery' in carriers: - b_buses_i = n.madd("Bus", buses_i + " battery", carrier="battery", **bus_sub_dict) + if "battery" in carriers: + b_buses_i = n.madd( + "Bus", buses_i + " battery", carrier="battery", **bus_sub_dict + ) - n.madd("Store", b_buses_i, - bus=b_buses_i, - carrier='battery', - e_cyclic=True, - e_nom_extendable=True, - capital_cost=costs.at['battery storage', 'capital_cost'], - marginal_cost=costs.at["battery", "marginal_cost"]) + n.madd( + "Store", + b_buses_i, + bus=b_buses_i, + carrier="battery", + e_cyclic=True, + e_nom_extendable=True, + capital_cost=costs.at["battery storage", "capital_cost"], + marginal_cost=costs.at["battery", "marginal_cost"], + ) - n.madd("Link", b_buses_i + " charger", - bus0=buses_i, - bus1=b_buses_i, - carrier='battery charger', - efficiency=costs.at['battery inverter', 'efficiency'], - capital_cost=costs.at['battery inverter', 'capital_cost'], - p_nom_extendable=True, - marginal_cost=costs.at["battery inverter", "marginal_cost"]) + n.madd( + "Link", + b_buses_i + " charger", + bus0=buses_i, + bus1=b_buses_i, + carrier="battery charger", + # the efficiencies are "round trip efficiencies" + efficiency=costs.at["battery inverter", "efficiency"] ** 0.5, + capital_cost=costs.at["battery inverter", "capital_cost"], + p_nom_extendable=True, + marginal_cost=costs.at["battery inverter", "marginal_cost"], + ) - n.madd("Link", b_buses_i + " discharger", - bus0=b_buses_i, - bus1=buses_i, - carrier='battery discharger', - efficiency=costs.at['battery inverter','efficiency'], - p_nom_extendable=True, - marginal_cost=costs.at["battery inverter", "marginal_cost"]) + n.madd( + "Link", + b_buses_i + " discharger", + bus0=b_buses_i, + bus1=buses_i, + carrier="battery discharger", + efficiency=costs.at["battery inverter", "efficiency"] ** 0.5, + p_nom_extendable=True, + marginal_cost=costs.at["battery inverter", "marginal_cost"], + ) def attach_hydrogen_pipelines(n, costs, elec_opts): - ext_carriers = elec_opts['extendable_carriers'] - as_stores = ext_carriers.get('Store', []) + ext_carriers = elec_opts["extendable_carriers"] + as_stores = ext_carriers.get("Store", []) - if 'H2 pipeline' not in ext_carriers.get('Link',[]): return + if "H2 pipeline" not in ext_carriers.get("Link", []): + return - assert 'H2' in as_stores, ("Attaching hydrogen pipelines requires hydrogen " - "storage to be modelled as Store-Link-Bus combination. See " - "`config.yaml` at `electricity: extendable_carriers: Store:`.") + assert "H2" in as_stores, ( + "Attaching hydrogen pipelines requires hydrogen " + "storage to be modelled as Store-Link-Bus combination. See " + "`config.yaml` at `electricity: extendable_carriers: Store:`." 
+ ) # determine bus pairs - attrs = ["bus0","bus1","length"] - candidates = pd.concat([n.lines[attrs], n.links.query('carrier=="DC"')[attrs]])\ - .reset_index(drop=True) + attrs = ["bus0", "bus1", "length"] + candidates = pd.concat( + [n.lines[attrs], n.links.query('carrier=="DC"')[attrs]] + ).reset_index(drop=True) # remove bus pair duplicates regardless of order of bus0 and bus1 - h2_links = candidates[~pd.DataFrame(np.sort(candidates[['bus0', 'bus1']])).duplicated()] + h2_links = candidates[ + ~pd.DataFrame(np.sort(candidates[["bus0", "bus1"]])).duplicated() + ] h2_links.index = h2_links.apply(lambda c: f"H2 pipeline {c.bus0}-{c.bus1}", axis=1) # add pipelines - n.madd("Link", - h2_links.index, - bus0=h2_links.bus0.values + " H2", - bus1=h2_links.bus1.values + " H2", - p_min_pu=-1, - p_nom_extendable=True, - length=h2_links.length.values, - capital_cost=costs.at['H2 pipeline','capital_cost']*h2_links.length, - efficiency=costs.at['H2 pipeline','efficiency'], - carrier="H2 pipeline") + n.madd( + "Link", + h2_links.index, + bus0=h2_links.bus0.values + " H2", + bus1=h2_links.bus1.values + " H2", + p_min_pu=-1, + p_nom_extendable=True, + length=h2_links.length.values, + capital_cost=costs.at["H2 pipeline", "capital_cost"] * h2_links.length, + efficiency=costs.at["H2 pipeline", "efficiency"], + carrier="H2 pipeline", + ) if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('add_extra_components', network='elec', - simpl='', clusters=5) + + snakemake = mock_snakemake("add_extra_components", simpl="", clusters=5) configure_logging(snakemake) n = pypsa.Network(snakemake.input.network) - elec_config = snakemake.config['electricity'] - - Nyears = n.snapshot_weightings.objective.sum() / 8760. - costs = load_costs(snakemake.input.tech_costs, snakemake.config['costs'], elec_config, Nyears) + elec_config = snakemake.config["electricity"] + + Nyears = n.snapshot_weightings.objective.sum() / 8760.0 + costs = load_costs( + snakemake.input.tech_costs, snakemake.config["costs"], elec_config, Nyears + ) attach_storageunits(n, costs, elec_config) attach_stores(n, costs, elec_config) @@ -205,4 +248,5 @@ if __name__ == "__main__": add_nice_carrier_names(n, snakemake.config) + n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards))) n.export_to_netcdf(snakemake.output[0]) diff --git a/scripts/base_network.py b/scripts/base_network.py index 50ec8e53..f851a521 100644 --- a/scripts/base_network.py +++ b/scripts/base_network.py @@ -1,10 +1,14 @@ -# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT # coding: utf-8 """ -Creates the network topology from a `ENTSO-E map extract `_ (March 2022) as a PyPSA network. +Creates the network topology from a `ENTSO-E map extract. + +`_ (March 2022) as a PyPSA +network. 
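Note on the storage changes in scripts/add_extra_components.py above: a single round-trip (inverter) efficiency is now split evenly between charging and discharging by taking its square root for each direction, so that the product of the two per-direction efficiencies reproduces the round-trip value instead of squaring it. A minimal sketch of the arithmetic, with an invented round-trip efficiency of 0.9 (not a PyPSA-Eur cost input):

    import math

    eta_roundtrip = 0.9  # invented round-trip efficiency for illustration
    eta_store = math.sqrt(eta_roundtrip)     # applied while charging (~0.949)
    eta_dispatch = math.sqrt(eta_roundtrip)  # applied while discharging (~0.949)
    assert abs(eta_store * eta_dispatch - eta_roundtrip) < 1e-12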
Relevant Settings ----------------- @@ -59,25 +63,24 @@ Outputs Description ----------- - """ import logging -from _helpers import configure_logging - -import pypsa -import yaml -import pandas as pd -import geopandas as gpd -import numpy as np -import networkx as nx - -from scipy import spatial -from scipy.sparse import csgraph from itertools import product -from shapely.geometry import Point, LineString -import shapely, shapely.prepared, shapely.wkt +import geopandas as gpd +import networkx as nx +import numpy as np +import pandas as pd +import pypsa +import shapely +import shapely.prepared +import shapely.wkt +import yaml +from _helpers import configure_logging +from scipy import spatial +from scipy.sparse import csgraph +from shapely.geometry import LineString, Point logger = logging.getLogger(__name__) @@ -97,48 +100,73 @@ def _get_country(df): def _find_closest_links(links, new_links, distance_upper_bound=1.5): - treecoords = np.asarray([np.asarray(shapely.wkt.loads(s).coords)[[0, -1]].flatten() - for s in links.geometry]) - querycoords = np.vstack([new_links[['x1', 'y1', 'x2', 'y2']], - new_links[['x2', 'y2', 'x1', 'y1']]]) + treecoords = np.asarray( + [ + np.asarray(shapely.wkt.loads(s).coords)[[0, -1]].flatten() + for s in links.geometry + ] + ) + querycoords = np.vstack( + [new_links[["x1", "y1", "x2", "y2"]], new_links[["x2", "y2", "x1", "y1"]]] + ) tree = spatial.KDTree(treecoords) dist, ind = tree.query(querycoords, distance_upper_bound=distance_upper_bound) found_b = ind < len(links) - found_i = np.arange(len(new_links)*2)[found_b] % len(new_links) - return pd.DataFrame(dict(D=dist[found_b], - i=links.index[ind[found_b] % len(links)]), - index=new_links.index[found_i]).sort_values(by='D')\ - [lambda ds: ~ds.index.duplicated(keep='first')]\ - .sort_index()['i'] + found_i = np.arange(len(new_links) * 2)[found_b] % len(new_links) + return ( + pd.DataFrame( + dict(D=dist[found_b], i=links.index[ind[found_b] % len(links)]), + index=new_links.index[found_i], + ) + .sort_values(by="D")[lambda ds: ~ds.index.duplicated(keep="first")] + .sort_index()["i"] + ) def _load_buses_from_eg(eg_buses, europe_shape, config_elec): - buses = (pd.read_csv(eg_buses, quotechar="'", - true_values=['t'], false_values=['f'], - dtype=dict(bus_id="str")) - .set_index("bus_id") - .drop(['station_id'], axis=1) - .rename(columns=dict(voltage='v_nom'))) + buses = ( + pd.read_csv( + eg_buses, + quotechar="'", + true_values=["t"], + false_values=["f"], + dtype=dict(bus_id="str"), + ) + .set_index("bus_id") + .drop(["station_id"], axis=1) + .rename(columns=dict(voltage="v_nom")) + ) - buses['carrier'] = buses.pop('dc').map({True: 'DC', False: 'AC'}) - buses['under_construction'] = buses['under_construction'].fillna(False).astype(bool) + buses["carrier"] = buses.pop("dc").map({True: "DC", False: "AC"}) + buses["under_construction"] = buses["under_construction"].fillna(False).astype(bool) # remove all buses outside of all countries including exclusive economic zones (offshore) - europe_shape = gpd.read_file(europe_shape).loc[0, 'geometry'] + europe_shape = gpd.read_file(europe_shape).loc[0, "geometry"] europe_shape_prepped = shapely.prepared.prep(europe_shape) - buses_in_europe_b = buses[['x', 'y']].apply(lambda p: europe_shape_prepped.contains(Point(p)), axis=1) + buses_in_europe_b = buses[["x", "y"]].apply( + lambda p: europe_shape_prepped.contains(Point(p)), axis=1 + ) - buses_with_v_nom_to_keep_b = buses.v_nom.isin(config_elec['voltages']) | buses.v_nom.isnull() - logger.info("Removing buses with voltages 
{}".format(pd.Index(buses.v_nom.unique()).dropna().difference(config_elec['voltages']))) + buses_with_v_nom_to_keep_b = ( + buses.v_nom.isin(config_elec["voltages"]) | buses.v_nom.isnull() + ) + logger.info( + "Removing buses with voltages {}".format( + pd.Index(buses.v_nom.unique()).dropna().difference(config_elec["voltages"]) + ) + ) return pd.DataFrame(buses.loc[buses_in_europe_b & buses_with_v_nom_to_keep_b]) def _load_transformers_from_eg(buses, eg_transformers): - transformers = (pd.read_csv(eg_transformers, quotechar="'", - true_values=['t'], false_values=['f'], - dtype=dict(transformer_id='str', bus0='str', bus1='str')) - .set_index('transformer_id')) + transformers = pd.read_csv( + eg_transformers, + quotechar="'", + true_values=["t"], + false_values=["f"], + dtype=dict(transformer_id="str", bus0="str", bus1="str"), + ).set_index("transformer_id") transformers = _remove_dangling_branches(transformers, buses) @@ -146,33 +174,40 @@ def _load_transformers_from_eg(buses, eg_transformers): def _load_converters_from_eg(buses, eg_converters): - converters = (pd.read_csv(eg_converters, quotechar="'", - true_values=['t'], false_values=['f'], - dtype=dict(converter_id='str', bus0='str', bus1='str')) - .set_index('converter_id')) + converters = pd.read_csv( + eg_converters, + quotechar="'", + true_values=["t"], + false_values=["f"], + dtype=dict(converter_id="str", bus0="str", bus1="str"), + ).set_index("converter_id") converters = _remove_dangling_branches(converters, buses) - converters['carrier'] = 'B2B' + converters["carrier"] = "B2B" return converters def _load_links_from_eg(buses, eg_links): - links = (pd.read_csv(eg_links, quotechar="'", true_values=['t'], false_values=['f'], - dtype=dict(link_id='str', bus0='str', bus1='str', under_construction="bool")) - .set_index('link_id')) + links = pd.read_csv( + eg_links, + quotechar="'", + true_values=["t"], + false_values=["f"], + dtype=dict(link_id="str", bus0="str", bus1="str", under_construction="bool"), + ).set_index("link_id") - links['length'] /= 1e3 + links["length"] /= 1e3 # Skagerrak Link is connected to 132kV bus which is removed in _load_buses_from_eg. 
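The _find_closest_links helper above matches candidate links against existing ones by flattening each link into an (x1, y1, x2, y2) endpoint vector, building a KD-tree over the existing links, and querying every new link in both endpoint orders so that bus order does not matter. A self-contained sketch of that idea with invented coordinates:

    import numpy as np
    from scipy import spatial

    existing = np.array([[4.0, 52.0, 5.0, 53.0]])  # endpoints of one known link
    new = np.array([[5.01, 53.02, 3.99, 51.98]])   # same corridor, endpoints swapped

    tree = spatial.KDTree(existing)
    queries = np.vstack([new, new[:, [2, 3, 0, 1]]])  # original and reversed order
    dist, ind = tree.query(queries, distance_upper_bound=1.5)
    found = ind < len(existing)  # unmatched queries return ind == len(existing)
    print(dist[found].min())     # small distance -> treated as the same link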
# Connect to neighboring 380kV bus - links.loc[links.bus1=='6396', 'bus1'] = '6398' + links.loc[links.bus1 == "6396", "bus1"] = "6398" links = _remove_dangling_branches(links, buses) # Add DC line parameters - links['carrier'] = 'DC' + links["carrier"] = "DC" return links @@ -181,15 +216,21 @@ def _add_links_from_tyndp(buses, links, links_tyndp, europe_shape): links_tyndp = pd.read_csv(links_tyndp) # remove all links from list which lie outside all of the desired countries - europe_shape = gpd.read_file(europe_shape).loc[0, 'geometry'] + europe_shape = gpd.read_file(europe_shape).loc[0, "geometry"] europe_shape_prepped = shapely.prepared.prep(europe_shape) - x1y1_in_europe_b = links_tyndp[['x1', 'y1']].apply(lambda p: europe_shape_prepped.contains(Point(p)), axis=1) - x2y2_in_europe_b = links_tyndp[['x2', 'y2']].apply(lambda p: europe_shape_prepped.contains(Point(p)), axis=1) + x1y1_in_europe_b = links_tyndp[["x1", "y1"]].apply( + lambda p: europe_shape_prepped.contains(Point(p)), axis=1 + ) + x2y2_in_europe_b = links_tyndp[["x2", "y2"]].apply( + lambda p: europe_shape_prepped.contains(Point(p)), axis=1 + ) is_within_covered_countries_b = x1y1_in_europe_b & x2y2_in_europe_b if not is_within_covered_countries_b.all(): - logger.info("TYNDP links outside of the covered area (skipping): " + - ", ".join(links_tyndp.loc[~ is_within_covered_countries_b, "Name"])) + logger.info( + "TYNDP links outside of the covered area (skipping): " + + ", ".join(links_tyndp.loc[~is_within_covered_countries_b, "Name"]) + ) links_tyndp = links_tyndp.loc[is_within_covered_countries_b] if links_tyndp.empty: @@ -197,25 +238,32 @@ def _add_links_from_tyndp(buses, links, links_tyndp, europe_shape): has_replaces_b = links_tyndp.replaces.notnull() oids = dict(Bus=_get_oid(buses), Link=_get_oid(links)) - keep_b = dict(Bus=pd.Series(True, index=buses.index), - Link=pd.Series(True, index=links.index)) - for reps in links_tyndp.loc[has_replaces_b, 'replaces']: - for comps in reps.split(':'): - oids_to_remove = comps.split('.') + keep_b = dict( + Bus=pd.Series(True, index=buses.index), Link=pd.Series(True, index=links.index) + ) + for reps in links_tyndp.loc[has_replaces_b, "replaces"]: + for comps in reps.split(":"): + oids_to_remove = comps.split(".") c = oids_to_remove.pop(0) keep_b[c] &= ~oids[c].isin(oids_to_remove) - buses = buses.loc[keep_b['Bus']] - links = links.loc[keep_b['Link']] + buses = buses.loc[keep_b["Bus"]] + links = links.loc[keep_b["Link"]] - links_tyndp["j"] = _find_closest_links(links, links_tyndp, distance_upper_bound=0.20) + links_tyndp["j"] = _find_closest_links( + links, links_tyndp, distance_upper_bound=0.20 + ) # Corresponds approximately to 20km tolerances if links_tyndp["j"].notnull().any(): - logger.info("TYNDP links already in the dataset (skipping): " + ", ".join(links_tyndp.loc[links_tyndp["j"].notnull(), "Name"])) + logger.info( + "TYNDP links already in the dataset (skipping): " + + ", ".join(links_tyndp.loc[links_tyndp["j"].notnull(), "Name"]) + ) links_tyndp = links_tyndp.loc[links_tyndp["j"].isnull()] - if links_tyndp.empty: return buses, links + if links_tyndp.empty: + return buses, links - tree = spatial.KDTree(buses[['x', 'y']]) + tree = spatial.KDTree(buses[["x", "y"]]) _, ind0 = tree.query(links_tyndp[["x1", "y1"]]) ind0_b = ind0 < len(buses) links_tyndp.loc[ind0_b, "bus0"] = buses.index[ind0[ind0_b]] @@ -224,24 +272,42 @@ def _add_links_from_tyndp(buses, links, links_tyndp, europe_shape): ind1_b = ind1 < len(buses) links_tyndp.loc[ind1_b, "bus1"] = buses.index[ind1[ind1_b]] - 
links_tyndp_located_b = links_tyndp["bus0"].notnull() & links_tyndp["bus1"].notnull() + links_tyndp_located_b = ( + links_tyndp["bus0"].notnull() & links_tyndp["bus1"].notnull() + ) if not links_tyndp_located_b.all(): - logger.warning("Did not find connected buses for TYNDP links (skipping): " + ", ".join(links_tyndp.loc[~links_tyndp_located_b, "Name"])) + logger.warning( + "Did not find connected buses for TYNDP links (skipping): " + + ", ".join(links_tyndp.loc[~links_tyndp_located_b, "Name"]) + ) links_tyndp = links_tyndp.loc[links_tyndp_located_b] logger.info("Adding the following TYNDP links: " + ", ".join(links_tyndp["Name"])) links_tyndp = links_tyndp[["bus0", "bus1"]].assign( - carrier='DC', + carrier="DC", p_nom=links_tyndp["Power (MW)"], - length=links_tyndp["Length (given) (km)"].fillna(links_tyndp["Length (distance*1.2) (km)"]), + length=links_tyndp["Length (given) (km)"].fillna( + links_tyndp["Length (distance*1.2) (km)"] + ), under_construction=True, underground=False, - geometry=(links_tyndp[["x1", "y1", "x2", "y2"]] - .apply(lambda s: str(LineString([[s.x1, s.y1], [s.x2, s.y2]])), axis=1)), - tags=('"name"=>"' + links_tyndp["Name"] + '", ' + - '"ref"=>"' + links_tyndp["Ref"] + '", ' + - '"status"=>"' + links_tyndp["status"] + '"') + geometry=( + links_tyndp[["x1", "y1", "x2", "y2"]].apply( + lambda s: str(LineString([[s.x1, s.y1], [s.x2, s.y2]])), axis=1 + ) + ), + tags=( + '"name"=>"' + + links_tyndp["Name"] + + '", ' + + '"ref"=>"' + + links_tyndp["Ref"] + + '", ' + + '"status"=>"' + + links_tyndp["status"] + + '"' + ), ) links_tyndp.index = "T" + links_tyndp.index.astype(str) @@ -252,13 +318,25 @@ def _add_links_from_tyndp(buses, links, links_tyndp, europe_shape): def _load_lines_from_eg(buses, eg_lines): - lines = (pd.read_csv(eg_lines, quotechar="'", true_values=['t'], false_values=['f'], - dtype=dict(line_id='str', bus0='str', bus1='str', - underground="bool", under_construction="bool")) - .set_index('line_id') - .rename(columns=dict(voltage='v_nom', circuits='num_parallel'))) + lines = ( + pd.read_csv( + eg_lines, + quotechar="'", + true_values=["t"], + false_values=["f"], + dtype=dict( + line_id="str", + bus0="str", + bus1="str", + underground="bool", + under_construction="bool", + ), + ) + .set_index("line_id") + .rename(columns=dict(voltage="v_nom", circuits="num_parallel")) + ) - lines['length'] /= 1e3 + lines["length"] /= 1e3 lines = _remove_dangling_branches(lines, buses) @@ -269,18 +347,20 @@ def _apply_parameter_corrections(n, parameter_corrections): with open(parameter_corrections) as f: corrections = yaml.safe_load(f) - if corrections is None: return + if corrections is None: + return for component, attrs in corrections.items(): df = n.df(component) oid = _get_oid(df) - if attrs is None: continue + if attrs is None: + continue for attr, repls in attrs.items(): for i, r in repls.items(): - if i == 'oid': + if i == "oid": r = oid.map(repls["oid"]).dropna() - elif i == 'index': + elif i == "index": r = pd.Series(repls["index"]) else: raise NotImplementedError() @@ -289,78 +369,87 @@ def _apply_parameter_corrections(n, parameter_corrections): def _set_electrical_parameters_lines(lines, config): - v_noms = config['electricity']['voltages'] - linetypes = config['lines']['types'] + v_noms = config["electricity"]["voltages"] + linetypes = config["lines"]["types"] for v_nom in v_noms: - lines.loc[lines["v_nom"] == v_nom, 'type'] = linetypes[v_nom] + lines.loc[lines["v_nom"] == v_nom, "type"] = linetypes[v_nom] - lines['s_max_pu'] = config['lines']['s_max_pu'] + 
lines["s_max_pu"] = config["lines"]["s_max_pu"] return lines def _set_lines_s_nom_from_linetypes(n): - n.lines['s_nom'] = ( - np.sqrt(3) * n.lines['type'].map(n.line_types.i_nom) * - n.lines['v_nom'] * n.lines.num_parallel + n.lines["s_nom"] = ( + np.sqrt(3) + * n.lines["type"].map(n.line_types.i_nom) + * n.lines["v_nom"] + * n.lines.num_parallel ) def _set_electrical_parameters_links(links, config, links_p_nom): - if links.empty: return links + if links.empty: + return links - p_max_pu = config['links'].get('p_max_pu', 1.) - links['p_max_pu'] = p_max_pu - links['p_min_pu'] = -p_max_pu + p_max_pu = config["links"].get("p_max_pu", 1.0) + links["p_max_pu"] = p_max_pu + links["p_min_pu"] = -p_max_pu links_p_nom = pd.read_csv(links_p_nom) # filter links that are not in operation anymore - removed_b = links_p_nom.Remarks.str.contains('Shut down|Replaced', na=False) + removed_b = links_p_nom.Remarks.str.contains("Shut down|Replaced", na=False) links_p_nom = links_p_nom[~removed_b] # find closest link for all links in links_p_nom - links_p_nom['j'] = _find_closest_links(links, links_p_nom) + links_p_nom["j"] = _find_closest_links(links, links_p_nom) - links_p_nom = links_p_nom.groupby(['j'],as_index=False).agg({'Power (MW)': 'sum'}) + links_p_nom = links_p_nom.groupby(["j"], as_index=False).agg({"Power (MW)": "sum"}) p_nom = links_p_nom.dropna(subset=["j"]).set_index("j")["Power (MW)"] # Don't update p_nom if it's already set - p_nom_unset = p_nom.drop(links.index[links.p_nom.notnull()], errors='ignore') if "p_nom" in links else p_nom + p_nom_unset = ( + p_nom.drop(links.index[links.p_nom.notnull()], errors="ignore") + if "p_nom" in links + else p_nom + ) links.loc[p_nom_unset.index, "p_nom"] = p_nom_unset return links def _set_electrical_parameters_converters(converters, config): - p_max_pu = config['links'].get('p_max_pu', 1.) - converters['p_max_pu'] = p_max_pu - converters['p_min_pu'] = -p_max_pu + p_max_pu = config["links"].get("p_max_pu", 1.0) + converters["p_max_pu"] = p_max_pu + converters["p_min_pu"] = -p_max_pu - converters['p_nom'] = 2000 + converters["p_nom"] = 2000 # Converters are combined with links - converters['under_construction'] = False - converters['underground'] = False + converters["under_construction"] = False + converters["underground"] = False return converters def _set_electrical_parameters_transformers(transformers, config): - config = config['transformers'] + config = config["transformers"] ## Add transformer parameters - transformers["x"] = config.get('x', 0.1) - transformers["s_nom"] = config.get('s_nom', 2000) - transformers['type'] = config.get('type', '') + transformers["x"] = config.get("x", 0.1) + transformers["s_nom"] = config.get("s_nom", 2000) + transformers["type"] = config.get("type", "") return transformers def _remove_dangling_branches(branches, buses): - return pd.DataFrame(branches.loc[branches.bus0.isin(buses.index) & branches.bus1.isin(buses.index)]) + return pd.DataFrame( + branches.loc[branches.bus0.isin(buses.index) & branches.bus1.isin(buses.index)] + ) def _remove_unconnected_components(network): @@ -370,44 +459,62 @@ def _remove_unconnected_components(network): component_sizes = component.value_counts() components_to_remove = component_sizes.iloc[1:] - logger.info("Removing {} unconnected network components with less than {} buses. In total {} buses." - .format(len(components_to_remove), components_to_remove.max(), components_to_remove.sum())) + logger.info( + "Removing {} unconnected network components with less than {} buses. 
In total {} buses.".format( + len(components_to_remove), + components_to_remove.max(), + components_to_remove.sum(), + ) + ) return network[component == component_sizes.index[0]] def _set_countries_and_substations(n, config, country_shapes, offshore_shapes): - buses = n.buses def buses_in_shape(shape): shape = shapely.prepared.prep(shape) return pd.Series( - np.fromiter((shape.contains(Point(x, y)) - for x, y in buses.loc[:,["x", "y"]].values), - dtype=bool, count=len(buses)), - index=buses.index + np.fromiter( + ( + shape.contains(Point(x, y)) + for x, y in buses.loc[:, ["x", "y"]].values + ), + dtype=bool, + count=len(buses), + ), + index=buses.index, ) - countries = config['countries'] - country_shapes = gpd.read_file(country_shapes).set_index('name')['geometry'] - offshore_shapes = gpd.read_file(offshore_shapes).set_index('name')['geometry'] - substation_b = buses['symbol'].str.contains('substation|converter station', case=False) + countries = config["countries"] + country_shapes = gpd.read_file(country_shapes).set_index("name")["geometry"] + # reindexing necessary for supporting empty geo-dataframes + offshore_shapes = gpd.read_file(offshore_shapes) + offshore_shapes = offshore_shapes.reindex(columns=["name", "geometry"]).set_index( + "name" + )["geometry"] + substation_b = buses["symbol"].str.contains( + "substation|converter station", case=False + ) def prefer_voltage(x, which): index = x.index if len(index) == 1: return pd.Series(index, index) - key = (x.index[0] - if x['v_nom'].isnull().all() - else getattr(x['v_nom'], 'idx' + which)()) + key = ( + x.index[0] + if x["v_nom"].isnull().all() + else getattr(x["v_nom"], "idx" + which)() + ) return pd.Series(key, index) - gb = buses.loc[substation_b].groupby(['x', 'y'], as_index=False, - group_keys=False, sort=False) - bus_map_low = gb.apply(prefer_voltage, 'min') + gb = buses.loc[substation_b].groupby( + ["x", "y"], as_index=False, group_keys=False, sort=False + ) + bus_map_low = gb.apply(prefer_voltage, "min") lv_b = (bus_map_low == bus_map_low.index).reindex(buses.index, fill_value=False) - bus_map_high = gb.apply(prefer_voltage, 'max') + bus_map_high = gb.apply(prefer_voltage, "max") hv_b = (bus_map_high == bus_map_high.index).reindex(buses.index, fill_value=False) onshore_b = pd.Series(False, buses.index) @@ -418,47 +525,66 @@ def _set_countries_and_substations(n, config, country_shapes, offshore_shapes): onshore_country_b = buses_in_shape(onshore_shape) onshore_b |= onshore_country_b - buses.loc[onshore_country_b, 'country'] = country + buses.loc[onshore_country_b, "country"] = country - if country not in offshore_shapes.index: continue + if country not in offshore_shapes.index: + continue offshore_country_b = buses_in_shape(offshore_shapes[country]) offshore_b |= offshore_country_b - buses.loc[offshore_country_b, 'country'] = country + buses.loc[offshore_country_b, "country"] = country # Only accept buses as low-voltage substations (where load is attached), if # they have at least one connection which is not under_construction has_connections_b = pd.Series(False, index=buses.index) - for b, df in product(('bus0', 'bus1'), (n.lines, n.links)): - has_connections_b |= ~ df.groupby(b).under_construction.min() + for b, df in product(("bus0", "bus1"), (n.lines, n.links)): + has_connections_b |= ~df.groupby(b).under_construction.min() - buses['substation_lv'] = lv_b & onshore_b & (~ buses['under_construction']) & has_connections_b - buses['substation_off'] = (offshore_b | (hv_b & onshore_b)) & (~ buses['under_construction']) + 
buses["substation_lv"] = ( + lv_b & onshore_b & (~buses["under_construction"]) & has_connections_b + ) + buses["substation_off"] = (offshore_b | (hv_b & onshore_b)) & ( + ~buses["under_construction"] + ) c_nan_b = buses.country.isnull() if c_nan_b.sum() > 0: c_tag = _get_country(buses.loc[c_nan_b]) c_tag.loc[~c_tag.isin(countries)] = np.nan - n.buses.loc[c_nan_b, 'country'] = c_tag + n.buses.loc[c_nan_b, "country"] = c_tag c_tag_nan_b = n.buses.country.isnull() # Nearest country in path length defines country of still homeless buses # Work-around until commit 705119 lands in pypsa release - n.transformers['length'] = 0. - graph = n.graph(weight='length') - n.transformers.drop('length', axis=1, inplace=True) + n.transformers["length"] = 0.0 + graph = n.graph(weight="length") + n.transformers.drop("length", axis=1, inplace=True) for b in n.buses.index[c_tag_nan_b]: - df = (pd.DataFrame(dict(pathlength=nx.single_source_dijkstra_path_length(graph, b, cutoff=200))) - .join(n.buses.country).dropna()) - assert not df.empty, "No buses with defined country within 200km of bus `{}`".format(b) - n.buses.at[b, 'country'] = df.loc[df.pathlength.idxmin(), 'country'] + df = ( + pd.DataFrame( + dict( + pathlength=nx.single_source_dijkstra_path_length( + graph, b, cutoff=200 + ) + ) + ) + .join(n.buses.country) + .dropna() + ) + assert ( + not df.empty + ), "No buses with defined country within 200km of bus `{}`".format(b) + n.buses.at[b, "country"] = df.loc[df.pathlength.idxmin(), "country"] - logger.warning("{} buses are not in any country or offshore shape," - " {} have been assigned from the tag of the entsoe map," - " the rest from the next bus in terms of pathlength." - .format(c_nan_b.sum(), c_nan_b.sum() - c_tag_nan_b.sum())) + logger.warning( + "{} buses are not in any country or offshore shape," + " {} have been assigned from the tag of the entsoe map," + " the rest from the next bus in terms of pathlength.".format( + c_nan_b.sum(), c_nan_b.sum() - c_tag_nan_b.sum() + ) + ) return buses @@ -467,11 +593,13 @@ def _replace_b2b_converter_at_country_border_by_link(n): # Affects only the B2B converter in Lithuania at the Polish border at the moment buscntry = n.buses.country linkcntry = n.links.bus0.map(buscntry) - converters_i = n.links.index[(n.links.carrier == 'B2B') & (linkcntry == n.links.bus1.map(buscntry))] + converters_i = n.links.index[ + (n.links.carrier == "B2B") & (linkcntry == n.links.bus1.map(buscntry)) + ] def findforeignbus(G, i): cntry = linkcntry.at[i] - for busattr in ('bus0', 'bus1'): + for busattr in ("bus0", "bus1"): b0 = n.links.at[i, busattr] for b1 in G[b0]: if buscntry[b1] != cntry: @@ -484,67 +612,93 @@ def _replace_b2b_converter_at_country_border_by_link(n): if busattr is not None: comp, line = next(iter(G[b0][b1])) if comp != "Line": - logger.warning("Unable to replace B2B `{}` expected a Line, but found a {}" - .format(i, comp)) + logger.warning( + "Unable to replace B2B `{}` expected a Line, but found a {}".format( + i, comp + ) + ) continue n.links.at[i, busattr] = b1 - n.links.at[i, 'p_nom'] = min(n.links.at[i, 'p_nom'], n.lines.at[line, 's_nom']) - n.links.at[i, 'carrier'] = 'DC' - n.links.at[i, 'underwater_fraction'] = 0. 
- n.links.at[i, 'length'] = n.lines.at[line, 'length'] + n.links.at[i, "p_nom"] = min( + n.links.at[i, "p_nom"], n.lines.at[line, "s_nom"] + ) + n.links.at[i, "carrier"] = "DC" + n.links.at[i, "underwater_fraction"] = 0.0 + n.links.at[i, "length"] = n.lines.at[line, "length"] n.remove("Line", line) n.remove("Bus", b0) - logger.info("Replacing B2B converter `{}` together with bus `{}` and line `{}` by an HVDC tie-line {}-{}" - .format(i, b0, line, linkcntry.at[i], buscntry.at[b1])) + logger.info( + "Replacing B2B converter `{}` together with bus `{}` and line `{}` by an HVDC tie-line {}-{}".format( + i, b0, line, linkcntry.at[i], buscntry.at[b1] + ) + ) def _set_links_underwater_fraction(n, offshore_shapes): - if n.links.empty: return + if n.links.empty: + return - if not hasattr(n.links, 'geometry'): - n.links['underwater_fraction'] = 0. + if not hasattr(n.links, "geometry"): + n.links["underwater_fraction"] = 0.0 else: offshore_shape = gpd.read_file(offshore_shapes).unary_union links = gpd.GeoSeries(n.links.geometry.dropna().map(shapely.wkt.loads)) - n.links['underwater_fraction'] = links.intersection(offshore_shape).length / links.length + n.links["underwater_fraction"] = ( + links.intersection(offshore_shape).length / links.length + ) def _adjust_capacities_of_under_construction_branches(n, config): - lines_mode = config['lines'].get('under_construction', 'undef') - if lines_mode == 'zero': - n.lines.loc[n.lines.under_construction, 'num_parallel'] = 0. - n.lines.loc[n.lines.under_construction, 's_nom'] = 0. - elif lines_mode == 'remove': + lines_mode = config["lines"].get("under_construction", "undef") + if lines_mode == "zero": + n.lines.loc[n.lines.under_construction, "num_parallel"] = 0.0 + n.lines.loc[n.lines.under_construction, "s_nom"] = 0.0 + elif lines_mode == "remove": n.mremove("Line", n.lines.index[n.lines.under_construction]) - elif lines_mode != 'keep': - logger.warning("Unrecognized configuration for `lines: under_construction` = `{}`. Keeping under construction lines.") + elif lines_mode != "keep": + logger.warning( + "Unrecognized configuration for `lines: under_construction` = `{}`. Keeping under construction lines." + ) - links_mode = config['links'].get('under_construction', 'undef') - if links_mode == 'zero': - n.links.loc[n.links.under_construction, "p_nom"] = 0. - elif links_mode == 'remove': + links_mode = config["links"].get("under_construction", "undef") + if links_mode == "zero": + n.links.loc[n.links.under_construction, "p_nom"] = 0.0 + elif links_mode == "remove": n.mremove("Link", n.links.index[n.links.under_construction]) - elif links_mode != 'keep': - logger.warning("Unrecognized configuration for `links: under_construction` = `{}`. Keeping under construction links.") + elif links_mode != "keep": + logger.warning( + "Unrecognized configuration for `links: under_construction` = `{}`. Keeping under construction links." 
+ ) - if lines_mode == 'remove' or links_mode == 'remove': + if lines_mode == "remove" or links_mode == "remove": # We might need to remove further unconnected components n = _remove_unconnected_components(n) return n -def base_network(eg_buses, eg_converters, eg_transformers, eg_lines, eg_links, - links_p_nom, links_tyndp, europe_shape, country_shapes, offshore_shapes, - parameter_corrections, config): +def base_network( + eg_buses, + eg_converters, + eg_transformers, + eg_lines, + eg_links, + links_p_nom, + links_tyndp, + europe_shape, + country_shapes, + offshore_shapes, + parameter_corrections, + config, +): - buses = _load_buses_from_eg(eg_buses, europe_shape, config['electricity']) + buses = _load_buses_from_eg(eg_buses, europe_shape, config["electricity"]) links = _load_links_from_eg(buses, eg_links) - if config['links'].get('include_tyndp'): + if config["links"].get("include_tyndp"): buses, links = _add_links_from_tyndp(buses, links, links_tyndp, europe_shape) converters = _load_converters_from_eg(buses, eg_converters) @@ -558,9 +712,9 @@ def base_network(eg_buses, eg_converters, eg_transformers, eg_lines, eg_links, converters = _set_electrical_parameters_converters(converters, config) n = pypsa.Network() - n.name = 'PyPSA-Eur' + n.name = "PyPSA-Eur" - n.set_snapshots(pd.date_range(freq='h', **config['snapshots'])) + n.set_snapshots(pd.date_range(freq="h", **config["snapshots"])) n.import_components_from_dataframe(buses, "Bus") n.import_components_from_dataframe(lines, "Line") @@ -584,14 +738,28 @@ def base_network(eg_buses, eg_converters, eg_transformers, eg_lines, eg_links, return n + if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('base_network') + + snakemake = mock_snakemake("base_network") configure_logging(snakemake) - n = base_network(snakemake.input.eg_buses, snakemake.input.eg_converters, snakemake.input.eg_transformers, snakemake.input.eg_lines, snakemake.input.eg_links, - snakemake.input.links_p_nom, snakemake.input.links_tyndp, snakemake.input.europe_shape, snakemake.input.country_shapes, snakemake.input.offshore_shapes, - snakemake.input.parameter_corrections, snakemake.config) + n = base_network( + snakemake.input.eg_buses, + snakemake.input.eg_converters, + snakemake.input.eg_transformers, + snakemake.input.eg_lines, + snakemake.input.eg_links, + snakemake.input.links_p_nom, + snakemake.input.links_tyndp, + snakemake.input.europe_shape, + snakemake.input.country_shapes, + snakemake.input.offshore_shapes, + snakemake.input.parameter_corrections, + snakemake.config, + ) + n.meta = snakemake.config n.export_to_netcdf(snakemake.output[0]) diff --git a/scripts/build_bus_regions.py b/scripts/build_bus_regions.py index d91d0575..6e6fdd32 100644 --- a/scripts/build_bus_regions.py +++ b/scripts/build_bus_regions.py @@ -1,9 +1,11 @@ -# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ -Creates Voronoi shapes for each bus representing both onshore and offshore regions. +Creates Voronoi shapes for each bus representing both onshore and offshore +regions. 
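For reference, _set_lines_s_nom_from_linetypes above derives the thermal rating of each line from its assigned standard line type as s_nom = sqrt(3) * i_nom * v_nom * num_parallel. A worked example with illustrative values (not taken from the actual PyPSA line-type table):

    import numpy as np

    i_nom = 2.0        # rated current per circuit in kA (assumed)
    v_nom = 380.0      # nominal voltage in kV
    num_parallel = 2   # two parallel circuits

    s_nom = np.sqrt(3) * i_nom * v_nom * num_parallel  # kA * kV yields MVA
    print(round(s_nom))  # ~2633 MVA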
Relevant Settings ----------------- @@ -38,41 +40,94 @@ Outputs Description ----------- - """ import logging -from _helpers import configure_logging - -import pypsa import os -import pandas as pd -import geopandas as gpd -from vresutils.graph import voronoi_partition_pts +import geopandas as gpd +import numpy as np +import pandas as pd +import pypsa +from _helpers import REGION_COLS, configure_logging +from scipy.spatial import Voronoi +from shapely.geometry import Polygon logger = logging.getLogger(__name__) -def save_to_geojson(s, fn): - if os.path.exists(fn): - os.unlink(fn) - schema = {**gpd.io.file.infer_schema(s), 'geometry': 'Unknown'} - s.to_file(fn, driver='GeoJSON', schema=schema) +def voronoi_partition_pts(points, outline): + """ + Compute the polygons of a voronoi partition of `points` within the + polygon `outline`. Taken from + https://github.com/FRESNA/vresutils/blob/master/vresutils/graph.py + Attributes + ---------- + points : Nx2 - ndarray[dtype=float] + outline : Polygon + Returns + ------- + polygons : N - ndarray[dtype=Polygon|MultiPolygon] + """ + + points = np.asarray(points) + + if len(points) == 1: + polygons = [outline] + else: + xmin, ymin = np.amin(points, axis=0) + xmax, ymax = np.amax(points, axis=0) + xspan = xmax - xmin + yspan = ymax - ymin + + # to avoid any network positions outside all Voronoi cells, append + # the corners of a rectangle framing these points + vor = Voronoi( + np.vstack( + ( + points, + [ + [xmin - 3.0 * xspan, ymin - 3.0 * yspan], + [xmin - 3.0 * xspan, ymax + 3.0 * yspan], + [xmax + 3.0 * xspan, ymin - 3.0 * yspan], + [xmax + 3.0 * xspan, ymax + 3.0 * yspan], + ], + ) + ) + ) + + polygons = [] + for i in range(len(points)): + poly = Polygon(vor.vertices[vor.regions[vor.point_region[i]]]) + + if not poly.is_valid: + poly = poly.buffer(0) + + poly = poly.intersection(outline) + + polygons.append(poly) + + return polygons if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('build_bus_regions') + + snakemake = mock_snakemake("build_bus_regions") configure_logging(snakemake) - countries = snakemake.config['countries'] + countries = snakemake.config["countries"] n = pypsa.Network(snakemake.input.base_network) - country_shapes = gpd.read_file(snakemake.input.country_shapes).set_index('name')['geometry'] - offshore_shapes = gpd.read_file(snakemake.input.offshore_shapes).set_index('name')['geometry'] + country_shapes = gpd.read_file(snakemake.input.country_shapes).set_index("name")[ + "geometry" + ] + offshore_shapes = gpd.read_file(snakemake.input.offshore_shapes) + offshore_shapes = offshore_shapes.reindex(columns=REGION_COLS).set_index("name")[ + "geometry" + ] onshore_regions = [] offshore_regions = [] @@ -82,27 +137,42 @@ if __name__ == "__main__": onshore_shape = country_shapes[country] onshore_locs = n.buses.loc[c_b & n.buses.substation_lv, ["x", "y"]] - onshore_regions.append(gpd.GeoDataFrame({ - 'name': onshore_locs.index, - 'x': onshore_locs['x'], - 'y': onshore_locs['y'], - 'geometry': voronoi_partition_pts(onshore_locs.values, onshore_shape), - 'country': country - })) + onshore_regions.append( + gpd.GeoDataFrame( + { + "name": onshore_locs.index, + "x": onshore_locs["x"], + "y": onshore_locs["y"], + "geometry": voronoi_partition_pts( + onshore_locs.values, onshore_shape + ), + "country": country, + } + ) + ) - if country not in offshore_shapes.index: continue + if country not in offshore_shapes.index: + continue 
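The vendored voronoi_partition_pts above (replacing the former vresutils import) hinges on one trick: four corner points far outside the bounding box of the real points are appended before calling scipy's Voronoi, so that every real point receives a bounded cell, which is then clipped to the outline. A condensed sketch with made-up points and a unit-square outline:

    import numpy as np
    from scipy.spatial import Voronoi
    from shapely.geometry import Polygon

    points = np.array([[0.2, 0.2], [0.8, 0.7]])
    outline = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])

    xmin, ymin = points.min(axis=0)
    xmax, ymax = points.max(axis=0)
    xspan, yspan = xmax - xmin, ymax - ymin
    frame = [
        [xmin - 3 * xspan, ymin - 3 * yspan],
        [xmin - 3 * xspan, ymax + 3 * yspan],
        [xmax + 3 * xspan, ymin - 3 * yspan],
        [xmax + 3 * xspan, ymax + 3 * yspan],
    ]
    vor = Voronoi(np.vstack((points, frame)))

    cells = []
    for i in range(len(points)):
        poly = Polygon(vor.vertices[vor.regions[vor.point_region[i]]])
        if not poly.is_valid:
            poly = poly.buffer(0)  # same repair step as in the function above
        cells.append(poly.intersection(outline))
    print([round(c.area, 3) for c in cells])  # the two cells tile the outline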
offshore_shape = offshore_shapes[country] offshore_locs = n.buses.loc[c_b & n.buses.substation_off, ["x", "y"]] - offshore_regions_c = gpd.GeoDataFrame({ - 'name': offshore_locs.index, - 'x': offshore_locs['x'], - 'y': offshore_locs['y'], - 'geometry': voronoi_partition_pts(offshore_locs.values, offshore_shape), - 'country': country - }) + offshore_regions_c = gpd.GeoDataFrame( + { + "name": offshore_locs.index, + "x": offshore_locs["x"], + "y": offshore_locs["y"], + "geometry": voronoi_partition_pts(offshore_locs.values, offshore_shape), + "country": country, + } + ) offshore_regions_c = offshore_regions_c.loc[offshore_regions_c.area > 1e-2] offshore_regions.append(offshore_regions_c) - save_to_geojson(pd.concat(onshore_regions, ignore_index=True), snakemake.output.regions_onshore) - - save_to_geojson(pd.concat(offshore_regions, ignore_index=True), snakemake.output.regions_offshore) + pd.concat(onshore_regions, ignore_index=True).to_file( + snakemake.output.regions_onshore + ) + if offshore_regions: + pd.concat(offshore_regions, ignore_index=True).to_file( + snakemake.output.regions_offshore + ) + else: + offshore_shapes.to_frame().to_file(snakemake.output.regions_offshore) diff --git a/scripts/build_cutout.py b/scripts/build_cutout.py index 78eafac6..0d852b5b 100644 --- a/scripts/build_cutout.py +++ b/scripts/build_cutout.py @@ -1,4 +1,5 @@ -# SPDX-FileCopyrightText: : 2017-2021 The PyPSA-Eur Authors +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -88,43 +89,42 @@ A **SARAH-2 cutout** can be used to amend the fields ``temperature``, ``influx_t Description ----------- - """ import logging + import atlite import geopandas as gpd import pandas as pd from _helpers import configure_logging - logger = logging.getLogger(__name__) if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('build_cutout', cutout='europe-2013-era5') + + snakemake = mock_snakemake("build_cutout", cutout="europe-2013-era5") configure_logging(snakemake) - cutout_params = snakemake.config['atlite']['cutouts'][snakemake.wildcards.cutout] + cutout_params = snakemake.config["atlite"]["cutouts"][snakemake.wildcards.cutout] - snapshots = pd.date_range(freq='h', **snakemake.config['snapshots']) + snapshots = pd.date_range(freq="h", **snakemake.config["snapshots"]) time = [snapshots[0], snapshots[-1]] - cutout_params['time'] = slice(*cutout_params.get('time', time)) + cutout_params["time"] = slice(*cutout_params.get("time", time)) - if {'x', 'y', 'bounds'}.isdisjoint(cutout_params): + if {"x", "y", "bounds"}.isdisjoint(cutout_params): # Determine the bounds from bus regions with a buffer of two grid cells onshore = gpd.read_file(snakemake.input.regions_onshore) offshore = gpd.read_file(snakemake.input.regions_offshore) - regions = onshore.append(offshore) - d = max(cutout_params.get('dx', 0.25), cutout_params.get('dy', 0.25))*2 - cutout_params['bounds'] = regions.total_bounds + [-d, -d, d, d] - elif {'x', 'y'}.issubset(cutout_params): - cutout_params['x'] = slice(*cutout_params['x']) - cutout_params['y'] = slice(*cutout_params['y']) - + regions = pd.concat([onshore, offshore]) + d = max(cutout_params.get("dx", 0.25), cutout_params.get("dy", 0.25)) * 2 + cutout_params["bounds"] = regions.total_bounds + [-d, -d, d, d] + elif {"x", "y"}.issubset(cutout_params): + cutout_params["x"] = slice(*cutout_params["x"]) + cutout_params["y"] = 
slice(*cutout_params["y"]) logging.info(f"Preparing cutout with parameters {cutout_params}.") - features = cutout_params.pop('features', None) + features = cutout_params.pop("features", None) cutout = atlite.Cutout(snakemake.output[0], **cutout_params) cutout.prepare(features=features) diff --git a/scripts/build_hydro_profile.py b/scripts/build_hydro_profile.py index 74efc2ef..0e3f877f 100644 --- a/scripts/build_hydro_profile.py +++ b/scripts/build_hydro_profile.py @@ -1,6 +1,7 @@ #!/usr/bin/env python +# -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -60,36 +61,98 @@ Description """ import logging -from _helpers import configure_logging import atlite +import country_converter as coco import geopandas as gpd -from vresutils import hydro as vhydro +import pandas as pd +from _helpers import configure_logging + +cc = coco.CountryConverter() + + +def get_eia_annual_hydro_generation(fn, countries): + # in billion kWh/a = TWh/a + df = pd.read_csv(fn, skiprows=2, index_col=1, na_values=[" ", "--"]).iloc[1:, 1:] + df.index = df.index.str.strip() + + former_countries = { + "Former Czechoslovakia": dict( + countries=["Czech Republic", "Slovakia"], start=1980, end=1992 + ), + "Former Serbia and Montenegro": dict( + countries=["Serbia", "Montenegro"], start=1992, end=2005 + ), + "Former Yugoslavia": dict( + countries=[ + "Slovenia", + "Croatia", + "Bosnia and Herzegovina", + "Serbia", + "Montenegro", + "North Macedonia", + ], + start=1980, + end=1991, + ), + } + + for k, v in former_countries.items(): + period = [str(i) for i in range(v["start"], v["end"] + 1)] + ratio = df.loc[v["countries"]].T.dropna().sum() + ratio /= ratio.sum() + for country in v["countries"]: + df.loc[country, period] = df.loc[k, period] * ratio[country] + + baltic_states = ["Latvia", "Estonia", "Lithuania"] + df.loc[baltic_states] = ( + df.loc[baltic_states].T.fillna(df.loc[baltic_states].mean(axis=1)).T + ) + + df.loc["Germany"] = df.filter(like="Germany", axis=0).sum() + df.loc["Serbia"] += df.loc["Kosovo"].fillna(0.0) + df = df.loc[~df.index.str.contains("Former")] + df.drop(["Europe", "Germany, West", "Germany, East", "Kosovo"], inplace=True) + + df.index = cc.convert(df.index, to="iso2") + df.index.name = "countries" + + df = df.T[countries] * 1e6 # in MWh/a + + return df + logger = logging.getLogger(__name__) if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('build_hydro_profile') + + snakemake = mock_snakemake("build_hydro_profile") configure_logging(snakemake) - config_hydro = snakemake.config['renewable']['hydro'] + config_hydro = snakemake.config["renewable"]["hydro"] cutout = atlite.Cutout(snakemake.input.cutout) - countries = snakemake.config['countries'] - country_shapes = (gpd.read_file(snakemake.input.country_shapes) - .set_index('name')['geometry'].reindex(countries)) - country_shapes.index.name = 'countries' + countries = snakemake.config["countries"] + country_shapes = ( + gpd.read_file(snakemake.input.country_shapes) + .set_index("name")["geometry"] + .reindex(countries) + ) + country_shapes.index.name = "countries" - eia_stats = vhydro.get_eia_annual_hydro_generation( - snakemake.input.eia_hydro_generation).reindex(columns=countries) - inflow = cutout.runoff(shapes=country_shapes, - smooth=True, - lower_threshold_quantile=True, - normalize_using_yearly=eia_stats) + fn 
= snakemake.input.eia_hydro_generation + eia_stats = get_eia_annual_hydro_generation(fn, countries) - if 'clip_min_inflow' in config_hydro: - inflow = inflow.where(inflow > config_hydro['clip_min_inflow'], 0) + inflow = cutout.runoff( + shapes=country_shapes, + smooth=True, + lower_threshold_quantile=True, + normalize_using_yearly=eia_stats, + ) + + if "clip_min_inflow" in config_hydro: + inflow = inflow.where(inflow > config_hydro["clip_min_inflow"], 0) inflow.to_netcdf(snakemake.output[0]) diff --git a/scripts/build_load_data.py b/scripts/build_load_data.py index 052660d2..3993c254 100755 --- a/scripts/build_load_data.py +++ b/scripts/build_load_data.py @@ -1,10 +1,16 @@ +# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: : 2020 @JanFrederickUnnewehr, The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ +This rule downloads the load data from `Open Power System Data Time series. -This rule downloads the load data from `Open Power System Data Time series `_. For all countries in the network, the per country load timeseries with suffix ``_load_actual_entsoe_transparency`` are extracted from the dataset. After filling small gaps linearly and large gaps by copying time-slice of a given period, the load data is exported to a ``.csv`` file. +`_. For all countries in +the network, the per country load timeseries with suffix +``_load_actual_entsoe_transparency`` are extracted from the dataset. After +filling small gaps linearly and large gaps by copying time-slice of a given +period, the load data is exported to a ``.csv`` file. Relevant Settings ----------------- @@ -26,22 +32,21 @@ Relevant Settings Inputs ------ +- ``data/load_raw.csv``: Outputs ------- -- ``resource/time_series_60min_singleindex_filtered.csv``: - - +- ``resources/load.csv``: """ import logging -logger = logging.getLogger(__name__) -from _helpers import configure_logging -import pandas as pd -import numpy as np +logger = logging.getLogger(__name__) import dateutil +import numpy as np +import pandas as pd +from _helpers import configure_logging from pandas import Timedelta as Delta @@ -70,24 +75,29 @@ def load_timeseries(fn, years, countries, powerstatistics=True): """ logger.info(f"Retrieving load data from '{fn}'.") - pattern = 'power_statistics' if powerstatistics else 'transparency' - pattern = f'_load_actual_entsoe_{pattern}' - rename = lambda s: s[:-len(pattern)] + pattern = "power_statistics" if powerstatistics else "transparency" + pattern = f"_load_actual_entsoe_{pattern}" + rename = lambda s: s[: -len(pattern)] date_parser = lambda x: dateutil.parser.parse(x, ignoretz=True) - - return (pd.read_csv(fn, index_col=0, parse_dates=[0], date_parser=date_parser) - .filter(like=pattern) - .rename(columns=rename) - .dropna(how="all", axis=0) - .rename(columns={'GB_UKM' : 'GB'}) - .filter(items=countries) - .loc[years]) + return ( + pd.read_csv(fn, index_col=0, parse_dates=[0], date_parser=date_parser) + .filter(like=pattern) + .rename(columns=rename) + .dropna(how="all", axis=0) + .rename(columns={"GB_UKM": "GB"}) + .filter(items=countries) + .loc[years] + ) def consecutive_nans(ds): - return (ds.isnull().astype(int) - .groupby(ds.notnull().astype(int).cumsum()[ds.isnull()]) - .transform('sum').fillna(0)) + return ( + ds.isnull() + .astype(int) + .groupby(ds.notnull().astype(int).cumsum()[ds.isnull()]) + .transform("sum") + .fillna(0) + ) def fill_large_gaps(ds, shift): @@ -97,94 +107,163 @@ def fill_large_gaps(ds, shift): This function fills gaps ragning from 3 to 168 hours (one week). 
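The large-gap filling described above (function body follows) reduces to overlaying the series with a copy of itself shifted by a fixed period and taking the shifted values wherever the original is missing. A toy demonstration with invented data and a one-week shift:

    import numpy as np
    import pandas as pd

    idx = pd.date_range("2013-01-01", periods=336, freq="h")  # two weeks, hourly
    ds = pd.Series(np.arange(336.0), index=idx)
    ds.iloc[200:230] = np.nan                                 # a 30-hour gap

    shift = pd.Timedelta(weeks=1)
    time_shift = pd.Series(ds.values, ds.index + shift)       # same data, one week later
    filled = ds.where(ds.notnull(), time_shift.reindex_like(ds))
    assert not filled.isnull().any()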
""" shift = Delta(shift) - nhours = shift / np.timedelta64(1, 'h') + nhours = shift / np.timedelta64(1, "h") if (consecutive_nans(ds) > nhours).any(): - logger.warning('There exist gaps larger then the time shift used for ' - 'copying time slices.') + logger.warning( + "There exist gaps larger then the time shift used for " + "copying time slices." + ) time_shift = pd.Series(ds.values, ds.index + shift) return ds.where(ds.notnull(), time_shift.reindex_like(ds)) def nan_statistics(df): def max_consecutive_nans(ds): - return (ds.isnull().astype(int) - .groupby(ds.notnull().astype(int).cumsum()) - .sum().max()) + return ( + ds.isnull() + .astype(int) + .groupby(ds.notnull().astype(int).cumsum()) + .sum() + .max() + ) + consecutive = df.apply(max_consecutive_nans) total = df.isnull().sum() - max_total_per_month = df.isnull().resample('m').sum().max() - return pd.concat([total, consecutive, max_total_per_month], - keys=['total', 'consecutive', 'max_total_per_month'], axis=1) + max_total_per_month = df.isnull().resample("m").sum().max() + return pd.concat( + [total, consecutive, max_total_per_month], + keys=["total", "consecutive", "max_total_per_month"], + axis=1, + ) -def copy_timeslice(load, cntry, start, stop, delta): +def copy_timeslice(load, cntry, start, stop, delta, fn_load=None): start = pd.Timestamp(start) stop = pd.Timestamp(stop) - if start-delta in load.index and stop in load.index and cntry in load: - load.loc[start:stop, cntry] = load.loc[start-delta:stop-delta, cntry].values + if start in load.index and stop in load.index: + if start - delta in load.index and stop - delta in load.index and cntry in load: + load.loc[start:stop, cntry] = load.loc[ + start - delta : stop - delta, cntry + ].values + elif fn_load is not None: + duration = pd.date_range(freq="h", start=start - delta, end=stop - delta) + load_raw = load_timeseries(fn_load, duration, [cntry], powerstatistics) + load.loc[start:stop, cntry] = load_raw.loc[ + start - delta : stop - delta, cntry + ].values -def manual_adjustment(load, powerstatistics, countries): +def manual_adjustment(load, fn_load, powerstatistics, countries): """ Adjust gaps manual for load data from OPSD time-series package. - 1. For the ENTSOE power statistics load data (if powerstatistics is True) + 1. For the ENTSOE power statistics load data (if powerstatistics is True) - Kosovo (KV) and Albania (AL) do not exist in the data set. Kosovo gets the - same load curve as Serbia and Albania the same as Macdedonia, both scaled - by the corresponding ratio of total energy consumptions reported by - IEA Data browser [0] for the year 2013. + Kosovo (KV) and Albania (AL) do not exist in the data set. Kosovo gets the + same load curve as Serbia and Albania the same as Macdedonia, both scaled + by the corresponding ratio of total energy consumptions reported by + IEA Data browser [0] for the year 2013. - 2. For the ENTSOE transparency load data (if powerstatistics is False) + 2. For the ENTSOE transparency load data (if powerstatistics is False) - Albania (AL) and Macedonia (MK) do not exist in the data set. Both get the - same load curve as Montenegro, scaled by the corresponding ratio of total energy - consumptions reported by IEA Data browser [0] for the year 2016. + Albania (AL) and Macedonia (MK) do not exist in the data set. Both get the + same load curve as Montenegro, scaled by the corresponding ratio of total energy + consumptions reported by IEA Data browser [0] for the year 2016. 
- [0] https://www.iea.org/data-and-statistics?country=WORLD&fuel=Electricity%20and%20heat&indicator=TotElecCons + [0] https://www.iea.org/data-and-statistics?country=WORLD&fuel=Electricity%20and%20heat&indicator=TotElecCons - Parameters - ---------- - load : pd.DataFrame - Load time-series with UTC timestamps x ISO-2 countries - powerstatistics: bool - Whether argument load comprises the electricity consumption data of - the ENTSOE power statistics or of the ENTSOE transparency map + Parameters + ---------- + load : pd.DataFrame + Load time-series with UTC timestamps x ISO-2 countries + powerstatistics: bool + Whether argument load comprises the electricity consumption data of + the ENTSOE power statistics or of the ENTSOE transparency map + load_fn: str + File name or url location (file format .csv) - Returns - ------- - load : pd.DataFrame - Manual adjusted and interpolated load time-series with UTC - timestamps x ISO-2 countries + Returns + ------- + load : pd.DataFrame + Manual adjusted and interpolated load time-series with UTC + timestamps x ISO-2 countries """ if powerstatistics: - if 'MK' in load.columns: - if 'AL' not in load.columns or load.AL.isnull().values.all(): - load['AL'] = load['MK'] * (4.1 / 7.4) - if 'RS' in load.columns: - if 'KV' not in load.columns or load.KV.isnull().values.all(): - load['KV'] = load['RS'] * (4.8 / 27.) + if "MK" in load.columns: + if "AL" not in load.columns or load.AL.isnull().values.all(): + load["AL"] = load["MK"] * (4.1 / 7.4) + if "RS" in load.columns: + if "KV" not in load.columns or load.KV.isnull().values.all(): + load["KV"] = load["RS"] * (4.8 / 27.0) - copy_timeslice(load, 'GR', '2015-08-11 21:00', '2015-08-15 20:00', Delta(weeks=1)) - copy_timeslice(load, 'AT', '2018-12-31 22:00', '2019-01-01 22:00', Delta(days=2)) - copy_timeslice(load, 'CH', '2010-01-19 07:00', '2010-01-19 22:00', Delta(days=1)) - copy_timeslice(load, 'CH', '2010-03-28 00:00', '2010-03-28 21:00', Delta(days=1)) + copy_timeslice( + load, "GR", "2015-08-11 21:00", "2015-08-15 20:00", Delta(weeks=1) + ) + copy_timeslice( + load, "AT", "2018-12-31 22:00", "2019-01-01 22:00", Delta(days=2) + ) + copy_timeslice( + load, "CH", "2010-01-19 07:00", "2010-01-19 22:00", Delta(days=1) + ) + copy_timeslice( + load, "CH", "2010-03-28 00:00", "2010-03-28 21:00", Delta(days=1) + ) # is a WE, so take WE before - copy_timeslice(load, 'CH', '2010-10-08 13:00', '2010-10-10 21:00', Delta(weeks=1)) - copy_timeslice(load, 'CH', '2010-11-04 04:00', '2010-11-04 22:00', Delta(days=1)) - copy_timeslice(load, 'NO', '2010-12-09 11:00', '2010-12-09 18:00', Delta(days=1)) + copy_timeslice( + load, "CH", "2010-10-08 13:00", "2010-10-10 21:00", Delta(weeks=1) + ) + copy_timeslice( + load, "CH", "2010-11-04 04:00", "2010-11-04 22:00", Delta(days=1) + ) + copy_timeslice( + load, "NO", "2010-12-09 11:00", "2010-12-09 18:00", Delta(days=1) + ) # whole january missing - copy_timeslice(load, 'GB', '2009-12-31 23:00', '2010-01-31 23:00', Delta(days=-364)) + copy_timeslice( + load, + "GB", + "2010-01-01 00:00", + "2010-01-31 23:00", + Delta(days=-365), + fn_load, + ) + # 1.1. 
at midnight gets special treatment + copy_timeslice( + load, + "IE", + "2016-01-01 00:00", + "2016-01-01 01:00", + Delta(days=-366), + fn_load, + ) + copy_timeslice( + load, + "PT", + "2016-01-01 00:00", + "2016-01-01 01:00", + Delta(days=-366), + fn_load, + ) + copy_timeslice( + load, + "GB", + "2016-01-01 00:00", + "2016-01-01 01:00", + Delta(days=-366), + fn_load, + ) else: - if 'ME' in load: - if 'AL' not in load and 'AL' in countries: - load['AL'] = load.ME * (5.7/2.9) - if 'MK' not in load and 'MK' in countries: - load['MK'] = load.ME * (6.7/2.9) - copy_timeslice(load, 'BG', '2018-10-27 21:00', '2018-10-28 22:00', Delta(weeks=1)) + if "ME" in load: + if "AL" not in load and "AL" in countries: + load["AL"] = load.ME * (5.7 / 2.9) + if "MK" not in load and "MK" in countries: + load["MK"] = load.ME * (6.7 / 2.9) + copy_timeslice( + load, "BG", "2018-10-27 21:00", "2018-10-28 22:00", Delta(weeks=1) + ) if 'UA' in countries: copy_timeslice(load, 'UA', '2013-01-25 14:00', '2013-01-28 21:00', Delta(weeks=1)) @@ -195,18 +274,19 @@ def manual_adjustment(load, powerstatistics, countries): if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('build_load_data') + + snakemake = mock_snakemake("build_load_data") configure_logging(snakemake) - powerstatistics = snakemake.config['load']['power_statistics'] - interpolate_limit = snakemake.config['load']['interpolate_limit'] - countries = snakemake.config['countries'] - snapshots = pd.date_range(freq='h', **snakemake.config['snapshots']) + powerstatistics = snakemake.config["load"]["power_statistics"] + interpolate_limit = snakemake.config["load"]["interpolate_limit"] + countries = snakemake.config["countries"] + snapshots = pd.date_range(freq="h", **snakemake.config["snapshots"]) years = slice(snapshots[0], snapshots[-1]) - time_shift = snakemake.config['load']['time_shift_for_large_gaps'] + time_shift = snakemake.config["load"]["time_shift_for_large_gaps"] load = load_timeseries(snakemake.input[0], years, countries, powerstatistics) @@ -220,20 +300,21 @@ if __name__ == "__main__": # https://www.iea.org/data-and-statistics/data-browser/?country=MOLDOVA&fuel=Energy%20consumption&indicator=TotElecCons load['MD'] = 6.2e6*(load_ua/load_ua.sum()) - if snakemake.config['load']['manual_adjustments']: - load = manual_adjustment(load, powerstatistics, countries) + if snakemake.config["load"]["manual_adjustments"]: + load = manual_adjustment(load, snakemake.input[0], powerstatistics, countries) logger.info(f"Linearly interpolate gaps of size {interpolate_limit} and less.") - load = load.interpolate(method='linear', limit=interpolate_limit) + load = load.interpolate(method="linear", limit=interpolate_limit) - logger.info("Filling larger gaps by copying time-slices of period " - f"'{time_shift}'.") + logger.info( + "Filling larger gaps by copying time-slices of period " f"'{time_shift}'." + ) load = load.apply(fill_large_gaps, shift=time_shift) assert not load.isna().any().any(), ( - 'Load data contains nans. Adjust the parameters ' - '`time_shift_for_large_gaps` or modify the `manual_adjustment` function ' - 'for implementing the needed load data modifications.') + "Load data contains nans. Adjust the parameters " + "`time_shift_for_large_gaps` or modify the `manual_adjustment` function " + "for implementing the needed load data modifications." 
+ ) load.to_csv(snakemake.output[0]) - diff --git a/scripts/build_natura_raster.py b/scripts/build_natura_raster.py index 71d2c45e..33e4cf99 100644 --- a/scripts/build_natura_raster.py +++ b/scripts/build_natura_raster.py @@ -1,9 +1,13 @@ -# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ -Rasters the vector data of the `Natura 2000 `_ natural protection areas onto all cutout regions. +Rasters the vector data of the `Natura 2000. + +`_ natural protection areas onto all +cutout regions. Relevant Settings ----------------- @@ -36,15 +40,14 @@ Outputs Description ----------- - """ import logging -from _helpers import configure_logging, retrieve_snakemake_keys import atlite import geopandas as gpd import rasterio as rio +from _helpers import configure_logging from rasterio.features import geometry_mask from rasterio.warp import transform_bounds @@ -56,11 +59,11 @@ def determine_cutout_xXyY(cutout_name): assert cutout.crs.to_epsg() == 4326 x, X, y, Y = cutout.extent dx, dy = cutout.dx, cutout.dy - return [x - dx/2., X + dx/2., y - dy/2., Y + dy/2.] + return [x - dx / 2.0, X + dx / 2.0, y - dy / 2.0, Y + dy / 2.0] def get_transform_and_shape(bounds, res): - left, bottom = [(b // res)* res for b in bounds[:2]] + left, bottom = [(b // res) * res for b in bounds[:2]] right, top = [(b // res + 1) * res for b in bounds[2:]] shape = int((top - bottom) // res), int((right - left) / res) transform = rio.Affine(res, 0, left, 0, -res, top) @@ -68,25 +71,32 @@ def get_transform_and_shape(bounds, res): if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('build_natura_raster') + + snakemake = mock_snakemake("build_natura_raster") configure_logging(snakemake) - paths, config, wildcards, logs, out = retrieve_snakemake_keys(snakemake) - - cutouts = paths.cutouts + cutouts = snakemake.input.cutouts xs, Xs, ys, Ys = zip(*(determine_cutout_xXyY(cutout) for cutout in cutouts)) bounds = transform_bounds(4326, 3035, min(xs), min(ys), max(Xs), max(Ys)) transform, out_shape = get_transform_and_shape(bounds, res=100) # adjusted boundaries - shapes = gpd.read_file(paths.natura).to_crs(3035) - raster = ~geometry_mask(shapes.geometry, out_shape[::-1], transform) + shapes = gpd.read_file(snakemake.input.natura).to_crs(3035) + raster = ~geometry_mask(shapes.geometry, out_shape, transform) raster = raster.astype(rio.uint8) - with rio.open(out[0], 'w', driver='GTiff', dtype=rio.uint8, - count=1, transform=transform, crs=3035, compress='lzw', - width=raster.shape[1], height=raster.shape[0]) as dst: + with rio.open( + snakemake.output[0], + "w", + driver="GTiff", + dtype=rio.uint8, + count=1, + transform=transform, + crs=3035, + compress="lzw", + width=raster.shape[1], + height=raster.shape[0], + ) as dst: dst.write(raster, indexes=1) - diff --git a/scripts/build_powerplants.py b/scripts/build_powerplants.py index 764028d1..f65b4966 100755 --- a/scripts/build_powerplants.py +++ b/scripts/build_powerplants.py @@ -1,10 +1,15 @@ -# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT # coding: utf-8 """ -Retrieves conventional powerplant capacities and locations from `powerplantmatching `_, assigns these to buses and creates a ``.csv`` file. 
It is possible to amend the powerplant database with custom entries provided in ``data/custom_powerplants.csv``. +Retrieves conventional powerplant capacities and locations from +`powerplantmatching `_, assigns +these to buses and creates a ``.csv`` file. It is possible to amend the +powerplant database with custom entries provided in +``data/custom_powerplants.csv``. Relevant Settings ----------------- @@ -68,18 +73,15 @@ The configuration options ``electricity: powerplants_filter`` and ``electricity: powerplants_filter: Country not in ['Germany'] and YearCommissioned <= 2015 custom_powerplants: YearCommissioned <= 2015 - """ import logging -from _helpers import configure_logging -import pypsa -import powerplantmatching as pm import pandas as pd -import numpy as np - -from scipy.spatial import cKDTree as KDTree +import powerplantmatching as pm +import pypsa +from _helpers import configure_logging +from powerplantmatching.export import map_country_bus logger = logging.getLogger(__name__) @@ -87,56 +89,78 @@ logger = logging.getLogger(__name__) def add_custom_powerplants(ppl, custom_powerplants, custom_ppl_query=False): if not custom_ppl_query: return ppl - add_ppls = pd.read_csv(custom_powerplants, index_col=0, - dtype={'bus': 'str'}) + add_ppls = pd.read_csv(custom_powerplants, index_col=0, dtype={"bus": "str"}) if isinstance(custom_ppl_query, str): add_ppls.query(custom_ppl_query, inplace=True) - return pd.concat([ppl, add_ppls], sort=False, ignore_index=True, verify_integrity=True) + return pd.concat( + [ppl, add_ppls], sort=False, ignore_index=True, verify_integrity=True + ) + + +def replace_natural_gas_technology(df): + mapping = {"Steam Turbine": "OCGT", "Combustion Engine": "OCGT"} + tech = df.Technology.replace(mapping).fillna("OCGT") + return df.Technology.where(df.Fueltype != "Natural Gas", tech) + + +def replace_natural_gas_fueltype(df): + return df.Fueltype.where(df.Fueltype != "Natural Gas", df.Technology) if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('build_powerplants') + + snakemake = mock_snakemake("build_powerplants") configure_logging(snakemake) n = pypsa.Network(snakemake.input.base_network) countries = n.buses.country.unique() - ppl = (pm.powerplants(from_url=True) - .powerplant.fill_missing_decommyears() - .powerplant.convert_country_to_alpha2() - .query('Fueltype not in ["Solar", "Wind"] and Country in @countries') - .replace({'Technology': {'Steam Turbine': 'OCGT'}}) - .assign(Fueltype=lambda df: ( - df.Fueltype - .where(df.Fueltype != 'Natural Gas', - df.Technology.replace('Steam Turbine', - 'OCGT').fillna('OCGT'))))) + ppl = ( + pm.powerplants(from_url=True) + .powerplant.fill_missing_decommissioning_years() + .powerplant.convert_country_to_alpha2() + .query('Fueltype not in ["Solar", "Wind"] and Country in @countries') + .assign(Technology=replace_natural_gas_technology) + .assign(Fueltype=replace_natural_gas_fueltype) + ) - ppl_query = snakemake.config['electricity']['powerplants_filter'] + # Correct bioenergy for countries where possible + opsd = pm.data.OPSD_VRE().powerplant.convert_country_to_alpha2() + opsd = opsd.query('Country in @countries and Fueltype == "Bioenergy"') + opsd["Name"] = "Biomass" + available_countries = opsd.Country.unique() + ppl = ppl.query('not (Country in @available_countries and Fueltype == "Bioenergy")') + ppl = pd.concat([ppl, opsd]) + + ppl_query = snakemake.config["electricity"]["powerplants_filter"] if 
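The two new helpers replace the hard-to-read inline replace/where chain: for plants fuelled by natural gas, an unknown or steam-turbine technology falls back to OCGT, and the fuel type is then overwritten with the technology. A toy illustration, assuming powerplantmatching-style Fueltype and Technology columns:

.. code:: python

    import pandas as pd

    df = pd.DataFrame(
        {
            "Fueltype": ["Natural Gas", "Natural Gas", "Hard Coal"],
            "Technology": ["Steam Turbine", "CCGT", None],
        }
    )

    mapping = {"Steam Turbine": "OCGT", "Combustion Engine": "OCGT"}
    tech = df.Technology.replace(mapping).fillna("OCGT")
    df["Technology"] = df.Technology.where(df.Fueltype != "Natural Gas", tech)
    df["Fueltype"] = df.Fueltype.where(df.Fueltype != "Natural Gas", df.Technology)
    # -> (OCGT, OCGT), (CCGT, CCGT), (Hard Coal, None); coal rows untouched

Splitting the chain into named functions also makes the order explicit: the technology must be cleaned up before the fuel type can borrow it.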
isinstance(ppl_query, str): ppl.query(ppl_query, inplace=True) # add carriers from own powerplant files: - custom_ppl_query = snakemake.config['electricity']['custom_powerplants'] - ppl = add_custom_powerplants(ppl, snakemake.input.custom_powerplants, custom_ppl_query) + custom_ppl_query = snakemake.config["electricity"]["custom_powerplants"] + ppl = add_custom_powerplants( + ppl, snakemake.input.custom_powerplants, custom_ppl_query + ) - cntries_without_ppl = [c for c in countries if c not in ppl.Country.unique()] + countries_wo_ppl = set(countries) - set(ppl.Country.unique()) + if countries_wo_ppl: + logging.warning(f"No powerplants known in: {', '.join(countries_wo_ppl)}") - for c in countries: - substation_i = n.buses.query('substation_lv and country == @c').index - kdtree = KDTree(n.buses.loc[substation_i, ['x','y']].values) - ppl_i = ppl.query('Country == @c').index - - tree_i = kdtree.query(ppl.loc[ppl_i, ['lon','lat']].values)[1] - ppl.loc[ppl_i, 'bus'] = substation_i.append(pd.Index([np.nan]))[tree_i] - - if cntries_without_ppl: - logging.warning(f"No powerplants known in: {', '.join(cntries_without_ppl)}") + substations = n.buses.query("substation_lv") + ppl = map_country_bus(ppl, substations) bus_null_b = ppl["bus"].isnull() if bus_null_b.any(): - logging.warning(f"Couldn't find close bus for {bus_null_b.sum()} powerplants") + logging.warning( + f"Couldn't find close bus for {bus_null_b.sum()} powerplants. " + "Removing them from the powerplants list." + ) + ppl = ppl[~bus_null_b] - ppl.to_csv(snakemake.output[0]) + # TODO: This has to fixed in PPM, some powerplants are still duplicated + cumcount = ppl.groupby(["bus", "Fueltype"]).cumcount() + 1 + ppl.Name = ppl.Name.where(cumcount == 1, ppl.Name + " " + cumcount.astype(str)) + + ppl.reset_index(drop=True).to_csv(snakemake.output[0]) diff --git a/scripts/build_renewable_profiles.py b/scripts/build_renewable_profiles.py index ab4f2027..58b12ef7 100644 --- a/scripts/build_renewable_profiles.py +++ b/scripts/build_renewable_profiles.py @@ -1,15 +1,17 @@ #!/usr/bin/env python +# -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT -"""Calculates for each network node the -(i) installable capacity (based on land-use), (ii) the available generation time -series (based on weather data), and (iii) the average distance from the node for -onshore wind, AC-connected offshore wind, DC-connected offshore wind and solar -PV generators. In addition for offshore wind it calculates the fraction of the -grid connection which is under water. +""" +Calculates for each network node the (i) installable capacity (based on land- +use), (ii) the available generation time series (based on weather data), and +(iii) the average distance from the node for onshore wind, AC-connected +offshore wind, DC-connected offshore wind and solar PV generators. In addition +for offshore wind it calculates the fraction of the grid connection which is +under water. .. note:: Hydroelectric profiles are built in script :mod:`build_hydro_profiles`. @@ -177,86 +179,109 @@ node (`p_nom_max`): ``simple`` and ``conservative``: - ``conservative`` assertains the nodal limit by increasing capacities proportional to the layout until the limit of an individual grid cell is reached. 
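The practical difference between the two ``potential`` methods is easiest to see numerically. A toy sketch with two grid cells feeding one bus; all numbers are invented, and in the script the same quantities are xarray objects filled by atlite:

.. code:: python

    import numpy as np

    capacity_per_sqkm = 3.0          # assumed MW/km2
    area = np.array([100.0, 100.0])  # km2 per grid cell
    avail = np.array([0.5, 0.2])     # eligible share of each cell
    cf = np.array([0.30, 0.15])      # mean capacity factor of each cell

    # 'simple': every eligible square kilometre may be built out
    p_nom_max_simple = capacity_per_sqkm * avail @ area          # 210.0 MW

    # 'conservative': scale the cf-proportional layout until the best
    # available cell reaches its own per-cell limit
    layout = cf * area * capacity_per_sqkm
    capacities = (avail * layout).sum()
    p_nom_max_conservative = capacities / cf[avail > 0].max()    # 180.0 MW

The conservative estimate is never larger than the simple one, since it stops expanding as soon as a single grid cell saturates.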
- """ -import progressbar as pgb -import geopandas as gpd -import xarray as xr -import numpy as np import functools -import atlite import logging -from pypsa.geo import haversine -from shapely.geometry import LineString import time +import atlite +import geopandas as gpd +import numpy as np +import progressbar as pgb +import xarray as xr from _helpers import configure_logging +from dask.distributed import Client, LocalCluster +from pypsa.geo import haversine +from shapely.geometry import LineString logger = logging.getLogger(__name__) -if __name__ == '__main__': - if 'snakemake' not in globals(): +if __name__ == "__main__": + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('build_renewable_profiles', technology='solar') + + snakemake = mock_snakemake("build_renewable_profiles", technology="solar") configure_logging(snakemake) pgb.streams.wrap_stderr() - nprocesses = snakemake.config['atlite'].get('nprocesses') - noprogress = not snakemake.config['atlite'].get('show_progress', True) - config = snakemake.config['renewable'][snakemake.wildcards.technology] - resource = config['resource'] # pv panel config / wind turbine config - correction_factor = config.get('correction_factor', 1.) - capacity_per_sqkm = config['capacity_per_sqkm'] - p_nom_max_meth = config.get('potential', 'conservative') + nprocesses = int(snakemake.threads) + noprogress = not snakemake.config["atlite"].get("show_progress", False) + config = snakemake.config["renewable"][snakemake.wildcards.technology] + resource = config["resource"] # pv panel config / wind turbine config + correction_factor = config.get("correction_factor", 1.0) + capacity_per_sqkm = config["capacity_per_sqkm"] + p_nom_max_meth = config.get("potential", "conservative") if isinstance(config.get("corine", {}), list): - config['corine'] = {'grid_codes': config['corine']} + config["corine"] = {"grid_codes": config["corine"]} - if correction_factor != 1.: - logger.info(f'correction_factor is set as {correction_factor}') + if correction_factor != 1.0: + logger.info(f"correction_factor is set as {correction_factor}") + cluster = LocalCluster(n_workers=nprocesses, threads_per_worker=1) + client = Client(cluster, asynchronous=True) - cutout = atlite.Cutout(snakemake.input['cutout']) - regions = gpd.read_file(snakemake.input.regions).set_index('name').rename_axis('bus') + cutout = atlite.Cutout(snakemake.input["cutout"]) + regions = gpd.read_file(snakemake.input.regions) + assert not regions.empty, ( + f"List of regions in {snakemake.input.regions} is empty, please " + "disable the corresponding renewable technology" + ) + # do not pull up, set_index does not work if geo dataframe is empty + regions = regions.set_index("name").rename_axis("bus") buses = regions.index - excluder = atlite.ExclusionContainer(crs=3035, res=100) + res = config.get("excluder_resolution", 100) + excluder = atlite.ExclusionContainer(crs=3035, res=res) - if config['natura']: + if config["natura"]: excluder.add_raster(snakemake.input.natura, nodata=0, allow_no_overlap=True) corine = config.get("corine", {}) if "grid_codes" in corine: codes = corine["grid_codes"] excluder.add_raster(snakemake.input.corine, codes=codes, invert=True, crs=3035) - if corine.get("distance", 0.) 
> 0.: + if corine.get("distance", 0.0) > 0.0: codes = corine["distance_grid_codes"] buffer = corine["distance"] - excluder.add_raster(snakemake.input.corine, codes=codes, buffer=buffer, crs=3035) + excluder.add_raster( + snakemake.input.corine, codes=codes, buffer=buffer, crs=3035 + ) + + if "ship_threshold" in config: + shipping_threshold = ( + config["ship_threshold"] * 8760 * 6 + ) # approximation because 6 years of data which is hourly collected + func = functools.partial(np.less, shipping_threshold) + excluder.add_raster( + snakemake.input.ship_density, codes=func, crs=4326, allow_no_overlap=True + ) if "max_depth" in config: # lambda not supported for atlite + multiprocessing # use named function np.greater with partially frozen argument instead # and exclude areas where: -max_depth > grid cell depth - func = functools.partial(np.greater,-config['max_depth']) - excluder.add_raster(snakemake.input.gebco, codes=func, crs=4236, nodata=-1000) + func = functools.partial(np.greater, -config["max_depth"]) + excluder.add_raster(snakemake.input.gebco, codes=func, crs=4326, nodata=-1000) - if 'min_shore_distance' in config: - buffer = config['min_shore_distance'] + if "min_shore_distance" in config: + buffer = config["min_shore_distance"] excluder.add_geometry(snakemake.input.country_shapes, buffer=buffer) - if 'max_shore_distance' in config: - buffer = config['max_shore_distance'] - excluder.add_geometry(snakemake.input.country_shapes, buffer=buffer, invert=True) + if "max_shore_distance" in config: + buffer = config["max_shore_distance"] + excluder.add_geometry( + snakemake.input.country_shapes, buffer=buffer, invert=True + ) kwargs = dict(nprocesses=nprocesses, disable_progressbar=noprogress) if noprogress: - logger.info('Calculate landuse availabilities...') + logger.info("Calculate landuse availabilities...") start = time.time() availability = cutout.availabilitymatrix(regions, excluder, **kwargs) duration = time.time() - start - logger.info(f'Completed availability calculation ({duration:2.2f}s)') + logger.info(f"Completed availability calculation ({duration:2.2f}s)") else: availability = cutout.availabilitymatrix(regions, excluder, **kwargs) @@ -267,35 +292,41 @@ if __name__ == '__main__': availability.loc[availability_MDUA.coords] = availability_MDUA area = cutout.grid.to_crs(3035).area / 1e6 - area = xr.DataArray(area.values.reshape(cutout.shape), - [cutout.coords['y'], cutout.coords['x']]) + area = xr.DataArray( + area.values.reshape(cutout.shape), [cutout.coords["y"], cutout.coords["x"]] + ) - potential = capacity_per_sqkm * availability.sum('bus') * area - func = getattr(cutout, resource.pop('method')) - resource['dask_kwargs'] = {'num_workers': nprocesses} + potential = capacity_per_sqkm * availability.sum("bus") * area + func = getattr(cutout, resource.pop("method")) + resource["dask_kwargs"] = {"scheduler": client} capacity_factor = correction_factor * func(capacity_factor=True, **resource) layout = capacity_factor * area * capacity_per_sqkm - profile, capacities = func(matrix=availability.stack(spatial=['y','x']), - layout=layout, index=buses, - per_unit=True, return_capacity=True, **resource) + profile, capacities = func( + matrix=availability.stack(spatial=["y", "x"]), + layout=layout, + index=buses, + per_unit=True, + return_capacity=True, + **resource, + ) logger.info(f"Calculating maximal capacity per bus (method '{p_nom_max_meth}')") - if p_nom_max_meth == 'simple': + if p_nom_max_meth == "simple": p_nom_max = capacity_per_sqkm * availability @ area - elif p_nom_max_meth 
== 'conservative': - max_cap_factor = capacity_factor.where(availability!=0).max(['x', 'y']) + elif p_nom_max_meth == "conservative": + max_cap_factor = capacity_factor.where(availability != 0).max(["x", "y"]) p_nom_max = capacities / max_cap_factor else: - raise AssertionError('Config key `potential` should be one of "simple" ' - f'(default) or "conservative", not "{p_nom_max_meth}"') + raise AssertionError( + 'Config key `potential` should be one of "simple" ' + f'(default) or "conservative", not "{p_nom_max_meth}"' + ) + logger.info("Calculate average distances.") + layoutmatrix = (layout * availability).stack(spatial=["y", "x"]) - - logger.info('Calculate average distances.') - layoutmatrix = (layout * availability).stack(spatial=['y','x']) - - coords = cutout.grid[['x', 'y']] - bus_coords = regions[['x', 'y']] + coords = cutout.grid[["x", "y"]] + bus_coords = regions[["x", "y"]] average_distance = [] centre_of_mass = [] @@ -304,39 +335,45 @@ if __name__ == '__main__': nz_b = row != 0 row = row[nz_b] co = coords[nz_b] - distances = haversine(bus_coords.loc[bus], co) + distances = haversine(bus_coords.loc[bus], co) average_distance.append((distances * (row / row.sum())).sum()) centre_of_mass.append(co.values.T @ (row / row.sum())) average_distance = xr.DataArray(average_distance, [buses]) - centre_of_mass = xr.DataArray(centre_of_mass, [buses, ('spatial', ['x', 'y'])]) - - - ds = xr.merge([(correction_factor * profile).rename('profile'), - capacities.rename('weight'), - p_nom_max.rename('p_nom_max'), - potential.rename('potential'), - average_distance.rename('average_distance')]) + centre_of_mass = xr.DataArray(centre_of_mass, [buses, ("spatial", ["x", "y"])]) + ds = xr.merge( + [ + (correction_factor * profile).rename("profile"), + capacities.rename("weight"), + p_nom_max.rename("p_nom_max"), + potential.rename("potential"), + average_distance.rename("average_distance"), + ] + ) if snakemake.wildcards.technology.startswith("offwind"): - logger.info('Calculate underwater fraction of connections.') - offshore_shape = gpd.read_file(snakemake.input['offshore_shapes']).unary_union + logger.info("Calculate underwater fraction of connections.") + offshore_shape = gpd.read_file(snakemake.input["offshore_shapes"]).unary_union underwater_fraction = [] for bus in buses: p = centre_of_mass.sel(bus=bus).data - line = LineString([p, regions.loc[bus, ['x', 'y']]]) - frac = line.intersection(offshore_shape).length/line.length + line = LineString([p, regions.loc[bus, ["x", "y"]]]) + frac = line.intersection(offshore_shape).length / line.length underwater_fraction.append(frac) - ds['underwater_fraction'] = xr.DataArray(underwater_fraction, [buses]) + ds["underwater_fraction"] = xr.DataArray(underwater_fraction, [buses]) # select only buses with some capacity and minimal capacity factor - ds = ds.sel(bus=((ds['profile'].mean('time') > config.get('min_p_max_pu', 0.)) & - (ds['p_nom_max'] > config.get('min_p_nom_max', 0.)))) + ds = ds.sel( + bus=( + (ds["profile"].mean("time") > config.get("min_p_max_pu", 0.0)) + & (ds["p_nom_max"] > config.get("min_p_nom_max", 0.0)) + ) + ) - if 'clip_p_max_pu' in config: - min_p_max_pu = config['clip_p_max_pu'] - ds['profile'] = ds['profile'].where(ds['profile'] >= min_p_max_pu, 0) + if "clip_p_max_pu" in config: + min_p_max_pu = config["clip_p_max_pu"] + ds["profile"] = ds["profile"].where(ds["profile"] >= min_p_max_pu, 0) ds.to_netcdf(snakemake.output.profile) diff --git a/scripts/build_shapes.py b/scripts/build_shapes.py index 22aed1fe..cda27963 100644 --- 
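The underwater fraction for offshore connections is computed purely geometrically: a straight line from the layout's centre of mass to the region's bus coordinate is intersected with the unified offshore shape. A minimal sketch with a toy square standing in for offshore_shape:

.. code:: python

    from shapely.geometry import LineString, Polygon

    offshore_shape = Polygon([(0, 0), (4, 0), (4, 4), (0, 4)])  # toy sea area
    p = (2, 2)       # centre of mass of the installed layout, offshore
    bus_xy = (2, 6)  # onshore bus coordinate
    line = LineString([p, bus_xy])
    frac = line.intersection(offshore_shape).length / line.length  # 0.5

Half of this toy connection runs through the offshore polygon, so half of its grid connection would later be priced as submarine cable.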
a/scripts/build_shapes.py +++ b/scripts/build_shapes.py @@ -1,9 +1,12 @@ -# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ -Creates GIS shape files of the countries, exclusive economic zones and `NUTS3 `_ areas. +Creates GIS shape files of the countries, exclusive economic zones and `NUTS3 < +https://en.wikipedia.org/wiki/Nomenclature_of_Territorial_Units_for_Statistics> +`_ areas. Relevant Settings ----------------- @@ -64,23 +67,20 @@ Outputs Description ----------- - """ import logging -from _helpers import configure_logging - -import os -import numpy as np -from operator import attrgetter from functools import reduce from itertools import takewhile +from operator import attrgetter -import pandas as pd import geopandas as gpd +import numpy as np +import pandas as pd +import pycountry as pyc +from _helpers import configure_logging from shapely.geometry import MultiPolygon, Polygon from shapely.ops import unary_union -import pycountry as pyc logger = logging.getLogger(__name__) @@ -95,140 +95,187 @@ def _get_country(target, **keys): def _simplify_polys(polys, minarea=0.1, tolerance=0.01, filterremote=True): if isinstance(polys, MultiPolygon): - polys = sorted(polys.geoms, key=attrgetter('area'), reverse=True) + polys = sorted(polys.geoms, key=attrgetter("area"), reverse=True) mainpoly = polys[0] - mainlength = np.sqrt(mainpoly.area/(2.*np.pi)) + mainlength = np.sqrt(mainpoly.area / (2.0 * np.pi)) if mainpoly.area > minarea: - polys = MultiPolygon([p - for p in takewhile(lambda p: p.area > minarea, polys) - if not filterremote or (mainpoly.distance(p) < mainlength)]) + polys = MultiPolygon( + [ + p + for p in takewhile(lambda p: p.area > minarea, polys) + if not filterremote or (mainpoly.distance(p) < mainlength) + ] + ) else: polys = mainpoly return polys.simplify(tolerance=tolerance) def countries(naturalearth, country_list): - if 'RS' in country_list: country_list.append('KV') + if "RS" in country_list: + country_list.append("KV") df = gpd.read_file(naturalearth) # Names are a hassle in naturalearth, try several fields - fieldnames = (df[x].where(lambda s: s!='-99') for x in ('ISO_A2', 'WB_A2', 'ADM0_A3')) - df['name'] = reduce(lambda x,y: x.fillna(y), fieldnames, next(fieldnames)).str[0:2] + fieldnames = ( + df[x].where(lambda s: s != "-99") for x in ("ISO_A2", "WB_A2", "ADM0_A3") + ) + df["name"] = reduce(lambda x, y: x.fillna(y), fieldnames, next(fieldnames)).str[0:2] - df = df.loc[df.name.isin(country_list) & ((df['scalerank'] == 0) | (df['scalerank'] == 5))] - s = df.set_index('name')['geometry'].map(_simplify_polys) - if 'RS' in country_list: s['RS'] = s['RS'].union(s.pop('KV')) + df = df.loc[ + df.name.isin(country_list) & ((df["scalerank"] == 0) | (df["scalerank"] == 5)) + ] + s = df.set_index("name")["geometry"].map(_simplify_polys) + if "RS" in country_list: + s["RS"] = s["RS"].union(s.pop("KV")) + # cleanup shape union + s["RS"] = Polygon(s["RS"].exterior.coords) return s def eez(country_shapes, eez, country_list): df = gpd.read_file(eez) - df = df.loc[df['ISO_3digit'].isin([_get_country('alpha_3', alpha_2=c) for c in country_list])] - df['name'] = df['ISO_3digit'].map(lambda c: _get_country('alpha_2', alpha_3=c)) - s = df.set_index('name').geometry.map(lambda s: _simplify_polys(s, filterremote=False)) - s = gpd.GeoSeries({k:v for k,v in s.iteritems() if v.distance(country_shapes[k]) < 1e-3}) + df = df.loc[ + df["ISO_3digit"].isin( + 
[_get_country("alpha_3", alpha_2=c) for c in country_list] + ) + ] + df["name"] = df["ISO_3digit"].map(lambda c: _get_country("alpha_2", alpha_3=c)) + s = df.set_index("name").geometry.map( + lambda s: _simplify_polys(s, filterremote=False) + ) + s = gpd.GeoSeries( + {k: v for k, v in s.items() if v.distance(country_shapes[k]) < 1e-3} + ) + s = s.to_frame("geometry") s.index.name = "name" return s def country_cover(country_shapes, eez_shapes=None): - shapes = list(country_shapes) + shapes = country_shapes if eez_shapes is not None: - shapes += list(eez_shapes) + shapes = pd.concat([shapes, eez_shapes]) europe_shape = unary_union(shapes) if isinstance(europe_shape, MultiPolygon): - europe_shape = max(europe_shape, key=attrgetter('area')) + europe_shape = max(europe_shape, key=attrgetter("area")) return Polygon(shell=europe_shape.exterior) def nuts3(country_shapes, nuts3, nuts3pop, nuts3gdp, ch_cantons, ch_popgdp): df = gpd.read_file(nuts3) - df = df.loc[df['STAT_LEVL_'] == 3] - df['geometry'] = df['geometry'].map(_simplify_polys) - df = df.rename(columns={'NUTS_ID': 'id'})[['id', 'geometry']].set_index('id') + df = df.loc[df["STAT_LEVL_"] == 3] + df["geometry"] = df["geometry"].map(_simplify_polys) + df = df.rename(columns={"NUTS_ID": "id"})[["id", "geometry"]].set_index("id") - pop = pd.read_table(nuts3pop, na_values=[':'], delimiter=' ?\t', engine='python') - pop = (pop - .set_index(pd.MultiIndex.from_tuples(pop.pop('unit,geo\\time').str.split(','))).loc['THS'] - .applymap(lambda x: pd.to_numeric(x, errors='coerce')) - .fillna(method='bfill', axis=1))['2014'] + pop = pd.read_table(nuts3pop, na_values=[":"], delimiter=" ?\t", engine="python") + pop = ( + pop.set_index( + pd.MultiIndex.from_tuples(pop.pop("unit,geo\\time").str.split(",")) + ) + .loc["THS"] + .applymap(lambda x: pd.to_numeric(x, errors="coerce")) + .fillna(method="bfill", axis=1) + )["2014"] - gdp = pd.read_table(nuts3gdp, na_values=[':'], delimiter=' ?\t', engine='python') - gdp = (gdp - .set_index(pd.MultiIndex.from_tuples(gdp.pop('unit,geo\\time').str.split(','))).loc['EUR_HAB'] - .applymap(lambda x: pd.to_numeric(x, errors='coerce')) - .fillna(method='bfill', axis=1))['2014'] + gdp = pd.read_table(nuts3gdp, na_values=[":"], delimiter=" ?\t", engine="python") + gdp = ( + gdp.set_index( + pd.MultiIndex.from_tuples(gdp.pop("unit,geo\\time").str.split(",")) + ) + .loc["EUR_HAB"] + .applymap(lambda x: pd.to_numeric(x, errors="coerce")) + .fillna(method="bfill", axis=1) + )["2014"] cantons = pd.read_csv(ch_cantons) - cantons = cantons.set_index(cantons['HASC'].str[3:])['NUTS'] - cantons = cantons.str.pad(5, side='right', fillchar='0') + cantons = cantons.set_index(cantons["HASC"].str[3:])["NUTS"] + cantons = cantons.str.pad(5, side="right", fillchar="0") swiss = pd.read_excel(ch_popgdp, skiprows=3, index_col=0) swiss.columns = swiss.columns.to_series().map(cantons) - swiss_pop = pd.to_numeric(swiss.loc['Residents in 1000', 'CH040':]) + swiss_pop = pd.to_numeric(swiss.loc["Residents in 1000", "CH040":]) pop = pd.concat([pop, swiss_pop]) - swiss_gdp = pd.to_numeric(swiss.loc['Gross domestic product per capita in Swiss francs', 'CH040':]) + swiss_gdp = pd.to_numeric( + swiss.loc["Gross domestic product per capita in Swiss francs", "CH040":] + ) gdp = pd.concat([gdp, swiss_gdp]) df = df.join(pd.DataFrame(dict(pop=pop, gdp=gdp))) - df['country'] = df.index.to_series().str[:2].replace(dict(UK='GB', EL='GR')) + df["country"] = df.index.to_series().str[:2].replace(dict(UK="GB", EL="GR")) - excludenuts = pd.Index(('FRA10', 'FRA20', 
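country_cover, defined just above, unions all on- and offshore shapes and keeps only the exterior ring of the largest polygon, so interior holes and far-away islands drop out of the European envelope. A sketch of that reduction, using .geoms as the shapely-2-safe way to iterate a MultiPolygon:

.. code:: python

    from shapely.geometry import MultiPolygon, Polygon, box
    from shapely.ops import unary_union

    shapes = [box(0, 0, 2, 2), box(1, 1, 3, 3), box(10, 10, 11, 11)]
    europe = unary_union(shapes)  # a blob plus one distant island
    if isinstance(europe, MultiPolygon):
        europe = max(europe.geoms, key=lambda p: p.area)  # keep the blob
    europe = Polygon(shell=europe.exterior)  # drop interior holes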
'FRA30', 'FRA40', 'FRA50', - 'PT200', 'PT300', - 'ES707', 'ES703', 'ES704','ES705', 'ES706', 'ES708', 'ES709', - 'FI2', 'FR9')) - excludecountry = pd.Index(('MT', 'TR', 'LI', 'IS', 'CY', 'KV')) + excludenuts = pd.Index( + ( + "FRA10", + "FRA20", + "FRA30", + "FRA40", + "FRA50", + "PT200", + "PT300", + "ES707", + "ES703", + "ES704", + "ES705", + "ES706", + "ES708", + "ES709", + "FI2", + "FR9", + ) + ) + excludecountry = pd.Index(("MT", "TR", "LI", "IS", "CY", "KV")) df = df.loc[df.index.difference(excludenuts)] df = df.loc[~df.country.isin(excludecountry)] manual = gpd.GeoDataFrame( - [['BA1', 'BA', 3871.], - ['RS1', 'RS', 7210.], - ['AL1', 'AL', 2893.]], - columns=['NUTS_ID', 'country', 'pop'] - ).set_index('NUTS_ID') - manual['geometry'] = manual['country'].map(country_shapes) + [["BA1", "BA", 3871.0], ["RS1", "RS", 7210.0], ["AL1", "AL", 2893.0]], + columns=["NUTS_ID", "country", "pop"], + ).set_index("NUTS_ID") + manual["geometry"] = manual["country"].map(country_shapes) manual = manual.dropna() df = pd.concat([df, manual], sort=False) - df.loc['ME000', 'pop'] = 650. + df.loc["ME000", "pop"] = 650.0 return df -def save_to_geojson(df, fn): - if os.path.exists(fn): - os.unlink(fn) - if not isinstance(df, gpd.GeoDataFrame): - df = gpd.GeoDataFrame(dict(geometry=df)) - df = df.reset_index() - schema = {**gpd.io.file.infer_schema(df), 'geometry': 'Unknown'} - df.to_file(fn, driver='GeoJSON', schema=schema) - - if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('build_shapes') + + snakemake = mock_snakemake("build_shapes") configure_logging(snakemake) - country_shapes = countries(snakemake.input.naturalearth, snakemake.config['countries']) - save_to_geojson(country_shapes, snakemake.output.country_shapes) + country_shapes = countries( + snakemake.input.naturalearth, snakemake.config["countries"] + ) + country_shapes.reset_index().to_file(snakemake.output.country_shapes) - offshore_shapes = eez(country_shapes, snakemake.input.eez, snakemake.config['countries']) - save_to_geojson(offshore_shapes, snakemake.output.offshore_shapes) + offshore_shapes = eez( + country_shapes, snakemake.input.eez, snakemake.config["countries"] + ) + offshore_shapes.reset_index().to_file(snakemake.output.offshore_shapes) - europe_shape = country_cover(country_shapes, offshore_shapes) - save_to_geojson(gpd.GeoSeries(europe_shape), snakemake.output.europe_shape) + europe_shape = gpd.GeoDataFrame( + geometry=[country_cover(country_shapes, offshore_shapes.geometry)] + ) + europe_shape.reset_index().to_file(snakemake.output.europe_shape) - nuts3_shapes = nuts3(country_shapes, snakemake.input.nuts3, snakemake.input.nuts3pop, - snakemake.input.nuts3gdp, snakemake.input.ch_cantons, snakemake.input.ch_popgdp) - - save_to_geojson(nuts3_shapes, snakemake.output.nuts3_shapes) + nuts3_shapes = nuts3( + country_shapes, + snakemake.input.nuts3, + snakemake.input.nuts3pop, + snakemake.input.nuts3gdp, + snakemake.input.ch_cantons, + snakemake.input.ch_popgdp, + ) + nuts3_shapes.reset_index().to_file(snakemake.output.nuts3_shapes) diff --git a/scripts/build_ship_raster.py b/scripts/build_ship_raster.py new file mode 100644 index 00000000..0ae134fd --- /dev/null +++ b/scripts/build_ship_raster.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2022 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT + +""" +Transforms the global ship density data from 
+https://datacatalog.worldbank.org/search/dataset/0037580/Global-Shipping-
+Traffic-Density to the size of the considered cutout. The global ship density
+raster is later used for the exclusion when calculating the offshore
+potentials.
+
+Relevant Settings
+-----------------
+
+.. code:: yaml
+
+    renewable:
+        {technology}:
+            cutout:
+
+.. seealso::
+    Documentation of the configuration file ``config.yaml`` at
+    :ref:`renewable_cf`
+
+Inputs
+------
+
+- ``data/bundle/shipdensity/shipdensity_global.zip``: `Global ship density from `.
+
+Outputs
+-------
+
+- ``resources/europe_shipdensity_raster.nc``: Reduced version of `Global ship density from `.

[...]

diff --git a/scripts/cluster_network.py b/scripts/cluster_network.py

[...] = v1.2:
+        feature_data.columns = feature_data.columns.astype(str)
+
+    feature_data = feature_data.fillna(0)
+
+    return feature_data


 def distribute_clusters(n, n_clusters, focus_weights=None, solver_name="cbc"):
-    """Determine the number of clusters per country"""
+    """
+    Determine the number of clusters per country.
+    """

-    L = (n.loads_t.p_set.mean()
-         .groupby(n.loads.bus).sum()
-         .groupby([n.buses.country, n.buses.sub_network]).sum()
-         .pipe(normed))
+    L = (
+        n.loads_t.p_set.mean()
+        .groupby(n.loads.bus)
+        .sum()
+        .groupby([n.buses.country, n.buses.sub_network])
+        .sum()
+        .pipe(normed)
+    )

-    N = n.buses.groupby(['country', 'sub_network']).size()
+    N = n.buses.groupby(["country", "sub_network"]).size()

-    assert n_clusters >= len(N) and n_clusters <= N.sum(), \
-        f"Number of clusters must be {len(N)} <= n_clusters <= {N.sum()} for this selection of countries."
+    assert (
+        n_clusters >= len(N) and n_clusters <= N.sum()
+    ), f"Number of clusters must be {len(N)} <= n_clusters <= {N.sum()} for this selection of countries."

     if focus_weights is not None:

         total_focus = sum(list(focus_weights.values()))

-        assert total_focus <= 1.0, "The sum of focus weights must be less than or equal to 1."
+        assert (
+            total_focus <= 1.0
+        ), "The sum of focus weights must be less than or equal to 1."

         for country, weight in focus_weights.items():
             L[country] = weight / len(L[country])

-        remainder = [c not in focus_weights.keys() for c in L.index.get_level_values('country')]
+        remainder = [
+            c not in focus_weights.keys() for c in L.index.get_level_values("country")
+        ]
         L[remainder] = L.loc[remainder].pipe(normed) * (1 - total_focus)

-        logger.warning('Using custom focus weights for determining number of clusters.')
+        logger.warning("Using custom focus weights for determining number of clusters.")

-    assert np.isclose(L.sum(), 1.0, rtol=1e-3), f"Country weights L must sum up to 1.0 when distributing clusters. Is {L.sum()}."
+    assert np.isclose(
+        L.sum(), 1.0, rtol=1e-3
+    ), f"Country weights L must sum up to 1.0 when distributing clusters. Is {L.sum()}."

     m = po.ConcreteModel()
+
     def n_bounds(model, *n_id):
         return (1, N[n_id])
+
     m.n = po.Var(list(L.index), bounds=n_bounds, domain=po.Integers)
     m.tot = po.Constraint(expr=(po.summation(m.n) == n_clusters))
-    m.objective = po.Objective(expr=sum((m.n[i] - L.loc[i]*n_clusters)**2 for i in L.index),
-                               sense=po.minimize)
+    m.objective = po.Objective(
+        expr=sum((m.n[i] - L.loc[i] * n_clusters) ** 2 for i in L.index),
+        sense=po.minimize,
+    )

     opt = po.SolverFactory(solver_name)
-    if not opt.has_capability('quadratic_objective'):
-        logger.warning(f'The configured solver `{solver_name}` does not support quadratic objectives.
Falling back to `ipopt`.') - opt = po.SolverFactory('ipopt') + if not opt.has_capability("quadratic_objective"): + logger.warning( + f"The configured solver `{solver_name}` does not support quadratic objectives. Falling back to `ipopt`." + ) + opt = po.SolverFactory("ipopt") results = opt.solve(m) - assert results['Solver'][0]['Status'] == 'ok', f"Solver returned non-optimally: {results}" + assert ( + results["Solver"][0]["Status"] == "ok" + ), f"Solver returned non-optimally: {results}" return pd.Series(m.n.get_values(), index=L.index).round().astype(int) -def busmap_for_n_clusters(n, n_clusters, solver_name, focus_weights=None, algorithm="kmeans", **algorithm_kwds): +def busmap_for_n_clusters( + n, + n_clusters, + solver_name, + focus_weights=None, + algorithm="kmeans", + feature=None, + **algorithm_kwds, +): if algorithm == "kmeans": - algorithm_kwds.setdefault('n_init', 1000) - algorithm_kwds.setdefault('max_iter', 30000) - algorithm_kwds.setdefault('tol', 1e-6) - algorithm_kwds.setdefault('random_state', 0) + algorithm_kwds.setdefault("n_init", 1000) + algorithm_kwds.setdefault("max_iter", 30000) + algorithm_kwds.setdefault("tol", 1e-6) + algorithm_kwds.setdefault("random_state", 0) + + def fix_country_assignment_for_hac(n): + from scipy.sparse import csgraph + + # overwrite country of nodes that are disconnected from their country-topology + for country in n.buses.country.unique(): + m = n[n.buses.country == country].copy() + + _, labels = csgraph.connected_components( + m.adjacency_matrix(), directed=False + ) + + component = pd.Series(labels, index=m.buses.index) + component_sizes = component.value_counts() + + if len(component_sizes) > 1: + disconnected_bus = component[ + component == component_sizes.index[-1] + ].index[0] + + neighbor_bus = n.lines.query( + "bus0 == @disconnected_bus or bus1 == @disconnected_bus" + ).iloc[0][["bus0", "bus1"]] + new_country = list( + set(n.buses.loc[neighbor_bus].country) - set([country]) + )[0] + + logger.info( + f"overwriting country `{country}` of bus `{disconnected_bus}` " + f"to new country `{new_country}`, because it is disconnected " + "from its initial inter-country transmission grid." + ) + n.buses.at[disconnected_bus, "country"] = new_country + return n + + if algorithm == "hac": + feature = get_feature_for_hac(n, buses_i=n.buses.index, feature=feature) + n = fix_country_assignment_for_hac(n) + + if (algorithm != "hac") and (feature is not None): + logger.warning( + f"Keyword argument feature is only valid for algorithm `hac`. " + f"Given feature `{feature}` will be ignored." 
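fix_country_assignment_for_hac boils down to a connected-components search on each country's own sub-grid: if a country's buses split into several components, every bus in the smallest one is handed over to the country of a neighbouring bus, so that hierarchical clustering never sees a disconnected feature space. The core check, reduced to a toy adjacency matrix standing in for n.adjacency_matrix():

.. code:: python

    import numpy as np
    import pandas as pd
    from scipy.sparse import csgraph, csr_matrix

    # one country's grid: buses 0 and 1 are connected, bus 2 is isolated
    adjacency = csr_matrix(np.array([[0, 1, 0], [1, 0, 0], [0, 0, 0]]))
    _, labels = csgraph.connected_components(adjacency, directed=False)

    component = pd.Series(labels, index=["bus0", "bus1", "bus2"])
    component_sizes = component.value_counts()
    if len(component_sizes) > 1:
        disconnected = component[component == component_sizes.index[-1]].index[0]
        # 'bus2'; in the script it inherits the country of the bus at the
        # other end of its first connecting line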
+ ) n.determine_network_topology() - n_clusters = distribute_clusters(n, n_clusters, focus_weights=focus_weights, solver_name=solver_name) - - def reduce_network(n, buses): - nr = pypsa.Network() - nr.import_components_from_dataframe(buses, "Bus") - nr.import_components_from_dataframe(n.lines.loc[n.lines.bus0.isin(buses.index) & n.lines.bus1.isin(buses.index)], "Line") - return nr + n_clusters = distribute_clusters( + n, n_clusters, focus_weights=focus_weights, solver_name=solver_name + ) def busmap_for_country(x): - prefix = x.name[0] + x.name[1] + ' ' + prefix = x.name[0] + x.name[1] + " " logger.debug(f"Determining busmap for country {prefix[:-1]}") if len(x) == 1: - return pd.Series(prefix + '0', index=x.index) + return pd.Series(prefix + "0", index=x.index) weight = weighting_for_country(n, x) if algorithm == "kmeans": - return prefix + busmap_by_kmeans(n, weight, n_clusters[x.name], buses_i=x.index, **algorithm_kwds) - elif algorithm == "spectral": - return prefix + busmap_by_spectral_clustering(reduce_network(n, x), n_clusters[x.name], **algorithm_kwds) - elif algorithm == "louvain": - return prefix + busmap_by_louvain(reduce_network(n, x), n_clusters[x.name], **algorithm_kwds) + return prefix + busmap_by_kmeans( + n, weight, n_clusters[x.name], buses_i=x.index, **algorithm_kwds + ) + elif algorithm == "hac": + return prefix + busmap_by_hac( + n, n_clusters[x.name], buses_i=x.index, feature=feature.loc[x.index] + ) + elif algorithm == "modularity": + return prefix + busmap_by_greedy_modularity( + n, n_clusters[x.name], buses_i=x.index + ) else: - raise ValueError(f"`algorithm` must be one of 'kmeans', 'spectral' or 'louvain'. Is {algorithm}.") + raise ValueError( + f"`algorithm` must be one of 'kmeans' or 'hac'. Is {algorithm}." + ) - return (n.buses.groupby(['country', 'sub_network'], group_keys=False) - .apply(busmap_for_country).squeeze().rename('busmap')) + return ( + n.buses.groupby(["country", "sub_network"], group_keys=False) + .apply(busmap_for_country) + .squeeze() + .rename("busmap") + ) -def clustering_for_n_clusters(n, n_clusters, custom_busmap=False, aggregate_carriers=None, - line_length_factor=1.25, potential_mode='simple', solver_name="cbc", - algorithm="kmeans", extended_link_costs=0, focus_weights=None): +def clustering_for_n_clusters( + n, + n_clusters, + custom_busmap=False, + aggregate_carriers=None, + line_length_factor=1.25, + aggregation_strategies=dict(), + solver_name="cbc", + algorithm="hac", + feature=None, + extended_link_costs=0, + focus_weights=None, +): - if potential_mode == 'simple': - p_nom_max_strategy = pd.Series.sum - elif potential_mode == 'conservative': - p_nom_max_strategy = pd.Series.min - else: - raise AttributeError(f"potential_mode should be one of 'simple' or 'conservative' but is '{potential_mode}'") + bus_strategies, generator_strategies = get_aggregation_strategies( + aggregation_strategies + ) if not isinstance(custom_busmap, pd.Series): - busmap = busmap_for_n_clusters(n, n_clusters, solver_name, focus_weights, algorithm) + busmap = busmap_for_n_clusters( + n, n_clusters, solver_name, focus_weights, algorithm, feature + ) else: busmap = custom_busmap clustering = get_clustering_from_busmap( - n, busmap, - bus_strategies=dict(country=_make_consense("Bus", "country")), + n, + busmap, + bus_strategies=bus_strategies, aggregate_generators_weighted=True, aggregate_generators_carriers=aggregate_carriers, aggregate_one_ports=["Load", "StorageUnit"], line_length_factor=line_length_factor, - generator_strategies={'p_nom_max': 
p_nom_max_strategy, 'p_nom_min': pd.Series.sum}, - scale_link_capital_costs=False) + generator_strategies=generator_strategies, + scale_link_capital_costs=False, + ) if not n.links.empty: nc = clustering.network - nc.links['underwater_fraction'] = (n.links.eval('underwater_fraction * length') - .div(nc.links.length).dropna()) - nc.links['capital_cost'] = (nc.links['capital_cost'] - .add((nc.links.length - n.links.length) - .clip(lower=0).mul(extended_link_costs), - fill_value=0)) + nc.links["underwater_fraction"] = ( + n.links.eval("underwater_fraction * length").div(nc.links.length).dropna() + ) + nc.links["capital_cost"] = nc.links["capital_cost"].add( + (nc.links.length - n.links.length).clip(lower=0).mul(extended_link_costs), + fill_value=0, + ) return clustering -def save_to_geojson(s, fn): - if os.path.exists(fn): - os.unlink(fn) - df = s.reset_index() - schema = {**gpd.io.file.infer_schema(df), 'geometry': 'Unknown'} - df.to_file(fn, driver='GeoJSON', schema=schema) - - def cluster_regions(busmaps, input=None, output=None): - busmap = reduce(lambda x, y: x.map(y), busmaps[1:], busmaps[0]) - for which in ('regions_onshore', 'regions_offshore'): - regions = gpd.read_file(getattr(input, which)).set_index('name') - geom_c = regions.geometry.groupby(busmap).apply(shapely.ops.unary_union) - regions_c = gpd.GeoDataFrame(dict(geometry=geom_c)) - regions_c.index.name = 'name' - save_to_geojson(regions_c, getattr(output, which)) + for which in ("regions_onshore", "regions_offshore"): + regions = gpd.read_file(getattr(input, which)) + regions = regions.reindex(columns=["name", "geometry"]).set_index("name") + regions_c = regions.dissolve(busmap) + regions_c.index.name = "name" + regions_c = regions_c.reset_index() + regions_c.to_file(getattr(output, which)) def plot_busmap_for_n_clusters(n, n_clusters, fn=None): @@ -322,69 +452,112 @@ def plot_busmap_for_n_clusters(n, n_clusters, fn=None): cr = sns.color_palette("hls", len(cs)) n.plot(bus_colors=busmap.map(dict(zip(cs, cr)))) if fn is not None: - plt.savefig(fn, bbox_inches='tight') + plt.savefig(fn, bbox_inches="tight") del cs, cr if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('cluster_network', network='elec', simpl='', clusters='5') + + snakemake = mock_snakemake("cluster_network", simpl="", clusters="5") configure_logging(snakemake) n = pypsa.Network(snakemake.input.network) - focus_weights = snakemake.config.get('focus_weights', None) + focus_weights = snakemake.config.get("focus_weights", None) - renewable_carriers = pd.Index([tech - for tech in n.generators.carrier.unique() - if tech in snakemake.config['renewable']]) + renewable_carriers = pd.Index( + [ + tech + for tech in n.generators.carrier.unique() + if tech in snakemake.config["renewable"] + ] + ) - if snakemake.wildcards.clusters.endswith('m'): + exclude_carriers = snakemake.config["clustering"]["cluster_network"].get( + "exclude_carriers", [] + ) + aggregate_carriers = set(n.generators.carrier) - set(exclude_carriers) + if snakemake.wildcards.clusters.endswith("m"): n_clusters = int(snakemake.wildcards.clusters[:-1]) - aggregate_carriers = pd.Index(n.generators.carrier.unique()).difference(renewable_carriers) - elif snakemake.wildcards.clusters == 'all': + aggregate_carriers = snakemake.config["electricity"].get( + "conventional_carriers" + ) + elif snakemake.wildcards.clusters == "all": n_clusters = len(n.buses) - aggregate_carriers = None # All else: n_clusters = 
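cluster_regions now delegates the geometry aggregation to GeoDataFrame.dissolve: passing the busmap as the grouping key unions all region polygons that map to the same cluster bus, replacing the former manual groupby plus shapely.ops.unary_union. A minimal sketch, three unit squares collapsing into two clusters:

.. code:: python

    import geopandas as gpd
    import pandas as pd
    from shapely.geometry import box

    regions = gpd.GeoDataFrame(
        {"name": ["a", "b", "c"]},
        geometry=[box(0, 0, 1, 1), box(1, 0, 2, 1), box(2, 0, 3, 1)],
    ).set_index("name")

    busmap = pd.Series({"a": "c1", "b": "c1", "c": "c2"})
    clustered = regions.dissolve(busmap)
    # index: c1 (one 2x1 rectangle), c2 (one unit square)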
int(snakemake.wildcards.clusters) - aggregate_carriers = None # All if n_clusters == len(n.buses): # Fast-path if no clustering is necessary busmap = n.buses.index.to_series() linemap = n.lines.index.to_series() - clustering = pypsa.networkclustering.Clustering(n, busmap, linemap, linemap, pd.Series(dtype='O')) + clustering = pypsa.networkclustering.Clustering( + n, busmap, linemap, linemap, pd.Series(dtype="O") + ) else: - line_length_factor = snakemake.config['lines']['length_factor'] - Nyears = n.snapshot_weightings.objective.sum()/8760 + line_length_factor = snakemake.config["lines"]["length_factor"] + Nyears = n.snapshot_weightings.objective.sum() / 8760 - hvac_overhead_cost = (load_costs(snakemake.input.tech_costs, snakemake.config['costs'], snakemake.config['electricity'], Nyears) - .at['HVAC overhead', 'capital_cost']) + hvac_overhead_cost = load_costs( + snakemake.input.tech_costs, + snakemake.config["costs"], + snakemake.config["electricity"], + Nyears, + ).at["HVAC overhead", "capital_cost"] def consense(x): v = x.iat[0] - assert ((x == v).all() or x.isnull().all()), ( - "The `potential` configuration option must agree for all renewable carriers, for now!" - ) + assert ( + x == v + ).all() or x.isnull().all(), "The `potential` configuration option must agree for all renewable carriers, for now!" return v - potential_mode = consense(pd.Series([snakemake.config['renewable'][tech]['potential'] - for tech in renewable_carriers])) + + aggregation_strategies = snakemake.config["clustering"].get( + "aggregation_strategies", {} + ) + # translate str entries of aggregation_strategies to pd.Series functions: + aggregation_strategies = { + p: {k: getattr(pd.Series, v) for k, v in aggregation_strategies[p].items()} + for p in aggregation_strategies.keys() + } + custom_busmap = snakemake.config["enable"].get("custom_busmap", False) if custom_busmap: - custom_busmap = pd.read_csv(snakemake.input.custom_busmap, index_col=0, squeeze=True) + custom_busmap = pd.read_csv( + snakemake.input.custom_busmap, index_col=0, squeeze=True + ) custom_busmap.index = custom_busmap.index.astype(str) logger.info(f"Imported custom busmap from {snakemake.input.custom_busmap}") - clustering = clustering_for_n_clusters(n, n_clusters, custom_busmap, aggregate_carriers, - line_length_factor, potential_mode, - snakemake.config['solving']['solver']['name'], - "kmeans", hvac_overhead_cost, focus_weights) + cluster_config = snakemake.config.get("clustering", {}).get( + "cluster_network", {} + ) + clustering = clustering_for_n_clusters( + n, + n_clusters, + custom_busmap, + aggregate_carriers, + line_length_factor, + aggregation_strategies, + snakemake.config["solving"]["solver"]["name"], + cluster_config.get("algorithm", "hac"), + cluster_config.get("feature", "solar+onwind-time"), + hvac_overhead_cost, + focus_weights, + ) - update_p_nom_max(n) - + update_p_nom_max(clustering.network) + + clustering.network.meta = dict( + snakemake.config, **dict(wildcards=dict(snakemake.wildcards)) + ) clustering.network.export_to_netcdf(snakemake.output.network) - for attr in ('busmap', 'linemap'): #also available: linemap_positive, linemap_negative + for attr in ( + "busmap", + "linemap", + ): # also available: linemap_positive, linemap_negative getattr(clustering, attr).to_csv(snakemake.output[attr]) cluster_regions((clustering.busmap,), snakemake.input, snakemake.output) diff --git a/scripts/make_summary.py b/scripts/make_summary.py index 854e9463..0d12a04c 100644 --- a/scripts/make_summary.py +++ b/scripts/make_summary.py @@ -1,4 
+1,5 @@ -# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -11,8 +12,9 @@ Relevant Settings .. code:: yaml costs: - USD2013_to_EUR2013: - discountrate: + year: + version: + fill_values: marginal_cost: capital_cost: @@ -32,9 +34,9 @@ Outputs Description ----------- -The following rule can be used to summarize the results in seperate .csv files: +The following rule can be used to summarize the results in separate .csv files: -.. code:: +.. code:: bash snakemake results/summaries/elec_s_all_lall_Co2L-3H_all clusters @@ -45,28 +47,26 @@ The following rule can be used to summarize the results in seperate .csv files: the line volume/cost cap field can be set to one of the following: * ``lv1.25`` for a particular line volume extension by 25% * ``lc1.25`` for a line cost extension by 25 % -* ``lall`` for all evalutated caps +* ``lall`` for all evaluated caps * ``lvall`` for all line volume caps * ``lcall`` for all line cost caps Replacing '/summaries/' with '/plots/' creates nice colored maps of the results. - """ import logging -from _helpers import configure_logging - import os -import pypsa -import pandas as pd +import pandas as pd +import pypsa +from _helpers import configure_logging from add_electricity import load_costs, update_transmission_costs idx = pd.IndexSlice logger = logging.getLogger(__name__) -opt_name = {"Store": "e", "Line" : "s", "Transformer" : "s"} +opt_name = {"Store": "e", "Line": "s", "Transformer": "s"} def _add_indexed_rows(df, raw_index): @@ -78,102 +78,149 @@ def _add_indexed_rows(df, raw_index): def assign_carriers(n): - if "carrier" not in n.loads: n.loads["carrier"] = "electricity" - for carrier in ["transport","heat","urban heat"]: - n.loads.loc[n.loads.index.str.contains(carrier),"carrier"] = carrier + for carrier in ["transport", "heat", "urban heat"]: + n.loads.loc[n.loads.index.str.contains(carrier), "carrier"] = carrier - n.storage_units['carrier'].replace({'hydro': 'hydro+PHS', 'PHS': 'hydro+PHS'}, inplace=True) + n.storage_units["carrier"].replace( + {"hydro": "hydro+PHS", "PHS": "hydro+PHS"}, inplace=True + ) if "carrier" not in n.lines: n.lines["carrier"] = "AC" n.lines["carrier"].replace({"AC": "lines"}, inplace=True) - if n.links.empty: n.links["carrier"] = pd.Series(dtype=str) + if n.links.empty: + n.links["carrier"] = pd.Series(dtype=str) n.links["carrier"].replace({"DC": "lines"}, inplace=True) - if "EU gas store" in n.stores.index and n.stores.loc["EU gas Store","carrier"] == "": - n.stores.loc["EU gas Store","carrier"] = "gas Store" + if ( + "EU gas store" in n.stores.index + and n.stores.loc["EU gas Store", "carrier"] == "" + ): + n.stores.loc["EU gas Store", "carrier"] = "gas Store" def calculate_costs(n, label, costs): - - for c in n.iterate_components(n.branch_components|n.controllable_one_port_components^{"Load"}): - capital_costs = c.df.capital_cost*c.df[opt_name.get(c.name,"p") + "_nom_opt"] + for c in n.iterate_components( + n.branch_components | n.controllable_one_port_components ^ {"Load"} + ): + capital_costs = c.df.capital_cost * c.df[opt_name.get(c.name, "p") + "_nom_opt"] capital_costs_grouped = capital_costs.groupby(c.df.carrier).sum() # Index tuple(s) indicating the newly to-be-added row(s) - raw_index = tuple([[c.list_name],["capital"],list(capital_costs_grouped.index)]) + raw_index = tuple( + [[c.list_name], ["capital"], list(capital_costs_grouped.index)] + ) costs = _add_indexed_rows(costs, raw_index) 
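calculate_costs multiplies each component's optimised capacity by its annualised capital cost and aggregates by carrier; the surrounding code only wraps the result into the (component, capital/marginal, carrier) MultiIndex. Stripped of that bookkeeping, the core is a one-liner (values invented for illustration):

.. code:: python

    import pandas as pd

    df = pd.DataFrame(
        {
            "carrier": ["onwind", "onwind", "solar"],
            "capital_cost": [1.0e3, 1.0e3, 6.0e2],  # assumed EUR/MW/a
            "p_nom_opt": [50.0, 30.0, 20.0],        # optimised MW
        }
    )
    capital_costs = (df.capital_cost * df.p_nom_opt).groupby(df.carrier).sum()
    # onwind 80000.0, solar 12000.0  (EUR/a)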
- costs.loc[idx[raw_index],label] = capital_costs_grouped.values + costs.loc[idx[raw_index], label] = capital_costs_grouped.values if c.name == "Link": - p = c.pnl.p0.multiply(n.snapshot_weightings.generators,axis=0).sum() + p = c.pnl.p0.multiply(n.snapshot_weightings.generators, axis=0).sum() elif c.name == "Line": continue elif c.name == "StorageUnit": - p_all = c.pnl.p.multiply(n.snapshot_weightings.generators,axis=0) - p_all[p_all < 0.] = 0. + p_all = c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0) + p_all[p_all < 0.0] = 0.0 p = p_all.sum() else: - p = c.pnl.p.multiply(n.snapshot_weightings.generators,axis=0).sum() + p = c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0).sum() - marginal_costs = p*c.df.marginal_cost + marginal_costs = p * c.df.marginal_cost marginal_costs_grouped = marginal_costs.groupby(c.df.carrier).sum() - costs = costs.reindex(costs.index.union(pd.MultiIndex.from_product([[c.list_name],["marginal"],marginal_costs_grouped.index]))) + costs = costs.reindex( + costs.index.union( + pd.MultiIndex.from_product( + [[c.list_name], ["marginal"], marginal_costs_grouped.index] + ) + ) + ) - costs.loc[idx[c.list_name,"marginal",list(marginal_costs_grouped.index)],label] = marginal_costs_grouped.values + costs.loc[ + idx[c.list_name, "marginal", list(marginal_costs_grouped.index)], label + ] = marginal_costs_grouped.values return costs -def calculate_curtailment(n, label, curtailment): - avail = n.generators_t.p_max_pu.multiply(n.generators.p_nom_opt).sum().groupby(n.generators.carrier).sum() +def calculate_curtailment(n, label, curtailment): + avail = ( + n.generators_t.p_max_pu.multiply(n.generators.p_nom_opt) + .sum() + .groupby(n.generators.carrier) + .sum() + ) used = n.generators_t.p.sum().groupby(n.generators.carrier).sum() - curtailment[label] = (((avail - used)/avail)*100).round(3) + curtailment[label] = (((avail - used) / avail) * 100).round(3) return curtailment + def calculate_energy(n, label, energy): + for c in n.iterate_components(n.one_port_components | n.branch_components): - for c in n.iterate_components(n.one_port_components|n.branch_components): - - if c.name in {'Generator', 'Load', 'ShuntImpedance'}: - c_energies = c.pnl.p.multiply(n.snapshot_weightings.generators,axis=0).sum().multiply(c.df.sign).groupby(c.df.carrier).sum() - elif c.name in {'StorageUnit', 'Store'}: - c_energies = c.pnl.p.multiply(n.snapshot_weightings.stores,axis=0).sum().multiply(c.df.sign).groupby(c.df.carrier).sum() + if c.name in {"Generator", "Load", "ShuntImpedance"}: + c_energies = ( + c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0) + .sum() + .multiply(c.df.sign) + .groupby(c.df.carrier) + .sum() + ) + elif c.name in {"StorageUnit", "Store"}: + c_energies = ( + c.pnl.p.multiply(n.snapshot_weightings.stores, axis=0) + .sum() + .multiply(c.df.sign) + .groupby(c.df.carrier) + .sum() + ) else: - c_energies = (-c.pnl.p1.multiply(n.snapshot_weightings.generators,axis=0).sum() - c.pnl.p0.multiply(n.snapshot_weightings.generators,axis=0).sum()).groupby(c.df.carrier).sum() + c_energies = ( + ( + -c.pnl.p1.multiply(n.snapshot_weightings.generators, axis=0).sum() + - c.pnl.p0.multiply(n.snapshot_weightings.generators, axis=0).sum() + ) + .groupby(c.df.carrier) + .sum() + ) energy = include_in_summary(energy, [c.list_name], label, c_energies) return energy -def include_in_summary(summary, multiindexprefix, label, item): +def include_in_summary(summary, multiindexprefix, label, item): # Index tuple(s) indicating the newly to-be-added row(s) - raw_index = 
tuple([multiindexprefix,list(item.index)]) + raw_index = tuple([multiindexprefix, list(item.index)]) summary = _add_indexed_rows(summary, raw_index) summary.loc[idx[raw_index], label] = item.values return summary -def calculate_capacity(n,label,capacity): +def calculate_capacity(n, label, capacity): for c in n.iterate_components(n.one_port_components): - if 'p_nom_opt' in c.df.columns: - c_capacities = abs(c.df.p_nom_opt.multiply(c.df.sign)).groupby(c.df.carrier).sum() + if "p_nom_opt" in c.df.columns: + c_capacities = ( + abs(c.df.p_nom_opt.multiply(c.df.sign)).groupby(c.df.carrier).sum() + ) + capacity = include_in_summary(capacity, [c.list_name], label, c_capacities) + elif "e_nom_opt" in c.df.columns: + c_capacities = ( + abs(c.df.e_nom_opt.multiply(c.df.sign)).groupby(c.df.carrier).sum() + ) capacity = include_in_summary(capacity, [c.list_name], label, c_capacities) for c in n.iterate_components(n.passive_branch_components): - c_capacities = c.df['s_nom_opt'].groupby(c.df.carrier).sum() + c_capacities = c.df["s_nom_opt"].groupby(c.df.carrier).sum() capacity = include_in_summary(capacity, [c.list_name], label, c_capacities) for c in n.iterate_components(n.controllable_branch_components): @@ -182,16 +229,20 @@ def calculate_capacity(n,label,capacity): return capacity -def calculate_supply(n, label, supply): - """calculate the max dispatch of each component at the buses where the loads are attached""" - load_types = n.loads.carrier.value_counts().index +def calculate_supply(n, label, supply): + """ + calculate the max dispatch of each component at the buses where the loads + are attached. + """ + + load_types = n.buses.carrier.unique() for i in load_types: - buses = n.loads.bus[n.loads.carrier == i].values + buses = n.buses.query("carrier == @i").index - bus_map = pd.Series(False,index=n.buses.index) + bus_map = pd.Series(False, index=n.buses.index) bus_map.loc[buses] = True @@ -202,43 +253,57 @@ def calculate_supply(n, label, supply): if len(items) == 0 or c.pnl.p.empty: continue - s = c.pnl.p[items].max().multiply(c.df.loc[items,'sign']).groupby(c.df.loc[items,'carrier']).sum() + s = ( + c.pnl.p[items] + .max() + .multiply(c.df.loc[items, "sign"]) + .groupby(c.df.loc[items, "carrier"]) + .sum() + ) # Index tuple(s) indicating the newly to-be-added row(s) - raw_index = tuple([[i],[c.list_name],list(s.index)]) + raw_index = tuple([[i], [c.list_name], list(s.index)]) supply = _add_indexed_rows(supply, raw_index) - supply.loc[idx[raw_index],label] = s.values - + supply.loc[idx[raw_index], label] = s.values for c in n.iterate_components(n.branch_components): - for end in ["0","1"]: + for end in ["0", "1"]: items = c.df.index[c.df["bus" + end].map(bus_map)] - if len(items) == 0 or c.pnl["p"+end].empty: + if len(items) == 0 or c.pnl["p" + end].empty: continue - #lots of sign compensation for direction and to do maximums - s = (-1)**(1-int(end))*((-1)**int(end)*c.pnl["p"+end][items]).max().groupby(c.df.loc[items,'carrier']).sum() + # lots of sign compensation for direction and to do maximums + s = (-1) ** (1 - int(end)) * ( + (-1) ** int(end) * c.pnl["p" + end][items] + ).max().groupby(c.df.loc[items, "carrier"]).sum() - supply = supply.reindex(supply.index.union(pd.MultiIndex.from_product([[i],[c.list_name],s.index]))) - supply.loc[idx[i,c.list_name,list(s.index)],label] = s.values + supply = supply.reindex( + supply.index.union( + pd.MultiIndex.from_product([[i], [c.list_name], s.index]) + ) + ) + supply.loc[idx[i, c.list_name, list(s.index)], label] = s.values return supply def 
calculate_supply_energy(n, label, supply_energy): - """calculate the total dispatch of each component at the buses where the loads are attached""" + """ + calculate the total dispatch of each component at the buses where the loads + are attached. + """ - load_types = n.loads.carrier.value_counts().index + load_types = n.buses.carrier.unique() for i in load_types: - buses = n.loads.bus[n.loads.carrier == i].values + buses = n.buses.query("carrier == @i").index - bus_map = pd.Series(False,index=n.buses.index) + bus_map = pd.Series(False, index=n.buses.index) bus_map.loc[buses] = True @@ -249,55 +314,83 @@ def calculate_supply_energy(n, label, supply_energy): if len(items) == 0 or c.pnl.p.empty: continue - s = c.pnl.p[items].sum().multiply(c.df.loc[items,'sign']).groupby(c.df.loc[items,'carrier']).sum() + s = ( + c.pnl.p[items] + .sum() + .multiply(c.df.loc[items, "sign"]) + .groupby(c.df.loc[items, "carrier"]) + .sum() + ) # Index tuple(s) indicating the newly to-be-added row(s) - raw_index = tuple([[i],[c.list_name],list(s.index)]) + raw_index = tuple([[i], [c.list_name], list(s.index)]) supply_energy = _add_indexed_rows(supply_energy, raw_index) - supply_energy.loc[idx[raw_index],label] = s.values - + supply_energy.loc[idx[raw_index], label] = s.values for c in n.iterate_components(n.branch_components): - for end in ["0","1"]: + for end in ["0", "1"]: items = c.df.index[c.df["bus" + end].map(bus_map)] - if len(items) == 0 or c.pnl['p' + end].empty: + if len(items) == 0 or c.pnl["p" + end].empty: continue - s = (-1)*c.pnl["p"+end][items].sum().groupby(c.df.loc[items,'carrier']).sum() + s = (-1) * c.pnl["p" + end][items].sum().groupby( + c.df.loc[items, "carrier"] + ).sum() - supply_energy = supply_energy.reindex(supply_energy.index.union(pd.MultiIndex.from_product([[i],[c.list_name],s.index]))) - supply_energy.loc[idx[i,c.list_name,list(s.index)],label] = s.values + supply_energy = supply_energy.reindex( + supply_energy.index.union( + pd.MultiIndex.from_product([[i], [c.list_name], s.index]) + ) + ) + supply_energy.loc[idx[i, c.list_name, list(s.index)], label] = s.values return supply_energy -def calculate_metrics(n,label,metrics): +def calculate_metrics(n, label, metrics): + metrics = metrics.reindex( + metrics.index.union( + pd.Index( + [ + "line_volume", + "line_volume_limit", + "line_volume_AC", + "line_volume_DC", + "line_volume_shadow", + "co2_shadow", + ] + ) + ) + ) - metrics = metrics.reindex(metrics.index.union(pd.Index(["line_volume","line_volume_limit","line_volume_AC","line_volume_DC","line_volume_shadow","co2_shadow"]))) + metrics.at["line_volume_DC", label] = (n.links.length * n.links.p_nom_opt)[ + n.links.carrier == "DC" + ].sum() + metrics.at["line_volume_AC", label] = (n.lines.length * n.lines.s_nom_opt).sum() + metrics.at["line_volume", label] = metrics.loc[ + ["line_volume_AC", "line_volume_DC"], label + ].sum() - metrics.at["line_volume_DC",label] = (n.links.length*n.links.p_nom_opt)[n.links.carrier == "DC"].sum() - metrics.at["line_volume_AC",label] = (n.lines.length*n.lines.s_nom_opt).sum() - metrics.at["line_volume",label] = metrics.loc[["line_volume_AC","line_volume_DC"],label].sum() + if hasattr(n, "line_volume_limit"): + metrics.at["line_volume_limit", label] = n.line_volume_limit - if hasattr(n,"line_volume_limit"): - metrics.at["line_volume_limit",label] = n.line_volume_limit - - if hasattr(n,"line_volume_limit_dual"): - metrics.at["line_volume_shadow",label] = n.line_volume_limit_dual + if hasattr(n, "line_volume_limit_dual"): + 
metrics.at["line_volume_shadow", label] = n.line_volume_limit_dual if "CO2Limit" in n.global_constraints.index: - metrics.at["co2_shadow",label] = n.global_constraints.at["CO2Limit","mu"] + metrics.at["co2_shadow", label] = n.global_constraints.at["CO2Limit", "mu"] return metrics -def calculate_prices(n,label,prices): - - bus_type = pd.Series(n.buses.index.str[3:],n.buses.index).replace("","electricity") +def calculate_prices(n, label, prices): + bus_type = pd.Series(n.buses.index.str[3:], n.buses.index).replace( + "", "electricity" + ) prices = prices.reindex(prices.index.union(bus_type.value_counts().index)) @@ -307,19 +400,37 @@ def calculate_prices(n,label,prices): return prices -def calculate_weighted_prices(n,label,weighted_prices): - +def calculate_weighted_prices(n, label, weighted_prices): logger.warning("Weighted prices don't include storage units as loads") - weighted_prices = weighted_prices.reindex(pd.Index(["electricity","heat","space heat","urban heat","space urban heat","gas","H2"])) + weighted_prices = weighted_prices.reindex( + pd.Index( + [ + "electricity", + "heat", + "space heat", + "urban heat", + "space urban heat", + "gas", + "H2", + ] + ) + ) - link_loads = {"electricity" : ["heat pump", "resistive heater", "battery charger", "H2 Electrolysis"], - "heat" : ["water tanks charger"], - "urban heat" : ["water tanks charger"], - "space heat" : [], - "space urban heat" : [], - "gas" : ["OCGT","gas boiler","CHP electric","CHP heat"], - "H2" : ["Sabatier", "H2 Fuel Cell"]} + link_loads = { + "electricity": [ + "heat pump", + "resistive heater", + "battery charger", + "H2 Electrolysis", + ], + "heat": ["water tanks charger"], + "urban heat": ["water tanks charger"], + "space heat": [], + "space urban heat": [], + "gas": ["OCGT", "gas boiler", "CHP electric", "CHP heat"], + "H2": ["Sabatier", "H2 Fuel Cell"], + } for carrier in link_loads: @@ -328,64 +439,77 @@ def calculate_weighted_prices(n,label,weighted_prices): elif carrier[:5] == "space": suffix = carrier[5:] else: - suffix = " " + carrier + suffix = " " + carrier buses = n.buses.index[n.buses.index.str[2:] == suffix] if buses.empty: continue - if carrier in ["H2","gas"]: - load = pd.DataFrame(index=n.snapshots,columns=buses,data=0.) + if carrier in ["H2", "gas"]: + load = pd.DataFrame(index=n.snapshots, columns=buses, data=0.0) elif carrier[:5] == "space": - load = heat_demand_df[buses.str[:2]].rename(columns=lambda i: str(i)+suffix) + load = heat_demand_df[buses.str[:2]].rename( + columns=lambda i: str(i) + suffix + ) else: load = n.loads_t.p_set[buses] - for tech in link_loads[carrier]: - names = n.links.index[n.links.index.to_series().str[-len(tech):] == tech] + names = n.links.index[n.links.index.to_series().str[-len(tech) :] == tech] if names.empty: continue - load += n.links_t.p0[names].groupby(n.links.loc[names,"bus0"],axis=1).sum(axis=1) + load += ( + n.links_t.p0[names] + .groupby(n.links.loc[names, "bus0"], axis=1) + .sum(axis=1) + ) # Add H2 Store when charging if carrier == "H2": - stores = n.stores_t.p[buses+ " Store"].groupby(n.stores.loc[buses+ " Store","bus"],axis=1).sum(axis=1) - stores[stores > 0.] = 0. 
+ stores = ( + n.stores_t.p[buses + " Store"] + .groupby(n.stores.loc[buses + " Store", "bus"], axis=1) + .sum(axis=1) + ) + stores[stores > 0.0] = 0.0 load += -stores - weighted_prices.loc[carrier,label] = (load*n.buses_t.marginal_price[buses]).sum().sum()/load.sum().sum() + weighted_prices.loc[carrier, label] = ( + load * n.buses_t.marginal_price[buses] + ).sum().sum() / load.sum().sum() if carrier[:5] == "space": - print(load*n.buses_t.marginal_price[buses]) + print(load * n.buses_t.marginal_price[buses]) return weighted_prices -outputs = ["costs", - "curtailment", - "energy", - "capacity", - "supply", - "supply_energy", - "prices", - "weighted_prices", - "metrics", - ] +outputs = [ + "costs", + "curtailment", + "energy", + "capacity", + "supply", + "supply_energy", + "prices", + "weighted_prices", + "metrics", +] -def make_summaries(networks_dict, paths, config, country='all'): - - columns = pd.MultiIndex.from_tuples(networks_dict.keys(),names=["simpl","clusters","ll","opts"]) +def make_summaries(networks_dict, paths, config, country="all"): + columns = pd.MultiIndex.from_tuples( + networks_dict.keys(), names=["simpl", "clusters", "ll", "opts"] + ) dfs = {} for output in outputs: - dfs[output] = pd.DataFrame(columns=columns,dtype=float) + dfs[output] = pd.DataFrame(columns=columns, dtype=float) for label, filename in networks_dict.items(): print(label, filename) @@ -399,12 +523,12 @@ def make_summaries(networks_dict, paths, config, country='all'): logger.warning("Skipping {filename}".format(filename=filename)) continue - if country != 'all': + if country != "all": n = n[n.buses.country == country] - Nyears = n.snapshot_weightings.objective.sum() / 8760. - costs = load_costs(paths[0], config['costs'], config['electricity'], Nyears) - update_transmission_costs(n, costs, simple_hvdc_costs=False) + Nyears = n.snapshot_weightings.objective.sum() / 8760.0 + costs = load_costs(paths[0], config["costs"], config["electricity"], Nyears) + update_transmission_costs(n, costs) assign_carriers(n) @@ -421,13 +545,24 @@ def to_csv(dfs, dir): if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('make_summary', network='elec', simpl='', - clusters='5', ll='copt', opts='Co2L-24H', country='all') - network_dir = os.path.join('..', 'results', 'networks') + + snakemake = mock_snakemake( + "make_summary", + simpl="", + clusters="5", + ll="copt", + opts="Co2L-24H", + country="all", + ) + network_dir = os.path.join( + "..", "results", "networks", snakemake.config["run"]["name"] + ) else: - network_dir = os.path.join('results', 'networks') + network_dir = os.path.join( + "results", "networks", snakemake.config["run"]["name"] + ) configure_logging(snakemake) config = snakemake.config @@ -444,14 +579,18 @@ if __name__ == "__main__": else: ll = [wildcards.ll] - networks_dict = {(simpl,clusters,l,opts) : - os.path.join(network_dir, f'elec_s{simpl}_' - f'{clusters}_ec_l{l}_{opts}.nc') - for simpl in expand_from_wildcard("simpl", config) - for clusters in expand_from_wildcard("clusters", config) - for l in ll - for opts in expand_from_wildcard("opts", config)} + networks_dict = { + (simpl, clusters, l, opts): os.path.join( + network_dir, f"elec_s{simpl}_" f"{clusters}_ec_l{l}_{opts}.nc" + ) + for simpl in expand_from_wildcard("simpl", config) + for clusters in expand_from_wildcard("clusters", config) + for l in ll + for opts in expand_from_wildcard("opts", config) + } - dfs = make_summaries(networks_dict, 
snakemake.input, config, country=wildcards.country) + dfs = make_summaries( + networks_dict, snakemake.input, config, country=wildcards.country + ) to_csv(dfs, snakemake.output[0]) diff --git a/scripts/plot_network.py b/scripts/plot_network.py index 645c8c39..ff4d34cc 100755 --- a/scripts/plot_network.py +++ b/scripts/plot_network.py @@ -1,4 +1,5 @@ -# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -16,21 +17,24 @@ Outputs Description ----------- - """ import logging -from _helpers import (retrieve_snakemake_keys, load_network_for_plots, - aggregate_p, aggregate_costs, configure_logging) - -import pandas as pd -import numpy as np import cartopy.crs as ccrs -import matplotlib.pyplot as plt import matplotlib as mpl -from matplotlib.patches import Circle, Ellipse +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +from _helpers import ( + aggregate_costs, + aggregate_p, + configure_logging, + load_network_for_plots, +) from matplotlib.legend_handler import HandlerPatch +from matplotlib.patches import Circle, Ellipse + to_rgba = mpl.colors.colorConverter.to_rgba logger = logging.getLogger(__name__) @@ -38,253 +42,368 @@ logger = logging.getLogger(__name__) def make_handler_map_to_scale_circles_as_in(ax, dont_resize_actively=False): fig = ax.get_figure() + def axes2pt(): - return np.diff(ax.transData.transform([(0,0), (1,1)]), axis=0)[0] * (72./fig.dpi) + return np.diff(ax.transData.transform([(0, 0), (1, 1)]), axis=0)[0] * ( + 72.0 / fig.dpi + ) ellipses = [] if not dont_resize_actively: + def update_width_height(event): dist = axes2pt() - for e, radius in ellipses: e.width, e.height = 2. * radius * dist - fig.canvas.mpl_connect('resize_event', update_width_height) - ax.callbacks.connect('xlim_changed', update_width_height) - ax.callbacks.connect('ylim_changed', update_width_height) + for e, radius in ellipses: + e.width, e.height = 2.0 * radius * dist - def legend_circle_handler(legend, orig_handle, xdescent, ydescent, - width, height, fontsize): - w, h = 2. 
* orig_handle.get_radius() * axes2pt() - e = Ellipse(xy=(0.5*width-0.5*xdescent, 0.5*height-0.5*ydescent), width=w, height=w) + fig.canvas.mpl_connect("resize_event", update_width_height) + ax.callbacks.connect("xlim_changed", update_width_height) + ax.callbacks.connect("ylim_changed", update_width_height) + + def legend_circle_handler( + legend, orig_handle, xdescent, ydescent, width, height, fontsize + ): + w, h = 2.0 * orig_handle.get_radius() * axes2pt() + e = Ellipse( + xy=(0.5 * width - 0.5 * xdescent, 0.5 * height - 0.5 * ydescent), + width=w, + height=w, + ) ellipses.append((e, orig_handle.get_radius())) return e + return {Circle: HandlerPatch(patch_func=legend_circle_handler)} def make_legend_circles_for(sizes, scale=1.0, **kw): - return [Circle((0,0), radius=(s/scale)**0.5, **kw) for s in sizes] + return [Circle((0, 0), radius=(s / scale) ** 0.5, **kw) for s in sizes] def set_plot_style(): - plt.style.use(['classic', 'seaborn-white', - {'axes.grid': False, 'grid.linestyle': '--', 'grid.color': u'0.6', - 'hatch.color': 'white', - 'patch.linewidth': 0.5, - 'font.size': 12, - 'legend.fontsize': 'medium', - 'lines.linewidth': 1.5, - 'pdf.fonttype': 42, - }]) + plt.style.use( + [ + "classic", + "seaborn-white", + { + "axes.grid": False, + "grid.linestyle": "--", + "grid.color": "0.6", + "hatch.color": "white", + "patch.linewidth": 0.5, + "font.size": 12, + "legend.fontsize": "medium", + "lines.linewidth": 1.5, + "pdf.fonttype": 42, + }, + ] + ) -def plot_map(n, ax=None, attribute='p_nom', opts={}): +def plot_map(n, opts, ax=None, attribute="p_nom"): if ax is None: ax = plt.gca() ## DATA - line_colors = {'cur': "purple", - 'exp': mpl.colors.rgb2hex(to_rgba("red", 0.7), True)} - tech_colors = opts['tech_colors'] + line_colors = { + "cur": "purple", + "exp": mpl.colors.rgb2hex(to_rgba("red", 0.7), True), + } + tech_colors = opts["tech_colors"] - if attribute == 'p_nom': + if attribute == "p_nom": # bus_sizes = n.generators_t.p.sum().loc[n.generators.carrier == "load"].groupby(n.generators.bus).sum() - bus_sizes = pd.concat((n.generators.query('carrier != "load"').groupby(['bus', 'carrier']).p_nom_opt.sum(), - n.storage_units.groupby(['bus', 'carrier']).p_nom_opt.sum())) + bus_sizes = pd.concat( + ( + n.generators.query('carrier != "load"') + .groupby(["bus", "carrier"]) + .p_nom_opt.sum(), + n.storage_units.groupby(["bus", "carrier"]).p_nom_opt.sum(), + ) + ) line_widths_exp = n.lines.s_nom_opt line_widths_cur = n.lines.s_nom_min link_widths_exp = n.links.p_nom_opt link_widths_cur = n.links.p_nom_min else: - raise 'plotting of {} has not been implemented yet'.format(attribute) + raise NotImplementedError( + "plotting of {} has not been implemented yet".format(attribute) + ) - - line_colors_with_alpha = \ - ((line_widths_cur / n.lines.s_nom > 1e-3) - .map({True: line_colors['cur'], False: to_rgba(line_colors['cur'], 0.)})) - link_colors_with_alpha = \ - ((link_widths_cur / n.links.p_nom > 1e-3) - .map({True: line_colors['cur'], False: to_rgba(line_colors['cur'], 0.)})) - + line_colors_with_alpha = (line_widths_cur / n.lines.s_nom > 1e-3).map( + {True: line_colors["cur"], False: to_rgba(line_colors["cur"], 0.0)} + ) + link_colors_with_alpha = (link_widths_cur / n.links.p_nom > 1e-3).map( + {True: line_colors["cur"], False: to_rgba(line_colors["cur"], 0.0)} + ) ## FORMAT - linewidth_factor = opts['map'][attribute]['linewidth_factor'] - bus_size_factor = opts['map'][attribute]['bus_size_factor'] + linewidth_factor = opts["map"][attribute]["linewidth_factor"] + bus_size_factor = 
opts["map"][attribute]["bus_size_factor"] ## PLOT - n.plot(line_widths=line_widths_exp/linewidth_factor, - link_widths=link_widths_exp/linewidth_factor, - line_colors=line_colors['exp'], - link_colors=line_colors['exp'], - bus_sizes=bus_sizes/bus_size_factor, - bus_colors=tech_colors, - boundaries=map_boundaries, - color_geomap=True, geomap=True, - ax=ax) - n.plot(line_widths=line_widths_cur/linewidth_factor, - link_widths=link_widths_cur/linewidth_factor, - line_colors=line_colors_with_alpha, - link_colors=link_colors_with_alpha, - bus_sizes=0, - boundaries=map_boundaries, - color_geomap=True, geomap=False, - ax=ax) - ax.set_aspect('equal') - ax.axis('off') + n.plot( + line_widths=line_widths_exp / linewidth_factor, + link_widths=link_widths_exp / linewidth_factor, + line_colors=line_colors["exp"], + link_colors=line_colors["exp"], + bus_sizes=bus_sizes / bus_size_factor, + bus_colors=tech_colors, + boundaries=map_boundaries, + color_geomap=True, + geomap=True, + ax=ax, + ) + n.plot( + line_widths=line_widths_cur / linewidth_factor, + link_widths=link_widths_cur / linewidth_factor, + line_colors=line_colors_with_alpha, + link_colors=link_colors_with_alpha, + bus_sizes=0, + boundaries=map_boundaries, + color_geomap=True, + geomap=True, + ax=ax, + ) + ax.set_aspect("equal") + ax.axis("off") # Rasterize basemap # TODO : Check if this also works with cartopy - for c in ax.collections[:2]: c.set_rasterized(True) + for c in ax.collections[:2]: + c.set_rasterized(True) # LEGEND handles = [] labels = [] for s in (10, 1): - handles.append(plt.Line2D([0],[0],color=line_colors['exp'], - linewidth=s*1e3/linewidth_factor)) + handles.append( + plt.Line2D( + [0], [0], color=line_colors["exp"], linewidth=s * 1e3 / linewidth_factor + ) + ) labels.append("{} GW".format(s)) - l1_1 = ax.legend(handles, labels, - loc="upper left", bbox_to_anchor=(0.24, 1.01), - frameon=False, - labelspacing=0.8, handletextpad=1.5, - title='Transmission Exp./Exist. ') + l1_1 = ax.legend( + handles, + labels, + loc="upper left", + bbox_to_anchor=(0.24, 1.01), + frameon=False, + labelspacing=0.8, + handletextpad=1.5, + title="Transmission Exp./Exist. 
", + ) ax.add_artist(l1_1) handles = [] labels = [] for s in (10, 5): - handles.append(plt.Line2D([0],[0],color=line_colors['cur'], - linewidth=s*1e3/linewidth_factor)) + handles.append( + plt.Line2D( + [0], [0], color=line_colors["cur"], linewidth=s * 1e3 / linewidth_factor + ) + ) labels.append("/") - l1_2 = ax.legend(handles, labels, - loc="upper left", bbox_to_anchor=(0.26, 1.01), - frameon=False, - labelspacing=0.8, handletextpad=0.5, - title=' ') + l1_2 = ax.legend( + handles, + labels, + loc="upper left", + bbox_to_anchor=(0.26, 1.01), + frameon=False, + labelspacing=0.8, + handletextpad=0.5, + title=" ", + ) ax.add_artist(l1_2) - handles = make_legend_circles_for([10e3, 5e3, 1e3], scale=bus_size_factor, facecolor="w") + handles = make_legend_circles_for( + [10e3, 5e3, 1e3], scale=bus_size_factor, facecolor="w" + ) labels = ["{} GW".format(s) for s in (10, 5, 3)] - l2 = ax.legend(handles, labels, - loc="upper left", bbox_to_anchor=(0.01, 1.01), - frameon=False, labelspacing=1.0, - title='Generation', - handler_map=make_handler_map_to_scale_circles_as_in(ax)) + l2 = ax.legend( + handles, + labels, + loc="upper left", + bbox_to_anchor=(0.01, 1.01), + frameon=False, + labelspacing=1.0, + title="Generation", + handler_map=make_handler_map_to_scale_circles_as_in(ax), + ) ax.add_artist(l2) - techs = (bus_sizes.index.levels[1]).intersection(pd.Index(opts['vre_techs'] + opts['conv_techs'] + opts['storage_techs'])) + techs = (bus_sizes.index.levels[1]).intersection( + pd.Index(opts["vre_techs"] + opts["conv_techs"] + opts["storage_techs"]) + ) handles = [] labels = [] for t in techs: - handles.append(plt.Line2D([0], [0], color=tech_colors[t], marker='o', markersize=8, linewidth=0)) - labels.append(opts['nice_names'].get(t, t)) - l3 = ax.legend(handles, labels, loc="upper center", bbox_to_anchor=(0.5, -0.), # bbox_to_anchor=(0.72, -0.05), - handletextpad=0., columnspacing=0.5, ncol=4, title='Technology') + handles.append( + plt.Line2D( + [0], [0], color=tech_colors[t], marker="o", markersize=8, linewidth=0 + ) + ) + labels.append(opts["nice_names"].get(t, t)) + l3 = ax.legend( + handles, + labels, + loc="upper center", + bbox_to_anchor=(0.5, -0.0), # bbox_to_anchor=(0.72, -0.05), + handletextpad=0.0, + columnspacing=0.5, + ncol=4, + title="Technology", + ) return fig -def plot_total_energy_pie(n, ax=None): - if ax is None: ax = plt.gca() +def plot_total_energy_pie(n, opts, ax=None): + if ax is None: + ax = plt.gca() - ax.set_title('Energy per technology', fontdict=dict(fontsize="medium")) + ax.set_title("Energy per technology", fontdict=dict(fontsize="medium")) - e_primary = aggregate_p(n).drop('load', errors='ignore').loc[lambda s: s>0] + e_primary = aggregate_p(n).drop("load", errors="ignore").loc[lambda s: s > 0] - patches, texts, autotexts = ax.pie(e_primary, + patches, texts, autotexts = ax.pie( + e_primary, startangle=90, - labels = e_primary.rename(opts['nice_names']).index, - autopct='%.0f%%', + labels=e_primary.rename(opts["nice_names"]).index, + autopct="%.0f%%", shadow=False, - colors = [opts['tech_colors'][tech] for tech in e_primary.index]) + colors=[opts["tech_colors"][tech] for tech in e_primary.index], + ) for t1, t2, i in zip(texts, autotexts, e_primary.index): if e_primary.at[i] < 0.04 * e_primary.sum(): t1.remove() t2.remove() -def plot_total_cost_bar(n, ax=None): - if ax is None: ax = plt.gca() + +def plot_total_cost_bar(n, opts, ax=None): + if ax is None: + ax = plt.gca() total_load = (n.snapshot_weightings.generators * n.loads_t.p.sum(axis=1)).sum() - tech_colors = 
opts['tech_colors'] + tech_colors = opts["tech_colors"] def split_costs(n): costs = aggregate_costs(n).reset_index(level=0, drop=True) - costs_ex = aggregate_costs(n, existing_only=True).reset_index(level=0, drop=True) - return (costs['capital'].add(costs['marginal'], fill_value=0.), - costs_ex['capital'], costs['capital'] - costs_ex['capital'], costs['marginal']) + costs_ex = aggregate_costs(n, existing_only=True).reset_index( + level=0, drop=True + ) + return ( + costs["capital"].add(costs["marginal"], fill_value=0.0), + costs_ex["capital"], + costs["capital"] - costs_ex["capital"], + costs["marginal"], + ) costs, costs_cap_ex, costs_cap_new, costs_marg = split_costs(n) - costs_graph = pd.DataFrame(dict(a=costs.drop('load', errors='ignore')), - index=['AC-AC', 'AC line', 'onwind', 'offwind-ac', - 'offwind-dc', 'solar', 'OCGT','CCGT', 'battery', 'H2']).dropna() - bottom = np.array([0., 0.]) + costs_graph = pd.DataFrame( + dict(a=costs.drop("load", errors="ignore")), + index=[ + "AC-AC", + "AC line", + "onwind", + "offwind-ac", + "offwind-dc", + "solar", + "OCGT", + "CCGT", + "battery", + "H2", + ], + ).dropna() + bottom = np.array([0.0, 0.0]) texts = [] - for i,ind in enumerate(costs_graph.index): - data = np.asarray(costs_graph.loc[ind])/total_load - ax.bar([0.5], data, bottom=bottom, color=tech_colors[ind], - width=0.7, zorder=-1) + for i, ind in enumerate(costs_graph.index): + data = np.asarray(costs_graph.loc[ind]) / total_load + ax.bar([0.5], data, bottom=bottom, color=tech_colors[ind], width=0.7, zorder=-1) bottom_sub = bottom - bottom = bottom+data + bottom = bottom + data - if ind in opts['conv_techs'] + ['AC line']: + if ind in opts["conv_techs"] + ["AC line"]: for c in [costs_cap_ex, costs_marg]: if ind in c: - data_sub = np.asarray([c.loc[ind]])/total_load - ax.bar([0.5], data_sub, linewidth=0, - bottom=bottom_sub, color=tech_colors[ind], - width=0.7, zorder=-1, alpha=0.8) + data_sub = np.asarray([c.loc[ind]]) / total_load + ax.bar( + [0.5], + data_sub, + linewidth=0, + bottom=bottom_sub, + color=tech_colors[ind], + width=0.7, + zorder=-1, + alpha=0.8, + ) bottom_sub += data_sub if abs(data[-1]) < 5: continue - text = ax.text(1.1,(bottom-0.5*data)[-1]-3,opts['nice_names'].get(ind,ind)) + text = ax.text( + 1.1, (bottom - 0.5 * data)[-1] - 3, opts["nice_names"].get(ind, ind) + ) texts.append(text) ax.set_ylabel("Average system cost [Eur/MWh]") - ax.set_ylim([0, opts.get('costs_max', 80)]) + ax.set_ylim([0, opts.get("costs_max", 80)]) ax.set_xlim([0, 1]) ax.set_xticklabels([]) - ax.grid(True, axis="y", color='k', linestyle='dotted') + ax.grid(True, axis="y", color="k", linestyle="dotted") if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('plot_network', network='elec', simpl='', - clusters='5', ll='copt', opts='Co2L-24H', - attr='p_nom', ext="pdf") + + snakemake = mock_snakemake( + "plot_network", + simpl="", + clusters="5", + ll="copt", + opts="Co2L-24H", + attr="p_nom", + ext="pdf", + ) configure_logging(snakemake) set_plot_style() - paths, config, wildcards, logs, out = retrieve_snakemake_keys(snakemake) + config, wildcards = snakemake.config, snakemake.wildcards - map_figsize = config['map']['figsize'] - map_boundaries = config['map']['boundaries'] + map_figsize = config["plotting"]["map"]["figsize"] + map_boundaries = config["plotting"]["map"]["boundaries"] - n = load_network_for_plots(paths.network, paths.tech_costs, config) + n = load_network_for_plots( + 
snakemake.input.network, snakemake.input.tech_costs, config + ) - scenario_opts = wildcards.opts.split('-') + scenario_opts = wildcards.opts.split("-") - fig, ax = plt.subplots(figsize=map_figsize, subplot_kw={"projection": ccrs.PlateCarree()}) - plot_map(n, ax, wildcards.attr, config) + fig, ax = plt.subplots( + figsize=map_figsize, subplot_kw={"projection": ccrs.PlateCarree()} + ) + plot_map(n, config["plotting"], ax=ax, attribute=wildcards.attr) - fig.savefig(out.only_map, dpi=150, bbox_inches='tight') + fig.savefig(snakemake.output.only_map, dpi=150, bbox_inches="tight") ax1 = fig.add_axes([-0.115, 0.625, 0.2, 0.2]) - plot_total_energy_pie(n, ax1) + plot_total_energy_pie(n, config["plotting"], ax=ax1) ax2 = fig.add_axes([-0.075, 0.1, 0.1, 0.45]) - plot_total_cost_bar(n, ax2) + plot_total_cost_bar(n, config["plotting"], ax=ax2) ll = wildcards.ll ll_type = ll[0] ll_factor = ll[1:] - lbl = dict(c='line cost', v='line volume')[ll_type] - amnt = '{ll} x today\'s'.format(ll=ll_factor) if ll_factor != 'opt' else 'optimal' - fig.suptitle('Expansion to {amount} {label} at {clusters} clusters' - .format(amount=amnt, label=lbl, clusters=wildcards.clusters)) + lbl = dict(c="line cost", v="line volume")[ll_type] + amnt = "{ll} x today's".format(ll=ll_factor) if ll_factor != "opt" else "optimal" + fig.suptitle( + "Expansion to {amount} {label} at {clusters} clusters".format( + amount=amnt, label=lbl, clusters=wildcards.clusters + ) + ) - fig.savefig(out.ext, transparent=True, bbox_inches='tight') + fig.savefig(snakemake.output.ext, transparent=True, bbox_inches="tight") diff --git a/scripts/plot_p_nom_max.py b/scripts/plot_p_nom_max.py index ea66d612..4577401d 100644 --- a/scripts/plot_p_nom_max.py +++ b/scripts/plot_p_nom_max.py @@ -1,4 +1,5 @@ -# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -16,14 +17,13 @@ Outputs Description ----------- - """ import logging -from _helpers import configure_logging, retrieve_snakemake_keys -import pypsa -import pandas as pd import matplotlib.pyplot as plt +import pandas as pd +import pypsa +from _helpers import configure_logging logger = logging.getLogger(__name__) @@ -31,11 +31,13 @@ logger = logging.getLogger(__name__) def cum_p_nom_max(net, tech, country=None): carrier_b = net.generators.carrier == tech - generators = pd.DataFrame(dict( - p_nom_max=net.generators.loc[carrier_b, 'p_nom_max'], - p_max_pu=net.generators_t.p_max_pu.loc[:,carrier_b].mean(), - country=net.generators.loc[carrier_b, 'bus'].map(net.buses.country) - )).sort_values("p_max_pu", ascending=False) + generators = pd.DataFrame( + dict( + p_nom_max=net.generators.loc[carrier_b, "p_nom_max"], + p_max_pu=net.generators_t.p_max_pu.loc[:, carrier_b].mean(), + country=net.generators.loc[carrier_b, "bus"].map(net.buses.country), + ) + ).sort_values("p_max_pu", ascending=False) if country is not None: generators = generators.loc[generators.country == country] @@ -46,33 +48,38 @@ def cum_p_nom_max(net, tech, country=None): if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('plot_p_nom_max', network='elec', simpl='', - techs='solar,onwind,offwind-dc', ext='png', - clusts= '5,full', country= 'all') - configure_logging(snakemake) - paths, config, wildcards, logs, out = retrieve_snakemake_keys(snakemake) + snakemake = mock_snakemake( + "plot_p_nom_max", + simpl="", 
+ techs="solar,onwind,offwind-dc", + ext="png", + clusts="5,full", + country="all", + ) + configure_logging(snakemake) plot_kwds = dict(drawstyle="steps-post") - clusters = wildcards.clusts.split(',') - techs = wildcards.techs.split(',') - country = wildcards.country - if country == 'all': + clusters = snakemake.wildcards.clusts.split(",") + techs = snakemake.wildcards.techs.split(",") + country = snakemake.wildcards.country + if country == "all": country = None else: - plot_kwds['marker'] = 'x' + plot_kwds["marker"] = "x" fig, axes = plt.subplots(1, len(techs)) for j, cluster in enumerate(clusters): - net = pypsa.Network(paths[j]) + net = pypsa.Network(snakemake.input[j]) for i, tech in enumerate(techs): - cum_p_nom_max(net, tech, country).plot(x="p_max_pu", y="cum_p_nom_max", - label=cluster, ax=axes[i], **plot_kwds) + cum_p_nom_max(net, tech, country).plot( + x="p_max_pu", y="cum_p_nom_max", label=cluster, ax=axes[i], **plot_kwds + ) for i, tech in enumerate(techs): ax = axes[i] @@ -81,4 +88,4 @@ if __name__ == "__main__": plt.legend(title="Cluster level") - fig.savefig(out[0], transparent=True, bbox_inches='tight') + fig.savefig(snakemake.output[0], transparent=True, bbox_inches="tight") diff --git a/scripts/plot_summary.py b/scripts/plot_summary.py index 48f064b0..c05db561 100644 --- a/scripts/plot_summary.py +++ b/scripts/plot_summary.py @@ -1,4 +1,5 @@ -# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -16,15 +17,14 @@ Outputs Description ----------- - """ -import os import logging -from _helpers import configure_logging, retrieve_snakemake_keys +import os -import pandas as pd import matplotlib.pyplot as plt +import pandas as pd +from _helpers import configure_logging logger = logging.getLogger(__name__) @@ -52,22 +52,37 @@ def rename_techs(label): return label -preferred_order = pd.Index(["transmission lines","hydroelectricity","hydro reservoir","run of river","pumped hydro storage","onshore wind","offshore wind ac", "offshore wind dc","solar PV","solar thermal","OCGT","hydrogen storage","battery storage"]) +preferred_order = pd.Index( + [ + "transmission lines", + "hydroelectricity", + "hydro reservoir", + "run of river", + "pumped hydro storage", + "onshore wind", + "offshore wind ac", + "offshore wind dc", + "solar PV", + "solar thermal", + "OCGT", + "hydrogen storage", + "battery storage", + ] +) def plot_costs(infn, config, fn=None): - ## For now ignore the simpl header - cost_df = pd.read_csv(infn,index_col=list(range(3)),header=[1,2,3]) + cost_df = pd.read_csv(infn, index_col=list(range(3)), header=[1, 2, 3]) df = cost_df.groupby(cost_df.index.get_level_values(2)).sum() - #convert to billions - df = df/1e9 + # convert to billions + df = df / 1e9 df = df.groupby(df.index.map(rename_techs)).sum() - to_drop = df.index[df.max(axis=1) < config['plotting']['costs_threshold']] + to_drop = df.index[df.max(axis=1) < config["plotting"]["costs_threshold"]] print("dropping") @@ -77,22 +92,28 @@ def plot_costs(infn, config, fn=None): print(df.sum()) - new_index = (preferred_order&df.index).append(df.index.difference(preferred_order)) + new_index = (preferred_order.intersection(df.index)).append( + df.index.difference(preferred_order) + ) new_columns = df.sum().sort_values().index fig, ax = plt.subplots() - fig.set_size_inches((12,8)) + fig.set_size_inches((12, 8)) - 
df.loc[new_index,new_columns].T.plot(kind="bar",ax=ax,stacked=True,color=[config['plotting']['tech_colors'][i] for i in new_index]) + df.loc[new_index, new_columns].T.plot( + kind="bar", + ax=ax, + stacked=True, + color=[config["plotting"]["tech_colors"][i] for i in new_index], + ) - - handles,labels = ax.get_legend_handles_labels() + handles, labels = ax.get_legend_handles_labels() handles.reverse() labels.reverse() - ax.set_ylim([0,config['plotting']['costs_max']]) + ax.set_ylim([0, config["plotting"]["costs_max"]]) ax.set_ylabel("System Cost [EUR billion per year]") @@ -100,8 +121,7 @@ def plot_costs(infn, config, fn=None): ax.grid(axis="y") - ax.legend(handles,labels,ncol=4,loc="upper left") - + ax.legend(handles, labels, ncol=4, loc="upper left") fig.tight_layout() @@ -110,17 +130,16 @@ def plot_costs(infn, config, fn=None): def plot_energy(infn, config, fn=None): - - energy_df = pd.read_csv(infn, index_col=list(range(2)),header=[1,2,3]) + energy_df = pd.read_csv(infn, index_col=list(range(2)), header=[1, 2, 3]) df = energy_df.groupby(energy_df.index.get_level_values(1)).sum() - #convert MWh to TWh - df = df/1e6 + # convert MWh to TWh + df = df / 1e6 df = df.groupby(df.index.map(rename_techs)).sum() - to_drop = df.index[df.abs().max(axis=1) < config['plotting']['energy_threshold']] + to_drop = df.index[df.abs().max(axis=1) < config["plotting"]["energy_threshold"]] print("dropping") @@ -130,22 +149,28 @@ def plot_energy(infn, config, fn=None): print(df.sum()) - new_index = (preferred_order&df.index).append(df.index.difference(preferred_order)) + new_index = (preferred_order.intersection(df.index)).append( + df.index.difference(preferred_order) + ) new_columns = df.columns.sort_values() fig, ax = plt.subplots() - fig.set_size_inches((12,8)) + fig.set_size_inches((12, 8)) - df.loc[new_index,new_columns].T.plot(kind="bar",ax=ax,stacked=True,color=[config['plotting']['tech_colors'][i] for i in new_index]) + df.loc[new_index, new_columns].T.plot( + kind="bar", + ax=ax, + stacked=True, + color=[config["plotting"]["tech_colors"][i] for i in new_index], + ) - - handles,labels = ax.get_legend_handles_labels() + handles, labels = ax.get_legend_handles_labels() handles.reverse() labels.reverse() - ax.set_ylim([config['plotting']['energy_min'], config['plotting']['energy_max']]) + ax.set_ylim([config["plotting"]["energy_min"], config["plotting"]["energy_max"]]) ax.set_ylabel("Energy [TWh/a]") @@ -153,8 +178,7 @@ def plot_energy(infn, config, fn=None): ax.grid(axis="y") - ax.legend(handles,labels,ncol=4,loc="upper left") - + ax.legend(handles, labels, ncol=4, loc="upper left") fig.tight_layout() @@ -163,19 +187,30 @@ def plot_energy(infn, config, fn=None): if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('plot_summary', summary='energy', network='elec', - simpl='', clusters=5, ll='copt', opts='Co2L-24H', - attr='', ext='png', country='all') + + snakemake = mock_snakemake( + "plot_summary", + summary="energy", + simpl="", + clusters=5, + ll="copt", + opts="Co2L-24H", + attr="", + ext="png", + country="all", + ) configure_logging(snakemake) - paths, config, wildcards, logs, out = retrieve_snakemake_keys(snakemake) + config = snakemake.config - summary = wildcards.summary + summary = snakemake.wildcards.summary try: func = globals()[f"plot_{summary}"] except KeyError: raise RuntimeError(f"plotting function for {summary} has not been defined") - func(os.path.join(paths[0], f"{summary}.csv"), 
config, out[0]) + func( + os.path.join(snakemake.input[0], f"{summary}.csv"), config, snakemake.output[0] + ) diff --git a/scripts/prepare_links_p_nom.py b/scripts/prepare_links_p_nom.py index 6bd4bca4..55da75eb 100644 --- a/scripts/prepare_links_p_nom.py +++ b/scripts/prepare_links_p_nom.py @@ -1,11 +1,14 @@ #!/usr/bin/env python +# -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ -Extracts capacities of HVDC links from `Wikipedia `_. +Extracts capacities of HVDC links from `Wikipedia. + +`_. Relevant Settings ----------------- @@ -33,13 +36,12 @@ Description ----------- *None* - """ import logging -from _helpers import configure_logging, retrieve_snakemake_keys import pandas as pd +from _helpers import configure_logging logger = logging.getLogger(__name__) @@ -49,31 +51,45 @@ def multiply(s): def extract_coordinates(s): - regex = (r"(\d{1,2})°(\d{1,2})′(\d{1,2})″(N|S) " - r"(\d{1,2})°(\d{1,2})′(\d{1,2})″(E|W)") + regex = ( + r"(\d{1,2})°(\d{1,2})′(\d{1,2})″(N|S) " r"(\d{1,2})°(\d{1,2})′(\d{1,2})″(E|W)" + ) e = s.str.extract(regex, expand=True) - lat = (e[0].astype(float) + (e[1].astype(float) + e[2].astype(float)/60.)/60.)*e[3].map({'N': +1., 'S': -1.}) - lon = (e[4].astype(float) + (e[5].astype(float) + e[6].astype(float)/60.)/60.)*e[7].map({'E': +1., 'W': -1.}) + lat = ( + e[0].astype(float) + (e[1].astype(float) + e[2].astype(float) / 60.0) / 60.0 + ) * e[3].map({"N": +1.0, "S": -1.0}) + lon = ( + e[4].astype(float) + (e[5].astype(float) + e[6].astype(float) / 60.0) / 60.0 + ) * e[7].map({"E": +1.0, "W": -1.0}) return lon, lat if __name__ == "__main__": - if 'snakemake' not in globals(): - from _helpers import mock_snakemake #rule must be enabled in config - snakemake = mock_snakemake('prepare_links_p_nom', simpl='', network='elec') + if "snakemake" not in globals(): + from _helpers import mock_snakemake # rule must be enabled in config + + snakemake = mock_snakemake("prepare_links_p_nom", simpl="") configure_logging(snakemake) - paths, config, wildcards, logs, out = retrieve_snakemake_keys(snakemake) - - links_p_nom = pd.read_html('https://en.wikipedia.org/wiki/List_of_HVDC_projects', header=0, match="SwePol")[0] + links_p_nom = pd.read_html( + "https://en.wikipedia.org/wiki/List_of_HVDC_projects", header=0, match="SwePol" + )[0] mw = "Power (MW)" - m_b = links_p_nom[mw].str.contains('x').fillna(False) + m_b = links_p_nom[mw].str.contains("x").fillna(False) - links_p_nom.loc[m_b, mw] = links_p_nom.loc[m_b, mw].str.split('x').pipe(multiply) - links_p_nom[mw] = links_p_nom[mw].str.extract("[-/]?([\d.]+)", expand=False).astype(float) + links_p_nom.loc[m_b, mw] = links_p_nom.loc[m_b, mw].str.split("x").pipe(multiply) + links_p_nom[mw] = ( + links_p_nom[mw].str.extract("[-/]?([\d.]+)", expand=False).astype(float) + ) - links_p_nom['x1'], links_p_nom['y1'] = extract_coordinates(links_p_nom['Converterstation 1']) - links_p_nom['x2'], links_p_nom['y2'] = extract_coordinates(links_p_nom['Converterstation 2']) + links_p_nom["x1"], links_p_nom["y1"] = extract_coordinates( + links_p_nom["Converterstation 1"] + ) + links_p_nom["x2"], links_p_nom["y2"] = extract_coordinates( + links_p_nom["Converterstation 2"] + ) - links_p_nom.dropna(subset=['x1', 'y1', 'x2', 'y2']).to_csv(out[0], index=False) + links_p_nom.dropna(subset=["x1", "y1", "x2", "y2"]).to_csv( + snakemake.output[0], index=False + ) diff --git a/scripts/prepare_network.py b/scripts/prepare_network.py index 
f984ace6..645f8c34 100755 --- a/scripts/prepare_network.py +++ b/scripts/prepare_network.py @@ -1,10 +1,12 @@ -# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT # coding: utf-8 """ -Prepare PyPSA network for solving according to :ref:`opts` and :ref:`ll`, such as +Prepare PyPSA network for solving according to :ref:`opts` and :ref:`ll`, such +as. - adding an annual **limit** of carbon-dioxide emissions, - adding an exogenous **price** per tonne emissions of carbon-dioxide (or other kinds), @@ -20,9 +22,10 @@ Relevant Settings .. code:: yaml costs: + year: + version: + fill_values: emission_prices: - USD2013_to_EUR2013: - discountrate: marginal_cost: capital_cost: @@ -37,7 +40,7 @@ Relevant Settings Inputs ------ -- ``data/costs.csv``: The database of cost assumptions for all included technologies for specific years from various sources; e.g. discount rate, lifetime, investment (CAPEX), fixed operation and maintenance (FOM), variable operation and maintenance (VOM), fuel costs, efficiency, carbon-dioxide intensity. +- ``resources/costs.csv``: The database of cost assumptions for all included technologies for specific years from various sources; e.g. discount rate, lifetime, investment (CAPEX), fixed operation and maintenance (FOM), variable operation and maintenance (VOM), fuel costs, efficiency, carbon-dioxide intensity. - ``networks/elec_s{simpl}_{clusters}.nc``: confer :ref:`cluster` Outputs @@ -52,17 +55,15 @@ Description The rule :mod:`prepare_all_networks` runs for all ``scenario`` s in the configuration file the rule :mod:`prepare_network`. - """ import logging -from _helpers import configure_logging - import re -import pypsa + import numpy as np import pandas as pd - +import pypsa +from _helpers import configure_logging from add_electricity import load_costs, update_transmission_costs idx = pd.IndexSlice @@ -70,55 +71,84 @@ idx = pd.IndexSlice logger = logging.getLogger(__name__) -def add_co2limit(n, co2limit, Nyears=1.): - - n.add("GlobalConstraint", "CO2Limit", - carrier_attribute="co2_emissions", sense="<=", - constant=co2limit * Nyears) +def add_co2limit(n, co2limit, Nyears=1.0): + n.add( + "GlobalConstraint", + "CO2Limit", + carrier_attribute="co2_emissions", + sense="<=", + constant=co2limit * Nyears, + ) -def add_emission_prices(n, emission_prices={'co2': 0.}, exclude_co2=False): - if exclude_co2: emission_prices.pop('co2') - ep = (pd.Series(emission_prices).rename(lambda x: x+'_emissions') * - n.carriers.filter(like='_emissions')).sum(axis=1) +def add_gaslimit(n, gaslimit, Nyears=1.0): + sel = n.carriers.index.intersection(["OCGT", "CCGT", "CHP"]) + n.carriers.loc[sel, "gas_usage"] = 1.0 + + n.add( + "GlobalConstraint", + "GasLimit", + carrier_attribute="gas_usage", + sense="<=", + constant=gaslimit * Nyears, + ) + + +def add_emission_prices(n, emission_prices={"co2": 0.0}, exclude_co2=False): + if exclude_co2: + emission_prices.pop("co2") + ep = ( + pd.Series(emission_prices).rename(lambda x: x + "_emissions") + * n.carriers.filter(like="_emissions") + ).sum(axis=1) gen_ep = n.generators.carrier.map(ep) / n.generators.efficiency - n.generators['marginal_cost'] += gen_ep + n.generators["marginal_cost"] += gen_ep su_ep = n.storage_units.carrier.map(ep) / n.storage_units.efficiency_dispatch - n.storage_units['marginal_cost'] += su_ep + n.storage_units["marginal_cost"] += su_ep -def set_line_s_max_pu(n, s_max_pu = 0.7): - n.lines['s_max_pu'] = 
s_max_pu +def set_line_s_max_pu(n, s_max_pu=0.7): + n.lines["s_max_pu"] = s_max_pu logger.info(f"N-1 security margin of lines set to {s_max_pu}") def set_transmission_limit(n, ll_type, factor, costs, Nyears=1): - links_dc_b = n.links.carrier == 'DC' if not n.links.empty else pd.Series() + links_dc_b = n.links.carrier == "DC" if not n.links.empty else pd.Series() - _lines_s_nom = (np.sqrt(3) * n.lines.type.map(n.line_types.i_nom) * - n.lines.num_parallel * n.lines.bus0.map(n.buses.v_nom)) - lines_s_nom = n.lines.s_nom.where(n.lines.type == '', _lines_s_nom) + _lines_s_nom = ( + np.sqrt(3) + * n.lines.type.map(n.line_types.i_nom) + * n.lines.num_parallel + * n.lines.bus0.map(n.buses.v_nom) + ) + lines_s_nom = n.lines.s_nom.where(n.lines.type == "", _lines_s_nom) + col = "capital_cost" if ll_type == "c" else "length" + ref = ( + lines_s_nom @ n.lines[col] + + n.links.loc[links_dc_b, "p_nom"] @ n.links.loc[links_dc_b, col] + ) - col = 'capital_cost' if ll_type == 'c' else 'length' - ref = (lines_s_nom @ n.lines[col] + - n.links.loc[links_dc_b, "p_nom"] @ n.links.loc[links_dc_b, col]) + update_transmission_costs(n, costs) - update_transmission_costs(n, costs, simple_hvdc_costs=False) + if factor == "opt" or float(factor) > 1.0: + n.lines["s_nom_min"] = lines_s_nom + n.lines["s_nom_extendable"] = True - if factor == 'opt' or float(factor) > 1.0: - n.lines['s_nom_min'] = lines_s_nom - n.lines['s_nom_extendable'] = True + n.links.loc[links_dc_b, "p_nom_min"] = n.links.loc[links_dc_b, "p_nom"] + n.links.loc[links_dc_b, "p_nom_extendable"] = True - n.links.loc[links_dc_b, 'p_nom_min'] = n.links.loc[links_dc_b, 'p_nom'] - n.links.loc[links_dc_b, 'p_nom_extendable'] = True - - if factor != 'opt': - con_type = 'expansion_cost' if ll_type == 'c' else 'volume_expansion' + if factor != "opt": + con_type = "expansion_cost" if ll_type == "c" else "volume_expansion" rhs = float(factor) * ref - n.add('GlobalConstraint', f'l{ll_type}_limit', - type=f'transmission_{con_type}_limit', - sense='<=', constant=rhs, carrier_attribute='AC, DC') + n.add( + "GlobalConstraint", + f"l{ll_type}_limit", + type=f"transmission_{con_type}_limit", + sense="<=", + constant=rhs, + carrier_attribute="AC, DC", + ) return n @@ -132,7 +162,7 @@ def average_every_nhours(n, offset): m.snapshot_weightings = snapshot_weightings for c in n.iterate_components(): - pnl = getattr(m, c.list_name+"_t") + pnl = getattr(m, c.list_name + "_t") for k, df in c.pnl.items(): if not df.empty: pnl[k] = df.resample(offset).mean() @@ -145,23 +175,29 @@ def apply_time_segmentation(n, segments, solver_name="cbc"): try: import tsam.timeseriesaggregation as tsam except: - raise ModuleNotFoundError("Optional dependency 'tsam' not found." - "Install via 'pip install tsam'") + raise ModuleNotFoundError( + "Optional dependency 'tsam' not found. "
"Install via 'pip install tsam'" + ) p_max_pu_norm = n.generators_t.p_max_pu.max() p_max_pu = n.generators_t.p_max_pu / p_max_pu_norm load_norm = n.loads_t.p_set.max() load = n.loads_t.p_set / load_norm - + inflow_norm = n.storage_units_t.inflow.max() inflow = n.storage_units_t.inflow / inflow_norm raw = pd.concat([p_max_pu, load, inflow], axis=1, sort=False) - agg = tsam.TimeSeriesAggregation(raw, hoursPerPeriod=len(raw), - noTypicalPeriods=1, noSegments=int(segments), - segmentation=True, solver=solver_name) + agg = tsam.TimeSeriesAggregation( + raw, + hoursPerPeriod=len(raw), + noTypicalPeriods=1, + noSegments=int(segments), + segmentation=True, + solver=solver_name, + ) segmented = agg.createTypicalPeriods() @@ -169,9 +205,11 @@ def apply_time_segmentation(n, segments, solver_name="cbc"): offsets = np.insert(np.cumsum(weightings[:-1]), 0, 0) snapshots = [n.snapshots[0] + pd.Timedelta(f"{offset}h") for offset in offsets] - n.set_snapshots(pd.DatetimeIndex(snapshots, name='name')) - n.snapshot_weightings = pd.Series(weightings, index=snapshots, name="weightings", dtype="float64") - + n.set_snapshots(pd.DatetimeIndex(snapshots, name="name")) + n.snapshot_weightings = pd.Series( + weightings, index=snapshots, name="weightings", dtype="float64" + ) + segmented.index = snapshots n.generators_t.p_max_pu = segmented[n.generators_t.p_max_pu.columns] * p_max_pu_norm n.loads_t.p_set = segmented[n.loads_t.p_set.columns] * load_norm @@ -179,49 +217,57 @@ def apply_time_segmentation(n, segments, solver_name="cbc"): return n + def enforce_autarky(n, only_crossborder=False): if only_crossborder: lines_rm = n.lines.loc[ - n.lines.bus0.map(n.buses.country) != - n.lines.bus1.map(n.buses.country) - ].index + n.lines.bus0.map(n.buses.country) != n.lines.bus1.map(n.buses.country) + ].index links_rm = n.links.loc[ - n.links.bus0.map(n.buses.country) != - n.links.bus1.map(n.buses.country) - ].index + n.links.bus0.map(n.buses.country) != n.links.bus1.map(n.buses.country) + ].index else: lines_rm = n.lines.index - links_rm = n.links.loc[n.links.carrier=="DC"].index + links_rm = n.links.loc[n.links.carrier == "DC"].index n.mremove("Line", lines_rm) n.mremove("Link", links_rm) + def set_line_nom_max(n, s_nom_max_set=np.inf, p_nom_max_set=np.inf): n.lines.s_nom_max.clip(upper=s_nom_max_set, inplace=True) n.links.p_nom_max.clip(upper=p_nom_max_set, inplace=True) + if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('prepare_network', network='elec', simpl='', - clusters='40', ll='v0.3', opts='Co2L-24H') + + snakemake = mock_snakemake( + "prepare_network", simpl="", clusters="40", ll="v0.3", opts="Co2L-24H" + ) configure_logging(snakemake) - opts = snakemake.wildcards.opts.split('-') + opts = snakemake.wildcards.opts.split("-") n = pypsa.Network(snakemake.input[0]) - Nyears = n.snapshot_weightings.objective.sum() / 8760. 
- costs = load_costs(snakemake.input.tech_costs, snakemake.config['costs'], snakemake.config['electricity'], Nyears) + Nyears = n.snapshot_weightings.objective.sum() / 8760.0 + costs = load_costs( + snakemake.input.tech_costs, + snakemake.config["costs"], + snakemake.config["electricity"], + Nyears, + ) - set_line_s_max_pu(n, snakemake.config['lines']['s_max_pu']) + set_line_s_max_pu(n, snakemake.config["lines"]["s_max_pu"]) for o in opts: - m = re.match(r'^\d+h$', o, re.IGNORECASE) + m = re.match(r"^\d+h$", o, re.IGNORECASE) if m is not None: n = average_every_nhours(n, m.group(0)) break for o in opts: - m = re.match(r'^\d+seg$', o, re.IGNORECASE) + m = re.match(r"^\d+seg$", o, re.IGNORECASE) if m is not None: solver_name = snakemake.config["solving"]["solver"]["name"] n = apply_time_segmentation(n, m.group(0)[:-3], solver_name) @@ -231,10 +277,24 @@ if __name__ == "__main__": if "Co2L" in o: m = re.findall("[0-9]*\.?[0-9]+$", o) if len(m) > 0: - co2limit = float(m[0]) * snakemake.config['electricity']['co2base'] + co2limit = float(m[0]) * snakemake.config["electricity"]["co2base"] add_co2limit(n, co2limit, Nyears) + logger.info("Setting CO2 limit according to wildcard value.") else: - add_co2limit(n, snakemake.config['electricity']['co2limit'], Nyears) + add_co2limit(n, snakemake.config["electricity"]["co2limit"], Nyears) + logger.info("Setting CO2 limit according to config value.") + break + + for o in opts: + if "CH4L" in o: + m = re.findall("[0-9]*\.?[0-9]+$", o) + if len(m) > 0: + limit = float(m[0]) * 1e6 + add_gaslimit(n, limit, Nyears) + logger.info("Setting gas usage limit according to wildcard value.") + else: + add_gaslimit(n, snakemake.config["electricity"].get("gaslimit"), Nyears) + logger.info("Setting gas usage limit according to config value.") break for o in opts: @@ -243,7 +303,7 @@ if __name__ == "__main__": if oo[0].startswith(tuple(suptechs)): carrier = oo[0] # handles only p_nom_max as stores and lines have no potentials - attr_lookup = {"p": "p_nom_max", "c": "capital_cost"} + attr_lookup = {"p": "p_nom_max", "c": "capital_cost", "m": "marginal_cost"} attr = attr_lookup[oo[1][0]] factor = float(oo[1][1:]) if carrier == "AC": # lines do not have carrier @@ -252,20 +312,32 @@ if __name__ == "__main__": comps = {"Generator", "Link", "StorageUnit", "Store"} for c in n.iterate_components(comps): sel = c.df.carrier.str.contains(carrier) - c.df.loc[sel,attr] *= factor + c.df.loc[sel, attr] *= factor - if 'Ep' in opts: - add_emission_prices(n, snakemake.config['costs']['emission_prices']) + for o in opts: + if "Ep" in o: + m = re.findall("[0-9]*\.?[0-9]+$", o) + if len(m) > 0: + logger.info("Setting emission prices according to wildcard value.") + add_emission_prices(n, dict(co2=float(m[0]))) + else: + logger.info("Setting emission prices according to config value.") + add_emission_prices(n, snakemake.config["costs"]["emission_prices"]) + break ll_type, factor = snakemake.wildcards.ll[0], snakemake.wildcards.ll[1:] set_transmission_limit(n, ll_type, factor, costs, Nyears) - set_line_nom_max(n, s_nom_max_set=snakemake.config["lines"].get("s_nom_max,", np.inf), - p_nom_max_set=snakemake.config["links"].get("p_nom_max,", np.inf)) + set_line_nom_max( + n, + s_nom_max_set=snakemake.config["lines"].get("s_nom_max", np.inf), + p_nom_max_set=snakemake.config["links"].get("p_nom_max", np.inf), + ) if "ATK" in opts: enforce_autarky(n) elif "ATKc" in opts: enforce_autarky(n, only_crossborder=True) + n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards))) 
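+ # `n.meta` attached above is also written out by `export_to_netcdf` below in
+ # recent PyPSA versions (an assumption here), so the run's config and
+ # wildcards can be recovered from the solved network file later. A minimal
+ # sketch of reading it back; the file name is a hypothetical instance of the
+ # `elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc` naming scheme used by this
+ # workflow:
+ #
+ #   import pypsa
+ #   n = pypsa.Network("networks/elec_s_40_ec_lv0.3_Co2L-24H.nc")
+ #   print(n.meta["wildcards"])  # e.g. {'simpl': '', 'clusters': '40', ...}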
n.export_to_netcdf(snakemake.output[0]) diff --git a/scripts/retrieve_databundle.py b/scripts/retrieve_databundle.py index 86869879..f16a196c 100644 --- a/scripts/retrieve_databundle.py +++ b/scripts/retrieve_databundle.py @@ -1,5 +1,6 @@ -# Copyright 2019-2020 Fabian Hofmann (FIAS) -# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors +# -*- coding: utf-8 -*- +# Copyright 2019-2022 Fabian Hofmann (TUB, FIAS) +# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -11,7 +12,7 @@ The data bundle (1.4 GB) contains common GIS datasets like NUTS3 shapes, EEZ sha This rule downloads the data bundle from `zenodo `_ and extracts it in the ``data`` sub-directory, such that all files of the bundle are stored in the ``data/bundle`` subdirectory. -The :ref:`tutorial` uses a smaller `data bundle `_ than required for the full model (19 MB) +The :ref:`tutorial` uses a smaller `data bundle `_ than required for the full model (188 MB) .. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.3517921.svg :target: https://doi.org/10.5281/zenodo.3517921 @@ -28,29 +29,32 @@ The :ref:`tutorial` uses a smaller `data bundle 0] + costs = ( + n.generators.loc[tech_b, "bus"] + .map(connection_costs_to_bus[tech]) + .loc[lambda s: s > 0] + ) if not costs.empty: n.generators.loc[costs.index, "capital_cost"] += costs - logger.info("Displacing {} generator(s) and adding connection costs to capital_costs: {} " - .format(tech, ", ".join("{:.0f} Eur/MW/a for `{}`".format(d, b) for b, d in costs.iteritems()))) + logger.info( + "Displacing {} generator(s) and adding connection costs to capital_costs: {} ".format( + tech, + ", ".join( + "{:.0f} Eur/MW/a for `{}`".format(d, b) + for b, d in costs.items() + ), + ) + ) connection_costs[tech] = costs - pd.DataFrame(connection_costs).to_csv(output.connection_costs) - + pd.DataFrame(connection_costs).to_csv(output.connection_costs) -def _aggregate_and_move_components(n, busmap, connection_costs_to_bus, output, aggregate_one_ports={"Load", "StorageUnit"}): +def _aggregate_and_move_components( + n, + busmap, + connection_costs_to_bus, + output, + aggregate_one_ports={"Load", "StorageUnit"}, + aggregation_strategies=dict(), + exclude_carriers=None, +): def replace_components(n, c, df, pnl): n.mremove(c, n.df(c).index) @@ -200,7 +244,13 @@ def _aggregate_and_move_components(n, busmap, connection_costs_to_bus, output, a _adjust_capital_costs_using_connection_costs(n, connection_costs_to_bus, output) - generators, generators_pnl = aggregategenerators(n, busmap, custom_strategies={'p_nom_min': np.sum}) + _, generator_strategies = get_aggregation_strategies(aggregation_strategies) + + carriers = set(n.generators.carrier) - set(exclude_carriers) + generators, generators_pnl = aggregategenerators( + n, busmap, carriers=carriers, custom_strategies=generator_strategies + ) + replace_components(n, "Generator", generators, generators_pnl) for one_port in aggregate_one_ports: @@ -214,7 +264,7 @@ def _aggregate_and_move_components(n, busmap, connection_costs_to_bus, output, a n.mremove(c, df.index[df.bus0.isin(buses_to_del) | df.bus1.isin(buses_to_del)]) -def simplify_links(n, costs, config, output): +def simplify_links(n, costs, config, output, aggregation_strategies=dict()): ## Complex multi-node links are folded into end-points logger.info("Simplifying connected link components") @@ -222,8 +272,10 @@ def simplify_links(n, costs, config, output): return n, n.buses.index.to_series() # Determine connected link components, ignore all links but DC - 
adjacency_matrix = n.adjacency_matrix(branch_components=['Link'], - weights=dict(Link=(n.links.carrier == 'DC').astype(float))) + adjacency_matrix = n.adjacency_matrix( + branch_components=["Link"], + weights=dict(Link=(n.links.carrier == "DC").astype(float)), + ) _, labels = connected_components(adjacency_matrix, directed=False) labels = pd.Series(labels, n.buses.index) @@ -234,22 +286,23 @@ def simplify_links(n, costs, config, output): nodes = frozenset(nodes) seen = set() - supernodes = {m for m in nodes - if len(G.adj[m]) > 2 or (set(G.adj[m]) - nodes)} + supernodes = {m for m in nodes if len(G.adj[m]) > 2 or (set(G.adj[m]) - nodes)} for u in supernodes: for m, ls in G.adj[u].items(): - if m not in nodes or m in seen: continue + if m not in nodes or m in seen: + continue buses = [u, m] - links = [list(ls)] #[name for name in ls]] + links = [list(ls)] # [name for name in ls]] while m not in (supernodes | seen): seen.add(m) for m2, ls in G.adj[m].items(): - if m2 in seen or m2 == u: continue + if m2 in seen or m2 == u: + continue buses.append(m2) - links.append(list(ls)) # [name for name in ls]) + links.append(list(ls)) # [name for name in ls]) break else: # stub @@ -262,81 +315,135 @@ def simplify_links(n, costs, config, output): busmap = n.buses.index.to_series() connection_costs_per_link = _prepare_connection_costs_per_link(n, costs, config) - connection_costs_to_bus = pd.DataFrame(0., index=n.buses.index, columns=list(connection_costs_per_link)) + connection_costs_to_bus = pd.DataFrame( + 0.0, index=n.buses.index, columns=list(connection_costs_per_link) + ) for lbl in labels.value_counts().loc[lambda s: s > 2].index: for b, buses, links in split_links(labels.index[labels == lbl]): - if len(buses) <= 2: continue + if len(buses) <= 2: + continue - logger.debug('nodes = {}'.format(labels.index[labels == lbl])) - logger.debug('b = {}\nbuses = {}\nlinks = {}'.format(b, buses, links)) + logger.debug("nodes = {}".format(labels.index[labels == lbl])) + logger.debug("b = {}\nbuses = {}\nlinks = {}".format(b, buses, links)) - m = sp.spatial.distance_matrix(n.buses.loc[b, ['x', 'y']], - n.buses.loc[buses[1:-1], ['x', 'y']]) + m = sp.spatial.distance_matrix( + n.buses.loc[b, ["x", "y"]], n.buses.loc[buses[1:-1], ["x", "y"]] + ) busmap.loc[buses] = b[np.r_[0, m.argmin(axis=0), 1]] - connection_costs_to_bus.loc[buses] += _compute_connection_costs_to_bus(n, busmap, costs, config, connection_costs_per_link, buses) + connection_costs_to_bus.loc[buses] += _compute_connection_costs_to_bus( + n, busmap, costs, config, connection_costs_per_link, buses + ) all_links = [i for _, i in sum(links, [])] - p_max_pu = config['links'].get('p_max_pu', 1.) 
- lengths = n.links.loc[all_links, 'length'] - name = lengths.idxmax() + '+{}'.format(len(links) - 1) + p_max_pu = config["links"].get("p_max_pu", 1.0) + lengths = n.links.loc[all_links, "length"] + name = lengths.idxmax() + "+{}".format(len(links) - 1) params = dict( - carrier='DC', - bus0=b[0], bus1=b[1], - length=sum(n.links.loc[[i for _, i in l], 'length'].mean() for l in links), - p_nom=min(n.links.loc[[i for _, i in l], 'p_nom'].sum() for l in links), - underwater_fraction=sum(lengths/lengths.sum() * n.links.loc[all_links, 'underwater_fraction']), + carrier="DC", + bus0=b[0], + bus1=b[1], + length=sum( + n.links.loc[[i for _, i in l], "length"].mean() for l in links + ), + p_nom=min(n.links.loc[[i for _, i in l], "p_nom"].sum() for l in links), + underwater_fraction=sum( + lengths + / lengths.sum() + * n.links.loc[all_links, "underwater_fraction"] + ), p_max_pu=p_max_pu, p_min_pu=-p_max_pu, underground=False, - under_construction=False + under_construction=False, ) - logger.info("Joining the links {} connecting the buses {} to simple link {}".format(", ".join(all_links), ", ".join(buses), name)) + logger.info( + "Joining the links {} connecting the buses {} to simple link {}".format( + ", ".join(all_links), ", ".join(buses), name + ) + ) n.mremove("Link", all_links) static_attrs = n.components["Link"]["attrs"].loc[lambda df: df.static] - for attr, default in static_attrs.default.iteritems(): params.setdefault(attr, default) + for attr, default in static_attrs.default.items(): + params.setdefault(attr, default) n.links.loc[name] = pd.Series(params) # n.add("Link", **params) logger.debug("Collecting all components using the busmap") - _aggregate_and_move_components(n, busmap, connection_costs_to_bus, output) + exclude_carriers = config["clustering"]["simplify_network"].get( + "exclude_carriers", [] + ) + + _aggregate_and_move_components( + n, + busmap, + connection_costs_to_bus, + output, + aggregation_strategies=aggregation_strategies, + exclude_carriers=exclude_carriers, + ) return n, busmap -def remove_stubs(n, costs, config, output): + +def remove_stubs(n, costs, config, output, aggregation_strategies=dict()): logger.info("Removing stubs") - busmap = busmap_by_stubs(n) # ['country']) + across_borders = config["clustering"]["simplify_network"].get("remove_stubs_across_borders", True) + matching_attrs = [] if across_borders else ['country'] + busmap = busmap_by_stubs(n, matching_attrs) connection_costs_to_bus = _compute_connection_costs_to_bus(n, busmap, costs, config) - _aggregate_and_move_components(n, busmap, connection_costs_to_bus, output) + exclude_carriers = config["clustering"]["simplify_network"].get( + "exclude_carriers", [] + ) + + _aggregate_and_move_components( + n, + busmap, + connection_costs_to_bus, + output, + aggregation_strategies=aggregation_strategies, + exclude_carriers=exclude_carriers, + ) return n, busmap -def aggregate_to_substations(n, buses_i=None): + +def aggregate_to_substations(n, aggregation_strategies=dict(), buses_i=None): # can be used to aggregate a selection of buses to electrically closest neighbors # if no buses are given, nodes that are no substations or without offshore connection are aggregated - + if buses_i is None: - logger.info("Aggregating buses that are no substations or have no valid offshore connection") - buses_i = list(set(n.buses.index)-set(n.generators.bus)-set(n.loads.bus)) + logger.info( + "Aggregating buses that are no substations or have no valid offshore connection" + ) + buses_i = list(set(n.buses.index) - 
set(n.generators.bus) - set(n.loads.bus)) - weight = pd.concat({'Line': n.lines.length/n.lines.s_nom.clip(1e-3), - 'Link': n.links.length/n.links.p_nom.clip(1e-3)}) + weight = pd.concat( + { + "Line": n.lines.length / n.lines.s_nom.clip(1e-3), + "Link": n.links.length / n.links.p_nom.clip(1e-3), + } + ) - adj = n.adjacency_matrix(branch_components=['Line', 'Link'], weights=weight) + adj = n.adjacency_matrix(branch_components=["Line", "Link"], weights=weight) bus_indexer = n.buses.index.get_indexer(buses_i) - dist = pd.DataFrame(dijkstra(adj, directed=False, indices=bus_indexer), buses_i, n.buses.index) + dist = pd.DataFrame( + dijkstra(adj, directed=False, indices=bus_indexer), buses_i, n.buses.index + ) - dist[buses_i] = np.inf # bus in buses_i should not be assigned to different bus in buses_i + dist[ + buses_i + ] = np.inf # bus in buses_i should not be assigned to different bus in buses_i for c in n.buses.country.unique(): incountry_b = n.buses.country == c @@ -345,77 +452,150 @@ def aggregate_to_substations(n, buses_i=None): busmap = n.buses.index.to_series() busmap.loc[buses_i] = dist.idxmin(1) - clustering = get_clustering_from_busmap(n, busmap, - bus_strategies=dict(country=_make_consense("Bus", "country")), - aggregate_generators_weighted=True, - aggregate_generators_carriers=None, - aggregate_one_ports=["Load", "StorageUnit"], - line_length_factor=1.0, - generator_strategies={'p_nom_max': 'sum'}, - scale_link_capital_costs=False) - + bus_strategies, generator_strategies = get_aggregation_strategies( + aggregation_strategies + ) + + clustering = get_clustering_from_busmap( + n, + busmap, + bus_strategies=bus_strategies, + aggregate_generators_weighted=True, + aggregate_generators_carriers=None, + aggregate_one_ports=["Load", "StorageUnit"], + line_length_factor=1.0, + generator_strategies=generator_strategies, + scale_link_capital_costs=False, + ) return clustering.network, busmap -def cluster(n, n_clusters, config): +def cluster( + n, n_clusters, config, algorithm="hac", feature=None, aggregation_strategies=dict() +): logger.info(f"Clustering to {n_clusters} buses") - focus_weights = config.get('focus_weights', None) - - renewable_carriers = pd.Index([tech - for tech in n.generators.carrier.unique() - if tech.split('-', 2)[0] in config['renewable']]) - def consense(x): - v = x.iat[0] - assert ((x == v).all() or x.isnull().all()), ( - "The `potential` configuration option must agree for all renewable carriers, for now!" 
- ) - return v - potential_mode = (consense(pd.Series([config['renewable'][tech]['potential'] - for tech in renewable_carriers])) - if len(renewable_carriers) > 0 else 'conservative') - clustering = clustering_for_n_clusters(n, n_clusters, custom_busmap=False, potential_mode=potential_mode, - solver_name=config['solving']['solver']['name'], - focus_weights=focus_weights) + focus_weights = config.get("focus_weights", None) + + renewable_carriers = pd.Index( + [ + tech + for tech in n.generators.carrier.unique() + if tech.split("-", 2)[0] in config["renewable"] + ] + ) + + clustering = clustering_for_n_clusters( + n, + n_clusters, + custom_busmap=False, + aggregation_strategies=aggregation_strategies, + solver_name=config["solving"]["solver"]["name"], + algorithm=algorithm, + feature=feature, + focus_weights=focus_weights, + ) return clustering.network, clustering.busmap if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('simplify_network', simpl='', network='elec') + + snakemake = mock_snakemake("simplify_network", simpl="") configure_logging(snakemake) n = pypsa.Network(snakemake.input.network) + aggregation_strategies = snakemake.config["clustering"].get( + "aggregation_strategies", {} + ) + # translate str entries of aggregation_strategies to pd.Series functions: + aggregation_strategies = { + p: {k: getattr(pd.Series, v) for k, v in strategies.items()} + for p, strategies in aggregation_strategies.items() + } + n, trafo_map = simplify_network_to_380(n) Nyears = n.snapshot_weightings.objective.sum() / 8760 - technology_costs = load_costs(snakemake.input.tech_costs, snakemake.config['costs'], snakemake.config['electricity'], Nyears) + technology_costs = load_costs( + snakemake.input.tech_costs, + snakemake.config["costs"], + snakemake.config["electricity"], + Nyears, + ) - n, simplify_links_map = simplify_links(n, technology_costs, snakemake.config, snakemake.output) + n, simplify_links_map = simplify_links( + n, technology_costs, snakemake.config, snakemake.output, aggregation_strategies + ) - n, stub_map = remove_stubs(n, technology_costs, snakemake.config, snakemake.output) + busmaps = [trafo_map, simplify_links_map] - busmaps = [trafo_map, simplify_links_map, stub_map] + cluster_config = snakemake.config["clustering"]["simplify_network"] + if cluster_config.get("remove_stubs", True): + n, stub_map = remove_stubs( + n, + technology_costs, + snakemake.config, + snakemake.output, + aggregation_strategies=aggregation_strategies, + ) + busmaps.append(stub_map) - if snakemake.config.get('clustering', {}).get('simplify', {}).get('to_substations', False): - n, substation_map = aggregate_to_substations(n) + if cluster_config.get("to_substations", False): + n, substation_map = aggregate_to_substations(n, aggregation_strategies) busmaps.append(substation_map) + # treatment of outliers (nodes without a profile for the considered carrier): + # all nodes that have no profile of the given carrier are aggregated to their closest neighbor + if ( + snakemake.config.get("clustering", {}) + .get("cluster_network", {}) + .get("algorithm", "hac") + == "hac" + or cluster_config.get("algorithm", "hac") == "hac" + ): + carriers = ( + cluster_config.get("feature", "solar+onwind-time").split("-")[0].split("+") + ) + for carrier in carriers: + buses_i = list( + set(n.buses.index) - set(n.generators.query("carrier == @carrier").bus) + ) + logger.info( + f"clustering preparation (hac): aggregating 
{len(buses_i)} buses of type {carrier}." + ) + n, busmap_hac = aggregate_to_substations(n, aggregation_strategies, buses_i) + busmaps.append(busmap_hac) + if snakemake.wildcards.simpl: - n, cluster_map = cluster(n, int(snakemake.wildcards.simpl), snakemake.config) + n, cluster_map = cluster( + n, + int(snakemake.wildcards.simpl), + snakemake.config, + cluster_config.get("algorithm", "hac"), + cluster_config.get("feature", None), + aggregation_strategies, + ) busmaps.append(cluster_map) # some entries in n.buses are not updated in previous functions, therefore can be wrong. as they are not needed # and are lost when clustering (for example with the simpl wildcard), we remove them for consistency: - buses_c = {'symbol', 'tags', 'under_construction', 'substation_lv', 'substation_off'}.intersection(n.buses.columns) + buses_c = { + "symbol", + "tags", + "under_construction", + "substation_lv", + "substation_off", + }.intersection(n.buses.columns) n.buses = n.buses.drop(buses_c, axis=1) update_p_nom_max(n) - + + n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards))) n.export_to_netcdf(snakemake.output.network) busmap_s = reduce(lambda x, y: x.map(y), busmaps[1:], busmaps[0]) diff --git a/scripts/solve_network.py b/scripts/solve_network.py index b902f525..bcb786f0 100755 --- a/scripts/solve_network.py +++ b/scripts/solve_network.py @@ -1,9 +1,11 @@ -# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ -Solves linear optimal power flow for a network iteratively while updating reactances. +Solves linear optimal power flow for a network iteratively while updating +reactances. Relevant Settings ----------------- @@ -73,101 +75,123 @@ Details (and errors made through this heuristic) are discussed in the paper The rule :mod:`solve_all_networks` runs for all ``scenario`` s in the configuration file the rule :mod:`solve_network`. 
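The iterative procedure can be pictured roughly as follows (an illustrative
sketch only, not the exact implementation of ``pypsa.linopf.ilopf``;
``max_iterations`` and ``tolerance`` stand in for the configured options)::

    for iteration in range(max_iterations):
        network_lopf(n)  # solve with the current line impedances
        ratio = n.lines.s_nom_opt / n.lines.s_nom
        n.lines["x"] /= ratio  # more parallel capacity -> lower reactance
        n.lines["r"] /= ratio
        n.lines["s_nom"] = n.lines.s_nom_opt
        if ((ratio - 1.0).abs() < tolerance).all():  # capacities settled
            break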
- """ import logging -from _helpers import configure_logging +import re +from pathlib import Path import numpy as np import pandas as pd -import re - import pypsa -from pypsa.linopf import (get_var, define_constraints, linexpr, join_exprs, - network_lopf, ilopf) - -from pathlib import Path +from _helpers import configure_logging +from pypsa.descriptors import get_switchable_as_dense as get_as_dense +from pypsa.linopf import ( + define_constraints, + define_variables, + get_var, + ilopf, + join_exprs, + linexpr, + network_lopf, +) from vresutils.benchmark import memory_logger logger = logging.getLogger(__name__) def prepare_network(n, solve_opts): - - if 'clip_p_max_pu' in solve_opts: + if "clip_p_max_pu" in solve_opts: for df in (n.generators_t.p_max_pu, n.storage_units_t.inflow): - df.where(df>solve_opts['clip_p_max_pu'], other=0., inplace=True) + df.where(df > solve_opts["clip_p_max_pu"], other=0.0, inplace=True) - if solve_opts.get('load_shedding'): - n.add("Carrier", "Load") + load_shedding = solve_opts.get("load_shedding") + if load_shedding: + n.add("Carrier", "load", color="#dd2e23", nice_name="Load shedding") buses_i = n.buses.query("carrier == 'AC'").index - n.madd("Generator", buses_i, " load", - bus=buses_i, - carrier='load', - sign=1e-3, # Adjust sign to measure p and p_nom in kW instead of MW - marginal_cost=1e2, # Eur/kWh - # intersect between macroeconomic and surveybased - # willingness to pay - # http://journal.frontiersin.org/article/10.3389/fenrg.2015.00055/full - p_nom=1e9 # kW - ) + if not np.isscalar(load_shedding): + load_shedding = 1e2 # Eur/kWh + # intersect between macroeconomic and surveybased + # willingness to pay + # http://journal.frontiersin.org/article/10.3389/fenrg.2015.00055/full) + n.madd( + "Generator", + buses_i, + " load", + bus=buses_i, + carrier="load", + sign=1e-3, # Adjust sign to measure p and p_nom in kW instead of MW + marginal_cost=load_shedding, + p_nom=1e9, # kW + ) - if solve_opts.get('noisy_costs'): + if solve_opts.get("noisy_costs"): for t in n.iterate_components(n.one_port_components): - #if 'capital_cost' in t.df: + # if 'capital_cost' in t.df: # t.df['capital_cost'] += 1e1 + 2.*(np.random.random(len(t.df)) - 0.5) - if 'marginal_cost' in t.df: - t.df['marginal_cost'] += (1e-2 + 2e-3 * - (np.random.random(len(t.df)) - 0.5)) + if "marginal_cost" in t.df: + t.df["marginal_cost"] += 1e-2 + 2e-3 * ( + np.random.random(len(t.df)) - 0.5 + ) - for t in n.iterate_components(['Line', 'Link']): - t.df['capital_cost'] += (1e-1 + - 2e-2*(np.random.random(len(t.df)) - 0.5)) * t.df['length'] + for t in n.iterate_components(["Line", "Link"]): + t.df["capital_cost"] += ( + 1e-1 + 2e-2 * (np.random.random(len(t.df)) - 0.5) + ) * t.df["length"] - if solve_opts.get('nhours'): - nhours = solve_opts['nhours'] + if solve_opts.get("nhours"): + nhours = solve_opts["nhours"] n.set_snapshots(n.snapshots[:nhours]) - n.snapshot_weightings[:] = 8760. 
/ nhours + n.snapshot_weightings[:] = 8760.0 / nhours return n def add_CCL_constraints(n, config): - agg_p_nom_limits = config['electricity'].get('agg_p_nom_limits') + agg_p_nom_limits = config["electricity"].get("agg_p_nom_limits") try: - agg_p_nom_minmax = pd.read_csv(agg_p_nom_limits, - index_col=list(range(2))) + agg_p_nom_minmax = pd.read_csv(agg_p_nom_limits, index_col=list(range(2))) except IOError: - logger.exception("Need to specify the path to a .csv file containing " - "aggregate capacity limits per country in " - "config['electricity']['agg_p_nom_limit'].") - logger.info("Adding per carrier generation capacity constraints for " - "individual countries") + logger.exception( + "Need to specify the path to a .csv file containing " + "aggregate capacity limits per country in " + "config['electricity']['agg_p_nom_limit']." + ) + logger.info( + "Adding per carrier generation capacity constraints for " "individual countries" + ) gen_country = n.generators.bus.map(n.buses.country) # cc means country and carrier - p_nom_per_cc = (pd.DataFrame( - {'p_nom': linexpr((1, get_var(n, 'Generator', 'p_nom'))), - 'country': gen_country, 'carrier': n.generators.carrier}) - .dropna(subset=['p_nom']) - .groupby(['country', 'carrier']).p_nom - .apply(join_exprs)) - minimum = agg_p_nom_minmax['min'].dropna() + p_nom_per_cc = ( + pd.DataFrame( + { + "p_nom": linexpr((1, get_var(n, "Generator", "p_nom"))), + "country": gen_country, + "carrier": n.generators.carrier, + } + ) + .dropna(subset=["p_nom"]) + .groupby(["country", "carrier"]) + .p_nom.apply(join_exprs) + ) + minimum = agg_p_nom_minmax["min"].dropna() if not minimum.empty: - minconstraint = define_constraints(n, p_nom_per_cc[minimum.index], - '>=', minimum, 'agg_p_nom', 'min') - maximum = agg_p_nom_minmax['max'].dropna() + minconstraint = define_constraints( + n, p_nom_per_cc[minimum.index], ">=", minimum, "agg_p_nom", "min" + ) + maximum = agg_p_nom_minmax["max"].dropna() if not maximum.empty: - maxconstraint = define_constraints(n, p_nom_per_cc[maximum.index], - '<=', maximum, 'agg_p_nom', 'max') + maxconstraint = define_constraints( + n, p_nom_per_cc[maximum.index], "<=", maximum, "agg_p_nom", "max" + ) def add_EQ_constraints(n, o, scaling=1e-1): float_regex = "[0-9]*\.?[0-9]+" level = float(re.findall(float_regex, o)[0]) - if o[-1] == 'c': + if o[-1] == "c": ggrouper = n.generators.bus.map(n.buses.country) lgrouper = n.loads.bus.map(n.buses.country) sgrouper = n.storage_units.bus.map(n.buses.country) @@ -175,116 +199,239 @@ def add_EQ_constraints(n, o, scaling=1e-1): ggrouper = n.generators.bus lgrouper = n.loads.bus sgrouper = n.storage_units.bus - load = n.snapshot_weightings.generators @ \ - n.loads_t.p_set.groupby(lgrouper, axis=1).sum() - inflow = n.snapshot_weightings.stores @ \ - n.storage_units_t.inflow.groupby(sgrouper, axis=1).sum() - inflow = inflow.reindex(load.index).fillna(0.) 
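# The equity constraint assembled below requires, for every group c (a country
# if the option carries the "c" suffix, otherwise a single bus), that weighted
# local generation net of storage spillage covers at least a fraction `level`
# of the local demand net of storage inflow:
#
#     sum_t w_t * g_{c,t} - sum_t w_t * spill_{c,t} >= level * load_c - inflow_c
#
# Both sides are multiplied by `scaling` purely to improve solver numerics.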
- rhs = scaling * ( level * load - inflow ) - lhs_gen = linexpr((n.snapshot_weightings.generators * scaling, - get_var(n, "Generator", "p").T) - ).T.groupby(ggrouper, axis=1).apply(join_exprs) - lhs_spill = linexpr((-n.snapshot_weightings.stores * scaling, - get_var(n, "StorageUnit", "spill").T) - ).T.groupby(sgrouper, axis=1).apply(join_exprs) + load = ( + n.snapshot_weightings.generators + @ n.loads_t.p_set.groupby(lgrouper, axis=1).sum() + ) + inflow = ( + n.snapshot_weightings.stores + @ n.storage_units_t.inflow.groupby(sgrouper, axis=1).sum() + ) + inflow = inflow.reindex(load.index).fillna(0.0) + rhs = scaling * (level * load - inflow) + lhs_gen = ( + linexpr( + (n.snapshot_weightings.generators * scaling, get_var(n, "Generator", "p").T) + ) + .T.groupby(ggrouper, axis=1) + .apply(join_exprs) + ) + lhs_spill = ( + linexpr( + ( + -n.snapshot_weightings.stores * scaling, + get_var(n, "StorageUnit", "spill").T, + ) + ) + .T.groupby(sgrouper, axis=1) + .apply(join_exprs) + ) lhs_spill = lhs_spill.reindex(lhs_gen.index).fillna("") lhs = lhs_gen + lhs_spill define_constraints(n, lhs, ">=", rhs, "equity", "min") def add_BAU_constraints(n, config): - mincaps = pd.Series(config['electricity']['BAU_mincapacities']) - lhs = (linexpr((1, get_var(n, 'Generator', 'p_nom'))) - .groupby(n.generators.carrier).apply(join_exprs)) - define_constraints(n, lhs, '>=', mincaps[lhs.index], 'Carrier', 'bau_mincaps') + mincaps = pd.Series(config["electricity"]["BAU_mincapacities"]) + lhs = ( + linexpr((1, get_var(n, "Generator", "p_nom"))) + .groupby(n.generators.carrier) + .apply(join_exprs) + ) + define_constraints(n, lhs, ">=", mincaps[lhs.index], "Carrier", "bau_mincaps") def add_SAFE_constraints(n, config): - peakdemand = (1. + config['electricity']['SAFE_reservemargin']) *\ - n.loads_t.p_set.sum(axis=1).max() - conv_techs = config['plotting']['conv_techs'] - exist_conv_caps = n.generators.query('~p_nom_extendable & carrier in @conv_techs')\ - .p_nom.sum() - ext_gens_i = n.generators.query('carrier in @conv_techs & p_nom_extendable').index - lhs = linexpr((1, get_var(n, 'Generator', 'p_nom')[ext_gens_i])).sum() + peakdemand = ( + 1.0 + config["electricity"]["SAFE_reservemargin"] + ) * n.loads_t.p_set.sum(axis=1).max() + conv_techs = config["plotting"]["conv_techs"] + exist_conv_caps = n.generators.query( + "~p_nom_extendable & carrier in @conv_techs" + ).p_nom.sum() + ext_gens_i = n.generators.query("carrier in @conv_techs & p_nom_extendable").index + lhs = linexpr((1, get_var(n, "Generator", "p_nom")[ext_gens_i])).sum() rhs = peakdemand - exist_conv_caps - define_constraints(n, lhs, '>=', rhs, 'Safe', 'mintotalcap') + define_constraints(n, lhs, ">=", rhs, "Safe", "mintotalcap") + + +def add_operational_reserve_margin_constraint(n, config): + reserve_config = config["electricity"]["operational_reserve"] + EPSILON_LOAD = reserve_config["epsilon_load"] + EPSILON_VRES = reserve_config["epsilon_vres"] + CONTINGENCY = reserve_config["contingency"] + + # Reserve Variables + reserve = get_var(n, "Generator", "r") + lhs = linexpr((1, reserve)).sum(1) + + # Share of extendable renewable capacities + ext_i = n.generators.query("p_nom_extendable").index + vres_i = n.generators_t.p_max_pu.columns + if not ext_i.empty and not vres_i.empty: + capacity_factor = n.generators_t.p_max_pu[vres_i.intersection(ext_i)] + renewable_capacity_variables = get_var(n, "Generator", "p_nom")[ + vres_i.intersection(ext_i) + ] + lhs += linexpr( + (-EPSILON_VRES * capacity_factor, renewable_capacity_variables) + ).sum(1) + + # Total 
demand at t + demand = n.loads_t.p_set.sum(1) + + # VRES potential of non extendable generators + capacity_factor = n.generators_t.p_max_pu[vres_i.difference(ext_i)] + renewable_capacity = n.generators.p_nom[vres_i.difference(ext_i)] + potential = (capacity_factor * renewable_capacity).sum(1) + + # Right-hand-side + rhs = EPSILON_LOAD * demand + EPSILON_VRES * potential + CONTINGENCY + + define_constraints(n, lhs, ">=", rhs, "Reserve margin") + + +def update_capacity_constraint(n): + gen_i = n.generators.index + ext_i = n.generators.query("p_nom_extendable").index + fix_i = n.generators.query("not p_nom_extendable").index + + dispatch = get_var(n, "Generator", "p") + reserve = get_var(n, "Generator", "r") + + capacity_fixed = n.generators.p_nom[fix_i] + + p_max_pu = get_as_dense(n, "Generator", "p_max_pu") + + lhs = linexpr((1, dispatch), (1, reserve)) + + if not ext_i.empty: + capacity_variable = get_var(n, "Generator", "p_nom") + lhs += linexpr((-p_max_pu[ext_i], capacity_variable)).reindex( + columns=gen_i, fill_value="" + ) + + rhs = (p_max_pu[fix_i] * capacity_fixed).reindex(columns=gen_i, fill_value=0) + + define_constraints(n, lhs, "<=", rhs, "Generators", "updated_capacity_constraint") + + +def add_operational_reserve_margin(n, sns, config): + """ + Build reserve margin constraints based on the formulation given in + https://genxproject.github.io/GenX/dev/core/#Reserves. + """ + + define_variables(n, 0, np.inf, "Generator", "r", axes=[sns, n.generators.index]) + + add_operational_reserve_margin_constraint(n, config) + + update_capacity_constraint(n) def add_battery_constraints(n): nodes = n.buses.index[n.buses.carrier == "battery"] - if nodes.empty or ('Link', 'p_nom') not in n.variables.index: + if nodes.empty or ("Link", "p_nom") not in n.variables.index: return link_p_nom = get_var(n, "Link", "p_nom") - lhs = linexpr((1,link_p_nom[nodes + " charger"]), - (-n.links.loc[nodes + " discharger", "efficiency"].values, - link_p_nom[nodes + " discharger"].values)) - define_constraints(n, lhs, "=", 0, 'Link', 'charger_ratio') + lhs = linexpr( + (1, link_p_nom[nodes + " charger"]), + ( + -n.links.loc[nodes + " discharger", "efficiency"].values, + link_p_nom[nodes + " discharger"].values, + ), + ) + define_constraints(n, lhs, "=", 0, "Link", "charger_ratio") def extra_functionality(n, snapshots): """ - Collects supplementary constraints which will be passed to ``pypsa.linopf.network_lopf``. - If you want to enforce additional custom constraints, this is a good location to add them. - The arguments ``opts`` and ``snakemake.config`` are expected to be attached to the network. + Collects supplementary constraints which will be passed to + ``pypsa.linopf.network_lopf``. + + If you want to enforce additional custom constraints, this is a good + location to add them. The arguments ``opts`` and + ``snakemake.config`` are expected to be attached to the network. 
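Among the collected options, ``operational_reserve`` activates the GenX-style
reserve margin defined above, which for every snapshot t requires
approximately

    sum_g r_{g,t} >= epsilon_load * demand_t + epsilon_vres * potential_t + contingency,

with the epsilon_vres share of extendable renewables entering through their
capacity variables on the left-hand side.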
""" opts = n.opts config = n.config - if 'BAU' in opts and n.generators.p_nom_extendable.any(): + if "BAU" in opts and n.generators.p_nom_extendable.any(): add_BAU_constraints(n, config) - if 'SAFE' in opts and n.generators.p_nom_extendable.any(): + if "SAFE" in opts and n.generators.p_nom_extendable.any(): add_SAFE_constraints(n, config) - if 'CCL' in opts and n.generators.p_nom_extendable.any(): + if "CCL" in opts and n.generators.p_nom_extendable.any(): add_CCL_constraints(n, config) + reserve = config["electricity"].get("operational_reserve", {}) + if reserve.get("activate"): + add_operational_reserve_margin(n, snapshots, config) for o in opts: if "EQ" in o: add_EQ_constraints(n, o) add_battery_constraints(n) -def solve_network(n, config, opts='', **kwargs): - solver_options = config['solving']['solver'].copy() - solver_name = solver_options.pop('name') - cf_solving = config['solving']['options'] - track_iterations = cf_solving.get('track_iterations', False) - min_iterations = cf_solving.get('min_iterations', 4) - max_iterations = cf_solving.get('max_iterations', 6) +def solve_network(n, config, opts="", **kwargs): + solver_options = config["solving"]["solver"].copy() + solver_name = solver_options.pop("name") + cf_solving = config["solving"]["options"] + track_iterations = cf_solving.get("track_iterations", False) + min_iterations = cf_solving.get("min_iterations", 4) + max_iterations = cf_solving.get("max_iterations", 6) # add to network for extra_functionality n.config = config n.opts = opts - if cf_solving.get('skip_iterations', False): - network_lopf(n, solver_name=solver_name, solver_options=solver_options, - extra_functionality=extra_functionality, **kwargs) + skip_iterations = cf_solving.get("skip_iterations", False) + if not n.lines.s_nom_extendable.any(): + skip_iterations = True + logger.info("No expandable lines found. Skipping iterative solving.") + + if skip_iterations: + network_lopf( + n, solver_name=solver_name, solver_options=solver_options, **kwargs + ) else: - ilopf(n, solver_name=solver_name, solver_options=solver_options, - track_iterations=track_iterations, - min_iterations=min_iterations, - max_iterations=max_iterations, - extra_functionality=extra_functionality, **kwargs) + ilopf( + n, + solver_name=solver_name, + solver_options=solver_options, + track_iterations=track_iterations, + min_iterations=min_iterations, + max_iterations=max_iterations, + **kwargs + ) return n if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('solve_network', network='elec', simpl='', - clusters='5', ll='copt', opts='Co2L-BAU-CCL-24H') + + snakemake = mock_snakemake( + "solve_network", simpl="", clusters="5", ll="copt", opts="Co2L-BAU-CCL-24H" + ) configure_logging(snakemake) - tmpdir = snakemake.config['solving'].get('tmpdir') + tmpdir = snakemake.config["solving"].get("tmpdir") if tmpdir is not None: Path(tmpdir).mkdir(parents=True, exist_ok=True) - opts = snakemake.wildcards.opts.split('-') - solve_opts = snakemake.config['solving']['options'] + opts = snakemake.wildcards.opts.split("-") + solve_opts = snakemake.config["solving"]["options"] - fn = getattr(snakemake.log, 'memory', None) - with memory_logger(filename=fn, interval=30.) 
as mem: + fn = getattr(snakemake.log, "memory", None) + with memory_logger(filename=fn, interval=30.0) as mem: n = pypsa.Network(snakemake.input[0]) n = prepare_network(n, solve_opts) - n = solve_network(n, snakemake.config, opts, solver_dir=tmpdir, - solver_logfile=snakemake.log.solver) + n = solve_network( + n, + snakemake.config, + opts, + extra_functionality=extra_functionality, + solver_dir=tmpdir, + solver_logfile=snakemake.log.solver, + ) + n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards))) n.export_to_netcdf(snakemake.output[0]) logger.info("Maximum memory usage: {}".format(mem.mem_usage)) diff --git a/scripts/solve_operations_network.py b/scripts/solve_operations_network.py index 47bb713f..76918b5a 100644 --- a/scripts/solve_operations_network.py +++ b/scripts/solve_operations_network.py @@ -1,10 +1,11 @@ -# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ -Solves linear optimal dispatch in hourly resolution -using the capacities of previous capacity expansion in rule :mod:`solve_network`. +Solves linear optimal dispatch in hourly resolution using the capacities of +previous capacity expansion in rule :mod:`solve_network`. Relevant Settings ----------------- @@ -42,65 +43,80 @@ Outputs Description ----------- - """ import logging -from _helpers import configure_logging - -import pypsa -import numpy as np - from pathlib import Path + +import numpy as np +import pypsa +from _helpers import configure_logging +from solve_network import prepare_network, solve_network from vresutils.benchmark import memory_logger -from solve_network import solve_network, prepare_network logger = logging.getLogger(__name__) -def set_parameters_from_optimized(n, n_optim): - lines_typed_i = n.lines.index[n.lines.type != ''] - n.lines.loc[lines_typed_i, 'num_parallel'] = \ - n_optim.lines['num_parallel'].reindex(lines_typed_i, fill_value=0.) - n.lines.loc[lines_typed_i, 's_nom'] = ( - np.sqrt(3) * n.lines['type'].map(n.line_types.i_nom) * - n.lines.bus0.map(n.buses.v_nom) * n.lines.num_parallel) - lines_untyped_i = n.lines.index[n.lines.type == ''] - for attr in ('s_nom', 'r', 'x'): - n.lines.loc[lines_untyped_i, attr] = \ - n_optim.lines[attr].reindex(lines_untyped_i, fill_value=0.) - n.lines['s_nom_extendable'] = False +def set_parameters_from_optimized(n, n_optim): + lines_typed_i = n.lines.index[n.lines.type != ""] + n.lines.loc[lines_typed_i, "num_parallel"] = n_optim.lines["num_parallel"].reindex( + lines_typed_i, fill_value=0.0 + ) + n.lines.loc[lines_typed_i, "s_nom"] = ( + np.sqrt(3) + * n.lines["type"].map(n.line_types.i_nom) + * n.lines.bus0.map(n.buses.v_nom) + * n.lines.num_parallel + ) + + lines_untyped_i = n.lines.index[n.lines.type == ""] + for attr in ("s_nom", "r", "x"): + n.lines.loc[lines_untyped_i, attr] = n_optim.lines[attr].reindex( + lines_untyped_i, fill_value=0.0 + ) + n.lines["s_nom_extendable"] = False links_dc_i = n.links.index[n.links.p_nom_extendable] - n.links.loc[links_dc_i, 'p_nom'] = \ - n_optim.links['p_nom_opt'].reindex(links_dc_i, fill_value=0.) 
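# The same pattern repeats for each component above and below: copy the
# optimised capacity (*_opt) back into the nominal attribute and freeze
# expansion. A hypothetical helper capturing the idea (illustrative only,
# not part of the script):
def _fix_optimized(df, df_optim, attr):
    # e.g. attr="p_nom": set p_nom from p_nom_opt and disable extendability
    ext_i = df.index[df[attr + "_extendable"]]
    df.loc[ext_i, attr] = df_optim[attr + "_opt"].reindex(ext_i, fill_value=0.0)
    df.loc[ext_i, attr + "_extendable"] = False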
- n.links.loc[links_dc_i, 'p_nom_extendable'] = False + n.links.loc[links_dc_i, "p_nom"] = n_optim.links["p_nom_opt"].reindex( + links_dc_i, fill_value=0.0 + ) + n.links.loc[links_dc_i, "p_nom_extendable"] = False gen_extend_i = n.generators.index[n.generators.p_nom_extendable] - n.generators.loc[gen_extend_i, 'p_nom'] = \ - n_optim.generators['p_nom_opt'].reindex(gen_extend_i, fill_value=0.) - n.generators.loc[gen_extend_i, 'p_nom_extendable'] = False + n.generators.loc[gen_extend_i, "p_nom"] = n_optim.generators["p_nom_opt"].reindex( + gen_extend_i, fill_value=0.0 + ) + n.generators.loc[gen_extend_i, "p_nom_extendable"] = False stor_units_extend_i = n.storage_units.index[n.storage_units.p_nom_extendable] - n.storage_units.loc[stor_units_extend_i, 'p_nom'] = \ - n_optim.storage_units['p_nom_opt'].reindex(stor_units_extend_i, fill_value=0.) - n.storage_units.loc[stor_units_extend_i, 'p_nom_extendable'] = False + n.storage_units.loc[stor_units_extend_i, "p_nom"] = n_optim.storage_units[ + "p_nom_opt" + ].reindex(stor_units_extend_i, fill_value=0.0) + n.storage_units.loc[stor_units_extend_i, "p_nom_extendable"] = False stor_extend_i = n.stores.index[n.stores.e_nom_extendable] - n.stores.loc[stor_extend_i, 'e_nom'] = \ - n_optim.stores['e_nom_opt'].reindex(stor_extend_i, fill_value=0.) - n.stores.loc[stor_extend_i, 'e_nom_extendable'] = False + n.stores.loc[stor_extend_i, "e_nom"] = n_optim.stores["e_nom_opt"].reindex( + stor_extend_i, fill_value=0.0 + ) + n.stores.loc[stor_extend_i, "e_nom_extendable"] = False return n + if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('solve_operations_network', network='elec', - simpl='', clusters='5', ll='copt', opts='Co2L-BAU-24H') + + snakemake = mock_snakemake( + "solve_operations_network", + simpl="", + clusters="5", + ll="copt", + opts="Co2L-BAU-24H", + ) configure_logging(snakemake) - tmpdir = snakemake.config['solving'].get('tmpdir') + tmpdir = snakemake.config["solving"].get("tmpdir") if tmpdir is not None: Path(tmpdir).mkdir(parents=True, exist_ok=True) @@ -109,14 +125,20 @@ if __name__ == "__main__": n = set_parameters_from_optimized(n, n_optim) del n_optim - opts = snakemake.wildcards.opts.split('-') - snakemake.config['solving']['options']['skip_iterations'] = False + opts = snakemake.wildcards.opts.split("-") + snakemake.config["solving"]["options"]["skip_iterations"] = False - fn = getattr(snakemake.log, 'memory', None) - with memory_logger(filename=fn, interval=30.) 
as mem: - n = prepare_network(n, snakemake.config['solving']['options']) - n = solve_network(n, snakemake.config, opts, solver_dir=tmpdir, - solver_logfile=snakemake.log.solver) + fn = getattr(snakemake.log, "memory", None) + with memory_logger(filename=fn, interval=30.0) as mem: + n = prepare_network(n, snakemake.config["solving"]["options"]) + n = solve_network( + n, + snakemake.config, + opts, + solver_dir=tmpdir, + solver_logfile=snakemake.log.solver, + ) + n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards))) n.export_to_netcdf(snakemake.output[0]) logger.info("Maximum memory usage: {}".format(mem.mem_usage)) diff --git a/test/config.test1.yaml b/test/config.test1.yaml index 2986037b..7c9b0896 100755 --- a/test/config.test1.yaml +++ b/test/config.test1.yaml @@ -1,14 +1,15 @@ -# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 -version: 0.4.0 +version: 0.6.1 tutorial: true logging: level: INFO format: '%(levelname)s:%(name)s:%(message)s' -summary_dir: results +run: + name: "" scenario: simpl: [''] @@ -16,11 +17,7 @@ scenario: clusters: [5] opts: [Co2L-24H] -countries: ['DE'] - -clustering: - simplify: - to_substations: false # network is simplified to nodes with positive or negative power injection (i.e. substations or offwind connections) +countries: ['BE'] snapshots: start: "2013-03-01" @@ -30,6 +27,7 @@ snapshots: enable: prepare_links_p_nom: false retrieve_databundle: true + retrieve_cost_data: true build_cutout: false retrieve_cutout: true build_natura_raster: false @@ -56,8 +54,9 @@ electricity: atlite: nprocesses: 4 + show_progress: false # false saves time cutouts: - europe-2013-era5-tutorial: + be-03-2013-era5: module: era5 x: [4., 15.] y: [46., 56.] @@ -65,7 +64,7 @@ atlite: renewable: onwind: - cutout: europe-2013-era5-tutorial + cutout: be-03-2013-era5 resource: method: wind turbine: Vestas_V112_3MW @@ -74,15 +73,15 @@ renewable: corine: # Scholz, Y. (2012). Renewable energy based electricity supply at low costs: # development of the REMix model and application for Europe. ( p.42 / p.28) - grid_codes: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, - 24, 25, 26, 27, 28, 29, 31, 32] + grid_codes: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 32] distance: 1000 distance_grid_codes: [1, 2, 3, 4, 5, 6] natura: true + excluder_resolution: 200 potential: simple # or conservative clip_p_max_pu: 1.e-2 offwind-ac: - cutout: europe-2013-era5-tutorial + cutout: be-03-2013-era5 resource: method: wind turbine: NREL_ReferenceTurbine_5MW_offshore @@ -90,11 +89,13 @@ renewable: # correction_factor: 0.93 corine: [44, 255] natura: true + ship_threshold: 400 max_shore_distance: 30000 + excluder_resolution: 200 potential: simple # or conservative clip_p_max_pu: 1.e-2 offwind-dc: - cutout: europe-2013-era5-tutorial + cutout: be-03-2013-era5 resource: method: wind turbine: NREL_ReferenceTurbine_5MW_offshore @@ -103,11 +104,13 @@ renewable: # correction_factor: 0.93 corine: [44, 255] natura: true + ship_threshold: 400 min_shore_distance: 30000 + excluder_resolution: 200 potential: simple # or conservative clip_p_max_pu: 1.e-2 solar: - cutout: europe-2013-era5-tutorial + cutout: be-03-2013-era5 resource: method: pv panel: CSi @@ -121,9 +124,9 @@ renewable: # sector: The economic potential of photovoltaics and concentrating solar # power." Applied Energy 135 (2014): 704-720. 
correction_factor: 0.854337 - corine: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, - 14, 15, 16, 17, 18, 19, 20, 26, 31, 32] + corine: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 26, 31, 32] natura: true + excluder_resolution: 200 potential: simple # or conservative clip_p_max_pu: 1.e-2 @@ -149,7 +152,7 @@ transformers: type: '' load: - power_statistics: True # only for files from <2019; set false in order to get ENTSOE transparency data + power_statistics: true # only for files from <2019; set false in order to get ENTSOE transparency data interpolate_limit: 3 # data gaps up until this size are interpolated linearly time_shift_for_large_gaps: 1w # data gaps up until this size are copied by copying from manual_adjustments: true # false @@ -157,8 +160,17 @@ load: costs: year: 2030 - discountrate: 0.07 # From a Lion Hirth paper, also reflects average of Noothout et al 2016 - USD2013_to_EUR2013: 0.7532 # [EUR/USD] ECB: https://www.ecb.europa.eu/stats/exchange/eurofxref/html/eurofxref-graph-usd.en.html + version: v0.4.0 + rooftop_share: 0.14 + fill_values: + FOM: 0 + VOM: 0 + efficiency: 1 + fuel: 0 + investment: 0 + lifetime: 25 + "CO2 intensity": 0 + "discount rate": 0.07 marginal_cost: solar: 0.01 onwind: 0.015 @@ -168,6 +180,26 @@ costs: emission_prices: # only used with the option Ep co2: 0. +clustering: + simplify_network: + to_substations: false # network is simplified to nodes with positive or negative power injection (i.e. substations or offwind connections) + algorithm: kmeans # choose from: [hac, kmeans] + feature: solar+onwind-time # only for hac. choose from: [solar+onwind-time, solar+onwind-cap, solar-time, solar-cap, solar+offwind-cap] etc. + cluster_network: + algorithm: kmeans + feature: solar+onwind-time + exclude_carriers: ["OCGT", "offwind-ac", "coal"] + aggregation_strategies: + generators: + p_nom_max: sum # use "min" for more conservative assumptions + p_nom_min: sum + p_min_pu: mean + marginal_cost: mean + committable: any + ramp_limit_up: max + ramp_limit_down: max + efficiency: mean + solving: options: formulation: kirchhoff @@ -200,7 +232,7 @@ solving: plotting: map: figsize: [7, 7] - boundaries: [-10.2, 29, 35, 72] + boundaries: [-10.2, 29, 35, 72] p_nom: bus_size_factor: 5.e+4 linewidth_factor: 3.e+3 @@ -219,50 +251,50 @@ plotting: AC_carriers: ["AC line", "AC transformer"] link_carriers: ["DC line", "Converter AC-DC"] tech_colors: - "onwind" : "#235ebc" - "onshore wind" : "#235ebc" - 'offwind' : "#6895dd" - 'offwind-ac' : "#6895dd" - 'offshore wind' : "#6895dd" - 'offshore wind ac' : "#6895dd" - 'offwind-dc' : "#74c6f2" - 'offshore wind dc' : "#74c6f2" - "hydro" : "#08ad97" - "hydro+PHS" : "#08ad97" - "PHS" : "#08ad97" - "hydro reservoir" : "#08ad97" - 'hydroelectricity' : '#08ad97' - "ror" : "#4adbc8" - "run of river" : "#4adbc8" - 'solar' : "#f9d002" - 'solar PV' : "#f9d002" - 'solar thermal' : '#ffef60' - 'biomass' : '#0c6013' - 'solid biomass' : '#06540d' - 'biogas' : '#23932d' - 'waste' : '#68896b' - 'geothermal' : '#ba91b1' - "OCGT" : "#d35050" - "gas" : "#d35050" - "natural gas" : "#d35050" - "CCGT" : "#b20101" - "nuclear" : "#ff9000" - "coal" : "#707070" - "lignite" : "#9e5a01" - "oil" : "#262626" - "H2" : "#ea048a" - "hydrogen storage" : "#ea048a" - "battery" : "#b8ea04" - "Electric load" : "#f9d002" - "electricity" : "#f9d002" - "lines" : "#70af1d" - "transmission lines" : "#70af1d" - "AC-AC" : "#70af1d" - "AC line" : "#70af1d" - "links" : "#8a1caf" - "HVDC links" : "#8a1caf" - "DC-DC" : "#8a1caf" - "DC link" : "#8a1caf" + 
"onwind": "#235ebc" + "onshore wind": "#235ebc" + 'offwind': "#6895dd" + 'offwind-ac': "#6895dd" + 'offshore wind': "#6895dd" + 'offshore wind ac': "#6895dd" + 'offwind-dc': "#74c6f2" + 'offshore wind dc': "#74c6f2" + "hydro": "#08ad97" + "hydro+PHS": "#08ad97" + "PHS": "#08ad97" + "hydro reservoir": "#08ad97" + 'hydroelectricity': '#08ad97' + "ror": "#4adbc8" + "run of river": "#4adbc8" + 'solar': "#f9d002" + 'solar PV': "#f9d002" + 'solar thermal': '#ffef60' + 'biomass': '#0c6013' + 'solid biomass': '#06540d' + 'biogas': '#23932d' + 'waste': '#68896b' + 'geothermal': '#ba91b1' + "OCGT": "#d35050" + "gas": "#d35050" + "natural gas": "#d35050" + "CCGT": "#b20101" + "nuclear": "#ff9000" + "coal": "#707070" + "lignite": "#9e5a01" + "oil": "#262626" + "H2": "#ea048a" + "hydrogen storage": "#ea048a" + "battery": "#b8ea04" + "Electric load": "#f9d002" + "electricity": "#f9d002" + "lines": "#70af1d" + "transmission lines": "#70af1d" + "AC-AC": "#70af1d" + "AC line": "#70af1d" + "links": "#8a1caf" + "HVDC links": "#8a1caf" + "DC-DC": "#8a1caf" + "DC link": "#8a1caf" nice_names: OCGT: "Open-Cycle Gas" CCGT: "Combined-Cycle Gas"