[pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
pre-commit-ci[bot] 2023-03-06 08:27:45 +00:00
parent 8d92461c65
commit 13769f90af
57 changed files with 4679 additions and 3223 deletions


@@ -16,7 +16,7 @@ on:
    branches:
    - master
  schedule:
  - cron: "0 5 * * TUE"

env:
  CONDA_CACHE_NUMBER: 1 # Change this value to manually reset the environment cache
@@ -29,9 +29,9 @@ jobs:
      matrix:
        include:
        # Matrix required to handle caching with Mambaforge
        - os: ubuntu-latest
          label: ubuntu-latest
          prefix: /usr/share/miniconda3/envs/pypsa-eur
        # - os: macos-latest
        #   label: macos-latest
@@ -44,63 +44,63 @@ jobs:
    name: ${{ matrix.label }}
    runs-on: ${{ matrix.os }}

    defaults:
      run:
        shell: bash -l {0}

    steps:
    - uses: actions/checkout@v2

    - name: Clone pypsa-eur subworkflow
      run: |
        git clone https://github.com/pypsa/pypsa-eur ../pypsa-eur
        cp ../pypsa-eur/test/config.test1.yaml ../pypsa-eur/config.yaml

    - name: Setup secrets
      run: |
        echo -ne "url: ${CDSAPI_URL}\nkey: ${CDSAPI_TOKEN}\n" > ~/.cdsapirc

    - name: Add solver to environment
      run: |
        echo -e "- coincbc\n- ipopt<3.13.3" >> ../pypsa-eur/envs/environment.yaml

    - name: Setup Mambaforge
      uses: conda-incubator/setup-miniconda@v2
      with:
        miniforge-variant: Mambaforge
        miniforge-version: latest
        activate-environment: pypsa-eur
        use-mamba: true

    - name: Set cache dates
      run: |
        echo "DATE=$(date +'%Y%m%d')" >> $GITHUB_ENV
        echo "WEEK=$(date +'%Y%U')" >> $GITHUB_ENV

    - name: Cache data and cutouts folders
      uses: actions/cache@v3
      with:
        path: |
          data
          ../pypsa-eur/cutouts
          ../pypsa-eur/data
        key: data-cutouts-${{ env.WEEK }}-${{ env.DATA_CACHE_NUMBER }}

    - name: Create environment cache
      uses: actions/cache@v2
      id: cache
      with:
        path: ${{ matrix.prefix }}
        key: ${{ matrix.label }}-conda-${{ env.DATE }}-${{ env.CONDA_CACHE_NUMBER }}

    - name: Update environment due to outdated or unavailable cache
      run: mamba env update -n pypsa-eur -f ../pypsa-eur/envs/environment.yaml
      if: steps.cache.outputs.cache-hit != 'true'

    - name: Test snakemake workflow
      run: |
        conda activate pypsa-eur
        conda list
        snakemake -call --configfile test/config.overnight.yaml
        snakemake -call --configfile test/config.myopic.yaml

.gitignore

@@ -58,4 +58,4 @@ doc/_build
*.ipynb
data/costs_*


@@ -11,4 +11,4 @@ __pycache__
data
notebooks
benchmarks
*.nc


@@ -11,4 +11,4 @@ __pycache__
notebooks
benchmarks
resources
results


@@ -17,4 +17,4 @@ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


@@ -13,17 +13,17 @@ foresight: overnight # options are overnight, myopic, perfect (perfect is not ye
scenario:
  simpl: # only relevant for PyPSA-Eur
  - ''
  lv: # allowed transmission line volume expansion, can be any float >= 1.0 (today) or "opt"
  - 1.0
  - 1.5
  clusters: # number of nodes in Europe, any integer between 37 (1 node per country-zone) and several hundred
  - 45
  - 50
  opts: # only relevant for PyPSA-Eur
  - ''
  sector_opts: # this is where the main scenario settings are
  - Co2L0-3H-T-H-B-I-A-solar+p3-dist1
  # to really understand the options here, look in scripts/prepare_sector_network.py
  # Co2Lx specifies the CO2 target in x% of the 1990 values; default will give default (5%);
  # Co2L0p25 will give 25% CO2 emissions; Co2Lm0p05 will give 5% negative emissions
@@ -41,7 +41,7 @@ scenario:
  # cb40ex0 distributes a carbon budget of 40 GtCO2 following an exponential
  # decay with initial growth rate 0
  planning_horizons: # investment years for myopic and perfect; for overnight, year of cost assumptions can be different and is defined under 'costs'
  - 2050
  # for example, set to
  # - 2020
  # - 2030
@@ -84,18 +84,18 @@ electricity:
# in PyPSA-Eur-Sec
pypsa_eur:
  Bus:
  - AC
  Link:
  - DC
  Generator:
  - onwind
  - offwind-ac
  - offwind-dc
  - solar
  - ror
  StorageUnit:
  - PHS
  - hydro
  Store: []
@@ -110,25 +110,25 @@ biomass:
  scenario: ENS_Med
  classes:
    solid biomass:
    - Agricultural waste
    - Fuelwood residues
    - Secondary Forestry residues - woodchips
    - Sawdust
    - Residues from landscape care
    - Municipal waste
    not included:
    - Sugar from sugar beet
    - Rape seed
    - "Sunflower, soya seed "
    - Bioethanol barley, wheat, grain maize, oats, other cereals and rye
    - Miscanthus, switchgrass, RCG
    - Willow
    - Poplar
    - FuelwoodRW
    - C&P_RW
    biogas:
    - Manure solid, liquid
    - Sludge

solar_thermal:
@@ -143,10 +143,10 @@ existing_capacities:
  grouping_years_heat: [1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2019] # these should not extend 2020
  threshold_capacity: 10
  conventional_carriers:
  - lignite
  - coal
  - oil
  - uranium

sector:
@@ -238,7 +238,7 @@ sector:
      2040: 0.16
      2045: 0.21
      2050: 0.29
  retrofitting: # co-optimises building renovation to reduce space heat demand
    retro_endogen: false # co-optimise space heat savings
    cost_factor: 1.0 # weight costs for building renovation
    interest_rate: 0.04 # for investment in building components
@@ -279,7 +279,7 @@ sector:
  hydrogen_underground_storage: true
  hydrogen_underground_storage_locations:
  # - onshore # more than 50 km from sea
  - nearshore # within 50 km of sea
  # - offshore
  ammonia: false # can be false (no NH3 carrier), true (copperplated NH3), "regional" (regionalised NH3 without network)
  min_part_load_fischer_tropsch: 0.9 # p_min_pu
@@ -401,14 +401,14 @@ solving:
    min_iterations: 4
    max_iterations: 6
    keep_shadowprices:
    - Bus
    - Line
    - Link
    - Transformer
    - GlobalConstraint
    - Generator
    - Store
    - StorageUnit
  solver:
    name: gurobi
@@ -486,47 +486,47 @@ plotting:
  energy_min: -20000
  energy_threshold: 50
  vre_techs:
  - onwind
  - offwind-ac
  - offwind-dc
  - solar
  - ror
  renewable_storage_techs:
  - PHS
  - hydro
  conv_techs:
  - OCGT
  - CCGT
  - Nuclear
  - Coal
  storage_techs:
  - hydro+PHS
  - battery
  - H2
  load_carriers:
  - AC load
  AC_carriers:
  - AC line
  - AC transformer
  link_carriers:
  - DC line
  - Converter AC-DC
  heat_links:
  - heat pump
  - resistive heater
  - CHP heat
  - CHP electric
  - gas boiler
  - central heat pump
  - central resistive heater
  - central CHP heat
  - central CHP electric
  - central gas boiler
  heat_generators:
  - gas boiler
  - central gas boiler
  - solar thermal collector
  - central solar thermal collector
  tech_colors:
    # wind
    onwind: "#235ebc"


@@ -1,3 +1,3 @@
attribute,type,unit,default,description,status
location,string,n/a,n/a,Reference to original electricity bus,Input (optional)
unit,string,n/a,MWh,Unit of the bus (descriptive only), Input (optional)



@@ -1,2 +1,2 @@
attribute,type,unit,default,description,status
carrier,string,n/a,n/a,carrier,Input (optional)



@@ -12,19 +12,19 @@
# All configuration values have a default; values that are commented out
# serve to show the default.

import os
import shlex
import sys

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath("../scripts"))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
@@ -32,48 +32,48 @@ sys.path.insert(0, os.path.abspath('../scripts'))
extensions = [
    #'sphinx.ext.autodoc',
    #'sphinx.ext.autosummary',
    "sphinx.ext.autosectionlabel",
    "sphinx.ext.intersphinx",
    "sphinx.ext.todo",
    "sphinx.ext.mathjax",
    "sphinx.ext.napoleon",
    "sphinx.ext.graphviz",
    #'sphinx.ext.pngmath',
    #'sphinxcontrib.tikz',
    #'rinoh.frontend.sphinx',
    "sphinx.ext.imgconverter",  # for SVG conversion
]

autodoc_default_flags = ["members"]
autosummary_generate = True

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = "index"

# General information about the project.
project = "PyPSA-Eur-Sec"
copyright = "2019-2023 Tom Brown (KIT, TUB), Marta Victoria (Aarhus University), Lisa Zeyen (KIT, TUB), Fabian Neumann (TUB)"
author = "2019-2023 Tom Brown (KIT, TUB), Marta Victoria (Aarhus University), Lisa Zeyen (KIT, TUB), Fabian Neumann (TUB)"

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.7"
# The full version, including alpha/beta/rc tags.
release = "0.7.0"

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@@ -84,37 +84,37 @@ language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]

# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
@@ -124,174 +124,177 @@ todo_include_todos = True
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    "display_version": True,
    "sticky_navigation": True,
}

# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

html_context = {
    "css_files": [
        "_static/theme_overrides.css",  # override wide tables in RTD theme
    ],
}

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}

# If false, no module index is generated.
# html_domain_indices = True

# If false, no index is generated.
# html_use_index = True

# If true, the index is split into individual pages for each letter.
# html_split_index = False

# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None

# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'

# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}

# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'

# Output file base name for HTML help builder.
htmlhelp_basename = "PyPSAEurSecdoc"

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
    # Latex figure (float) alignment
    #'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (
        master_doc,
        "PyPSA-Eur-Sec.tex",
        "PyPSA-Eur-Sec Documentation",
        "author",
        "manual",
    ),
]

# Added for rinoh http://www.mos6581.org/rinohtype/quickstart.html
rinoh_documents = [
    (
        master_doc,  # top-level file (index.rst)
        "PyPSA-Eur-Sec",  # output (target.pdf)
        "PyPSA-Eur-Sec Documentation",  # document title
        "author",
    )
]  # document author

# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False

# If true, show page references after internal links.
# latex_show_pagerefs = False

# If true, show URL addresses after external links.
# latex_show_urls = False

# Documents to append as an appendix to all manuals.
# latex_appendices = []

# If false, no module index is generated.
# latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "pypsa-eur-sec", "PyPSA-Eur-Sec Documentation", [author], 1)]

# If true, show URL addresses after external links.
# man_show_urls = False

# -- Options for Texinfo output -------------------------------------------
@@ -300,23 +303,29 @@ man_pages = [
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        master_doc,
        "PyPSA-Eur-Sec",
        "PyPSA-Eur-Sec Documentation",
        author,
        "PyPSA-Eur-Sec",
        "One line description of project.",
        "Miscellaneous",
    ),
]

# Documents to append as an appendix to all manuals.
# texinfo_appendices = []

# If false, no module index is generated.
# texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"https://docs.python.org/": None}


@@ -27,4 +27,3 @@ Building topologies and corresponding standard values,tabula-calculator-calcsetb
Retrofitting thermal envelope costs for Germany,retro_cost_germany.csv,unknown,https://www.iwu.de/forschung/handlungslogiken/kosten-energierelevanter-bau-und-anlagenteile-bei-modernisierung/
District heating most countries,jrc-idees-2015/,CC BY 4.0,https://ec.europa.eu/jrc/en/potencia/jrc-idees,,
District heating missing countries,district_heat_share.csv,unknown,https://www.euroheat.org/knowledge-hub/country-profiles,,



@@ -21,9 +21,9 @@ transmission network level that covers the full ENTSO-E area.
PyPSA-Eur-Sec builds on the electricity generation and transmission
model `PyPSA-Eur <https://github.com/PyPSA/pypsa-eur>`_ to add demand
and supply for the following sectors: transport, space and water
heating, biomass, energy consumption in agriculture, industry
and industrial feedstocks, carbon management, carbon capture and usage/sequestration.
This completes the energy system and includes all greenhouse gas emitters except waste management, agriculture,
forestry and land use.
@@ -37,7 +37,7 @@ patchy.
We cannot support this model if you choose to use it.

.. note::
    You can find showcases of the model's capabilities in the Supplementary Materials of the
    preprint `Benefits of a Hydrogen Network in Europe
    <https://arxiv.org/abs/2207.05816>`_, the Supplementary Materials of the `paper in Joule with a
    description of the industry sector


@@ -82,7 +82,7 @@ The data licences and sources are given in the following table.
Set up the default configuration
================================

First make your own copy of the ``config.yaml`` based on
``config.default.yaml``. For example:

.. code:: bash
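
   # Hypothetical command: the concrete example is truncated at this hunk
   # boundary; copying the shipped default config is presumably what is meant.
   cp config.default.yaml config.yaml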


@@ -36,7 +36,7 @@ See also the `GitHub repository issues <https://github.com/PyPSA/pypsa-eur-sec/i
  industry away from fossil fuels is determined exogenously.

- **Industry materials production constant and inelastic:**
  For industry, the production of different materials per country is
  assumed to remain constant, and no industry demand elasticity is included in the model.

- **Energy demand distribution within countries:**


@@ -28,13 +28,13 @@ To activate the myopic option select ``foresight: 'myopic'`` in ``config.yaml``.
The {planning_horizons} wildcard indicates the year in which the network is optimized. For a myopic optimization, this is equivalent to the investment year. To set the investment years which are sequentially simulated for the myopic investment planning, select for example:

    planning_horizons:
    - 2020
    - 2030
    - 2040
    - 2050

in ``config.yaml``.
@@ -42,7 +42,7 @@ in ``config.yaml``.
**existing capacities**

Grouping years indicates the bin limits for grouping the existing capacities of different technologies; a sketch of the binning follows below. Note that separate bins are defined for power and heating plants due to different data sources.

``grouping_years_power: [1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2020, 2025, 2030]``
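
A sketch of the binning idea (the plant data and column names are hypothetical; the actual implementation in the repository's scripts is more involved):

.. code:: python

   import pandas as pd

   # Hypothetical plant list: build year and capacity in MW.
   plants = pd.DataFrame(
       {"build_year": [1983, 1999, 2012, 2018], "p_nom": [300.0, 150.0, 80.0, 40.0]}
   )

   grouping_years = [1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2020]

   # Assign each plant to the grouping bin its build year falls into,
   # then aggregate capacity per bin.
   bins = pd.cut(plants["build_year"], grouping_years, labels=grouping_years[:-1])
   print(plants.groupby(bins)["p_nom"].sum())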
@@ -66,13 +66,13 @@ If for a technology, node, and grouping bin, the capacity is lower than threshol
Conventional carriers indicate carriers used in the existing conventional technologies.

    conventional_carriers:
    - lignite
    - coal
    - oil
    - uranium
@@ -80,7 +80,7 @@ Conventional carriers indicate carriers used in the existing conventional techno
Options
=============

The total carbon budget for the entire transition path can be indicated in the `sector_opts <https://github.com/PyPSA/pypsa-eur-sec/blob/f13902510010b734c510c38c4cae99356f683058/config.default.yaml#L25>`_ in ``config.yaml``. The carbon budget can be split among the ``planning_horizons`` following an exponential or beta decay.
E.g. ``'cb40ex0'`` splits a carbon budget equal to 40 Gt :math:`CO_2` following an exponential decay whose initial linear growth rate r is zero.
They can also follow some user-specified path, if defined `here <https://github.com/PyPSA/pypsa-eur-sec/blob/413254e241fb37f55b41caba7264644805ad8e97/config.default.yaml#L56>`_.
The paper `Speed of technological transformations required in Europe to achieve different climate goals (2022) <https://doi.org/10.1016/j.joule.2022.04.016>`__ defines CO2 budgets corresponding to global temperature increases (1.5 °C and 2 °C) in response to emissions. Here, global carbon budgets are converted to European budgets assuming an equal per-capita distribution, which translates into a 6.43% share for Europe. In this paper, the carbon budgets are distributed throughout the transition paths assuming an exponential decay. Emissions e(t) in every year t are limited by this exponential decay.
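
A minimal numerical sketch of fitting such a decay to the budget; the parametrisation :math:`e(t) = e_0 (1 + (r + m) t) e^{-m t}` is an assumption taken from the cited paper, and all numbers are illustrative:

.. code:: python

   import numpy as np
   from scipy.integrate import quad
   from scipy.optimize import brentq

   e_0 = 1.0      # base-year emissions (normalised)
   r = 0.0        # initial linear growth rate, as in 'cb40ex0'
   budget = 40.0  # carbon budget in units of e_0-years (illustrative)

   def emissions(t, m):
       # Exponentially decaying emissions path with decay parameter m.
       return e_0 * (1 + (r + m) * t) * np.exp(-m * t)

   # Choose m so that cumulative emissions exactly exhaust the budget.
   m = brentq(lambda m: quad(emissions, 0, np.inf, args=(m,))[0] - budget, 1e-6, 10.0)
   print(m, [round(emissions(t, m), 3) for t in (0, 10, 20, 30)])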
@@ -99,7 +99,7 @@ General myopic code structure
The myopic code solves the network for the time steps included in ``planning_horizons`` in a recursive loop, so that:

1. The existing capacities (those installed before the base year) are added as fixed capacities with p_nom=value, p_nom_extendable=False. E.g. for baseyear=2020, capacities installed before 2020 are added. In addition, the network comprises additional generator, storage, and link capacities with p_nom_extendable=True. The non-solved network is saved in ``results/run_name/networks/prenetworks-brownfield``.

The base year is the first element in ``planning_horizons``. Step 1 is implemented with the rule add_baseyear for the base year and with the rule add_brownfield for the remaining planning_horizons.

2. The 2020 network is optimized. The solved network is saved in ``results/run_name/networks/postnetworks``.


@@ -10,7 +10,7 @@ The total number of nodes for Europe is set in the ``config.yaml`` file under ``
Exemplary unsolved network clustered to 512 nodes:

.. image:: ../graphics/elec_s_512.png

Exemplary unsolved network clustered to 37 nodes:
@@ -38,12 +38,12 @@ Here are some examples of how spatial resolution is set for different sectors in
• Solid biomass: It can be modeled as a single node for Europe or it can be nodally resolved if activated in the `config <https://github.com/PyPSA/pypsa-eur-sec/blob/3daff49c9999ba7ca7534df4e587e1d516044fc3/config.default.yaml#L270>`_. Nodal modeling includes modeling biomass potential per country (given per country, then distributed by population density within) and the transport of solid biomass between countries.

• CO2: It can be modeled as a single node for Europe or it can be nodally resolved with CO2 transport pipelines if activated in the `config <https://github.com/PyPSA/pypsa-eur-sec/blob/3daff49c9999ba7ca7534df4e587e1d516044fc3/config.default.yaml#L248>`_. It should be mentioned that in single-node mode a transport and storage cost is added for sequestered CO2, the cost of which can be adjusted in the `config <https://github.com/PyPSA/pypsa-eur-sec/blob/3daff49c9999ba7ca7534df4e587e1d516044fc3/config.default.yaml#L247>`_.

• Liquid hydrocarbons: Modeled as a single node for Europe, since transport costs for liquids are low and no bottlenecks are expected.

**Electricity distribution network**

Contrary to the transmission grid, the grid topology at the distribution level (at and below 110 kV) is not included due to the very high computational burden. However, a link per node can be used (if activated in the `config <https://github.com/PyPSA/pypsa-eur-sec/blob/3daff49c9999ba7ca7534df4e587e1d516044fc3/config.default.yaml#L257>`_ file) to represent energy transferred between the distribution and transmission levels at every node. In essence, the total energy capacity connecting the transmission grid and the low-voltage level is optimized. The cost assumption for this link can be adjusted in the config file `options <https://github.com/PyPSA/pypsa-eur-sec/blob/3daff49c9999ba7ca7534df4e587e1d516044fc3/config.default.yaml#L258>`_ and is currently 500 Eur/kW.

Rooftop PV, home batteries, chargers for passenger EVs, as well as individual heating technologies (heat pumps and resistive heaters) are connected to the low-voltage level. All the remaining generation and storage technologies are connected to the transmission grid. In practice, this means that the distribution grid capacity is only extended if it is necessary to balance the mismatch between local generation and demand.


@@ -40,7 +40,7 @@ Heat demand
===========

Building heating in residential and services sectors is resolved regionally, both for individual buildings and district heating systems, which include different supply options (see :ref:`heat-supply`).

Annual heat demands per country are retrieved from `JRC-IDEES <https://op.europa.eu/en/publication-detail/-/publication/989282db-ad65-11e7-837e-01aa75ed71a1/language-en>`_ and split into space and water heating. For space heating, the annual demands are converted to daily values based on the population-weighted Heating Degree Day (HDD) using the `atlite tool <https://github.com/PyPSA/atlite>`_, where space heat demand is proportional to the difference between the daily average ambient temperature (read from `ERA5 <https://doi.org/10.1002/qj.3803>`_) and a threshold temperature above which space heat demand is zero. A threshold temperature of 15 °C is assumed by default. The daily space heat demand is distributed to the hours of the day following heat demand profiles from `BDEW <https://github.com/oemof/demandlib>`_. These differ for weekdays and weekends/holidays and between residential and services demand.
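
The HDD conversion reduces to a few lines; a sketch under the default 15 °C threshold, with a made-up temperature series standing in for ERA5 data:

.. code:: python

   import numpy as np

   t_threshold = 15.0  # °C; above this, space heat demand is zero
   daily_mean_temp = np.array([-2.0, 4.5, 11.0, 16.0, 21.0])  # hypothetical °C

   # Demand is proportional to the shortfall below the threshold temperature.
   hdd = np.clip(t_threshold - daily_mean_temp, 0.0, None)

   annual_space_heat = 1000.0  # MWh/a for the region (made up)
   daily_space_heat = annual_space_heat * hdd / hdd.sum()
   print(daily_space_heat)  # zero on days at or above 15 °C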
*Space heating*
@@ -62,7 +62,7 @@ Hot water demand is assumed to be constant throughout the year.
*Urban and rural heating*

For every country, heat demand is split between low and high population density areas. These country-level totals are then distributed to each region in proportion to their rural and urban populations respectively. Urban areas with dense heat demand can be supplied with large-scale district heating systems. The percentage of urban heat demand that can be supplied by district heating networks as well as lump-sum losses in district heating systems is exogenously determined in the `config file <https://github.com/PyPSA/pypsa-eur-sec/blob/3daff49c9999ba7ca7534df4e587e1d516044fc3/config.default.yaml#L153>`_.

*Cooling demand*
@@ -74,21 +74,21 @@ As below figure shows, the current total heat demand in Europe is similar to the
.. image:: ../graphics/Heat_and_el_demand_timeseries.png

In practice, in PyPSA-Eur-Sec, there are heat demand buses to which the corresponding heat demands are added.

1) Urban central heat: large-scale district heating networks in urban areas with dense heat demand. Residential and services demand in these areas are added as demands to this bus.

2) Residential urban decentral heat: heating for residential buildings in urban areas not using district heating.

3) Services urban decentral heat: heating for services buildings in urban areas not using district heating.

4) Residential rural heat: heating for residential buildings in rural areas with low population density.

5) Services rural heat: heating for services buildings in rural areas with low population density. Heat demand from the agriculture sector is also included here.

.. _heat-supply:

Heat supply
=======================

Different supply options are available depending on whether demand is met centrally through district heating systems, or decentrally through appliances in individual buildings.

**Urban central heat**
@@ -106,7 +106,7 @@ Below are more detailed explanations for each heating supply component, all of w
**Large-scale CHP**

Large combined heat and power plants are included in the model if specified in the `config file <https://github.com/PyPSA/pypsa-eur-sec/blob/3daff49c9999ba7ca7534df4e587e1d516044fc3/config.default.yaml#L235>`_.

CHPs are based on back-pressure plants operating with a fixed ratio of electricity to heat output. The efficiencies of each are given on the back-pressure line, where the back-pressure coefficient cb is the electricity output divided by the heat output. For a more complete explanation of the operation of CHPs, refer to the study by Dahl et al.: `Cost sensitivity of optimal sector-coupled district heating production systems <https://arxiv.org/pdf/1804.07557.pdf>`_.

PyPSA-Eur-Sec includes CHP plants fueled by methane and solid biomass from waste and residues. Hydrogen fuel cells also produce both electricity and heat.
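
The back-pressure relation itself is simple bookkeeping; a sketch with illustrative coefficients (not the values used in the model):

.. code:: python

   # Back pressure: electric and heat output are tied by c_b = P_el / P_heat.
   c_b = 0.45       # illustrative back-pressure coefficient
   eta_total = 0.9  # illustrative total (electric + thermal) efficiency

   fuel_input = 100.0  # MWh of methane or solid biomass
   heat_output = eta_total * fuel_input / (1 + c_b)  # ~62.07 MWh
   electric_output = c_b * heat_output               # ~27.93 MWh
   print(heat_output, electric_output)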
@ -129,37 +129,37 @@ The coefficient of performance (COP) of air- and ground-sourced heat pumps depen
For the sink water temperature Tsink we assume 55 °C [`Config <https://github.com/PyPSA/pypsa-eur-sec/blob/3daff49c9999ba7ca7534df4e587e1d516044fc3/config.default.yaml#L207>`_ file]. For the time- and location-dependent source temperatures Tsource, we rely on the `ERA5 <https://doi.org/10.1002/qj.3803>`_ reanalysis weather data. The temperature differences are converted into COP time series using results from a regression analysis performed in the study by `Stafell et al. <https://pubs.rsc.org/en/content/articlelanding/2012/EE/c2ee22653g>`_. For air-sourced heat pumps (ASHP), we use the function: For the sink water temperature Tsink we assume 55 °C [`Config <https://github.com/PyPSA/pypsa-eur-sec/blob/3daff49c9999ba7ca7534df4e587e1d516044fc3/config.default.yaml#L207>`_ file]. For the time- and location-dependent source temperatures Tsource, we rely on the `ERA5 <https://doi.org/10.1002/qj.3803>`_ reanalysis weather data. The temperature differences are converted into COP time series using results from a regression analysis performed in the study by `Stafell et al. <https://pubs.rsc.org/en/content/articlelanding/2012/EE/c2ee22653g>`_. For air-sourced heat pumps (ASHP), we use the function:
.. math:: .. math::
COP (\Delta T) = 6.81 + 0.121\Delta T + 0.000630\Delta T^2 COP (\Delta T) = 6.81 + 0.121\Delta T + 0.000630\Delta T^2
for ground-sourced heat pumps (GSHP), we use the function:
.. math::

   COP(\Delta T) = 8.77 - 0.150 \Delta T + 0.000734 \Delta T^2
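
These regressions can be applied directly to a source-temperature time series to obtain hourly COPs. A minimal sketch, assuming the config-default sink temperature and made-up air temperatures (not ERA5 data):

.. code-block:: python

   import numpy as np

   T_SINK = 55.0  # assumed sink water temperature in °C


   def cop_ashp(t_source):
       """COP of an air-sourced heat pump from the regression above."""
       dt = T_SINK - t_source
       return 6.81 - 0.121 * dt + 0.000630 * dt**2


   # illustrative hourly air temperatures in °C (not ERA5 data)
   t_air = np.array([-10.0, 0.0, 10.0])
   print(cop_ashp(t_air))  # COP improves as the source gets warmer
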
**Resistive heaters**
Can be activated in the config file via the `boilers <https://github.com/PyPSA/pypsa-eur-sec/blob/3daff49c9999ba7ca7534df4e587e1d516044fc3/config.default.yaml#L232>`_ option.

Resistive heaters produce heat with a fixed conversion efficiency (refer to the `technology-data repository <https://github.com/PyPSA/technology-data>`_).
**Gas, oil, and biomass boilers**
Can be activated in the config file via the `boilers <https://github.com/PyPSA/pypsa-eur-sec/blob/3daff49c9999ba7ca7534df4e587e1d516044fc3/config.default.yaml#L232>`_, `oil boilers <https://github.com/PyPSA/pypsa-eur-sec/blob/3daff49c9999ba7ca7534df4e587e1d516044fc3/config.default.yaml#L233>`_, and `biomass boiler <https://github.com/PyPSA/pypsa-eur-sec/blob/3daff49c9999ba7ca7534df4e587e1d516044fc3/config.default.yaml#L234>`_ options.

Similar to resistive heaters, boilers have a fixed efficiency and produce heat using gas, oil or biomass.
**Solar thermal collectors**
Can be activated in the config file via the `solar_thermal <https://github.com/PyPSA/pypsa-eur-sec/blob/3daff49c9999ba7ca7534df4e587e1d516044fc3/config.default.yaml#L237>`_ option.

Solar thermal profiles are built based on weather data. The config file also offers `options <https://github.com/PyPSA/pypsa-eur-sec/blob/3daff49c9999ba7ca7534df4e587e1d516044fc3/config.default.yaml#L134>`_ for setting the sky model and the orientation of the panels, which are then used by the atlite tool to calculate the solar resource time series.
**Waste heat from Fuel Cells, Methanation and Fischer-Tropsch plants**

Waste heat from `fuel cells <https://github.com/PyPSA/pypsa-eur-sec/blob/3daff49c9999ba7ca7534df4e587e1d516044fc3/config.default.yaml#L256>`_ in addition to processes like `Fischer-Tropsch <https://github.com/PyPSA/pypsa-eur-sec/blob/3daff49c9999ba7ca7534df4e587e1d516044fc3/config.default.yaml#L255>`_, methanation, and Direct Air Capture (DAC) is dumped into district heating networks.

**Existing heating capacities and decommissioning**
For the myopic transition paths, capacities already existing for technologies supplying heat are retrieved from `“Mapping and analyses of the current and future (2020 - 2030)” <https://ec.europa.eu/energy/en/studies/mapping-and-analyses-current-and-future-2020-2030-heatingcooling-fuel-deployment>`_. For the sake of simplicity, coal, oil and gas boiler capacities are all treated as gas boilers. Besides that, existing capacities for heat resistors, air-sourced and ground-sourced heat pumps are included in the model. For heating capacities, 25% of existing capacities in 2015 are assumed to be decommissioned in every 5-year time step after 2020.
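
The resulting retirement schedule is linear, so the 2015 stock is gone by 2040. A small sketch with an assumed 2015 capacity:

.. code-block:: python

   # 25% of the 2015 stock is retired in each 5-year step after 2020
   # (the 2015 capacity below is a hypothetical figure).
   capacity_2015 = 100.0  # GW, assumed

   for year in (2025, 2030, 2035, 2040):
       steps = (year - 2020) // 5
       remaining = max(capacity_2015 * (1 - 0.25 * steps), 0.0)
       print(year, remaining, "GW")  # 75, 50, 25, 0 GW
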
**Thermal Energy Storage**
Activated in the config file via the `tes <https://github.com/PyPSA/pypsa-eur-sec/blob/3daff49c9999ba7ca7534df4e587e1d516044fc3/config.default.yaml#L228>`_ option.
@ -195,7 +195,7 @@ Further information are given in the study by Zeyen et al. : `Mitigating heat de
Hydrogen demand
=============================

Hydrogen is consumed in the industry sector (see :ref:`Industry demand`) to produce ammonia (see :ref:`Chemicals Industry`) and direct reduced iron (DRI) (see :ref:`Iron and Steel`). Hydrogen is also consumed to produce synthetic methane (see :ref:`Methane supply`) and liquid hydrocarbons (see :ref:`Oil-based products supply`), which have multiple uses in industry and other sectors.

Hydrogen is also used for transport applications (see :ref:`Transportation`), where it is exogenously fixed. It is used in `heavy-duty land transport <https://github.com/PyPSA/pypsa-eur-sec/blob/3daff49c9999ba7ca7534df4e587e1d516044fc3/config.default.yaml#L181>`_ and as liquefied hydrogen in the shipping sector (see :ref:`Shipping`). Furthermore, stationary fuel cells may re-electrify hydrogen (with waste heat as a byproduct) to balance renewable fluctuations (see :ref:`Electricity supply and demand`). The waste heat from the stationary fuel cells can be used in `district-heating systems <https://github.com/PyPSA/pypsa-eur-sec/blob/3daff49c9999ba7ca7534df4e587e1d516044fc3/config.default.yaml#L256>`_.
.. _Hydrogen supply:
@ -205,7 +205,7 @@ Hydrogen supply
Today, most of the :math:`H_2` consumed globally is produced from natural gas by steam methane reforming (SMR)

.. math::

   CH_4 + H_2O \xrightarrow{} CO + 3H_2
@ -216,12 +216,12 @@ combined with a water-gas shift reaction
   CO + H_2O \xrightarrow{} CO_2 + H_2

SMR is included `here <https://github.com/PyPSA/pypsa-eur-sec/blob/3daff49c9999ba7ca7534df4e587e1d516044fc3/config.default.yaml#L245>`_.
PyPSA-Eur-Sec allows this route of :math:`H_2` production with and without carbon capture (CC) (see :ref:`Carbon dioxide capture, usage and sequestration (CCU/S)`). These routes are often referred to as blue and grey hydrogen. Here, the methane input can be of either fossil or synthetic origin.
Green hydrogen can be produced by electrolysis to split water into hydrogen and oxygen

.. math::

   2H_2O \xrightarrow{} 2H_2 + O_2
@ -239,32 +239,32 @@ Hydrogen can be stored in overground steel tanks or `underground salt caverns <h
.. _Methane demand:

Methane demand
====================================

Methane is used in individual and large-scale gas boilers, in CHP plants with and without carbon capture, in OCGT and CCGT power plants, and in some industry subsectors for the provision of high temperature heat (see :ref:`Industry demand`). Methane is not used in the transport sector because of engine slippage.
.. _Methane supply:

Methane supply
===================================

In addition to methane from fossil origins, the model also considers biogenic and synthetic sources. `The gas network can either be modelled, or it can be assumed that gas transport is not limited <https://github.com/PyPSA/pypsa-eur-sec/blob/3daff49c9999ba7ca7534df4e587e1d516044fc3/config.default.yaml#L261>`_. If gas infrastructure is regionally resolved, fossil gas can enter the system only at existing and planned LNG terminals, pipeline entry-points, and intra-European gas extraction sites, which are retrieved from the SciGRID Gas IGGIELGN dataset and the GEM Wiki.
Biogas can be upgraded to methane.
Synthetic methane can be produced by combining hydrogen and captured :math:`CO_2` in the Sabatier reaction

.. math::

   CO_2 + 4H_2 \xrightarrow{} CH_4 + 2H_2O
Direct power-to-methane conversion with efficient heat integration developed in the HELMETH project is also an option. The share of synthetic, biogenic and fossil methane is an optimisation result depending on the techno-economic assumptions.
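
From the Sabatier stoichiometry above, one can sketch the carbon balance of the synthetic route; the molar masses and lower heating value below are standard textbook figures, not model parameters:

.. code-block:: python

   # Back-of-the-envelope carbon balance of CO2 + 4 H2 -> CH4 + 2 H2O,
   # per MWh of methane produced (constants are standard values, assumed).
   M_CH4, M_CO2 = 16.04, 44.01  # molar masses in g/mol
   LHV_CH4 = 13.9               # kWh per kg of methane

   kg_ch4 = 1000 / LHV_CH4          # ~72 kg CH4 per MWh
   kg_co2 = kg_ch4 * M_CO2 / M_CH4  # CO2 fixed per MWh of CH4
   print(f"{kg_co2:.0f} kg CO2 per MWh of synthetic methane")  # ~197 kg
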
*Methane transport*

The existing European gas transmission network is represented based on the SciGRID Gas IGGIELGN dataset. This dataset is based on compiled and merged data from the ENTSOG maps and other publicly available data sources. It includes data on the capacity, diameter, pressure, length, and directionality of pipelines. Missing capacity data is conservatively inferred from the pipe diameter, following conversion factors derived from an EHB report. The gas network is clustered to the selected number of model regions. Gas pipelines can be endogenously expanded or repurposed for hydrogen transport. Gas flows are represented by a lossless transport model. Methane is assumed to be transmitted without cost or capacity constraints because future demand is predicted to be low compared to available transport capacities.

The following figure shows the unclustered European gas transmission network based on the SciGRID Gas IGGIELGN dataset. Pipelines are color-coded by estimated capacities. Markers indicate entry-points, sites of fossil resource extraction, and LNG terminals.

.. image:: ../graphics/gas_pipeline_figure.png
.. _Biomass supply:
@ -319,14 +319,14 @@ The model can only use biogas by first upgrading it to natural gas quality [see
Oil-based products demand
=========================

Naphtha is used as a feedstock in the chemicals industry (see :ref:`Chemicals Industry`). Furthermore, kerosene is used as transport fuel in the aviation sector (see :ref:`Aviation`). Non-electrified agriculture machinery also consumes gasoline.
Land transport (see :ref:`Land transport`) that is not electrified or converted to :math:`H_2` fuel cells also consumes oil-based products. While there is regional distribution of demand, the carrier is copperplated in the model, which means that transport costs and constraints are neglected.
.. _Oil-based products supply:

Oil-based products supply
=========================

Oil-based products can be either of fossil origin or synthetically produced by combining :math:`H_2` (see :ref:`Hydrogen supply`) and captured :math:`CO_2` (see :ref:`Carbon dioxide capture, usage and sequestration (CCU/S)`) in Fischer-Tropsch plants
.. math::

   nCO + (2n+1)H_2 \xrightarrow{} C_nH_{2n+2} + nH_2O
@ -336,8 +336,8 @@ with costs as included from the `technology-data repository <https://github.com/
*Oil-based transport*

Liquid hydrocarbons are assumed to be transported freely among the model regions, since future demand is predicted to be low, transport costs for liquids are low, and no bottlenecks are expected.
.. _Industry demand:
@ -357,22 +357,22 @@ Greenhouse gas emissions associated with industry can be classified into energy-
The overarching modelling procedure can be described as follows. First, the energy demands and process emissions for every unit of material output are estimated based on data from the `JRC-IDEES database <https://data.europa.eu/doi/10.2760/182725>`_ and the fuel and process switching described in the subsequent sections. Second, the 2050 energy demands and process emissions are calculated using the per-unit-of-material ratios based on the industry transformations and the `country-level material production in 2015 <https://data.europa.eu/doi/10.2760/182725>`_, assuming constant material demand.
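
Schematically, the two-step procedure multiplies per-tonne ratios by the (constant) 2015 production. All numbers below are placeholders, not JRC-IDEES values:

.. code-block:: python

   # Step 1: assumed 2050 energy demand per tonne of material after fuel switching
   mwh_per_tonne = {"electricity": 0.5, "methane": 1.2, "hydrogen": 0.3}

   # Step 2: scale by the country-level material production in 2015 (assumed)
   production_2015 = 2.0e6  # tonnes

   demand_2050 = {k: v * production_2015 for k, v in mwh_per_tonne.items()}
   print(demand_2050)  # MWh per carrier, assuming constant material demand
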
Missing or too coarsely aggregated data in the JRC-IDEES database is supplemented with additional datasets: `Eurostat energy balances <https://ec.europa.eu/eurostat/web/energy/data/energy-balances>`_, the `United States Geological Survey <https://www.usgs.gov/media/files/%20nitrogen-2017-xlsx>`_ for ammonia production, `DECHEMA <https://dechema.de/dechema_media/Downloads/Positionspapiere/Technology_study_Low_carbon_energy_and_feedstock_for_the_European_chemical_industry.pdf>`_ for methanol and chlorine, and `national statistics from Switzerland <https://www.bfe.admin.ch/bfe/de/home/versorgung/statistik-und-geodaten/energiestatistiken.html>`_.
Where there are fossil and electrified alternatives for the same process (e.g. in glass manufacture or drying), we assume that the process is completely electrified. Current electricity demands (lighting, air compressors, motor drives, fans, pumps) will remain electric. Processes that require temperatures below 500 °C are supplied with solid biomass, since we assume that residues and wastes are not suitable for high-temperature applications. We see solid biomass use primarily in the pulp and paper industry, where it is already widespread, and in food, beverages and tobacco, where it replaces natural gas. Industries which require high temperatures (above 500 °C), such as metals, chemicals and non-metallic minerals, are either electrified where suitable processes already exist, or the heat is provided with synthetic methane.

Hydrogen for high-temperature process heat is currently not part of the model.
Where process heat is required, our approach depends on the necessary temperature. For example, due to the high share of high-temperature process heat demand (see `Naegler et al. <https://doi.org/10.1002/er.3436>`_ and `Rehfeldt et al. <https://link.springer.com/article/10.1007/s12053-017-9571-y>`_), we disregard geothermal and solar thermal energy as sources for process heat, since they cannot attain sufficiently high temperatures.
The following figure shows the final consumption of energy and non-energy feedstocks in industry today in comparison to the scenario in 2050 assumed in `Neumann et al. <https://arxiv.org/abs/2207.05816>`_.

.. image:: ../graphics/fec_industry_today_tomorrow.png

The following figure shows the process emissions in industry today (top bar) and in 2050 without
carbon capture (bottom bar) assumed in `Neumann et al. <https://arxiv.org/abs/2207.05816>`_.
@ -390,9 +390,9 @@ Inside each country the industrial demand is then distributed using the `Hotmaps
**Iron and Steel**

Two alternative routes are used today to manufacture steel in Europe. The primary route (integrated steelworks) represents 60% of steel production, while the secondary route (electric arc furnaces, EAF) represents the other 40% `(Lechtenböhmer et al.) <https://doi.org/10.1016/j.energy.2016.07.110>`_.

The primary route uses blast furnaces in which coke is used to reduce iron ore into molten iron, which is then converted into steel:
.. math::

   CO_2 + C \xrightarrow{} 2 CO
@ -408,13 +408,13 @@ The primary route uses blast furnaces in which coke is used to reduce iron ore i
.. math::

   FeO + CO \xrightarrow{} Fe + CO_2
The primary route of steelmaking implies large process emissions of 0.22 t :math:`_{CO_2}`/t of steel, amounting to 7% of global greenhouse gas emissions `(Vogl et al.) <https://doi.org/10.1016/j.joule.2021.09.007>`_.

In the secondary route, electric arc furnaces are used to melt scrap metal. This limits the :math:`CO_2` emissions to the burning of graphite electrodes `(Friedrichsen et al.) <https://www.umweltbundesamt.de/en/publikationen/comparative-analysis-of-options-potential-for>`_, and reduces process emissions to 0.03 t :math:`_{CO_2}`/t of steel.

We assume that the primary route can be replaced by a third route in 2050, using direct reduced iron (DRI) and subsequent processing in an EAF.
.. math::

   3 Fe_2O_3 + H_2 \xrightarrow{} 2 Fe_3O_4 + H_2O
@ -427,25 +427,25 @@ We assume that the primary route can be replaced by a third route in 2050, using
   FeO + H_2 \xrightarrow{} Fe + H_2O
This circumvents the process emissions associated with the use of coke. For hydrogen-based DRI, we assume energy requirements of 1.7 MWh :math:`_{H_2}`/t steel `(Vogl et al.) <https://doi.org/10.1016/j.jclepro.2018.08.279>`_ and 0.322 MWh :math:`_{el}`/t steel `(HYBRIT 2016) <https://dh5k8ug1gwbyz.cloudfront.net/uploads/2021/02/Hybrit-broschure-engelska.pdf>`_.
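
With these specific demands, the carrier demands for a given DRI-EAF steel output follow directly; the production figure below is an arbitrary example:

.. code-block:: python

   H2_PER_T = 1.7    # MWh_H2 per tonne of steel (Vogl et al.)
   EL_PER_T = 0.322  # MWh_el per tonne of steel (HYBRIT 2016)

   steel = 1.0e6  # tonnes of DRI-EAF steel per year, assumed
   print("hydrogen:", steel * H2_PER_T, "MWh")
   print("electricity:", steel * EL_PER_T, "MWh")
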
The share of steel produced via the primary route is exogenously set in the `config file <https://github.com/PyPSA/pypsa-eur-sec/blob/3daff49c9999ba7ca7534df4e587e1d516044fc3/config.default.yaml#L279>`_. The share of steel obtained via hydrogen-based DRI plus EAF is also set exogenously in the `config file <https://github.com/PyPSA/pypsa-eur-sec/blob/3daff49c9999ba7ca7534df4e587e1d516044fc3/config.default.yaml#L287>`_. The remaining share is manufactured through the secondary route using scrap metal in EAF. Bioenergy as an alternative to coke in blast furnaces is not considered in the model (`Mandova et al. <https://doi.org/10.1016/j.biombioe.2018.04.021>`_, `Suopajärvi et al. <https://doi.org/10.1016/j.apenergy.2018.01.060>`_).

For the remaining subprocesses in this sector, the following transformations are assumed. Methane is used as an energy source for the smelting process. Activities associated with furnaces, refining and rolling, and product finishing are electrified, assuming the current efficiency values for these cases. These transformations result in changes in process emissions, as outlined in the process emissions figure presented in the industry overview section (see :ref:`Overview`).
.. _Chemicals Industry:

**Chemicals Industry**

The chemicals industry includes a wide range of diverse industries, including the production of basic organic compounds (olefins, alcohols, aromatics), basic inorganic compounds (ammonia, chlorine), polymers (plastics), and end-user products (cosmetics, pharmaceutics).
The chemicals industry consumes large amounts of fossil-fuel based feedstocks (see `Levi et al. <https://pubs.acs.org/doi/10.1021/acs.est.7b04573>`_), which can also be produced from renewables as outlined for hydrogen (see :ref:`Hydrogen supply`), for methane (see :ref:`Methane supply`), and for oil-based products (see :ref:`Oil-based products supply`). The ratio between synthetic and fossil-based fuels used in the industry is an endogenous result of the optimisation.

The basic chemicals consumption data from the `JRC IDEES <https://op.europa.eu/en/publication-detail/-/publication/989282db-ad65-11e7-837e-01aa75ed71a1/language-en>`_ database comprises high-value chemicals (ethylene, propylene and BTX), chlorine, methanol and ammonia. However, it is necessary to separate out these chemicals because their current and future production routes are different.
Statistics for the production of ammonia, which is commonly used as a fertilizer, are taken from the `USGS <https://www.usgs.gov/media/files/nitrogen-2017-xlsx>`_ for every country. Ammonia can be made from hydrogen and nitrogen using the Haber-Bosch process.
.. math::

   N_2 + 3H_2 \xrightarrow{} 2NH_3
@ -454,32 +454,32 @@ Statistics for the production of ammonia, which is commonly used as a fertilizer
The Haber-Bosch process is not explicitly represented in the model, such that demand for ammonia enters the model as a demand for hydrogen (6.5 MWh :math:`_{H_2}`/t :math:`_{NH_3}`) and electricity (1.17 MWh :math:`_{el}`/t :math:`_{NH_3}`) (see `Wang et al. <https://doi.org/10.1016/j.joule.2018.04.017>`_). Today, natural gas dominates in Europe as the source for the hydrogen used in the Haber-Bosch process, but the model can choose among the various hydrogen supply options described in the hydrogen section (see :ref:`Hydrogen supply`).
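
A one-line sketch of how an ammonia demand enters the model as hydrogen and electricity demand (the tonnage below is illustrative):

.. code-block:: python

   nh3 = 5.0e5  # tonnes of ammonia per year, assumed

   h2_demand = 6.5 * nh3   # MWh_H2 (Wang et al.)
   el_demand = 1.17 * nh3  # MWh_el
   print(h2_demand, el_demand)
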
The total production and specific energy consumption of chlorine and methanol is taken from a `DECHEMA report <https://dechema.de/dechema_media/Downloads/Positionspapiere/Technology_study_Low_carbon_energy_and_feedstock_for_the_European_chemical_industry.pdf>`_. According to this source, the production of chlorine amounts to 9.58 MtCl/a, which is assumed to require electricity at 3.6 MWh :math:`_{el}`/t of chlorine and yield hydrogen at 0.937 MWh :math:`_{H_2}`/t of chlorine in the chloralkali process. The production of methanol adds up to 1.5 MtMeOH/a, requiring electricity at 0.167 MWh :math:`_{el}`/t of methanol and methane at 10.25 MWh :math:`_{CH_4}`/t of methanol.
The production of ammonia, methanol, and chlorine is deducted from the JRC IDEES basic chemicals, leaving the production totals of high-value chemicals (HVC). For this, we assume that the liquid hydrocarbon feedstock comes from synthetic or fossil-origin naphtha (14 MWh :math:`_{naphtha}`/t of HVC, similar to `Lechtenböhmer et al. <https://doi.org/10.1016/j.energy.2016.07.110>`_), ignoring the methanol-to-olefin route. Furthermore, we assume the following transformations of the energy-consuming processes in the production of plastics: the final energy consumption in steam processing is converted to methane, since it requires temperatures above 500 °C (4.1 MWh :math:`_{CH_4}`/t of HVC, see `Rehfeldt et al. <https://doi.org/10.1007/s12053-017-9571-y>`_); and the remaining processes are electrified using the current efficiency of microwave for high-enthalpy heat processing, electric furnaces, electric process cooling and electric generic processes (2.85 MWh :math:`_{el}`/t of HVC).
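
Combining the stated per-tonne figures, the feedstock and energy demand of HVC production can be sketched as follows (the production volume is a placeholder):

.. code-block:: python

   hvc = 1.0e6  # tonnes of high-value chemicals, assumed

   demand = {
       "naphtha": 14.0 * hvc,      # MWh, feedstock
       "methane": 4.1 * hvc,       # MWh, steam processing above 500 °C
       "electricity": 2.85 * hvc,  # MWh, electrified processes
   }
   print(demand)
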
The process emissions from feedstock in the chemical industry are as high as 0.369 t :math:`_{CO_2}`/t of ethylene equivalent. We consider process emissions for all the material output, which is a conservative approach since it assumes that all plastic-embedded :math:`CO_2` will eventually be released into the atmosphere. However, plastic disposal in landfilling will avoid, or at least delay, associated :math:`CO_2` emissions.
Circular economy practices drastically reduce the amount of primary feedstock needed for the production of plastics in the model (see `Kullmann et al. <https://doi.org/10.1016/j.energy.2022.124660>`_, `Meys et al. (2021) <https://doi.org/10.1126/science.abg9853>`_, `Meys et al. (2020) <https://doi.org/10/gmxv6z>`_, `Gu et al. <https://doi.org/10/gf8n9w>`_) and, consequently, also the energy demands and level of process emissions. The percentage of plastics that is assumed to be mechanically recycled can be selected in the `config file <https://github.com/PyPSA/pypsa-eur-sec/blob/776596ab9ac6a6cc93422ccfd0383abeffb0baa9/config.default.yaml#L315>`_, as well as
the percentage that is chemically recycled (see `config file <https://github.com/PyPSA/pypsa-eur-sec/blob/776596ab9ac6a6cc93422ccfd0383abeffb0baa9/config.default.yaml#L316>`_). The energy consumption of these recycling processes is 0.547 MWh :math:`_{el}`/t of HVC for mechanical recycling (as indicated in the `config file <https://github.com/PyPSA/pypsa-eur-sec/blob/776596ab9ac6a6cc93422ccfd0383abeffb0baa9/config.default.yaml#L318>`_, `Meys et al. (2020) <https://doi.org/10/gmxv6z>`_) and 6.9 MWh :math:`_{el}`/t of HVC for chemical recycling based on pyrolysis and electric steam cracking (as indicated in the `config file <https://github.com/PyPSA/pypsa-eur-sec/blob/776596ab9ac6a6cc93422ccfd0383abeffb0baa9/config.default.yaml#L319>`_, see the `Materials Economics <https://materialeconomics.com/publications/industrial-transformation-2050>`_ report).
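
A sketch of how the recycling shares reduce the primary feedstock and add electricity demand; the shares below are placeholders, not the config defaults:

.. code-block:: python

   hvc = 1.0e6              # tonnes of HVC demand, assumed
   mech, chem = 0.15, 0.10  # recycled shares, placeholders

   primary = hvc * (1 - mech - chem)                 # tonnes needing primary feedstock
   recycling_el = hvc * (mech * 0.547 + chem * 6.9)  # MWh_el for recycling
   print(primary, "t primary HVC;", recycling_el, "MWh electricity")
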
**Non-metallic Mineral Products**

This subsector includes the manufacturing of cement, ceramics, and glass.

*Cement*
Cement is used in construction to make concrete. The production of cement involves high energy consumption and large process emissions. The calcination of limestone to chemically reactive calcium oxide, also known as lime, involves process emissions of 0.54 t :math:`_{CO_2}`/t cement (see `Akhtar et al. <https://doi.org/10.1109/CITCON.2013.6525276>`_).
.. math::

   CaCO_3 \xrightarrow{} CaO + CO_2
Additionally, :math:`CO_2` is emitted from the combustion of fossil fuels to provide process heat. Cement thereby constitutes the largest source of industry process emissions in Europe.
Cement process emissions can be captured assuming a capture rate of 90%. Whether emissions are captured is decided by the model taking into account the capital costs of carbon capture modules. The electricity and heat demand of process emission carbon capture is currently ignored. For net-zero emission scenarios, the remaining process emissions need to be compensated by negative emissions.
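
Numerically, a 90% capture rate on the calcination emissions leaves a residual that must be offset elsewhere (the cement output is an arbitrary example):

.. code-block:: python

   cement = 1.0e6                 # tonnes of cement, assumed
   process = 0.54 * cement        # t CO2 from calcination
   captured = 0.9 * process       # with a 90% capture rate
   residual = process - captured  # needs negative emissions in net-zero scenarios
   print(captured, residual)
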
With the exception of electricity demand and biomass demand for low-temperature heat (0.06 MWh/t and 0.2 MWh/t), the final energy consumption of this subsector is assumed to be supplied by methane (0.52 MWh/t), which is capable of delivering the required high-temperature heat. This implies a switch from burning solid fuels to burning gas, which will require adjustments of the `kilns <https://doi.org/10.1109/CITCON.2013.6525276>`_. The share of fossil vs. synthetic methane consumed is a result of the optimisation.
@ -495,21 +495,21 @@ The production of glass is assumed to be fully electrified based on the current
**Non-ferrous Metals**

The non-ferrous metal subsector includes the manufacturing of base metals (aluminium, copper, lead, zinc), precious metals (gold, silver), and technology metals (molybdenum, cobalt, silicon).

The manufacturing of aluminium accounts for more than half of the final energy consumption of this subsector. Two alternative processing routes are used today to manufacture aluminium in Europe. The primary route represents 40% of the aluminium production, while the secondary route represents the remaining 60%.

The primary route involves two energy-intensive processes: the production of alumina from bauxite (aluminium ore) and the electrolysis to transform alumina into aluminium via the Hall-Héroult process
.. math::

   2Al_2O_3 + 3C \xrightarrow{} 4Al + 3CO_2
The primary route requires high-enthalpy heat (2.3 MWh/t) to produce alumina, which is supplied by methane and causes process emissions of 1.5 t :math:`_{CO_2}`/t aluminium. According to `Friedrichsen et al. <http://www.umweltbundesamt.de/en/publikationen/comparative-analysis-of-options-potential-for>`_, inert anodes might become commercially available by 2030 that would eliminate the process emissions, but they are not included in the model. Assuming all subprocesses are electrified, the primary route requires 15.4 MWh :math:`_{el}`/t of aluminium.
In the secondary route, scrap aluminium is remelted. The energy demand for this process is only 10% of the primary route and there are no associated process emissions. Assuming all subprocesses are electrified, the secondary route requires 1.7 MWh/t of aluminium. The share of aluminium manufactured by the primary and secondary route can be selected in the `config file <https://github.com/PyPSA/pypsa-eur-sec/blob/3daff49c9999ba7ca7534df4e587e1d516044fc3/config.default.yaml#L297>`_.
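
Given a primary-route share, the electricity demand of aluminium production can be sketched as follows (the share and output below are placeholders, not the config defaults):

.. code-block:: python

   aluminium = 1.0e6    # tonnes per year, assumed
   primary_share = 0.4  # placeholder for the configured share

   # 15.4 MWh/t on the primary route, 1.7 MWh/t on the secondary route
   el_demand = aluminium * (primary_share * 15.4 + (1 - primary_share) * 1.7)
   print(el_demand, "MWh of electricity")
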
For the other non-ferrous metals, we assume the electrification of the entire manufacturing process, with an average electricity demand of 3.2 MWh :math:`_{el}`/t lead equivalent.

**Other Industry Subsectors**
@ -552,7 +552,7 @@ The share of all land transport that is specified to be be FCEV will be converte
FCEVs are typically used to simulate demand for transport that is hard to electrify directly, e.g. heavy construction machinery. But it may also be used to investigate a more widespread adoption of the technology.

*Internal combustion engine vehicles (ICE)*
All land transport that is not specified to be either BEV or FCEV will be treated as conventional ICEs. The transport demand is converted to a demand for oil products (see :ref:`Oil-based products supply`) using the `ICE efficiency
<https://github.com/PyPSA/pypsa-eur-sec/blob/3daff49c9999ba7ca7534df4e587e1d516044fc3/config.default.yaml#L192>`_.
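
Conceptually, the remaining transport demand is divided by the ICE efficiency to obtain the oil demand; the numbers below are placeholders, not the config default:

.. code-block:: python

   transport = 1.0e6     # MWh of land-transport demand not met by BEV/FCEV, assumed
   ice_efficiency = 0.3  # placeholder for the configured ICE efficiency

   oil_demand = transport / ice_efficiency  # MWh of oil products
   print(oil_demand)
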
@ -584,13 +584,13 @@ PyPSA-Eur-Sec includes carbon capture from air (i.e., direct air capture (DAC)),
**Carbon dioxide capture**

For the following point source emissions, carbon capture is applicable:
- Industry process emissions, e.g., from limestone in cement production
- Methane or biomass used for process heat in the industry
- Hydrogen production by SMR
- CHP plants using biomass or methane
@ -599,12 +599,12 @@ For the following point source emissions, carbon capture is applicable:
Point source emissions are captured assuming a capture rate, e.g. 90%, which can be specified in the `config file <https://github.com/PyPSA/pypsa-eur-sec/blob/3daff49c9999ba7ca7534df4e587e1d516044fc3/config.default.yaml#L249>`_. The electricity and heat demand of process emission carbon capture
is currently ignored.

DAC (if `included <https://github.com/PyPSA/pypsa-eur-sec/blob/3daff49c9999ba7ca7534df4e587e1d516044fc3/config.default.yaml#L243>`_) includes the adsorption phase, where electricity and heat are consumed to assist the adsorption process and regenerate the adsorbent. It also includes the drying and compression of :math:`CO_2` prior to storage, which consumes electricity and rejects heat.
*Carbon dioxide usage*

Captured :math:`CO_2` can be used to produce synthetic methane and synthetic oil products (e.g.
naphtha). If captured carbon is used, the :math:`CO_2` emissions of the synthetic fuels are net-neutral.

*Carbon dioxide sequestration*
@ -612,7 +612,4 @@ Captured :math:`CO_2` can also be sequestered underground up to an annual seques
*Carbon dioxide transport*

Carbon dioxide can be modelled as a single node for Europe (in this case, :math:`CO_2` transport constraints are neglected). A network for modelling the transport of :math:`CO_2` among the different nodes can also be created if selected in the `config file <https://github.com/PyPSA/pypsa-eur-sec/blob/3daff49c9999ba7ca7534df4e587e1d516044fc3/config.default.yaml#L248>`_.
View File
@ -1,3 +1,3 @@
font.family: sans-serif
font.sans-serif: Ubuntu, DejaVu Sans
image.cmap: viridis
View File
@ -1,70 +1,70 @@
# -*- coding: utf-8 -*-
import logging

logger = logging.getLogger(__name__)

import pandas as pd

idx = pd.IndexSlice

import numpy as np
import pypsa
import yaml
from add_existing_baseyear import add_build_year_to_new_assets
from helper import override_component_attrs, update_config_with_sector_opts


def add_brownfield(n, n_p, year):
    logger.info(f"Preparing brownfield for the year {year}")

    # electric transmission grid set optimised capacities of previous as minimum
    n.lines.s_nom_min = n_p.lines.s_nom_opt
    dc_i = n.links[n.links.carrier == "DC"].index
    n.links.loc[dc_i, "p_nom_min"] = n_p.links.loc[dc_i, "p_nom_opt"]

    for c in n_p.iterate_components(["Link", "Generator", "Store"]):
        attr = "e" if c.name == "Store" else "p"

        # first, remove generators, links and stores that track
        # CO2 or global EU values since these are already in n
        n_p.mremove(c.name, c.df.index[c.df.lifetime == np.inf])

        # remove assets whose build_year + lifetime < year
        n_p.mremove(c.name, c.df.index[c.df.build_year + c.df.lifetime < year])

        # remove assets if their optimized nominal capacity is lower than a threshold
        # since CHP heat Link is proportional to CHP electric Link, make sure threshold is compatible
        chp_heat = c.df.index[
            (
                c.df[attr + "_nom_extendable"]
                & c.df.index.str.contains("urban central")
                & c.df.index.str.contains("CHP")
                & c.df.index.str.contains("heat")
            )
        ]

        threshold = snakemake.config["existing_capacities"]["threshold_capacity"]

        if not chp_heat.empty:
            threshold_chp_heat = (
                threshold
                * c.df.efficiency[chp_heat.str.replace("heat", "electric")].values
                * c.df.p_nom_ratio[chp_heat.str.replace("heat", "electric")].values
                / c.df.efficiency[chp_heat].values
            )
            n_p.mremove(
                c.name,
                chp_heat[c.df.loc[chp_heat, attr + "_nom_opt"] < threshold_chp_heat],
            )

        n_p.mremove(
            c.name,
            c.df.index[
                c.df[attr + "_nom_extendable"]
                & ~c.df.index.isin(chp_heat)
                & (c.df[attr + "_nom_opt"] < threshold)
            ],
        )

        # copy over assets but fix their capacity
@@ -74,56 +74,68 @@ def add_brownfield(n, n_p, year):
        n.import_components_from_dataframe(c.df, c.name)

        # copy time-dependent
        selection = n.component_attrs[c.name].type.str.contains(
            "series"
        ) & n.component_attrs[c.name].status.str.contains("Input")
        for tattr in n.component_attrs[c.name].index[selection]:
            n.import_series_from_dataframe(c.pnl[tattr], c.name, tattr)

    # deal with gas network
    pipe_carrier = ["gas pipeline"]
    if snakemake.config["sector"]["H2_retrofit"]:
        # drop capacities of previous year to avoid duplicating
        to_drop = n.links.carrier.isin(pipe_carrier) & (n.links.build_year != year)
        n.mremove("Link", n.links.loc[to_drop].index)

        # subtract the already retrofitted from today's gas grid capacity
        h2_retrofitted_fixed_i = n.links[
            (n.links.carrier == "H2 pipeline retrofitted")
            & (n.links.build_year != year)
        ].index
        gas_pipes_i = n.links[n.links.carrier.isin(pipe_carrier)].index
        CH4_per_H2 = 1 / snakemake.config["sector"]["H2_retrofit_capacity_per_CH4"]
        fr = "H2 pipeline retrofitted"
        to = "gas pipeline"
        # today's pipe capacity
        pipe_capacity = n.links.loc[gas_pipes_i, "p_nom"]
        # already retrofitted capacity from gas -> H2
        already_retrofitted = (
            n.links.loc[h2_retrofitted_fixed_i, "p_nom"]
            .rename(lambda x: x.split("-2")[0].replace(fr, to))
            .groupby(level=0)
            .sum()
        )
        remaining_capacity = (
            pipe_capacity
            - CH4_per_H2
            * already_retrofitted.reindex(index=pipe_capacity.index).fillna(0)
        )
        n.links.loc[gas_pipes_i, "p_nom"] = remaining_capacity
    else:
        new_pipes = n.links.carrier.isin(pipe_carrier) & (
            n.links.build_year == year
        )
        n.links.loc[new_pipes, "p_nom"] = 0.0
        n.links.loc[new_pipes, "p_nom_min"] = 0.0
# %%
if __name__ == "__main__":
    if "snakemake" not in globals():
        from helper import mock_snakemake

        snakemake = mock_snakemake(
            "add_brownfield",
            simpl="",
            clusters="37",
            opts="",
            lv=1.0,
            sector_opts="168H-T-H-B-I-solar+p3-dist1",
            planning_horizons=2030,
        )

    logging.basicConfig(level=snakemake.config["logging_level"])

    update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts)

    logger.info(f"Preparing brownfield from the file {snakemake.input.network_p}")

View File

@@ -1,23 +1,25 @@
# -*- coding: utf-8 -*-
import logging

logger = logging.getLogger(__name__)

import pandas as pd

idx = pd.IndexSlice

from types import SimpleNamespace

import numpy as np
import pypsa
import xarray as xr
import yaml
from helper import override_component_attrs, update_config_with_sector_opts
from prepare_sector_network import cluster_heat_buses, define_spatial, prepare_costs

spatial = SimpleNamespace()


def add_build_year_to_new_assets(n, baseyear):
    """
    Parameters
@@ -29,8 +31,7 @@ def add_build_year_to_new_assets(n, baseyear):
    # Give assets with lifetimes and no build year the build year baseyear
    for c in n.iterate_components(["Link", "Generator", "Store"]):
        assets = c.df.index[(c.df.lifetime != np.inf) & (c.df.build_year == 0)]
        c.df.loc[assets, "build_year"] = baseyear

        # add -baseyear to name
@@ -39,40 +40,34 @@ def add_build_year_to_new_assets(n, baseyear):
        c.df.rename(index=rename, inplace=True)

        # rename time-dependent
        selection = n.component_attrs[c.name].type.str.contains(
            "series"
        ) & n.component_attrs[c.name].status.str.contains("Input")
        for attr in n.component_attrs[c.name].index[selection]:
            c.pnl[attr].rename(columns=rename, inplace=True)


def add_existing_renewables(df_agg):
    """
    Append existing renewables to the df_agg pd.DataFrame with the conventional
    power plants.
    """
    cc = pd.read_csv(snakemake.input.country_codes, index_col=0)

    carriers = {"solar": "solar", "onwind": "onwind", "offwind": "offwind-ac"}

    for tech in ["solar", "onwind", "offwind"]:
        carrier = carriers[tech]

        df = pd.read_csv(snakemake.input[f"existing_{tech}"], index_col=0).fillna(0.0)
        df.columns = df.columns.astype(int)

        rename_countries = {
            "Czechia": "Czech Republic",
            "UK": "United Kingdom",
            "Bosnia Herzg": "Bosnia Herzegovina",
            "North Macedonia": "Macedonia",
        }
        df.rename(index=rename_countries, inplace=True)
@@ -80,16 +75,21 @@ def add_existing_renewables(df_agg):
        df.rename(index=cc["2 letter code (ISO-3166-2)"], inplace=True)

        # calculate yearly differences
        df.insert(loc=0, value=0.0, column="1999")
        df = df.diff(axis=1).drop("1999", axis=1).clip(lower=0)

        # distribute capacities among nodes according to capacity factor
        # weighting with nodal_fraction
        elec_buses = n.buses.index[n.buses.carrier == "AC"].union(
            n.buses.index[n.buses.carrier == "DC"]
        )
        nodal_fraction = pd.Series(0.0, elec_buses)

        for country in n.buses.loc[elec_buses, "country"].unique():
            gens = n.generators.index[
                (n.generators.index.str[:2] == country)
                & (n.generators.carrier == carrier)
            ]
            cfs = n.generators_t.p_max_pu[gens].mean()
            cfs_key = cfs / cfs.sum()
            nodal_fraction.loc[n.generators.loc[gens, "bus"]] = cfs_key.values
@@ -102,7 +102,7 @@ def add_existing_renewables(df_agg):
        for node in nodal_df.index:
            name = f"{node}-{tech}-{year}"
            capacity = nodal_df.loc[node, year]
            if capacity > 0.0:
                df_agg.at[name, "Fueltype"] = tech
                df_agg.at[name, "Capacity"] = capacity
                df_agg.at[name, "DateIn"] = year
@@ -120,35 +120,34 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, baseyear):
        to read lifetime to estimate YearDecomissioning
    baseyear : int
    """
    logger.debug(
        f"Adding power capacities installed before {baseyear} from powerplants.csv"
    )

    df_agg = pd.read_csv(snakemake.input.powerplants, index_col=0)

    rename_fuel = {
        "Hard Coal": "coal",
        "Lignite": "lignite",
        "Nuclear": "nuclear",
        "Oil": "oil",
        "OCGT": "OCGT",
        "CCGT": "CCGT",
        "Natural Gas": "gas",
        "Bioenergy": "urban central solid biomass CHP",
    }

    fueltype_to_drop = [
        "Hydro",
        "Wind",
        "Solar",
        "Geothermal",
        "Waste",
        "Other",
        "CCGT, Thermal",
    ]

    technology_to_drop = ["Pv", "Storage Technologies"]

    # drop unused fueltypes and technologies
    df_agg.drop(df_agg.index[df_agg.Fueltype.isin(fueltype_to_drop)], inplace=True)
@@ -157,16 +156,15 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, baseyear):
    # Intermediate fix for DateIn & DateOut
    # Fill missing DateIn
    biomass_i = df_agg.loc[df_agg.Fueltype == "urban central solid biomass CHP"].index
    mean = df_agg.loc[biomass_i, "DateIn"].mean()
    df_agg.loc[biomass_i, "DateIn"] = df_agg.loc[biomass_i, "DateIn"].fillna(int(mean))
    # Fill missing DateOut
    dateout = df_agg.loc[biomass_i, "DateIn"] + snakemake.config["costs"]["lifetime"]
    df_agg.loc[biomass_i, "DateOut"] = df_agg.loc[biomass_i, "DateOut"].fillna(dateout)

    # drop assets which are already phased out / decommissioned
    phased_out = df_agg[df_agg["DateOut"] < baseyear].index
    df_agg.drop(phased_out, inplace=True)

    # calculate remaining lifetime before phase-out (+1 because assuming
@@ -190,22 +188,21 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, baseyear):
    add_existing_renewables(df_agg)

    df_agg["grouping_year"] = np.take(
        grouping_years, np.digitize(df_agg.DateIn, grouping_years, right=True)
    )

    df = df_agg.pivot_table(
        index=["grouping_year", "Fueltype"],
        columns="cluster_bus",
        values="Capacity",
        aggfunc="sum",
    )

    lifetime = df_agg.pivot_table(
        index=["grouping_year", "Fueltype"],
        columns="cluster_bus",
        values="lifetime",
        aggfunc="mean",  # currently taken mean for clustering lifetimes
    )

    carrier = {
@@ -215,78 +212,89 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, baseyear):
        "oil": "oil",
        "lignite": "lignite",
        "nuclear": "uranium",
        "urban central solid biomass CHP": "biomass",
    }
    for grouping_year, generator in df.index:
        # capacity is the capacity in MW at each node for this
        capacity = df.loc[grouping_year, generator]
        capacity = capacity[~capacity.isna()]
        capacity = capacity[
            capacity > snakemake.config["existing_capacities"]["threshold_capacity"]
        ]
        suffix = "-ac" if generator == "offwind" else ""
        name_suffix = f" {generator}{suffix}-{grouping_year}"
        asset_i = capacity.index + name_suffix
        if generator in ["solar", "onwind", "offwind"]:
            # to consider electricity grid connection costs or a split between
            # solar utility and rooftop as well, rather take cost assumptions
            # from existing network than from the cost database
            capital_cost = n.generators.loc[
                n.generators.carrier == generator + suffix, "capital_cost"
            ].mean()
            marginal_cost = n.generators.loc[
                n.generators.carrier == generator + suffix, "marginal_cost"
            ].mean()
            # check if assets are already in network (e.g. for 2020)
            already_build = n.generators.index.intersection(asset_i)
            new_build = asset_i.difference(n.generators.index)

            # this is for the year 2020
            if not already_build.empty:
                n.generators.loc[already_build, "p_nom_min"] = capacity.loc[
                    already_build.str.replace(name_suffix, "")
                ].values
            new_capacity = capacity.loc[new_build.str.replace(name_suffix, "")]

            if "m" in snakemake.wildcards.clusters:
                for ind in new_capacity.index:
                    # existing capacities are split evenly among regions in every country
                    inv_ind = [i for i in inv_busmap[ind]]

                    # for offshore the splitting only includes coastal regions
                    inv_ind = [
                        i for i in inv_ind if (i + name_suffix) in n.generators.index
                    ]

                    p_max_pu = n.generators_t.p_max_pu[
                        [i + name_suffix for i in inv_ind]
                    ]
                    p_max_pu.columns = [i + name_suffix for i in inv_ind]

                    n.madd(
                        "Generator",
                        [i + name_suffix for i in inv_ind],
                        bus=ind,
                        carrier=generator,
                        p_nom=new_capacity[ind]
                        / len(inv_ind),  # split among regions in a country
                        marginal_cost=marginal_cost,
                        capital_cost=capital_cost,
                        efficiency=costs.at[generator, "efficiency"],
                        p_max_pu=p_max_pu,
                        build_year=grouping_year,
                        lifetime=costs.at[generator, "lifetime"],
                    )

            else:
                p_max_pu = n.generators_t.p_max_pu[
                    capacity.index + f" {generator}{suffix}-{baseyear}"
                ]

                if not new_build.empty:
                    n.madd(
                        "Generator",
                        new_capacity.index,
                        suffix=" " + name_suffix,
                        bus=new_capacity.index,
                        carrier=generator,
                        p_nom=new_capacity,
                        marginal_cost=marginal_cost,
                        capital_cost=capital_cost,
                        efficiency=costs.at[generator, "efficiency"],
                        p_max_pu=p_max_pu.rename(columns=n.generators.bus),
                        build_year=grouping_year,
                        lifetime=costs.at[generator, "lifetime"],
                    )

        else:
@@ -296,56 +304,79 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, baseyear):
            already_build = n.links.index.intersection(asset_i)
            new_build = asset_i.difference(n.links.index)
            lifetime_assets = lifetime.loc[grouping_year, generator].dropna()

            # this is for the year 2020
            if not already_build.empty:
                n.links.loc[already_build, "p_nom_min"] = capacity.loc[
                    already_build.str.replace(name_suffix, "")
                ].values

            if not new_build.empty:
                new_capacity = capacity.loc[new_build.str.replace(name_suffix, "")]

                if generator != "urban central solid biomass CHP":
                    n.madd(
                        "Link",
                        new_capacity.index,
                        suffix=name_suffix,
                        bus0=bus0,
                        bus1=new_capacity.index,
                        bus2="co2 atmosphere",
                        carrier=generator,
                        marginal_cost=costs.at[generator, "efficiency"]
                        * costs.at[generator, "VOM"],  # NB: VOM is per MWel
                        capital_cost=costs.at[generator, "efficiency"]
                        * costs.at[generator, "fixed"],  # NB: fixed cost is per MWel
                        p_nom=new_capacity / costs.at[generator, "efficiency"],
                        efficiency=costs.at[generator, "efficiency"],
                        efficiency2=costs.at[carrier[generator], "CO2 intensity"],
                        build_year=grouping_year,
                        lifetime=lifetime_assets.loc[new_capacity.index],
                    )
                else:
                    key = "central solid biomass CHP"
                    n.madd(
                        "Link",
                        new_capacity.index,
                        suffix=name_suffix,
                        bus0=spatial.biomass.df.loc[new_capacity.index]["nodes"].values,
                        bus1=new_capacity.index,
                        bus2=new_capacity.index + " urban central heat",
                        carrier=generator,
                        p_nom=new_capacity / costs.at[key, "efficiency"],
                        capital_cost=costs.at[key, "fixed"]
                        * costs.at[key, "efficiency"],
                        marginal_cost=costs.at[key, "VOM"],
                        efficiency=costs.at[key, "efficiency"],
                        build_year=grouping_year,
                        efficiency2=costs.at[key, "efficiency-heat"],
                        lifetime=lifetime_assets.loc[new_capacity.index],
                    )

    # check if existing capacities are larger than technical potential
    existing_large = n.generators[
        n.generators["p_nom_min"] > n.generators["p_nom_max"]
    ].index
    if len(existing_large):
        logger.warning(
            f"Existing capacities larger than technical potential for {existing_large},\
            adjust technical potential to existing capacities"
        )
        n.generators.loc[existing_large, "p_nom_max"] = n.generators.loc[
            existing_large, "p_nom_min"
        ]

def add_heating_capacities_installed_before_baseyear(
    n,
    baseyear,
    grouping_years,
    ashp_cop,
    gshp_cop,
    time_dep_hp_cop,
    costs,
    default_lifetime,
):
    """
    Parameters
    ----------
@@ -368,20 +399,20 @@ def add_heating_capacities_installed_before_baseyear(n, baseyear, grouping_years
    # retrieve existing heating capacities
    techs = [
        "gas boiler",
        "oil boiler",
        "resistive heater",
        "air heat pump",
        "ground heat pump",
    ]
    df = pd.read_csv(snakemake.input.existing_heating, index_col=0, header=0)

    # data for Albania, Montenegro and Macedonia not included in database
    df.loc["Albania"] = np.nan
    df.loc["Montenegro"] = np.nan
    df.loc["Macedonia"] = np.nan

    df.fillna(0.0, inplace=True)

    # convert GW to MW
    df *= 1e3
@@ -391,8 +422,8 @@ def add_heating_capacities_installed_before_baseyear(n, baseyear, grouping_years
    df.rename(index=cc["2 letter code (ISO-3166-2)"], inplace=True)

    # coal and oil boilers are assimilated to oil boilers
    df["oil boiler"] = df["oil boiler"] + df["coal boiler"]
    df.drop(["coal boiler"], axis=1, inplace=True)

    # distribute technologies to nodes by population
    pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0)
@@ -403,36 +434,54 @@ def add_heating_capacities_installed_before_baseyear(n, baseyear, grouping_years
    # split existing capacities between residential and services
    # proportional to energy demand
    ratio_residential = pd.Series(
        [
            (
                n.loads_t.p_set.sum()["{} residential rural heat".format(node)]
                / (
                    n.loads_t.p_set.sum()["{} residential rural heat".format(node)]
                    + n.loads_t.p_set.sum()["{} services rural heat".format(node)]
                )
            )
            for node in nodal_df.index
        ],
        index=nodal_df.index,
    )

    for tech in techs:
        nodal_df["residential " + tech] = nodal_df[tech] * ratio_residential
        nodal_df["services " + tech] = nodal_df[tech] * (1 - ratio_residential)

    names = [
        "residential rural",
        "services rural",
        "residential urban decentral",
        "services urban decentral",
        "urban central",
    ]

    nodes = {}
    p_nom = {}
    for name in names:
        name_type = "central" if name == "urban central" else "decentral"
        nodes[name] = pd.Index(
            [
                n.buses.at[index, "location"]
                for index in n.buses.index[
                    n.buses.index.str.contains(name)
                    & n.buses.index.str.contains("heat")
                ]
            ]
        )
        heat_pump_type = "air" if "urban" in name else "ground"
        heat_type = "residential" if "residential" in name else "services"

        if name == "urban central":
            p_nom[name] = nodal_df["air heat pump"][nodes[name]]
        else:
            p_nom[name] = nodal_df[f"{heat_type} {heat_pump_type} heat pump"][
                nodes[name]
            ]

        # Add heat pumps
        costs_name = f"decentral {heat_pump_type}-sourced heat pump"
@@ -442,131 +491,182 @@ def add_heating_capacities_installed_before_baseyear(n, baseyear, grouping_years
        if time_dep_hp_cop:
            efficiency = cop[heat_pump_type][nodes[name]]
        else:
            efficiency = costs.at[costs_name, "efficiency"]

        for i, grouping_year in enumerate(grouping_years):
            if int(grouping_year) + default_lifetime <= int(baseyear):
                continue

            # installation is assumed to be linear for the past 25 years (default lifetime)
            ratio = (int(grouping_year) - int(grouping_years[i - 1])) / default_lifetime

            n.madd(
                "Link",
                nodes[name],
                suffix=f" {name} {heat_pump_type} heat pump-{grouping_year}",
                bus0=nodes[name],
                bus1=nodes[name] + " " + name + " heat",
                carrier=f"{name} {heat_pump_type} heat pump",
                efficiency=efficiency,
                capital_cost=costs.at[costs_name, "efficiency"]
                * costs.at[costs_name, "fixed"],
                p_nom=p_nom[name] * ratio / costs.at[costs_name, "efficiency"],
                build_year=int(grouping_year),
                lifetime=costs.at[costs_name, "lifetime"],
            )

            # add resistive heater, gas boilers and oil boilers
            # (50% capacities to rural buses, 50% to urban buses)
            n.madd(
                "Link",
                nodes[name],
                suffix=f" {name} resistive heater-{grouping_year}",
                bus0=nodes[name],
                bus1=nodes[name] + " " + name + " heat",
                carrier=name + " resistive heater",
                efficiency=costs.at[name_type + " resistive heater", "efficiency"],
                capital_cost=costs.at[name_type + " resistive heater", "efficiency"]
                * costs.at[name_type + " resistive heater", "fixed"],
                p_nom=0.5
                * nodal_df[f"{heat_type} resistive heater"][nodes[name]]
                * ratio
                / costs.at[name_type + " resistive heater", "efficiency"],
                build_year=int(grouping_year),
                lifetime=costs.at[costs_name, "lifetime"],
            )

            n.madd(
                "Link",
                nodes[name],
                suffix=f" {name} gas boiler-{grouping_year}",
                bus0=spatial.gas.nodes,
                bus1=nodes[name] + " " + name + " heat",
                bus2="co2 atmosphere",
                carrier=name + " gas boiler",
                efficiency=costs.at[name_type + " gas boiler", "efficiency"],
                efficiency2=costs.at["gas", "CO2 intensity"],
                capital_cost=costs.at[name_type + " gas boiler", "efficiency"]
                * costs.at[name_type + " gas boiler", "fixed"],
                p_nom=0.5
                * nodal_df[f"{heat_type} gas boiler"][nodes[name]]
                * ratio
                / costs.at[name_type + " gas boiler", "efficiency"],
                build_year=int(grouping_year),
                lifetime=costs.at[name_type + " gas boiler", "lifetime"],
            )

            n.madd(
                "Link",
                nodes[name],
                suffix=f" {name} oil boiler-{grouping_year}",
                bus0=spatial.oil.nodes,
                bus1=nodes[name] + " " + name + " heat",
                bus2="co2 atmosphere",
                carrier=name + " oil boiler",
                efficiency=costs.at["decentral oil boiler", "efficiency"],
                efficiency2=costs.at["oil", "CO2 intensity"],
                capital_cost=costs.at["decentral oil boiler", "efficiency"]
                * costs.at["decentral oil boiler", "fixed"],
                p_nom=0.5
                * nodal_df[f"{heat_type} oil boiler"][nodes[name]]
                * ratio
                / costs.at["decentral oil boiler", "efficiency"],
                build_year=int(grouping_year),
                lifetime=costs.at[name_type + " gas boiler", "lifetime"],
            )

            # delete links with p_nom=nan corresponding to extra nodes in country
            n.mremove(
                "Link",
                [
                    index
                    for index in n.links.index.to_list()
                    if str(grouping_year) in index and np.isnan(n.links.p_nom[index])
                ],
            )

            # delete links with capacities below threshold
            threshold = snakemake.config["existing_capacities"]["threshold_capacity"]
            n.mremove(
                "Link",
                [
                    index
                    for index in n.links.index.to_list()
                    if str(grouping_year) in index and n.links.p_nom[index] < threshold
                ],
            )
# %%
if __name__ == "__main__":
    if "snakemake" not in globals():
        from helper import mock_snakemake

        snakemake = mock_snakemake(
            "add_existing_baseyear",
            simpl="",
            clusters="45",
            lv=1.0,
            opts="",
            sector_opts="8760H-T-H-B-I-A-solar+p3-dist1",
            planning_horizons=2020,
        )

    logging.basicConfig(level=snakemake.config["logging_level"])

    update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts)

    options = snakemake.config["sector"]
    opts = snakemake.wildcards.sector_opts.split("-")

    baseyear = snakemake.config["scenario"]["planning_horizons"][0]

    overrides = override_component_attrs(snakemake.input.overrides)
    n = pypsa.Network(snakemake.input.network, override_component_attrs=overrides)

    # define spatial resolution of carriers
    spatial = define_spatial(n.buses[n.buses.carrier == "AC"].index, options)
    add_build_year_to_new_assets(n, baseyear)

    Nyears = n.snapshot_weightings.generators.sum() / 8760.0
    costs = prepare_costs(
        snakemake.input.costs,
        snakemake.config["costs"]["USD2013_to_EUR2013"],
        snakemake.config["costs"]["discountrate"],
        Nyears,
        snakemake.config["costs"]["lifetime"],
    )

    grouping_years_power = snakemake.config["existing_capacities"][
        "grouping_years_power"
    ]
    grouping_years_heat = snakemake.config["existing_capacities"]["grouping_years_heat"]
    add_power_capacities_installed_before_baseyear(
        n, grouping_years_power, costs, baseyear
    )

    if "H" in opts:
        time_dep_hp_cop = options["time_dep_hp_cop"]
        ashp_cop = (
            xr.open_dataarray(snakemake.input.cop_air_total)
            .to_pandas()
            .reindex(index=n.snapshots)
        )
        gshp_cop = (
            xr.open_dataarray(snakemake.input.cop_soil_total)
            .to_pandas()
            .reindex(index=n.snapshots)
        )
        default_lifetime = snakemake.config["costs"]["lifetime"]
        add_heating_capacities_installed_before_baseyear(
            n,
            baseyear,
            grouping_years_heat,
            ashp_cop,
            gshp_cop,
            time_dep_hp_cop,
            costs,
            default_lifetime,
        )

    if options.get("cluster_heat_buses", False):
        cluster_heat_buses(n)
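As a small illustration of the grouping-year binning used in add_power_capacities_installed_before_baseyear above, a sketch with made-up commissioning years (not model data):

    import numpy as np

    grouping_years = [1980, 1990, 2000, 2010, 2020]
    date_in = np.array([1975, 1990, 2005, 2019])

    # np.digitize(..., right=True) gives the index of the first grouping year
    # that is >= DateIn; np.take maps that index back to the year label
    bins = np.digitize(date_in, grouping_years, right=True)
    print(np.take(grouping_years, bins))  # [1980 1990 2010 2020]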

View File

@@ -1,4 +1,7 @@
# -*- coding: utf-8 -*-
"""
Build ammonia production.
"""

import pandas as pd
@@ -27,17 +30,20 @@ country_to_alpha2 = {
    "United Kingdom": "GB",
}

if __name__ == "__main__":
    if "snakemake" not in globals():
        from helper import mock_snakemake

        snakemake = mock_snakemake("build_ammonia_production")

    ammonia = pd.read_excel(
        snakemake.input.usgs,
        sheet_name="T12",
        skiprows=5,
        header=0,
        index_col=0,
        skipfooter=19,
    )

    ammonia.rename(country_to_alpha2, inplace=True)

View File

@@ -1,27 +1,29 @@
# -*- coding: utf-8 -*-
import geopandas as gpd
import pandas as pd


def build_nuts_population_data(year=2013):
    pop = pd.read_csv(
        snakemake.input.nuts3_population,
        sep=r"\,| \t|\t",
        engine="python",
        na_values=[":"],
        index_col=1,
    )[str(year)]

    # only countries
    pop.drop("EU28", inplace=True)

    # mapping from Cantons to NUTS3
    cantons = pd.read_csv(snakemake.input.swiss_cantons)
    cantons = cantons.set_index(cantons.HASC.str[3:]).NUTS
    cantons = cantons.str.pad(5, side="right", fillchar="0")

    # get population by NUTS3
    swiss = pd.read_excel(
        snakemake.input.swiss_population, skiprows=3, index_col=0
    ).loc["Residents in 1000"]
    swiss = swiss.rename(cantons).filter(like="CH")

    # aggregate also to higher order NUTS levels
@@ -29,21 +31,21 @@ def build_nuts_population_data(year=2013):
    # merge Europe + Switzerland
    pop = pd.concat([pop, pd.concat(swiss)]).to_frame("total")

    # add missing manually
    pop["AL"] = 2893
    pop["BA"] = 3871
    pop["RS"] = 7210

    pop["ct"] = pop.index.str[:2]

    return pop


def enspreso_biomass_potentials(year=2020, scenario="ENS_Low"):
    """
    Loads the JRC ENSPRESO biomass potentials.

    Parameters
    ----------
    year : int
@@ -51,7 +53,7 @@ def enspreso_biomass_potentials(year=2020, scenario="ENS_Low"):
        Can be {2010, 2020, 2030, 2040, 2050}.
    scenario : str
        The scenario. Can be {"ENS_Low", "ENS_Med", "ENS_High"}.

    Returns
    -------
    pd.DataFrame
@@ -64,13 +66,13 @@ def enspreso_biomass_potentials(year=2020, scenario="ENS_Low"):
        sheet_name="Glossary",
        usecols="B:D",
        skiprows=1,
        index_col=0,
    )

    df = pd.read_excel(
        str(snakemake.input.enspreso_biomass),
        sheet_name="ENER - NUTS2 BioCom E",
        usecols="A:H",
    )

    df["group"] = df["E-Comm"].map(glossary.group)
@@ -81,9 +83,9 @@ def enspreso_biomass_potentials(year=2020, scenario="ENS_Low"):
        "NUST2": "NUTS2",
    }
    df.rename(columns=to_rename, inplace=True)

    # fill up with NUTS0 if NUTS2 is not given
    df.NUTS2 = df.apply(lambda x: x.NUTS0 if x.NUTS2 == "-" else x.NUTS2, axis=1)

    # convert PJ to TWh
    df.potential /= 3.6
@@ -92,32 +94,31 @@ def enspreso_biomass_potentials(year=2020, scenario="ENS_Low"):
    dff = df.query("Year == @year and Scenario == @scenario")

    bio = dff.groupby(["NUTS2", "commodity"]).potential.sum().unstack()

    # currently Serbia and Kosovo not split, so aggregate
    bio.loc["RS"] += bio.loc["XK"]
    bio.drop("XK", inplace=True)

    return bio


def disaggregate_nuts0(bio):
    """
    Some commodities are only given on NUTS0 level. These are disaggregated
    here using the NUTS2 population as distribution key.

    Parameters
    ----------
    bio : pd.DataFrame
        from enspreso_biomass_potentials()

    Returns
    -------
    pd.DataFrame
    """
    pop = build_nuts_population_data()

    # get population in nuts2
    pop_nuts2 = pop.loc[pop.index.str.len() == 4]
    by_country = pop_nuts2.total.groupby(pop_nuts2.ct).sum()
@@ -130,7 +131,7 @@ def disaggregate_nuts0(bio):
    # update inplace
    bio.update(bio_nodal)

    return bio
@@ -141,9 +142,11 @@ def build_nuts2_shapes():
    - consistently name ME, MK
    """
    nuts2 = gpd.GeoDataFrame(
        gpd.read_file(snakemake.input.nuts2).set_index("id").geometry
    )

    countries = gpd.read_file(snakemake.input.country_shapes).set_index("name")
    missing_iso2 = countries.index.intersection(["AL", "RS", "BA"])
    missing = countries.loc[missing_iso2]
@@ -153,14 +156,16 @@ def build_nuts2_shapes():


def area(gdf):
    """
    Returns area of GeoDataFrame geometries in square kilometers.
    """
    return gdf.to_crs(epsg=3035).area.div(1e6)


def convert_nuts2_to_regions(bio_nuts2, regions):
    """
    Converts biomass potentials given in NUTS2 to PyPSA-Eur regions based on
    the overlay of both GeoDataFrames in proportion to the area.

    Parameters
    ----------
@@ -173,7 +178,7 @@ def convert_nuts2_to_regions(bio_nuts2, regions):
    -------
    gpd.GeoDataFrame
    """

    # calculate area of nuts2 regions
    bio_nuts2["area_nuts2"] = area(bio_nuts2)
@@ -183,22 +188,25 @@ def convert_nuts2_to_regions(bio_nuts2, regions):
    overlay["share"] = area(overlay) / overlay["area_nuts2"]

    # multiply all nuts2-level values with share of nuts2 inside region
    adjust_cols = overlay.columns.difference(
        {"name", "area_nuts2", "geometry", "share"}
    )
    overlay[adjust_cols] = overlay[adjust_cols].multiply(overlay["share"], axis=0)

    bio_regions = overlay.groupby("name").sum()

    bio_regions.drop(["area_nuts2", "share"], axis=1, inplace=True)

    return bio_regions


if __name__ == "__main__":
    if "snakemake" not in globals():
        from helper import mock_snakemake

        snakemake = mock_snakemake("build_biomass_potentials", simpl="", clusters="5")

    config = snakemake.config["biomass"]
    year = config["year"]
    scenario = config["scenario"]
@@ -219,7 +227,7 @@ if __name__ == "__main__":
    grouper = {v: k for k, vv in config["classes"].items() for v in vv}
    df = df.groupby(grouper, axis=1).sum()
    df *= 1e6  # TWh/a to MWh/a
    df.index.name = "MWh/a"

    df.to_csv(snakemake.output.biomass_potentials)
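A toy version of the population-key disaggregation performed by disaggregate_nuts0 above, with made-up numbers:

    import pandas as pd

    # national (NUTS0) potential and NUTS2 population within the same country
    national_potential = 100.0  # TWh/a, made-up
    pop_nuts2 = pd.Series({"DE11": 4.0, "DE12": 6.0})  # million inhabitants, made-up

    share = pop_nuts2 / pop_nuts2.sum()
    print(national_potential * share)  # DE11: 40.0, DE12: 60.0 TWh/a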

View File

@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
"""
Reads biomass transport costs for different countries of the JRC report.

"The JRC-EU-TIMES model.
Bioenergy potentials
@@ -18,29 +19,24 @@ import tabula as tbl
ENERGY_CONTENT = 4.8  # unit MWh/t (wood pellets)


def get_countries():
    pandas_options = dict(skiprows=range(6), header=None, index_col=0)

    return tbl.read_pdf(
        str(snakemake.input.transport_cost_data),
        pages="145",
        multiple_tables=False,
        pandas_options=pandas_options,
    )[0].index


def get_cost_per_tkm(page, countries):
    pandas_options = dict(
        skiprows=range(6),
        header=0,
        sep=" |,",
        engine="python",
        index_col=False,
    )
@@ -48,16 +44,15 @@ def get_cost_per_tkm(page, countries):
        str(snakemake.input.transport_cost_data),
        pages=page,
        multiple_tables=False,
        pandas_options=pandas_options,
    )[0]

    sc.index = countries
    sc.columns = sc.columns.str.replace("€", "EUR")

    return sc


def build_biomass_transport_costs():
    countries = get_countries()

    sc1 = get_cost_per_tkm(146, countries)
@@ -72,11 +67,7 @@ def build_biomass_transport_costs():
    transport_costs.name = "EUR/km/MWh"

    # rename country names
    to_rename = {"UK": "GB", "XK": "KO", "EL": "GR"}
    transport_costs.rename(to_rename, inplace=True)

    # add missing Norway with data from Sweden
@@ -86,5 +77,4 @@ def build_biomass_transport_costs():
if __name__ == "__main__":
    build_biomass_transport_costs()
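For reference, the unit conversion implied by ENERGY_CONTENT above: a cost in EUR per tonne-kilometre divided by 4.8 MWh/t yields EUR/km/MWh. A quick sketch with a made-up cost figure:

    cost_per_tkm = 0.12  # EUR/(t km), made-up figure
    cost_per_km_mwh = cost_per_tkm / 4.8  # wood pellet energy content in MWh/t
    print(f"{cost_per_km_mwh:.3f} EUR/km/MWh")  # 0.025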

View File

@@ -1,31 +1,38 @@
# -*- coding: utf-8 -*-
"""
Build clustered population layouts.
"""
import atlite
import geopandas as gpd
import pandas as pd
import xarray as xr

if __name__ == "__main__":
    if "snakemake" not in globals():
        from helper import mock_snakemake

        snakemake = mock_snakemake(
            "build_clustered_population_layouts",
            simpl="",
            clusters=48,
        )

    cutout = atlite.Cutout(snakemake.config["atlite"]["cutout"])

    clustered_regions = (
        gpd.read_file(snakemake.input.regions_onshore)
        .set_index("name")
        .buffer(0)
        .squeeze()
    )

    I = cutout.indicatormatrix(clustered_regions)

    pop = {}
    for item in ["total", "urban", "rural"]:
        pop_layout = xr.open_dataarray(snakemake.input[f"pop_layout_{item}"])
        pop[item] = I.dot(pop_layout.stack(spatial=("y", "x")))

    pop = pd.DataFrame(pop, index=clustered_regions.index)
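The indicator matrix used above is worth a brief sketch: I has one row per clustered region and one column per cutout grid cell, so I.dot(...) aggregates a gridded layout to regions. A toy example with made-up overlap fractions (not atlite output):

    import numpy as np

    # 2 regions x 4 grid cells; entries are the share of each cell inside a region
    I = np.array([[1.0, 0.5, 0.0, 0.0],
                  [0.0, 0.5, 1.0, 1.0]])
    pop_cells = np.array([10.0, 20.0, 30.0, 40.0])  # population per grid cell

    print(I.dot(pop_cells))  # [20. 80.] -> population aggregated per region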

View File

@@ -1,39 +1,41 @@
# -*- coding: utf-8 -*-
"""
Build COP time series for air- or ground-sourced heat pumps.
"""
import xarray as xr


def coefficient_of_performance(delta_T, source="air"):
    """
    COP is a function of the temperature difference from source to sink.

    The quadratic regression is based on Staffell et al. (2012)
    https://doi.org/10.1039/C2EE22653G.
    """
    if source == "air":
        return 6.81 - 0.121 * delta_T + 0.000630 * delta_T**2
    elif source == "soil":
        return 8.77 - 0.150 * delta_T + 0.000734 * delta_T**2
    else:
        raise NotImplementedError("'source' must be one of ['air', 'soil']")


if __name__ == "__main__":
    if "snakemake" not in globals():
        from helper import mock_snakemake

        snakemake = mock_snakemake(
            "build_cop_profiles",
            simpl="",
            clusters=48,
        )

    for area in ["total", "urban", "rural"]:
        for source in ["air", "soil"]:
            source_T = xr.open_dataarray(snakemake.input[f"temp_{source}_{area}"])
            delta_T = snakemake.config["sector"]["heat_pump_sink_T"] - source_T

            cop = coefficient_of_performance(delta_T, source)
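As a plausibility check on the regression in coefficient_of_performance: for an air-sourced heat pump at a temperature difference of 30 K, the quadratic gives a COP of roughly 3.75. A sketch evaluating the same formula:

    delta_T = 30.0  # K, example value
    cop_air = 6.81 - 0.121 * delta_T + 0.000630 * delta_T**2
    print(round(cop_air, 2))  # 3.75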

View File

@@ -1,25 +1,31 @@
# -*- coding: utf-8 -*-
import logging

logger = logging.getLogger(__name__)

import multiprocessing as mp
from functools import partial

import geopandas as gpd
import numpy as np
import pandas as pd
from helper import mute_print
from tqdm import tqdm

idx = pd.IndexSlice


def cartesian(s1, s2):
    """
    Cartesian product of two pd.Series.
    """
    return pd.DataFrame(np.outer(s1, s2), index=s1.index, columns=s2.index)
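# A quick hypothetical example (not part of the script): with
# s1 = pd.Series([1, 2], index=["a", "b"]) and s2 = pd.Series([10, 20], index=["x", "y"]),
# cartesian(s1, s2) returns the outer-product DataFrame
#      x   y
# a   10  20
# b   20  40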
def reverse(dictionary):
    """
    Reverses keys and values of a dictionary.
    """
    return {v: k for k, v in dictionary.items()}
@@ -122,7 +128,7 @@ to_ipcc = {
    "total energy": "1 - Energy",
    "industrial processes": "2 - Industrial Processes and Product Use",
    "agriculture": "3 - Agriculture",
    "agriculture, forestry and fishing": "1.A.4.c - Agriculture/Forestry/Fishing",
    "LULUCF": "4 - Land Use, Land-Use Change and Forestry",
    "waste management": "5 - Waste management",
    "other": "6 - Other Sector",
@@ -131,12 +137,15 @@ to_ipcc = {
    "total woL": "Total (without LULUCF)",
}


def build_eurostat(input_eurostat, countries, report_year, year):
    """
    Return multi-index for all countries' energy data in TWh/a.
    """
    filenames = {
        2016: f"/{year}-Energy-Balances-June2016edition.xlsx",
        2017: f"/{year}-ENERGY-BALANCES-June2017edition.xlsx",
    }

    with mute_print():
@ -149,9 +158,11 @@ def build_eurostat(input_eurostat, countries, report_year, year):
    # sorted_index necessary for slicing
    lookup = eurostat_country_to_alpha2
    labelled_dfs = {
        lookup[df.columns[0]]: df
        for df in dfs.values()
        if lookup[df.columns[0]] in countries
    }
    df = pd.concat(labelled_dfs, sort=True).sort_index()

    # drop non-numeric and country columns
@ -167,11 +178,13 @@ def build_eurostat(input_eurostat, countries, report_year, year):
def build_swiss(year):
    """
    Return a pd.Series of Swiss energy data in TWh/a.
    """
    fn = snakemake.input.swiss

    df = pd.read_csv(fn, index_col=[0, 1]).loc["CH", str(year)]

    # convert PJ/a to TWh/a
    df /= 3.6
@ -180,7 +193,6 @@ def build_swiss(year):
def idees_per_country(ct, year):
    base_dir = snakemake.input.idees

    ct_totals = {}
@ -220,7 +232,7 @@ def idees_per_country(ct, year):
    assert df.index[46] == "Derived heat"
    ct_totals["derived heat residential"] = df[46]

    assert df.index[50] == "Thermal uses"
    ct_totals["thermal uses residential"] = df[50]

    # services
@ -253,10 +265,9 @@ def idees_per_country(ct, year):
    assert df.index[49] == "Derived heat"
    ct_totals["derived heat services"] = df[49]

    assert df.index[53] == "Thermal uses"
    ct_totals["thermal uses services"] = df[53]

    # agriculture, forestry and fishing
    start = "Detailed split of energy consumption (ktoe)"
@ -268,7 +279,7 @@ def idees_per_country(ct, year):
"Lighting", "Lighting",
"Ventilation", "Ventilation",
"Specific electricity uses", "Specific electricity uses",
"Pumping devices (electric)" "Pumping devices (electric)",
] ]
ct_totals["total agriculture electricity"] = df[rows].sum() ct_totals["total agriculture electricity"] = df[rows].sum()
@ -352,7 +363,7 @@ def idees_per_country(ct, year):
    assert df.index[8] == "International - Intra-EU"
    assert df.index[9] == "International - Extra-EU"
    ct_totals["total international aviation passenger"] = df[[8, 9]].sum()

    assert df.index[11] == "Domestic and International - Intra-EU"
    ct_totals["total domestic aviation freight"] = df[11]
@ -360,11 +371,15 @@ def idees_per_country(ct, year):
    assert df.index[12] == "International - Extra-EU"
    ct_totals["total international aviation freight"] = df[12]

    ct_totals["total domestic aviation"] = (
        ct_totals["total domestic aviation freight"]
        + ct_totals["total domestic aviation passenger"]
    )

    ct_totals["total international aviation"] = (
        ct_totals["total international aviation freight"]
        + ct_totals["total international aviation passenger"]
    )

    df = pd.read_excel(fn_transport, "TrNavi_ene", index_col=0)[year]
@ -380,17 +395,19 @@ def idees_per_country(ct, year):
def build_idees(countries, year):
    nprocesses = snakemake.threads
    func = partial(idees_per_country, year=year)
    tqdm_kwargs = dict(
        ascii=False,
        unit=" country",
        total=len(countries),
        desc="Build from IDEES database",
    )
    with mute_print():
        with mp.Pool(processes=nprocesses) as pool:
            totals_list = list(tqdm(pool.imap(func, countries), **tqdm_kwargs))

    totals = pd.concat(totals_list, axis=1)

    # convert ktoe to TWh
@ -401,19 +418,17 @@ def build_idees(countries, year):
totals.loc["passenger car efficiency"] *= 10 totals.loc["passenger car efficiency"] *= 10
# district heating share # district heating share
district_heat = totals.loc[["derived heat residential", district_heat = totals.loc[
"derived heat services"]].sum() ["derived heat residential", "derived heat services"]
total_heat = totals.loc[["thermal uses residential", ].sum()
"thermal uses services"]].sum() total_heat = totals.loc[["thermal uses residential", "thermal uses services"]].sum()
totals.loc["district heat share"] = district_heat.div(total_heat) totals.loc["district heat share"] = district_heat.div(total_heat)
return totals.T return totals.T
def build_energy_totals(countries, eurostat, swiss, idees): def build_energy_totals(countries, eurostat, swiss, idees):
eurostat_fuels = {"electricity": "Electricity", "total": "Total all products"}
eurostat_fuels = {"electricity": "Electricity",
"total": "Total all products"}
to_drop = ["passenger cars", "passenger car efficiency"] to_drop = ["passenger cars", "passenger car efficiency"]
df = idees.reindex(countries).drop(to_drop, axis=1) df = idees.reindex(countries).drop(to_drop, axis=1)
@ -439,36 +454,47 @@ def build_energy_totals(countries, eurostat, swiss, idees):
uses = ["space", "cooking", "water"] uses = ["space", "cooking", "water"]
for sector in ["residential", "services", "road", "rail"]: for sector in ["residential", "services", "road", "rail"]:
eurostat_sector = sector.capitalize() eurostat_sector = sector.capitalize()
# fuel use # fuel use
for fuel in ["electricity", "total"]: for fuel in ["electricity", "total"]:
slicer = idx[to_fill, :, :, eurostat_sector] slicer = idx[to_fill, :, :, eurostat_sector]
fill_values = eurostat.loc[slicer, eurostat_fuels[fuel]].groupby(level=0).sum() fill_values = (
eurostat.loc[slicer, eurostat_fuels[fuel]].groupby(level=0).sum()
)
df.loc[to_fill, f"{fuel} {sector}"] = fill_values df.loc[to_fill, f"{fuel} {sector}"] = fill_values
for sector in ["residential", "services"]: for sector in ["residential", "services"]:
# electric use # electric use
for use in uses: for use in uses:
fuel_use = df[f"electricity {sector} {use}"] fuel_use = df[f"electricity {sector} {use}"]
fuel = df[f"electricity {sector}"] fuel = df[f"electricity {sector}"]
avg = fuel_use.div(fuel).mean() avg = fuel_use.div(fuel).mean()
logger.debug(f"{sector}: average fraction of electricity for {use} is {avg:.3f}") logger.debug(
df.loc[to_fill, f"electricity {sector} {use}"] = avg * df.loc[to_fill, f"electricity {sector}"] f"{sector}: average fraction of electricity for {use} is {avg:.3f}"
)
df.loc[to_fill, f"electricity {sector} {use}"] = (
avg * df.loc[to_fill, f"electricity {sector}"]
)
# non-electric use # non-electric use
for use in uses: for use in uses:
nonelectric_use = df[f"total {sector} {use}"] - df[f"electricity {sector} {use}"] nonelectric_use = (
df[f"total {sector} {use}"] - df[f"electricity {sector} {use}"]
)
nonelectric = df[f"total {sector}"] - df[f"electricity {sector}"] nonelectric = df[f"total {sector}"] - df[f"electricity {sector}"]
avg = nonelectric_use.div(nonelectric).mean() avg = nonelectric_use.div(nonelectric).mean()
logger.debug(f"{sector}: average fraction of non-electric for {use} is {avg:.3f}") logger.debug(
f"{sector}: average fraction of non-electric for {use} is {avg:.3f}"
)
electric_use = df.loc[to_fill, f"electricity {sector} {use}"] electric_use = df.loc[to_fill, f"electricity {sector} {use}"]
nonelectric = df.loc[to_fill, f"total {sector}"] - df.loc[to_fill, f"electricity {sector}"] nonelectric = (
df.loc[to_fill, f"total {sector}"]
- df.loc[to_fill, f"electricity {sector}"]
)
df.loc[to_fill, f"total {sector} {use}"] = electric_use + avg * nonelectric df.loc[to_fill, f"total {sector} {use}"] = electric_use + avg * nonelectric
# Fix Norway space and water heating fractions # Fix Norway space and water heating fractions
@ -480,17 +506,25 @@ def build_energy_totals(countries, eurostat, swiss, idees):
    no_norway = df.drop("NO")

    for sector in ["residential", "services"]:
        # assume non-electric is heating
        nonelectric = (
            df.loc["NO", f"total {sector}"] - df.loc["NO", f"electricity {sector}"]
        )
        total_heating = nonelectric / (1 - elec_fraction)

        for use in uses:
            nonelectric_use = (
                no_norway[f"total {sector} {use}"]
                - no_norway[f"electricity {sector} {use}"]
            )
            nonelectric = (
                no_norway[f"total {sector}"] - no_norway[f"electricity {sector}"]
            )
            fraction = nonelectric_use.div(nonelectric).mean()
            df.loc["NO", f"total {sector} {use}"] = total_heating * fraction
            df.loc["NO", f"electricity {sector} {use}"] = (
                total_heating * fraction * elec_fraction
            )

    # Missing aviation
@ -517,10 +551,7 @@ def build_energy_totals(countries, eurostat, swiss, idees):
f"{fuel} light duty road freight", f"{fuel} light duty road freight",
] ]
if fuel == "total": if fuel == "total":
selection.extend([ selection.extend([f"{fuel} two-wheel", f"{fuel} heavy duty road freight"])
f"{fuel} two-wheel",
f"{fuel} heavy duty road freight"
])
road = df[selection].sum() road = df[selection].sum()
road_fraction = road / road.sum() road_fraction = road / road.sum()
fill_values = cartesian(df.loc[missing, f"{fuel} road"], road_fraction) fill_values = cartesian(df.loc[missing, f"{fuel} road"], road_fraction)
@ -544,33 +575,40 @@ def build_energy_totals(countries, eurostat, swiss, idees):
        ]
        aviation = df[selection].sum()
        aviation_fraction = aviation / aviation.sum()
        fill_values = cartesian(
            df.loc[missing, f"total {destination} aviation"], aviation_fraction
        )
        df.loc[missing, aviation_fraction.index] = fill_values

    for purpose in ["passenger", "freight"]:
        attrs = [
            f"total domestic aviation {purpose}",
            f"total international aviation {purpose}",
        ]
        df.loc[missing, f"total aviation {purpose}"] = df.loc[missing, attrs].sum(
            axis=1
        )

    if "BA" in df.index:
        # fill missing data for BA (services and road energy data)
        # proportional to RS with ratio of total residential demand
        missing = df.loc["BA"] == 0.0
        ratio = df.at["BA", "total residential"] / df.at["RS", "total residential"]
        df.loc["BA", missing] = ratio * df.loc["RS", missing]

    # Missing district heating share
    dh_share = pd.read_csv(
        snakemake.input.district_heat_share, index_col=0, usecols=[0, 1]
    )
    # make conservative assumption and take minimum from both data sets
    df["district heat share"] = pd.concat(
        [df["district heat share"], dh_share.reindex(index=df.index) / 100], axis=1
    ).min(axis=1)

    return df


def build_eea_co2(input_co2, year=1990, emissions_scope="CO2"):
    # https://www.eea.europa.eu/data-and-maps/data/national-emissions-reported-to-the-unfccc-and-to-the-eu-greenhouse-gas-monitoring-mechanism-16
    # downloaded 201228 (modified by EEA last on 201221)
    df = pd.read_csv(input_co2, encoding="latin-1", low_memory=False)
@ -589,7 +627,7 @@ def build_eea_co2(input_co2, year=1990, emissions_scope="CO2"):
        df.loc[slicer, "emissions"]
        .unstack("Sector_name")
        .rename(columns=reverse(to_ipcc))
        .droplevel([1, 2])
    )

    emissions.rename(index={"EUA": "EU28", "UK": "GB"}, inplace=True)
@ -604,13 +642,20 @@ def build_eea_co2(input_co2, year=1990, emissions_scope="CO2"):
"international aviation", "international aviation",
"domestic navigation", "domestic navigation",
"international navigation", "international navigation",
"agriculture, forestry and fishing" "agriculture, forestry and fishing",
] ]
emissions["industrial non-elec"] = emissions["total energy"] - emissions[to_subtract].sum(axis=1) emissions["industrial non-elec"] = emissions["total energy"] - emissions[
to_subtract
].sum(axis=1)
emissions["agriculture"] += emissions["agriculture, forestry and fishing"] emissions["agriculture"] += emissions["agriculture, forestry and fishing"]
to_drop = ["total energy", "total wL", "total woL", "agriculture, forestry and fishing"] to_drop = [
"total energy",
"total wL",
"total woL",
"agriculture, forestry and fishing",
]
emissions.drop(columns=to_drop, inplace=True) emissions.drop(columns=to_drop, inplace=True)
# convert from Gg to Mt # convert from Gg to Mt
@ -618,7 +663,6 @@ def build_eea_co2(input_co2, year=1990, emissions_scope="CO2"):
def build_eurostat_co2(input_eurostat, countries, report_year, year=1990):
    eurostat = build_eurostat(input_eurostat, countries, report_year, year)

    specific_emissions = pd.Series(index=eurostat.columns, dtype=float)
@ -637,13 +681,16 @@ def build_eurostat_co2(input_eurostat, countries, report_year, year=1990):
def build_co2_totals(countries, eea_co2, eurostat_co2):
    co2 = eea_co2.reindex(countries)

    for ct in countries.intersection(["BA", "RS", "AL", "ME", "MK"]):
        mappings = {
            "electricity": (
                ct,
                "+",
                "Conventional Thermal Power Stations",
                "of which From Coal",
            ),
            "residential non-elec": (ct, "+", "+", "Residential"),
            "services non-elec": (ct, "+", "+", "Services"),
            "road non-elec": (ct, "+", "+", "Road"),
@ -655,7 +702,8 @@ def build_co2_totals(countries, eea_co2, eurostat_co2):
            # does not include industrial process emissions or fuel processing/refining
            "industrial non-elec": (ct, "+", "Industry"),
            # does not include non-energy emissions
            "agriculture": (eurostat_co2.index.get_level_values(0) == ct)
            & eurostat_co2.index.isin(["Agriculture / Forestry", "Fishing"], level=3),
        }

        for i, mi in mappings.items():
@ -665,7 +713,6 @@ def build_co2_totals(countries, eea_co2, eurostat_co2):
def build_transport_data(countries, population, idees):
    transport_data = pd.DataFrame(index=countries)

    # collect number of cars
@ -676,7 +723,9 @@ def build_transport_data(countries, population, idees):
transport_data.at["CH", "number cars"] = 4.136e6 transport_data.at["CH", "number cars"] = 4.136e6
missing = transport_data.index[transport_data["number cars"].isna()] missing = transport_data.index[transport_data["number cars"].isna()]
logger.info(f"Missing data on cars from:\n{list(missing)}\nFilling gaps with averaged data.") logger.info(
f"Missing data on cars from:\n{list(missing)}\nFilling gaps with averaged data."
)
cars_pp = transport_data["number cars"] / population cars_pp = transport_data["number cars"] / population
transport_data.loc[missing, "number cars"] = cars_pp.mean() * population transport_data.loc[missing, "number cars"] = cars_pp.mean() * population
@ -686,7 +735,9 @@ def build_transport_data(countries, population, idees):
transport_data["average fuel efficiency"] = idees["passenger car efficiency"] transport_data["average fuel efficiency"] = idees["passenger car efficiency"]
missing = transport_data.index[transport_data["average fuel efficiency"].isna()] missing = transport_data.index[transport_data["average fuel efficiency"].isna()]
logger.info(f"Missing data on fuel efficiency from:\n{list(missing)}\nFilling gapswith averaged data.") logger.info(
f"Missing data on fuel efficiency from:\n{list(missing)}\nFilling gapswith averaged data."
)
fill_values = transport_data["average fuel efficiency"].mean() fill_values = transport_data["average fuel efficiency"].mean()
transport_data.loc[missing, "average fuel efficiency"] = fill_values transport_data.loc[missing, "average fuel efficiency"] = fill_values
@ -695,11 +746,12 @@ def build_transport_data(countries, population, idees):
if __name__ == "__main__":
    if "snakemake" not in globals():
        from helper import mock_snakemake

        snakemake = mock_snakemake("build_energy_totals")
    logging.basicConfig(level=snakemake.config["logging_level"])

    config = snakemake.config["energy"]
@ -722,7 +774,9 @@ if __name__ == "__main__":
    base_year_emissions = config["base_emissions_year"]
    emissions_scope = snakemake.config["energy"]["emissions"]
    eea_co2 = build_eea_co2(snakemake.input.co2, base_year_emissions, emissions_scope)
    eurostat_co2 = build_eurostat_co2(
        input_eurostat, countries, report_year, base_year_emissions
    )

    co2 = build_co2_totals(countries, eea_co2, eurostat_co2)
    co2.to_csv(snakemake.output.co2_name)

View File

@ -1,15 +1,17 @@
# -*- coding: utf-8 -*-
""" """
Build import locations for fossil gas from entry-points, LNG terminals and production sites. Build import locations for fossil gas from entry-points, LNG terminals and
production sites.
""" """
import logging import logging
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
import pandas as pd
import geopandas as gpd import geopandas as gpd
from shapely import wkt import pandas as pd
from cluster_gas_network import load_bus_regions from cluster_gas_network import load_bus_regions
from shapely import wkt
def read_scigrid_gas(fn): def read_scigrid_gas(fn):
@ -20,24 +22,25 @@ def read_scigrid_gas(fn):
def build_gem_lng_data(lng_fn):
    df = pd.read_excel(lng_fn[0], sheet_name="LNG terminals - data")
    df = df.set_index("ComboID")

    remove_status = ["Cancelled"]
    remove_country = ["Cyprus", "Turkey"]
    remove_terminal = ["Puerto de la Luz LNG Terminal", "Gran Canaria LNG Terminal"]

    df = df.query(
        "Status != 'Cancelled' \
              & Country != @remove_country \
              & TerminalName != @remove_terminal \
              & CapacityInMtpa != '--'"
    )

    geometry = gpd.points_from_xy(df["Longitude"], df["Latitude"])
    return gpd.GeoDataFrame(df, geometry=geometry, crs="EPSG:4326")


def build_gas_input_locations(lng_fn, entry_fn, prod_fn, countries):
    # LNG terminals
    lng = build_gem_lng_data(lng_fn)
@ -45,21 +48,19 @@ def build_gas_input_locations(lng_fn, entry_fn, prod_fn, countries):
    entry = read_scigrid_gas(entry_fn)
    entry["from_country"] = entry.from_country.str.rstrip()
    entry = entry.loc[
        ~(entry.from_country.isin(countries) & entry.to_country.isin(countries))  # only take non-EU entries
        & ~entry.name.str.contains("Tegelen")  # malformed datapoint
        | (entry.from_country == "NO")  # entries from NO to GB
    ]

    # production sites inside the model scope
    prod = read_scigrid_gas(prod_fn)
    prod = prod.loc[
        (prod.geometry.y > 35) & (prod.geometry.x < 30) & (prod.country_code != "DE")
    ]

    mcm_per_day_to_mw = 437.5  # MCM/day to MWh/h
    mtpa_to_mw = 1649.224  # mtpa to MWh/h
    lng["p_nom"] = lng["CapacityInMtpa"] * mtpa_to_mw
    entry["p_nom"] = entry["max_cap_from_to_M_m3_per_d"] * mcm_per_day_to_mw
    prod["p_nom"] = prod["max_supply_M_m3_per_d"] * mcm_per_day_to_mw
@ -74,28 +75,29 @@ def build_gas_input_locations(lng_fn, entry_fn, prod_fn, countries):
if __name__ == "__main__":
    if "snakemake" not in globals():
        from helper import mock_snakemake

        snakemake = mock_snakemake(
            "build_gas_input_locations",
            simpl="",
            clusters="37",
        )

    logging.basicConfig(level=snakemake.config["logging_level"])

    regions = load_bus_regions(
        snakemake.input.regions_onshore, snakemake.input.regions_offshore
    )

    # add a buffer to eastern countries because some
    # entry points are still in Russian or Ukrainian territory.
    buffer = 9000  # meters
    eastern_countries = ["FI", "EE", "LT", "LV", "PL", "SK", "HU", "RO"]
    add_buffer_b = regions.index.str[:2].isin(eastern_countries)
    regions.loc[add_buffer_b] = (
        regions[add_buffer_b].to_crs(3035).buffer(buffer).to_crs(4326)
    )

    countries = regions.index.str[:2].unique().str.replace("GB", "UK")
@ -103,16 +105,18 @@ if __name__ == "__main__":
        snakemake.input.lng,
        snakemake.input.entry,
        snakemake.input.production,
        countries,
    )

    gas_input_nodes = gpd.sjoin(gas_input_locations, regions, how="left")

    gas_input_nodes.rename(columns={"index_right": "bus"}, inplace=True)

    gas_input_nodes.to_file(snakemake.output.gas_input_nodes, driver="GeoJSON")

    gas_input_nodes_s = (
        gas_input_nodes.groupby(["bus", "type"])["p_nom"].sum().unstack()
    )
    gas_input_nodes_s.columns.name = "p_nom"

    gas_input_nodes_s.to_csv(snakemake.output.gas_input_nodes_simplified)

View File

@ -1,16 +1,22 @@
"""Preprocess gas network based on data from bthe SciGRID Gas project (https://www.gas.scigrid.de/).""" # -*- coding: utf-8 -*-
"""
Preprocess gas network based on data from bthe SciGRID Gas project
(https://www.gas.scigrid.de/).
"""
import logging import logging
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
import pandas as pd
import geopandas as gpd import geopandas as gpd
from shapely.geometry import Point import pandas as pd
from pypsa.geo import haversine_pts from pypsa.geo import haversine_pts
from shapely.geometry import Point
def diameter_to_capacity(pipe_diameter_mm): def diameter_to_capacity(pipe_diameter_mm):
"""Calculate pipe capacity in MW based on diameter in mm. """
Calculate pipe capacity in MW based on diameter in mm.
20 inch (500 mm) 50 bar -> 1.5 GW CH4 pipe capacity (LHV) 20 inch (500 mm) 50 bar -> 1.5 GW CH4 pipe capacity (LHV)
24 inch (600 mm) 50 bar -> 5 GW CH4 pipe capacity (LHV) 24 inch (600 mm) 50 bar -> 5 GW CH4 pipe capacity (LHV)
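# A minimal sketch of a diameter-to-capacity mapping consistent with the two
# calibration points quoted above, assuming linear interpolation between them
# (hypothetical, not the actual function body):
#     m = (5000 - 1500) / (600 - 500)           # MW gained per mm of diameter
#     capacity_mw = 1500 + m * (pipe_diameter_mm - 500)
# e.g. a 550 mm pipe would map to about 3.25 GW.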
@ -59,22 +65,31 @@ def prepare_dataset(
    length_factor=1.5,
    correction_threshold_length=4,
    correction_threshold_p_nom=8,
    bidirectional_below=10,
):
    # extract start and end from LineString
    df["point0"] = df.geometry.apply(lambda x: Point(x.coords[0]))
    df["point1"] = df.geometry.apply(lambda x: Point(x.coords[-1]))

    conversion_factor = 437.5  # MCM/day to MWh/h
    df["p_nom"] = df.max_cap_M_m3_per_d * conversion_factor

    # for inferred diameters, assume 500 mm rather than 900 mm (more conservative)
    df.loc[df.diameter_mm_method != "raw", "diameter_mm"] = 500.0

    keep = [
        "name",
        "diameter_mm",
        "is_H_gas",
        "is_bothDirection",
        "length_km",
        "p_nom",
        "max_pressure_bar",
        "start_year",
        "point0",
        "point1",
        "geometry",
    ]
    to_rename = {
        "is_bothDirection": "bidirectional",
        "is_H_gas": "H_gas",
@ -96,40 +111,43 @@ def prepare_dataset(
df["p_nom_diameter"] = df.diameter_mm.apply(diameter_to_capacity) df["p_nom_diameter"] = df.diameter_mm.apply(diameter_to_capacity)
ratio = df.p_nom / df.p_nom_diameter ratio = df.p_nom / df.p_nom_diameter
not_nordstream = df.max_pressure_bar < 220 not_nordstream = df.max_pressure_bar < 220
df.p_nom.update(df.p_nom_diameter.where( df.p_nom.update(
(df.p_nom <= 500) | df.p_nom_diameter.where(
((ratio > correction_threshold_p_nom) & not_nordstream) | (df.p_nom <= 500)
((ratio < 1 / correction_threshold_p_nom) & not_nordstream) | ((ratio > correction_threshold_p_nom) & not_nordstream)
)) | ((ratio < 1 / correction_threshold_p_nom) & not_nordstream)
)
)
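    # Note: Series.update() only overwrites entries where the passed series is
    # non-NaN, and .where(cond) turns entries into NaN wherever cond is False,
    # so p_nom is replaced by the diameter-based estimate only for flagged pipes.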
    # lines which have way too discrepant line lengths
    # get assigned haversine length * length factor
    df["length_haversine"] = df.apply(
        lambda p: length_factor
        * haversine_pts([p.point0.x, p.point0.y], [p.point1.x, p.point1.y]),
        axis=1,
    )
    ratio = df.eval("length / length_haversine")
    df["length"].update(
        df.length_haversine.where(
            (df["length"] < 20)
            | (ratio > correction_threshold_length)
            | (ratio < 1 / correction_threshold_length)
        )
    )

    return df


if __name__ == "__main__":
    if "snakemake" not in globals():
        from helper import mock_snakemake

        snakemake = mock_snakemake("build_gas_network")

    logging.basicConfig(level=snakemake.config["logging_level"])

    gas_network = load_dataset(snakemake.input.gas_network)

    gas_network = prepare_dataset(gas_network)

    gas_network.to_csv(snakemake.output.cleaned_gas_network)

View File

@ -1,18 +1,22 @@
"""Build heat demand time series.""" # -*- coding: utf-8 -*-
"""
Build heat demand time series.
"""
import geopandas as gpd
import atlite import atlite
import geopandas as gpd
import numpy as np
import pandas as pd import pandas as pd
import xarray as xr import xarray as xr
import numpy as np
from dask.distributed import Client, LocalCluster from dask.distributed import Client, LocalCluster
if __name__ == '__main__': if __name__ == "__main__":
if 'snakemake' not in globals(): if "snakemake" not in globals():
from helper import mock_snakemake from helper import mock_snakemake
snakemake = mock_snakemake( snakemake = mock_snakemake(
'build_heat_demands', "build_heat_demands",
simpl='', simpl="",
clusters=48, clusters=48,
) )
@ -20,23 +24,29 @@ if __name__ == '__main__':
    cluster = LocalCluster(n_workers=nprocesses, threads_per_worker=1)
    client = Client(cluster, asynchronous=True)

    time = pd.date_range(freq="h", **snakemake.config["snapshots"])
    cutout_config = snakemake.config["atlite"]["cutout"]
    cutout = atlite.Cutout(cutout_config).sel(time=time)

    clustered_regions = (
        gpd.read_file(snakemake.input.regions_onshore)
        .set_index("name")
        .buffer(0)
        .squeeze()
    )

    I = cutout.indicatormatrix(clustered_regions)

    pop_layout = xr.open_dataarray(snakemake.input.pop_layout)

    stacked_pop = pop_layout.stack(spatial=("y", "x"))
    M = I.T.dot(np.diag(I.dot(stacked_pop)))

    heat_demand = cutout.heat_demand(
        matrix=M.T,
        index=clustered_regions.index,
        dask_kwargs=dict(scheduler=client),
        show_progress=False,
    )

    heat_demand.to_netcdf(snakemake.output.heat_demand)

View File

@ -1,40 +1,47 @@
"""Build industrial distribution keys from hotmaps database.""" # -*- coding: utf-8 -*-
"""
Build industrial distribution keys from hotmaps database.
"""
import logging import logging
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
import uuid import uuid
import pandas as pd
import geopandas as gpd
from itertools import product from itertools import product
import geopandas as gpd
import pandas as pd
from packaging.version import Version, parse from packaging.version import Version, parse
def locate_missing_industrial_sites(df): def locate_missing_industrial_sites(df):
""" """
Locate industrial sites without valid locations based on Locate industrial sites without valid locations based on city and
city and countries. Should only be used if the model's countries.
spatial resolution is coarser than individual cities.
Should only be used if the model's spatial resolution is coarser
than individual cities.
""" """
try: try:
from geopy.geocoders import Nominatim
from geopy.extra.rate_limiter import RateLimiter from geopy.extra.rate_limiter import RateLimiter
from geopy.geocoders import Nominatim
except: except:
raise ModuleNotFoundError("Optional dependency 'geopy' not found." raise ModuleNotFoundError(
"Install via 'conda install -c conda-forge geopy'" "Optional dependency 'geopy' not found."
"or set 'industry: hotmaps_locate_missing: false'.") "Install via 'conda install -c conda-forge geopy'"
"or set 'industry: hotmaps_locate_missing: false'."
)
locator = Nominatim(user_agent=str(uuid.uuid4())) locator = Nominatim(user_agent=str(uuid.uuid4()))
geocode = RateLimiter(locator.geocode, min_delay_seconds=2) geocode = RateLimiter(locator.geocode, min_delay_seconds=2)
def locate_missing(s): def locate_missing(s):
if pd.isna(s.City) or s.City == "CONFIDENTIAL": if pd.isna(s.City) or s.City == "CONFIDENTIAL":
return None return None
loc = geocode([s.City, s.Country], geometry='wkt') loc = geocode([s.City, s.Country], geometry="wkt")
if loc is not None: if loc is not None:
logger.debug(f"Found:\t{loc}\nFor:\t{s['City']}, {s['Country']}\n") logger.debug(f"Found:\t{loc}\nFor:\t{s['City']}, {s['Country']}\n")
return f"POINT({loc.longitude} {loc.latitude})" return f"POINT({loc.longitude} {loc.latitude})"
@ -42,14 +49,16 @@ def locate_missing_industrial_sites(df):
        return None

    missing = df.index[df.geom.isna()]
    df.loc[missing, "coordinates"] = df.loc[missing].apply(locate_missing, axis=1)

    # report stats
    num_still_missing = df.coordinates.isna().sum()
    num_found = len(missing) - num_still_missing
    share_missing = len(missing) / len(df) * 100
    share_still_missing = num_still_missing / len(df) * 100

    logger.warning(
        f"Found {num_found} missing locations. \nShare of missing locations reduced from {share_missing:.2f}% to {share_still_missing:.2f}%."
    )

    return df
@ -61,19 +70,23 @@ def prepare_hotmaps_database(regions):
    df = pd.read_csv(snakemake.input.hotmaps_industrial_database, sep=";", index_col=0)
    df[["srid", "coordinates"]] = df.geom.str.split(";", expand=True)

    if snakemake.config["industry"].get("hotmaps_locate_missing", False):
        df = locate_missing_industrial_sites(df)

    # remove those sites without valid locations
    df.drop(df.index[df.coordinates.isna()], inplace=True)

    df["coordinates"] = gpd.GeoSeries.from_wkt(df["coordinates"])

    gdf = gpd.GeoDataFrame(df, geometry="coordinates", crs="EPSG:4326")

    kws = (
        dict(op="within")
        if parse(gpd.__version__) < Version("0.10")
        else dict(predicate="within")
    )
    gdf = gpd.sjoin(gdf, regions, how="inner", **kws)

    gdf.rename(columns={"index_right": "bus"}, inplace=True)
@ -83,7 +96,9 @@ def prepare_hotmaps_database(regions):
def build_nodal_distribution_key(hotmaps, regions):
    """
    Build nodal distribution keys for each sector.
    """
    sectors = hotmaps.Subsector.unique()

    countries = regions.index.str[:2].unique()
@ -91,12 +106,11 @@ def build_nodal_distribution_key(hotmaps, regions):
    keys = pd.DataFrame(index=regions.index, columns=sectors, dtype=float)

    pop = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0)
    pop["country"] = pop.index.str[:2]
    ct_total = pop.total.groupby(pop["country"]).sum()
    keys["population"] = pop.total / pop.country.map(ct_total)

    for sector, country in product(sectors, countries):
        regions_ct = regions.index[regions.index.str.contains(country)]

        facilities = hotmaps.query("country == @country and Subsector == @sector")
@ -106,12 +120,12 @@ def build_nodal_distribution_key(hotmaps, regions):
            if emissions.sum() == 0:
                key = pd.Series(1 / len(facilities), facilities.index)
            else:
                # BEWARE: this is a strong assumption
                emissions = emissions.fillna(emissions.mean())
                key = emissions / emissions.sum()
            key = key.groupby(facilities.bus).sum().reindex(regions_ct, fill_value=0.0)
        else:
            key = keys.loc[regions_ct, "population"]

        keys.loc[regions_ct, sector] = key
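        # Illustration (hypothetical numbers): two facilities in a country with
        # emissions of 30 and 70 kt at buses A and B give key = {A: 0.3, B: 0.7};
        # without emissions data each of n facilities gets 1/n, and sectors
        # with no facility at all fall back to the population key.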
@ -119,17 +133,18 @@ def build_nodal_distribution_key(hotmaps, regions):
if __name__ == "__main__":
    if "snakemake" not in globals():
        from helper import mock_snakemake

        snakemake = mock_snakemake(
            "build_industrial_distribution_key",
            simpl="",
            clusters=48,
        )

    logging.basicConfig(level=snakemake.config["logging_level"])

    regions = gpd.read_file(snakemake.input.regions_onshore).set_index("name")

    hotmaps = prepare_hotmaps_database(regions)

View File

@ -1,84 +1,116 @@
"""Build industrial energy demand per country.""" # -*- coding: utf-8 -*-
"""
Build industrial energy demand per country.
"""
import multiprocessing as mp
import pandas as pd import pandas as pd
import multiprocessing as mp
from tqdm import tqdm from tqdm import tqdm
ktoe_to_twh = 0.011630 ktoe_to_twh = 0.011630
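# (For reference: 1 toe = 11.63 MWh, so 1 ktoe = 11,630 MWh = 0.01163 TWh.)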
# name in JRC-IDEES Energy Balances
sector_sheets = {
    "Integrated steelworks": "cisb",
    "Electric arc": "cise",
    "Alumina production": "cnfa",
    "Aluminium - primary production": "cnfp",
    "Aluminium - secondary production": "cnfs",
    "Other non-ferrous metals": "cnfo",
    "Basic chemicals": "cbch",
    "Other chemicals": "coch",
    "Pharmaceutical products etc.": "cpha",
    "Basic chemicals feedstock": "cpch",
    "Cement": "ccem",
    "Ceramics & other NMM": "ccer",
    "Glass production": "cgla",
    "Pulp production": "cpul",
    "Paper production": "cpap",
    "Printing and media reproduction": "cprp",
    "Food, beverages and tobacco": "cfbt",
    "Transport Equipment": "ctre",
    "Machinery Equipment": "cmae",
    "Textiles and leather": "ctel",
    "Wood and wood products": "cwwp",
    "Mining and quarrying": "cmiq",
    "Construction": "ccon",
    "Non-specified": "cnsi",
}


fuels = {
    "All Products": "all",
    "Solid Fuels": "solid",
    "Total petroleum products (without biofuels)": "liquid",
    "Gases": "gas",
    "Nuclear heat": "heat",
    "Derived heat": "heat",
    "Biomass and Renewable wastes": "biomass",
    "Wastes (non-renewable)": "waste",
    "Electricity": "electricity",
}
eu28 = [
    "FR",
    "DE",
    "GB",
    "IT",
    "ES",
    "PL",
    "SE",
    "NL",
    "BE",
    "FI",
    "DK",
    "PT",
    "RO",
    "AT",
    "BG",
    "EE",
    "GR",
    "LV",
    "CZ",
    "HU",
    "IE",
    "SK",
    "LT",
    "HR",
    "LU",
    "SI",
    "CY",
    "MT",
]

jrc_names = {"GR": "EL", "GB": "UK"}
def industrial_energy_demand_per_country(country):
    jrc_dir = snakemake.input.jrc
    jrc_country = jrc_names.get(country, country)

    fn = f"{jrc_dir}/JRC-IDEES-2015_EnergyBalance_{jrc_country}.xlsx"

    sheets = list(sector_sheets.values())
    df_dict = pd.read_excel(fn, sheet_name=sheets, index_col=0)

    def get_subsector_data(sheet):
        df = df_dict[sheet][year].groupby(fuels).sum()

        df["ammonia"] = 0.0

        df["other"] = df["all"] - df.loc[df.index != "all"].sum()

        return df

    df = pd.concat(
        {sub: get_subsector_data(sheet) for sub, sheet in sector_sheets.items()}, axis=1
    )

    sel = ["Mining and quarrying", "Construction", "Non-specified"]
    df["Other Industrial Sectors"] = df[sel].sum(axis=1)

    df["Basic chemicals"] += df["Basic chemicals feedstock"]
    df.drop(columns=sel + ["Basic chemicals feedstock"], index="all", inplace=True)

    df *= ktoe_to_twh
@ -86,41 +118,44 @@ def industrial_energy_demand_per_country(country):
def add_ammonia_energy_demand(demand):
    # MtNH3/a
    fn = snakemake.input.ammonia_production
    ammonia = pd.read_csv(fn, index_col=0)[str(year)] / 1e3

    def get_ammonia_by_fuel(x):
        fuels = {
            "gas": config["MWh_CH4_per_tNH3_SMR"],
            "electricity": config["MWh_elec_per_tNH3_SMR"],
        }

        return pd.Series({k: x * v for k, v in fuels.items()})

    ammonia_by_fuel = ammonia.apply(get_ammonia_by_fuel).T
    ammonia_by_fuel = ammonia_by_fuel.unstack().reindex(
        index=demand.index, fill_value=0.0
    )

    ammonia = pd.DataFrame({"ammonia": ammonia * config["MWh_NH3_per_tNH3"]}).T

    demand["Ammonia"] = ammonia.unstack().reindex(index=demand.index, fill_value=0.0)

    demand["Basic chemicals (without ammonia)"] = (
        demand["Basic chemicals"] - ammonia_by_fuel
    )

    demand["Basic chemicals (without ammonia)"].clip(lower=0, inplace=True)

    demand.drop(columns="Basic chemicals", inplace=True)

    return demand
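# Worked example for get_ammonia_by_fuel (hypothetical config values): with
# MWh_CH4_per_tNH3_SMR = 10 and MWh_elec_per_tNH3_SMR = 0.7, a production of
# 1 MtNH3/a corresponds to 1e6 t * 10 MWh/t = 10 TWh/a of methane and
# 0.7 TWh/a of electricity, which is deducted from "Basic chemicals" above.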
def add_non_eu28_industrial_energy_demand(demand):
    # output in MtMaterial/a
    fn = snakemake.input.industrial_production_per_country
    production = pd.read_csv(fn, index_col=0) / 1e3

    # recombine HVC, Chlorine and Methanol to Basic chemicals (without ammonia)
    chemicals = ["HVC", "Chlorine", "Methanol"]
    production["Basic chemicals (without ammonia)"] = production[chemicals].sum(axis=1)
    production.drop(columns=chemicals, inplace=True)
@ -131,18 +166,22 @@ def add_non_eu28_industrial_energy_demand(demand):
    non_eu28 = production.index.symmetric_difference(eu28)

    demand_non_eu28 = pd.concat(
        {k: v * eu28_averages for k, v in production.loc[non_eu28].iterrows()}
    )

    return pd.concat([demand, demand_non_eu28])


def industrial_energy_demand(countries):
    nprocesses = snakemake.threads
    func = industrial_energy_demand_per_country
    tqdm_kwargs = dict(
        ascii=False,
        unit=" country",
        total=len(countries),
        desc="Build industrial energy demand",
    )
    with mp.Pool(processes=nprocesses) as pool:
        demand_l = list(tqdm(pool.imap(func, countries), **tqdm_kwargs))
@ -151,13 +190,14 @@ def industrial_energy_demand(countries):
    return demand


if __name__ == "__main__":
    if "snakemake" not in globals():
        from helper import mock_snakemake

        snakemake = mock_snakemake("build_industrial_energy_demand_per_country_today")

    config = snakemake.config["industry"]
    year = config.get("reference_year", 2015)

    demand = industrial_energy_demand(eu28)
@ -166,10 +206,10 @@ if __name__ == '__main__':
    demand = add_non_eu28_industrial_energy_demand(demand)

    # for format compatibility
    demand = demand.stack(dropna=False).unstack(level=[0, 2])

    # style and annotation
    demand.index.name = "TWh/a"
    demand.sort_index(axis=1, inplace=True)

    fn = snakemake.output.industrial_energy_demand_per_country_today

View File

@ -1,17 +1,21 @@
"""Build industrial energy demand per node.""" # -*- coding: utf-8 -*-
"""
Build industrial energy demand per node.
"""
import pandas as pd import pandas as pd
if __name__ == '__main__': if __name__ == "__main__":
if 'snakemake' not in globals(): if "snakemake" not in globals():
from helper import mock_snakemake from helper import mock_snakemake
snakemake = mock_snakemake( snakemake = mock_snakemake(
'build_industrial_energy_demand_per_node', "build_industrial_energy_demand_per_node",
simpl='', simpl="",
clusters=48, clusters=48,
planning_horizons=2030, planning_horizons=2030,
) )
# import EU ratios df as csv # import EU ratios df as csv
fn = snakemake.input.industry_sector_ratios fn = snakemake.input.industry_sector_ratios
industry_sector_ratios = pd.read_csv(fn, index_col=0) industry_sector_ratios = pd.read_csv(fn, index_col=0)
@ -26,14 +30,14 @@ if __name__ == '__main__':
    # final energy consumption per node and industry (TWh/a)
    nodal_df = nodal_production.dot(industry_sector_ratios.T)
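    # Shape check (assumed data layout): nodal_production is (node x sector)
    # and industry_sector_ratios is (energy carrier x sector), so the product
    # nodal_production.dot(industry_sector_ratios.T) is (node x carrier).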
    # convert GWh to TWh and ktCO2 to MtCO2
    nodal_df *= 0.001

    rename_sectors = {
        "elec": "electricity",
        "biomass": "solid biomass",
        "heat": "low-temperature heat",
    }
    nodal_df.rename(columns=rename_sectors, inplace=True)
@ -42,4 +46,4 @@ if __name__ == '__main__':
    nodal_df.index.name = "TWh/a (MtCO2/a)"

    fn = snakemake.output.industrial_energy_demand_per_node
    nodal_df.to_csv(fn, float_format="%.2f")

View File

@ -1,33 +1,36 @@
"""Build industrial energy demand per node.""" # -*- coding: utf-8 -*-
"""
Build industrial energy demand per node.
"""
import pandas as pd
import numpy as np
from itertools import product from itertools import product
import numpy as np
import pandas as pd
# map JRC/our sectors to hotmaps sector, where mapping exist # map JRC/our sectors to hotmaps sector, where mapping exist
sector_mapping = { sector_mapping = {
'Electric arc': 'Iron and steel', "Electric arc": "Iron and steel",
'Integrated steelworks': 'Iron and steel', "Integrated steelworks": "Iron and steel",
'DRI + Electric arc': 'Iron and steel', "DRI + Electric arc": "Iron and steel",
'Ammonia': 'Chemical industry', "Ammonia": "Chemical industry",
'Basic chemicals (without ammonia)': 'Chemical industry', "Basic chemicals (without ammonia)": "Chemical industry",
'Other chemicals': 'Chemical industry', "Other chemicals": "Chemical industry",
'Pharmaceutical products etc.': 'Chemical industry', "Pharmaceutical products etc.": "Chemical industry",
'Cement': 'Cement', "Cement": "Cement",
'Ceramics & other NMM': 'Non-metallic mineral products', "Ceramics & other NMM": "Non-metallic mineral products",
'Glass production': 'Glass', "Glass production": "Glass",
'Pulp production': 'Paper and printing', "Pulp production": "Paper and printing",
'Paper production': 'Paper and printing', "Paper production": "Paper and printing",
'Printing and media reproduction': 'Paper and printing', "Printing and media reproduction": "Paper and printing",
'Alumina production': 'Non-ferrous metals', "Alumina production": "Non-ferrous metals",
'Aluminium - primary production': 'Non-ferrous metals', "Aluminium - primary production": "Non-ferrous metals",
'Aluminium - secondary production': 'Non-ferrous metals', "Aluminium - secondary production": "Non-ferrous metals",
'Other non-ferrous metals': 'Non-ferrous metals', "Other non-ferrous metals": "Non-ferrous metals",
} }
def build_nodal_industrial_energy_demand(): def build_nodal_industrial_energy_demand():
fn = snakemake.input.industrial_energy_demand_per_country_today fn = snakemake.input.industrial_energy_demand_per_country_today
industrial_demand = pd.read_csv(fn, header=[0, 1], index_col=0) industrial_demand = pd.read_csv(fn, header=[0, 1], index_col=0)
@ -35,24 +38,23 @@ def build_nodal_industrial_energy_demand():
    keys = pd.read_csv(fn, index_col=0)
    keys["country"] = keys.index.str[:2]

    nodal_demand = pd.DataFrame(
        0.0, dtype=float, index=keys.index, columns=industrial_demand.index
    )

    countries = keys.country.unique()
    sectors = industrial_demand.columns.levels[1]

    for country, sector in product(countries, sectors):
        buses = keys.index[keys.country == country]
        mapping = sector_mapping.get(sector, "population")

        key = keys.loc[buses, mapping]
        demand = industrial_demand[country, sector]
        outer = pd.DataFrame(
            np.outer(key, demand), index=key.index, columns=demand.index
        )
        nodal_demand.loc[buses] += outer
@ -62,11 +64,12 @@ def build_nodal_industrial_energy_demand():
if __name__ == "__main__": if __name__ == "__main__":
if 'snakemake' not in globals(): if "snakemake" not in globals():
from helper import mock_snakemake from helper import mock_snakemake
snakemake = mock_snakemake( snakemake = mock_snakemake(
'build_industrial_energy_demand_per_node_today', "build_industrial_energy_demand_per_node_today",
simpl='', simpl="",
clusters=48, clusters=48,
) )
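For intuition, the np.outer call in this script spreads a single (country, sector) demand series over that country's buses according to a distribution key. A minimal sketch with invented numbers (the key sums to one):

import numpy as np
import pandas as pd

key = pd.Series([0.7, 0.3], index=["DE0 0", "DE0 1"])  # hypothetical bus shares
demand = pd.Series([10.0, 4.0], index=["electricity", "heat"])  # TWh/a, invented

outer = pd.DataFrame(np.outer(key, demand), index=key.index, columns=demand.index)
# every bus receives its share of each carrier; column sums reproduce `demand`
assert np.allclose(outer.sum(), demand)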

scripts/build_industrial_production_per_country.py View File

@@ -1,132 +1,204 @@
"""Build industrial production per country.""" # -*- coding: utf-8 -*-
"""
Build industrial production per country.
"""
import logging import logging
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
import pandas as pd
import numpy as np
import multiprocessing as mp import multiprocessing as mp
from tqdm import tqdm
import numpy as np
import pandas as pd
from helper import mute_print from helper import mute_print
from tqdm import tqdm
tj_to_ktoe = 0.0238845 tj_to_ktoe = 0.0238845
ktoe_to_twh = 0.01163 ktoe_to_twh = 0.01163
sub_sheet_name_dict = {'Iron and steel': 'ISI', sub_sheet_name_dict = {
'Chemicals Industry': 'CHI', "Iron and steel": "ISI",
'Non-metallic mineral products': 'NMM', "Chemicals Industry": "CHI",
'Pulp, paper and printing': 'PPA', "Non-metallic mineral products": "NMM",
'Food, beverages and tobacco': 'FBT', "Pulp, paper and printing": "PPA",
'Non Ferrous Metals': 'NFM', "Food, beverages and tobacco": "FBT",
'Transport Equipment': 'TRE', "Non Ferrous Metals": "NFM",
'Machinery Equipment': 'MAE', "Transport Equipment": "TRE",
'Textiles and leather': 'TEL', "Machinery Equipment": "MAE",
'Wood and wood products': 'WWP', "Textiles and leather": "TEL",
'Other Industrial Sectors': 'OIS'} "Wood and wood products": "WWP",
"Other Industrial Sectors": "OIS",
}
non_EU = ['NO', 'CH', 'ME', 'MK', 'RS', 'BA', 'AL'] non_EU = ["NO", "CH", "ME", "MK", "RS", "BA", "AL"]
jrc_names = {"GR": "EL", "GB": "UK"} jrc_names = {"GR": "EL", "GB": "UK"}
eu28 = ['FR', 'DE', 'GB', 'IT', 'ES', 'PL', 'SE', 'NL', 'BE', 'FI', eu28 = [
'DK', 'PT', 'RO', 'AT', 'BG', 'EE', 'GR', 'LV', 'CZ', "FR",
'HU', 'IE', 'SK', 'LT', 'HR', 'LU', 'SI', 'CY', 'MT'] "DE",
"GB",
"IT",
"ES",
"PL",
"SE",
"NL",
"BE",
"FI",
"DK",
"PT",
"RO",
"AT",
"BG",
"EE",
"GR",
"LV",
"CZ",
"HU",
"IE",
"SK",
"LT",
"HR",
"LU",
"SI",
"CY",
"MT",
]
sect2sub = {'Iron and steel': ['Electric arc', 'Integrated steelworks'], sect2sub = {
'Chemicals Industry': ['Basic chemicals', 'Other chemicals', 'Pharmaceutical products etc.'], "Iron and steel": ["Electric arc", "Integrated steelworks"],
'Non-metallic mineral products': ['Cement', 'Ceramics & other NMM', 'Glass production'], "Chemicals Industry": [
'Pulp, paper and printing': ['Pulp production', 'Paper production', 'Printing and media reproduction'], "Basic chemicals",
'Food, beverages and tobacco': ['Food, beverages and tobacco'], "Other chemicals",
'Non Ferrous Metals': ['Alumina production', 'Aluminium - primary production', 'Aluminium - secondary production', 'Other non-ferrous metals'], "Pharmaceutical products etc.",
'Transport Equipment': ['Transport Equipment'], ],
'Machinery Equipment': ['Machinery Equipment'], "Non-metallic mineral products": [
'Textiles and leather': ['Textiles and leather'], "Cement",
'Wood and wood products': ['Wood and wood products'], "Ceramics & other NMM",
'Other Industrial Sectors': ['Other Industrial Sectors']} "Glass production",
],
"Pulp, paper and printing": [
"Pulp production",
"Paper production",
"Printing and media reproduction",
],
"Food, beverages and tobacco": ["Food, beverages and tobacco"],
"Non Ferrous Metals": [
"Alumina production",
"Aluminium - primary production",
"Aluminium - secondary production",
"Other non-ferrous metals",
],
"Transport Equipment": ["Transport Equipment"],
"Machinery Equipment": ["Machinery Equipment"],
"Textiles and leather": ["Textiles and leather"],
"Wood and wood products": ["Wood and wood products"],
"Other Industrial Sectors": ["Other Industrial Sectors"],
}
sub2sect = {v: k for k, vv in sect2sub.items() for v in vv} sub2sect = {v: k for k, vv in sect2sub.items() for v in vv}
fields = {'Electric arc': 'Electric arc', fields = {
'Integrated steelworks': 'Integrated steelworks', "Electric arc": "Electric arc",
'Basic chemicals': 'Basic chemicals (kt ethylene eq.)', "Integrated steelworks": "Integrated steelworks",
'Other chemicals': 'Other chemicals (kt ethylene eq.)', "Basic chemicals": "Basic chemicals (kt ethylene eq.)",
'Pharmaceutical products etc.': 'Pharmaceutical products etc. (kt ethylene eq.)', "Other chemicals": "Other chemicals (kt ethylene eq.)",
'Cement': 'Cement (kt)', "Pharmaceutical products etc.": "Pharmaceutical products etc. (kt ethylene eq.)",
'Ceramics & other NMM': 'Ceramics & other NMM (kt bricks eq.)', "Cement": "Cement (kt)",
'Glass production': 'Glass production (kt)', "Ceramics & other NMM": "Ceramics & other NMM (kt bricks eq.)",
'Pulp production': 'Pulp production (kt)', "Glass production": "Glass production (kt)",
'Paper production': 'Paper production (kt)', "Pulp production": "Pulp production (kt)",
'Printing and media reproduction': 'Printing and media reproduction (kt paper eq.)', "Paper production": "Paper production (kt)",
'Food, beverages and tobacco': 'Physical output (index)', "Printing and media reproduction": "Printing and media reproduction (kt paper eq.)",
'Alumina production': 'Alumina production (kt)', "Food, beverages and tobacco": "Physical output (index)",
'Aluminium - primary production': 'Aluminium - primary production', "Alumina production": "Alumina production (kt)",
'Aluminium - secondary production': 'Aluminium - secondary production', "Aluminium - primary production": "Aluminium - primary production",
'Other non-ferrous metals': 'Other non-ferrous metals (kt lead eq.)', "Aluminium - secondary production": "Aluminium - secondary production",
'Transport Equipment': 'Physical output (index)', "Other non-ferrous metals": "Other non-ferrous metals (kt lead eq.)",
'Machinery Equipment': 'Physical output (index)', "Transport Equipment": "Physical output (index)",
'Textiles and leather': 'Physical output (index)', "Machinery Equipment": "Physical output (index)",
'Wood and wood products': 'Physical output (index)', "Textiles and leather": "Physical output (index)",
'Other Industrial Sectors': 'Physical output (index)'} "Wood and wood products": "Physical output (index)",
"Other Industrial Sectors": "Physical output (index)",
}
eb_names = {'NO': 'Norway', 'AL': 'Albania', 'BA': 'Bosnia and Herzegovina', eb_names = {
'MK': 'FYR of Macedonia', 'GE': 'Georgia', 'IS': 'Iceland', "NO": "Norway",
'KO': 'Kosovo', 'MD': 'Moldova', 'ME': 'Montenegro', 'RS': 'Serbia', "AL": "Albania",
'UA': 'Ukraine', 'TR': 'Turkey', } "BA": "Bosnia and Herzegovina",
"MK": "FYR of Macedonia",
"GE": "Georgia",
"IS": "Iceland",
"KO": "Kosovo",
"MD": "Moldova",
"ME": "Montenegro",
"RS": "Serbia",
"UA": "Ukraine",
"TR": "Turkey",
}
eb_sectors = {'Iron & steel industry': 'Iron and steel', eb_sectors = {
'Chemical and Petrochemical industry': 'Chemicals Industry', "Iron & steel industry": "Iron and steel",
'Non-ferrous metal industry': 'Non-metallic mineral products', "Chemical and Petrochemical industry": "Chemicals Industry",
'Paper, Pulp and Print': 'Pulp, paper and printing', "Non-ferrous metal industry": "Non-metallic mineral products",
'Food and Tabacco': 'Food, beverages and tobacco', "Paper, Pulp and Print": "Pulp, paper and printing",
'Non-metallic Minerals (Glass, pottery & building mat. Industry)': 'Non Ferrous Metals', "Food and Tabacco": "Food, beverages and tobacco",
'Transport Equipment': 'Transport Equipment', "Non-metallic Minerals (Glass, pottery & building mat. Industry)": "Non Ferrous Metals",
'Machinery': 'Machinery Equipment', "Transport Equipment": "Transport Equipment",
'Textile and Leather': 'Textiles and leather', "Machinery": "Machinery Equipment",
'Wood and Wood Products': 'Wood and wood products', "Textile and Leather": "Textiles and leather",
'Non-specified (Industry)': 'Other Industrial Sectors'} "Wood and Wood Products": "Wood and wood products",
"Non-specified (Industry)": "Other Industrial Sectors",
}
# TODO: this should go in a csv in `data` # TODO: this should go in a csv in `data`
# Annual energy consumption in Switzerland by sector in 2015 (in TJ) # Annual energy consumption in Switzerland by sector in 2015 (in TJ)
# From: Energieverbrauch in der Industrie und im Dienstleistungssektor, Der Bundesrat # From: Energieverbrauch in der Industrie und im Dienstleistungssektor, Der Bundesrat
# http://www.bfe.admin.ch/themen/00526/00541/00543/index.html?lang=de&dossier_id=00775 # http://www.bfe.admin.ch/themen/00526/00541/00543/index.html?lang=de&dossier_id=00775
e_switzerland = pd.Series({'Iron and steel': 7889., e_switzerland = pd.Series(
'Chemicals Industry': 26871., {
'Non-metallic mineral products': 15513.+3820., "Iron and steel": 7889.0,
'Pulp, paper and printing': 12004., "Chemicals Industry": 26871.0,
'Food, beverages and tobacco': 17728., "Non-metallic mineral products": 15513.0 + 3820.0,
'Non Ferrous Metals': 3037., "Pulp, paper and printing": 12004.0,
'Transport Equipment': 14993., "Food, beverages and tobacco": 17728.0,
'Machinery Equipment': 4724., "Non Ferrous Metals": 3037.0,
'Textiles and leather': 1742., "Transport Equipment": 14993.0,
'Wood and wood products': 0., "Machinery Equipment": 4724.0,
'Other Industrial Sectors': 10825., "Textiles and leather": 1742.0,
'current electricity': 53760.}) "Wood and wood products": 0.0,
"Other Industrial Sectors": 10825.0,
"current electricity": 53760.0,
}
)
def find_physical_output(df): def find_physical_output(df):
start = np.where(df.index.str.contains('Physical output', na=''))[0][0] start = np.where(df.index.str.contains("Physical output", na=""))[0][0]
empty_row = np.where(df.index.isnull())[0] empty_row = np.where(df.index.isnull())[0]
end = empty_row[np.argmax(empty_row > start)] end = empty_row[np.argmax(empty_row > start)]
return slice(start, end) return slice(start, end)
def get_energy_ratio(country): def get_energy_ratio(country):
if country == "CH":
if country == 'CH':
e_country = e_switzerland * tj_to_ktoe e_country = e_switzerland * tj_to_ktoe
else: else:
# estimate physical output, energy consumption in the sector and country # estimate physical output, energy consumption in the sector and country
fn = f"{eurostat_dir}/{eb_names[country]}.XLSX" fn = f"{eurostat_dir}/{eb_names[country]}.XLSX"
with mute_print(): with mute_print():
df = pd.read_excel(fn, sheet_name='2016', index_col=2, df = pd.read_excel(
header=0, skiprows=1).squeeze('columns') fn, sheet_name="2016", index_col=2, header=0, skiprows=1
e_country = df.loc[eb_sectors.keys( ).squeeze("columns")
), 'Total all products'].rename(eb_sectors) e_country = df.loc[eb_sectors.keys(), "Total all products"].rename(eb_sectors)
fn = f'{jrc_dir}/JRC-IDEES-2015_Industry_EU28.xlsx' fn = f"{jrc_dir}/JRC-IDEES-2015_Industry_EU28.xlsx"
with mute_print(): with mute_print():
df = pd.read_excel(fn, sheet_name='Ind_Summary', df = pd.read_excel(fn, sheet_name="Ind_Summary", index_col=0, header=0).squeeze(
index_col=0, header=0).squeeze('columns') "columns"
)
assert df.index[48] == "by sector" assert df.index[48] == "by sector"
year_i = df.columns.get_loc(year) year_i = df.columns.get_loc(year)
@ -139,15 +211,14 @@ def get_energy_ratio(country):
def industry_production_per_country(country): def industry_production_per_country(country):
def get_sector_data(sector, country): def get_sector_data(sector, country):
jrc_country = jrc_names.get(country, country) jrc_country = jrc_names.get(country, country)
fn = f'{jrc_dir}/JRC-IDEES-2015_Industry_{jrc_country}.xlsx' fn = f"{jrc_dir}/JRC-IDEES-2015_Industry_{jrc_country}.xlsx"
sheet = sub_sheet_name_dict[sector] sheet = sub_sheet_name_dict[sector]
with mute_print(): with mute_print():
df = pd.read_excel(fn, sheet_name=sheet, df = pd.read_excel(fn, sheet_name=sheet, index_col=0, header=0).squeeze(
index_col=0, header=0).squeeze('columns') "columns"
)
year_i = df.columns.get_loc(year) year_i = df.columns.get_loc(year)
df = df.iloc[find_physical_output(df), year_i] df = df.iloc[find_physical_output(df), year_i]
@ -169,11 +240,14 @@ def industry_production_per_country(country):
def industry_production(countries): def industry_production(countries):
nprocesses = snakemake.threads nprocesses = snakemake.threads
func = industry_production_per_country func = industry_production_per_country
tqdm_kwargs = dict(ascii=False, unit=' country', total=len(countries), tqdm_kwargs = dict(
desc="Build industry production") ascii=False,
unit=" country",
total=len(countries),
desc="Build industry production",
)
with mp.Pool(processes=nprocesses) as pool: with mp.Pool(processes=nprocesses) as pool:
demand_l = list(tqdm(pool.imap(func, countries), **tqdm_kwargs)) demand_l = list(tqdm(pool.imap(func, countries), **tqdm_kwargs))
@ -185,7 +259,9 @@ def industry_production(countries):
def separate_basic_chemicals(demand): def separate_basic_chemicals(demand):
"""Separate basic chemicals into ammonia, chlorine, methanol and HVC.""" """
Separate basic chemicals into ammonia, chlorine, methanol and HVC.
"""
ammonia = pd.read_csv(snakemake.input.ammonia_production, index_col=0) ammonia = pd.read_csv(snakemake.input.ammonia_production, index_col=0)
@ -194,14 +270,14 @@ def separate_basic_chemicals(demand):
logger.info(f"Following countries have no ammonia demand: {missing.tolist()}") logger.info(f"Following countries have no ammonia demand: {missing.tolist()}")
demand["Ammonia"] = 0. demand["Ammonia"] = 0.0
demand.loc[there, "Ammonia"] = ammonia.loc[there, str(year)] demand.loc[there, "Ammonia"] = ammonia.loc[there, str(year)]
demand["Basic chemicals"] -= demand["Ammonia"] demand["Basic chemicals"] -= demand["Ammonia"]
# EE, HR and LT got negative demand through subtraction - poor data # EE, HR and LT got negative demand through subtraction - poor data
demand['Basic chemicals'].clip(lower=0., inplace=True) demand["Basic chemicals"].clip(lower=0.0, inplace=True)
# assume HVC, methanol, chlorine production proportional to non-ammonia basic chemicals # assume HVC, methanol, chlorine production proportional to non-ammonia basic chemicals
distribution_key = demand["Basic chemicals"] / demand["Basic chemicals"].sum() distribution_key = demand["Basic chemicals"] / demand["Basic chemicals"].sum()
@ -211,16 +287,18 @@ def separate_basic_chemicals(demand):
demand.drop(columns=["Basic chemicals"], inplace=True) demand.drop(columns=["Basic chemicals"], inplace=True)
if __name__ == '__main__':
if 'snakemake' not in globals():
from helper import mock_snakemake
snakemake = mock_snakemake('build_industrial_production_per_country')
logging.basicConfig(level=snakemake.config['logging_level']) if __name__ == "__main__":
if "snakemake" not in globals():
from helper import mock_snakemake
snakemake = mock_snakemake("build_industrial_production_per_country")
logging.basicConfig(level=snakemake.config["logging_level"])
countries = non_EU + eu28 countries = non_EU + eu28
year = snakemake.config['industry']['reference_year'] year = snakemake.config["industry"]["reference_year"]
config = snakemake.config["industry"] config = snakemake.config["industry"]
@ -232,4 +310,4 @@ if __name__ == '__main__':
separate_basic_chemicals(demand) separate_basic_chemicals(demand)
fn = snakemake.output.industrial_production_per_country fn = snakemake.output.industrial_production_per_country
demand.to_csv(fn, float_format='%.2f') demand.to_csv(fn, float_format="%.2f")
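The ammonia split above subtracts reported ammonia production from the JRC "Basic chemicals" total and clips negatives caused by inconsistent statistics. A toy paraphrase of that step, with invented numbers and simplified indexing:

import pandas as pd

demand = pd.DataFrame({"Basic chemicals": [3000.0, 40.0]}, index=["DE", "EE"])  # kt, invented
ammonia = pd.Series({"DE": 2500.0, "EE": 100.0})  # kt, invented

demand["Ammonia"] = ammonia.reindex(demand.index).fillna(0.0)
demand["Basic chemicals"] -= demand["Ammonia"]
# EE goes negative (40 - 100 = -60), which only reflects poor data, so clip at zero
demand["Basic chemicals"] = demand["Basic chemicals"].clip(lower=0.0)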

scripts/build_industrial_production_per_country_tomorrow.py View File

@@ -1,13 +1,16 @@
"""Build future industrial production per country.""" # -*- coding: utf-8 -*-
"""
Build future industrial production per country.
"""
import pandas as pd import pandas as pd
from prepare_sector_network import get from prepare_sector_network import get
if __name__ == '__main__': if __name__ == "__main__":
if 'snakemake' not in globals(): if "snakemake" not in globals():
from helper import mock_snakemake from helper import mock_snakemake
snakemake = mock_snakemake('build_industrial_production_per_country_tomorrow')
snakemake = mock_snakemake("build_industrial_production_per_country_tomorrow")
config = snakemake.config["industry"] config = snakemake.config["industry"]
@ -24,12 +27,20 @@ if __name__ == '__main__':
int_steel = production["Integrated steelworks"].sum() int_steel = production["Integrated steelworks"].sum()
fraction_persistent_primary = st_primary_fraction * total_steel.sum() / int_steel fraction_persistent_primary = st_primary_fraction * total_steel.sum() / int_steel
dri = dri_fraction * fraction_persistent_primary * production["Integrated steelworks"] dri = (
dri_fraction * fraction_persistent_primary * production["Integrated steelworks"]
)
production.insert(2, "DRI + Electric arc", dri) production.insert(2, "DRI + Electric arc", dri)
not_dri = (1 - dri_fraction) not_dri = 1 - dri_fraction
production["Integrated steelworks"] = not_dri * fraction_persistent_primary * production["Integrated steelworks"] production["Integrated steelworks"] = (
production["Electric arc"] = total_steel - production["DRI + Electric arc"] - production["Integrated steelworks"] not_dri * fraction_persistent_primary * production["Integrated steelworks"]
)
production["Electric arc"] = (
total_steel
- production["DRI + Electric arc"]
- production["Integrated steelworks"]
)
keys = ["Aluminium - primary production", "Aluminium - secondary production"] keys = ["Aluminium - primary production", "Aluminium - secondary production"]
total_aluminium = production[keys].sum(axis=1) total_aluminium = production[keys].sum(axis=1)
@ -38,15 +49,23 @@ if __name__ == '__main__':
key_sec = "Aluminium - secondary production" key_sec = "Aluminium - secondary production"
al_primary_fraction = get(config["Al_primary_fraction"], investment_year) al_primary_fraction = get(config["Al_primary_fraction"], investment_year)
fraction_persistent_primary = al_primary_fraction * total_aluminium.sum() / production[key_pri].sum() fraction_persistent_primary = (
al_primary_fraction * total_aluminium.sum() / production[key_pri].sum()
)
production[key_pri] = fraction_persistent_primary * production[key_pri] production[key_pri] = fraction_persistent_primary * production[key_pri]
production[key_sec] = total_aluminium - production[key_pri] production[key_sec] = total_aluminium - production[key_pri]
production["HVC (mechanical recycling)"] = get(config["HVC_mechanical_recycling_fraction"], investment_year) * production["HVC"] production["HVC (mechanical recycling)"] = (
production["HVC (chemical recycling)"] = get(config["HVC_chemical_recycling_fraction"], investment_year) * production["HVC"] get(config["HVC_mechanical_recycling_fraction"], investment_year)
* production["HVC"]
)
production["HVC (chemical recycling)"] = (
get(config["HVC_chemical_recycling_fraction"], investment_year)
* production["HVC"]
)
production["HVC"] *= get(config['HVC_primary_fraction'], investment_year) production["HVC"] *= get(config["HVC_primary_fraction"], investment_year)
fn = snakemake.output.industrial_production_per_country_tomorrow fn = snakemake.output.industrial_production_per_country_tomorrow
production.to_csv(fn, float_format='%.2f') production.to_csv(fn, float_format="%.2f")
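To see the steel arithmetic concretely, suppose total steel output is 100 kt, today's integrated steelworks produce 60 kt, and the config targets st_primary_fraction = 0.3 with dri_fraction = 0.5 (all numbers invented):

total_steel = 100.0  # kt, invented
int_steel = 60.0     # kt made in integrated steelworks today, invented
st_primary_fraction, dri_fraction = 0.3, 0.5  # hypothetical config values

scale = st_primary_fraction * total_steel / int_steel  # 0.5
dri = dri_fraction * scale * int_steel                 # 15 kt via DRI + Electric arc
integrated = (1 - dri_fraction) * scale * int_steel    # 15 kt stays integrated
electric_arc = total_steel - dri - integrated          # 70 kt secondary EAF

assert dri + integrated == st_primary_fraction * total_steel  # primary share kept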

scripts/build_industrial_production_per_node.py View File

@@ -1,36 +1,39 @@
"""Build industrial production per node.""" # -*- coding: utf-8 -*-
"""
Build industrial production per node.
"""
from itertools import product
import pandas as pd import pandas as pd
from itertools import product
# map JRC/our sectors to hotmaps sector, where mapping exist # map JRC/our sectors to hotmaps sector, where mapping exist
sector_mapping = { sector_mapping = {
'Electric arc': 'Iron and steel', "Electric arc": "Iron and steel",
'Integrated steelworks': 'Iron and steel', "Integrated steelworks": "Iron and steel",
'DRI + Electric arc': 'Iron and steel', "DRI + Electric arc": "Iron and steel",
'Ammonia': 'Chemical industry', "Ammonia": "Chemical industry",
'HVC': 'Chemical industry', "HVC": "Chemical industry",
'HVC (mechanical recycling)': 'Chemical industry', "HVC (mechanical recycling)": "Chemical industry",
'HVC (chemical recycling)': 'Chemical industry', "HVC (chemical recycling)": "Chemical industry",
'Methanol': 'Chemical industry', "Methanol": "Chemical industry",
'Chlorine': 'Chemical industry', "Chlorine": "Chemical industry",
'Other chemicals': 'Chemical industry', "Other chemicals": "Chemical industry",
'Pharmaceutical products etc.': 'Chemical industry', "Pharmaceutical products etc.": "Chemical industry",
'Cement': 'Cement', "Cement": "Cement",
'Ceramics & other NMM': 'Non-metallic mineral products', "Ceramics & other NMM": "Non-metallic mineral products",
'Glass production': 'Glass', "Glass production": "Glass",
'Pulp production': 'Paper and printing', "Pulp production": "Paper and printing",
'Paper production': 'Paper and printing', "Paper production": "Paper and printing",
'Printing and media reproduction': 'Paper and printing', "Printing and media reproduction": "Paper and printing",
'Alumina production': 'Non-ferrous metals', "Alumina production": "Non-ferrous metals",
'Aluminium - primary production': 'Non-ferrous metals', "Aluminium - primary production": "Non-ferrous metals",
'Aluminium - secondary production': 'Non-ferrous metals', "Aluminium - secondary production": "Non-ferrous metals",
'Other non-ferrous metals': 'Non-ferrous metals', "Other non-ferrous metals": "Non-ferrous metals",
} }
def build_nodal_industrial_production(): def build_nodal_industrial_production():
fn = snakemake.input.industrial_production_per_country_tomorrow fn = snakemake.input.industrial_production_per_country_tomorrow
industrial_production = pd.read_csv(fn, index_col=0) industrial_production = pd.read_csv(fn, index_col=0)
@ -38,29 +41,32 @@ def build_nodal_industrial_production():
keys = pd.read_csv(fn, index_col=0) keys = pd.read_csv(fn, index_col=0)
keys["country"] = keys.index.str[:2] keys["country"] = keys.index.str[:2]
nodal_production = pd.DataFrame(index=keys.index, nodal_production = pd.DataFrame(
columns=industrial_production.columns, index=keys.index, columns=industrial_production.columns, dtype=float
dtype=float) )
countries = keys.country.unique() countries = keys.country.unique()
sectors = industrial_production.columns sectors = industrial_production.columns
for country, sector in product(countries, sectors): for country, sector in product(countries, sectors):
buses = keys.index[keys.country == country] buses = keys.index[keys.country == country]
mapping = sector_mapping.get(sector, "population") mapping = sector_mapping.get(sector, "population")
key = keys.loc[buses, mapping] key = keys.loc[buses, mapping]
nodal_production.loc[buses, sector] = industrial_production.at[country, sector] * key nodal_production.loc[buses, sector] = (
industrial_production.at[country, sector] * key
)
nodal_production.to_csv(snakemake.output.industrial_production_per_node) nodal_production.to_csv(snakemake.output.industrial_production_per_node)
if __name__ == "__main__": if __name__ == "__main__":
if 'snakemake' not in globals(): if "snakemake" not in globals():
from helper import mock_snakemake from helper import mock_snakemake
snakemake = mock_snakemake('build_industrial_production_per_node',
simpl='', snakemake = mock_snakemake(
"build_industrial_production_per_node",
simpl="",
clusters=48, clusters=48,
) )

scripts/build_industry_sector_ratios.py View File

@@ -1,4 +1,7 @@
"""Build industry sector ratios.""" # -*- coding: utf-8 -*-
"""
Build industry sector ratios.
"""
import pandas as pd import pandas as pd
from helper import mute_print from helper import mute_print
@ -68,7 +71,6 @@ index = [
def load_idees_data(sector, country="EU28"): def load_idees_data(sector, country="EU28"):
suffixes = {"out": "", "fec": "_fec", "ued": "_ued", "emi": "_emi"} suffixes = {"out": "", "fec": "_fec", "ued": "_ued", "emi": "_emi"}
sheets = {k: sheet_names[sector] + v for k, v in suffixes.items()} sheets = {k: sheet_names[sector] + v for k, v in suffixes.items()}
@ -91,7 +93,6 @@ def load_idees_data(sector, country="EU28"):
def iron_and_steel(): def iron_and_steel():
# There are two different approaches to produce iron and steel: # There are two different approaches to produce iron and steel:
# i.e., integrated steelworks and electric arc. # i.e., integrated steelworks and electric arc.
# Electric arc approach has higher efficiency and relies more on electricity. # Electric arc approach has higher efficiency and relies more on electricity.
@ -602,7 +603,6 @@ def chemicals_industry():
def nonmetalic_mineral_products(): def nonmetalic_mineral_products():
# This includes cement, ceramic and glass production. # This includes cement, ceramic and glass production.
# This includes process emissions related to the fabrication of clinker. # This includes process emissions related to the fabrication of clinker.
@ -789,7 +789,6 @@ def nonmetalic_mineral_products():
def pulp_paper_printing(): def pulp_paper_printing():
# Pulp, paper and printing can be completely electrified. # Pulp, paper and printing can be completely electrified.
# There are no process emissions associated to this sector. # There are no process emissions associated to this sector.
@ -942,7 +941,6 @@ def pulp_paper_printing():
def food_beverages_tobacco(): def food_beverages_tobacco():
# Food, beverages and tobaco can be completely electrified. # Food, beverages and tobaco can be completely electrified.
# There are no process emissions associated to this sector. # There are no process emissions associated to this sector.
@ -1002,7 +1000,6 @@ def food_beverages_tobacco():
def non_ferrous_metals(): def non_ferrous_metals():
sector = "Non Ferrous Metals" sector = "Non Ferrous Metals"
idees = load_idees_data(sector) idees = load_idees_data(sector)
@ -1205,7 +1202,6 @@ def non_ferrous_metals():
def transport_equipment(): def transport_equipment():
sector = "Transport Equipment" sector = "Transport Equipment"
idees = load_idees_data(sector) idees = load_idees_data(sector)
@ -1256,7 +1252,6 @@ def transport_equipment():
def machinery_equipment(): def machinery_equipment():
sector = "Machinery Equipment" sector = "Machinery Equipment"
idees = load_idees_data(sector) idees = load_idees_data(sector)
@ -1309,7 +1304,6 @@ def machinery_equipment():
def textiles_and_leather(): def textiles_and_leather():
sector = "Textiles and leather" sector = "Textiles and leather"
idees = load_idees_data(sector) idees = load_idees_data(sector)
@ -1358,7 +1352,6 @@ def textiles_and_leather():
def wood_and_wood_products(): def wood_and_wood_products():
sector = "Wood and wood products" sector = "Wood and wood products"
idees = load_idees_data(sector) idees = load_idees_data(sector)
@ -1404,7 +1397,6 @@ def wood_and_wood_products():
def other_industrial_sectors(): def other_industrial_sectors():
sector = "Other Industrial Sectors" sector = "Other Industrial Sectors"
idees = load_idees_data(sector) idees = load_idees_data(sector)
@ -1465,9 +1457,10 @@ def other_industrial_sectors():
if __name__ == "__main__": if __name__ == "__main__":
if 'snakemake' not in globals(): if "snakemake" not in globals():
from helper import mock_snakemake from helper import mock_snakemake
snakemake = mock_snakemake('build_industry_sector_ratios')
snakemake = mock_snakemake("build_industry_sector_ratios")
# TODO make config option # TODO make config option
year = 2015 year = 2015

scripts/build_population_layouts.py View File

@@ -1,29 +1,35 @@
"""Build mapping between grid cells and population (total, urban, rural)""" # -*- coding: utf-8 -*-
"""
Build mapping between grid cells and population (total, urban, rural)
"""
import logging import logging
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
import multiprocessing as mp import multiprocessing as mp
import atlite import atlite
import geopandas as gpd
import numpy as np import numpy as np
import pandas as pd import pandas as pd
import xarray as xr import xarray as xr
import geopandas as gpd
if __name__ == '__main__': if __name__ == "__main__":
if 'snakemake' not in globals(): if "snakemake" not in globals():
from helper import mock_snakemake from helper import mock_snakemake
snakemake = mock_snakemake('build_population_layouts')
logging.basicConfig(level=snakemake.config['logging_level']) snakemake = mock_snakemake("build_population_layouts")
cutout = atlite.Cutout(snakemake.config['atlite']['cutout']) logging.basicConfig(level=snakemake.config["logging_level"])
cutout = atlite.Cutout(snakemake.config["atlite"]["cutout"])
grid_cells = cutout.grid.geometry grid_cells = cutout.grid.geometry
# nuts3 has columns country, gdp, pop, geometry # nuts3 has columns country, gdp, pop, geometry
# population is given in dimensions of 1e3=k # population is given in dimensions of 1e3=k
nuts3 = gpd.read_file(snakemake.input.nuts3_shapes).set_index('index') nuts3 = gpd.read_file(snakemake.input.nuts3_shapes).set_index("index")
# Indicator matrix NUTS3 -> grid cells # Indicator matrix NUTS3 -> grid cells
I = atlite.cutout.compute_indicatormatrix(nuts3.geometry, grid_cells) I = atlite.cutout.compute_indicatormatrix(nuts3.geometry, grid_cells)
@ -34,9 +40,12 @@ if __name__ == '__main__':
countries = np.sort(nuts3.country.unique()) countries = np.sort(nuts3.country.unique())
urban_fraction = pd.read_csv(snakemake.input.urban_percent, urban_fraction = (
header=None, index_col=0, pd.read_csv(
names=['fraction']).squeeze() / 100. snakemake.input.urban_percent, header=None, index_col=0, names=["fraction"]
).squeeze()
/ 100.0
)
# fill missing Balkans values # fill missing Balkans values
missing = ["AL", "ME", "MK"] missing = ["AL", "ME", "MK"]
@ -46,7 +55,7 @@ if __name__ == '__main__':
urban_fraction = pd.concat([urban_fraction, fill_values]) urban_fraction = pd.concat([urban_fraction, fill_values])
# population in each grid cell # population in each grid cell
pop_cells = pd.Series(I.dot(nuts3['pop'])) pop_cells = pd.Series(I.dot(nuts3["pop"]))
# in km^2 # in km^2
cell_areas = grid_cells.to_crs(3035).area / 1e6 cell_areas = grid_cells.to_crs(3035).area / 1e6
@ -55,13 +64,15 @@ if __name__ == '__main__':
density_cells = pop_cells / cell_areas density_cells = pop_cells / cell_areas
# rural or urban population in grid cell # rural or urban population in grid cell
pop_rural = pd.Series(0., density_cells.index) pop_rural = pd.Series(0.0, density_cells.index)
pop_urban = pd.Series(0., density_cells.index) pop_urban = pd.Series(0.0, density_cells.index)
for ct in countries: for ct in countries:
logger.debug(f"The urbanization rate for {ct} is {round(urban_fraction[ct]*100)}%") logger.debug(
f"The urbanization rate for {ct} is {round(urban_fraction[ct]*100)}%"
)
indicator_nuts3_ct = nuts3.country.apply(lambda x: 1. if x == ct else 0.) indicator_nuts3_ct = nuts3.country.apply(lambda x: 1.0 if x == ct else 0.0)
indicator_cells_ct = pd.Series(Iinv.T.dot(indicator_nuts3_ct)) indicator_cells_ct = pd.Series(Iinv.T.dot(indicator_nuts3_ct))
@ -70,7 +81,7 @@ if __name__ == '__main__':
pop_cells_ct = indicator_cells_ct * pop_cells pop_cells_ct = indicator_cells_ct * pop_cells
# correct for imprecision of Iinv*I # correct for imprecision of Iinv*I
pop_ct = nuts3.loc[nuts3.country==ct,'pop'].sum() pop_ct = nuts3.loc[nuts3.country == ct, "pop"].sum()
pop_cells_ct *= pop_ct / pop_cells_ct.sum() pop_cells_ct *= pop_ct / pop_cells_ct.sum()
# The first low density grid cells to reach rural fraction are rural # The first low density grid cells to reach rural fraction are rural
@ -80,20 +91,19 @@ if __name__ == '__main__':
pop_ct_rural_b = asc_density_cumsum < rural_fraction_ct pop_ct_rural_b = asc_density_cumsum < rural_fraction_ct
pop_ct_urban_b = ~pop_ct_rural_b pop_ct_urban_b = ~pop_ct_rural_b
pop_ct_rural_b[indicator_cells_ct == 0.] = False pop_ct_rural_b[indicator_cells_ct == 0.0] = False
pop_ct_urban_b[indicator_cells_ct == 0.] = False pop_ct_urban_b[indicator_cells_ct == 0.0] = False
pop_rural += pop_cells_ct.where(pop_ct_rural_b, 0.) pop_rural += pop_cells_ct.where(pop_ct_rural_b, 0.0)
pop_urban += pop_cells_ct.where(pop_ct_urban_b, 0.) pop_urban += pop_cells_ct.where(pop_ct_urban_b, 0.0)
pop_cells = {"total": pop_cells} pop_cells = {"total": pop_cells}
pop_cells["rural"] = pop_rural pop_cells["rural"] = pop_rural
pop_cells["urban"] = pop_urban pop_cells["urban"] = pop_urban
for key, pop in pop_cells.items(): for key, pop in pop_cells.items():
ycoords = ("y", cutout.coords["y"].data)
ycoords = ('y', cutout.coords['y'].data) xcoords = ("x", cutout.coords["x"].data)
xcoords = ('x', cutout.coords['x'].data)
values = pop.values.reshape(cutout.shape) values = pop.values.reshape(cutout.shape)
layout = xr.DataArray(values, [ycoords, xcoords]) layout = xr.DataArray(values, [ycoords, xcoords])
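The rural/urban split above sorts a country's grid cells by population density and declares cells rural, in ascending density order, until the rural population share is reached. A compact sketch of the idea with invented data:

import pandas as pd

pop = pd.Series([75.0, 5.0, 20.0])        # population per cell, invented
density = pd.Series([500.0, 10.0, 50.0])  # persons per km^2, invented
urban_fraction_ct = 0.8                   # hypothetical country value

asc = density.sort_values().index
cum = pop[asc].cumsum() / pop.sum()
rural = (cum < 1 - urban_fraction_ct).reindex(pop.index)
pop_rural = pop.where(rural, 0.0)   # only the sparsest cells keep population here
pop_urban = pop.where(~rural, 0.0)  # the rest counts as urban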

scripts/build_population_weighted_energy_totals.py View File

@@ -1,13 +1,17 @@
"""Build population-weighted energy totals.""" # -*- coding: utf-8 -*-
"""
Build population-weighted energy totals.
"""
import pandas as pd import pandas as pd
if __name__ == '__main__': if __name__ == "__main__":
if 'snakemake' not in globals(): if "snakemake" not in globals():
from helper import mock_snakemake from helper import mock_snakemake
snakemake = mock_snakemake( snakemake = mock_snakemake(
'build_population_weighted_energy_totals', "build_population_weighted_energy_totals",
simpl='', simpl="",
clusters=48, clusters=48,
) )
@ -15,7 +19,7 @@ if __name__ == '__main__':
energy_totals = pd.read_csv(snakemake.input.energy_totals, index_col=0) energy_totals = pd.read_csv(snakemake.input.energy_totals, index_col=0)
nodal_energy_totals = energy_totals.loc[pop_layout.ct].fillna(0.) nodal_energy_totals = energy_totals.loc[pop_layout.ct].fillna(0.0)
nodal_energy_totals.index = pop_layout.index nodal_energy_totals.index = pop_layout.index
nodal_energy_totals = nodal_energy_totals.multiply(pop_layout.fraction, axis=0) nodal_energy_totals = nodal_energy_totals.multiply(pop_layout.fraction, axis=0)
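The reindex-and-multiply pattern above maps national totals onto buses and scales them by each bus's share of national population (the fractions per country sum to one). A toy sketch with invented numbers:

import pandas as pd

energy_totals = pd.DataFrame({"heat": [100.0]}, index=["DE"])  # TWh/a, invented
pop_layout = pd.DataFrame(
    {"ct": ["DE", "DE"], "fraction": [0.6, 0.4]}, index=["DE0 0", "DE0 1"]
)  # hypothetical two-bus layout

nodal = energy_totals.loc[pop_layout.ct].fillna(0.0)
nodal.index = pop_layout.index
nodal = nodal.multiply(pop_layout.fraction, axis=0)  # 60 and 40 TWh/a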

File diff suppressed because it is too large

scripts/build_salt_cavern_potentials.py View File

@@ -1,3 +1,4 @@
# -*- coding: utf-8 -*-
"""
Build salt cavern potentials for hydrogen storage.

@@ -22,29 +23,35 @@ import geopandas as gpd
import pandas as pd


def concat_gdf(gdf_list, crs="EPSG:4326"):
    """
    Concatenate multiple geopandas dataframes with a common coordinate
    reference system (crs).
    """
    return gpd.GeoDataFrame(pd.concat(gdf_list), crs=crs)


def load_bus_regions(onshore_path, offshore_path):
    """
    Load pypsa-eur on- and offshore regions and concat.
    """
    bus_regions_offshore = gpd.read_file(offshore_path)
    bus_regions_onshore = gpd.read_file(onshore_path)

    bus_regions = concat_gdf([bus_regions_offshore, bus_regions_onshore])
    bus_regions = bus_regions.dissolve(by="name", aggfunc="sum")

    return bus_regions


def area(gdf):
    """
    Returns the area of GeoDataFrame geometries in square kilometers.
    """
    return gdf.to_crs(epsg=3035).area.div(1e6)


def salt_cavern_potential_by_region(caverns, regions):
    # calculate area of cavern shapes
    caverns["area_caverns"] = area(caverns)

@@ -53,18 +60,24 @@ def salt_cavern_potential_by_region(caverns, regions):
    # calculate share of cavern area inside region
    overlay["share"] = area(overlay) / overlay["area_caverns"]

    overlay["e_nom"] = overlay.eval(
        "capacity_per_area * share * area_caverns / 1000"
    )  # TWh

    caverns_regions = (
        overlay.groupby(["name", "storage_type"]).e_nom.sum().unstack("storage_type")
    )

    return caverns_regions


if __name__ == "__main__":
    if "snakemake" not in globals():
        from helper import mock_snakemake

        snakemake = mock_snakemake(
            "build_salt_cavern_potentials", simpl="", clusters="37"
        )

    fn_onshore = snakemake.input.regions_onshore
    fn_offshore = snakemake.input.regions_offshore

@@ -75,4 +88,4 @@ if __name__ == '__main__':
    caverns_regions = salt_cavern_potential_by_region(caverns, regions)

    caverns_regions.to_csv(snakemake.output.h2_cavern_potential)
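The e_nom expression above weights each cavern's storage density by the area share that falls into a region. A back-of-the-envelope check with invented numbers, 7.5 GWh/km^2 at 40% overlap of a 200 km^2 cavern:

import pandas as pd

overlay = pd.DataFrame(
    {"capacity_per_area": [7.5], "share": [0.4], "area_caverns": [200.0]}
)  # GWh/km^2, fraction, km^2; all invented
e_nom = overlay.eval("capacity_per_area * share * area_caverns / 1000")  # 0.6 TWh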

scripts/build_sequestration_potentials.py View File

@@ -1,12 +1,18 @@
# -*- coding: utf-8 -*-
import geopandas as gpd
import pandas as pd


def area(gdf):
    """
    Returns the area of GeoDataFrame geometries in square kilometers.
    """
    return gdf.to_crs(epsg=3035).area.div(1e6)


def allocate_sequestration_potential(
    gdf, regions, attr="conservative estimate Mt", threshold=3
):
    gdf = gdf.loc[gdf[attr] > threshold, [attr, "geometry"]]
    gdf["area_sqkm"] = area(gdf)
    overlay = gpd.overlay(regions, gdf, keep_geom_type=True)

@@ -19,12 +25,11 @@ def allocate_sequestration_potential(gdf, regions, attr='conservative estimate M

if __name__ == "__main__":
    if "snakemake" not in globals():
        from helper import mock_snakemake

        snakemake = mock_snakemake(
            "build_sequestration_potentials", simpl="", clusters="181"
        )

    cf = snakemake.config["sector"]["regional_co2_sequestration_potential"]

@@ -34,10 +39,12 @@ if __name__ == "__main__":
    regions = gpd.read_file(snakemake.input.regions_offshore)
    if cf["include_onshore"]:
        onregions = gpd.read_file(snakemake.input.regions_onshore)
        regions = pd.concat([regions, onregions]).dissolve(by="name").reset_index()

    s = allocate_sequestration_potential(
        gdf, regions, attr=cf["attribute"], threshold=cf["min_size"]
    )

    s = s.where(s > cf["min_size"]).dropna()

    s.to_csv(snakemake.output.sequestration_potential)

scripts/build_shipping_demand_per_node.py View File

@@ -1,45 +1,55 @@
"""Build regional demand for international navigation based on outflow volume of ports.""" # -*- coding: utf-8 -*-
"""
Build regional demand for international navigation based on outflow volume of
ports.
"""
import pandas as pd
import geopandas as gpd
import json import json
if __name__ == '__main__': import geopandas as gpd
if 'snakemake' not in globals(): import pandas as pd
if __name__ == "__main__":
if "snakemake" not in globals():
from helper import mock_snakemake from helper import mock_snakemake
snakemake = mock_snakemake( snakemake = mock_snakemake(
'build_shipping_demand_per_node', "build_shipping_demand_per_node",
simpl='', simpl="",
clusters=48, clusters=48,
) )
scope = gpd.read_file(snakemake.input.scope).geometry[0] scope = gpd.read_file(snakemake.input.scope).geometry[0]
regions = gpd.read_file(snakemake.input.regions).set_index('name') regions = gpd.read_file(snakemake.input.regions).set_index("name")
demand = pd.read_csv(snakemake.input.demand, index_col=0)["total international navigation"] demand = pd.read_csv(snakemake.input.demand, index_col=0)[
"total international navigation"
]
# read port data into GeoDataFrame # read port data into GeoDataFrame
with open(snakemake.input.ports, 'r', encoding='latin_1') as f: with open(snakemake.input.ports, "r", encoding="latin_1") as f:
ports = json.load(f) ports = json.load(f)
ports = pd.json_normalize(ports, "features", sep="_") ports = pd.json_normalize(ports, "features", sep="_")
coordinates = ports.geometry_coordinates coordinates = ports.geometry_coordinates
geometry = gpd.points_from_xy(coordinates.str[0], coordinates.str[1]) geometry = gpd.points_from_xy(coordinates.str[0], coordinates.str[1])
ports = gpd.GeoDataFrame(ports, geometry=geometry, crs=4326) ports = gpd.GeoDataFrame(ports, geometry=geometry, crs=4326)
# filter global port data by European ports # filter global port data by European ports
european_ports = ports[ports.within(scope)] european_ports = ports[ports.within(scope)]
# assign ports to nearest region # assign ports to nearest region
p = european_ports.to_crs(3857) p = european_ports.to_crs(3857)
r = regions.to_crs(3857) r = regions.to_crs(3857)
outflows = p.sjoin_nearest(r).groupby("index_right").properties_outflows.sum().div(1e3) outflows = (
p.sjoin_nearest(r).groupby("index_right").properties_outflows.sum().div(1e3)
)
# calculate fraction of each country's port outflows # calculate fraction of each country's port outflows
countries = outflows.index.str[:2] countries = outflows.index.str[:2]
outflows_per_country = outflows.groupby(countries).sum() outflows_per_country = outflows.groupby(countries).sum()
fraction = outflows / countries.map(outflows_per_country) fraction = outflows / countries.map(outflows_per_country)
# distribute per-country demands to nodes based on these fractions # distribute per-country demands to nodes based on these fractions
nodal_demand = demand.loc[countries].fillna(0.) nodal_demand = demand.loc[countries].fillna(0.0)
nodal_demand.index = fraction.index nodal_demand.index = fraction.index
nodal_demand = nodal_demand.multiply(fraction, axis=0) nodal_demand = nodal_demand.multiply(fraction, axis=0)
nodal_demand = nodal_demand.reindex(regions.index, fill_value=0) nodal_demand = nodal_demand.reindex(regions.index, fill_value=0)
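The nearest-region assignment above needs a projected CRS so that distances are meaningful, hence the to_crs(3857) before sjoin_nearest. A minimal sketch of the pattern; the file names and the outflow column are assumptions for illustration:

import geopandas as gpd

ports = gpd.read_file("ports.geojson").to_crs(3857)      # assumed point layer
regions = gpd.read_file("regions.geojson").to_crs(3857)  # assumed polygon layer

joined = ports.sjoin_nearest(regions)  # adds index_right: the nearest region
outflows = joined.groupby("index_right")["outflow"].sum()  # assumed column name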

scripts/build_solar_thermal_profiles.py View File

@@ -1,18 +1,22 @@
"""Build solar thermal collector time series.""" # -*- coding: utf-8 -*-
"""
Build solar thermal collector time series.
"""
import geopandas as gpd
import atlite import atlite
import geopandas as gpd
import numpy as np
import pandas as pd import pandas as pd
import xarray as xr import xarray as xr
import numpy as np
from dask.distributed import Client, LocalCluster from dask.distributed import Client, LocalCluster
if __name__ == '__main__': if __name__ == "__main__":
if 'snakemake' not in globals(): if "snakemake" not in globals():
from helper import mock_snakemake from helper import mock_snakemake
snakemake = mock_snakemake( snakemake = mock_snakemake(
'build_solar_thermal_profiles', "build_solar_thermal_profiles",
simpl='', simpl="",
clusters=48, clusters=48,
) )
@ -20,29 +24,36 @@ if __name__ == '__main__':
cluster = LocalCluster(n_workers=nprocesses, threads_per_worker=1) cluster = LocalCluster(n_workers=nprocesses, threads_per_worker=1)
client = Client(cluster, asynchronous=True) client = Client(cluster, asynchronous=True)
config = snakemake.config['solar_thermal'] config = snakemake.config["solar_thermal"]
time = pd.date_range(freq='h', **snakemake.config['snapshots']) time = pd.date_range(freq="h", **snakemake.config["snapshots"])
cutout_config = snakemake.config['atlite']['cutout'] cutout_config = snakemake.config["atlite"]["cutout"]
cutout = atlite.Cutout(cutout_config).sel(time=time) cutout = atlite.Cutout(cutout_config).sel(time=time)
clustered_regions = gpd.read_file( clustered_regions = (
snakemake.input.regions_onshore).set_index('name').buffer(0).squeeze() gpd.read_file(snakemake.input.regions_onshore)
.set_index("name")
.buffer(0)
.squeeze()
)
I = cutout.indicatormatrix(clustered_regions) I = cutout.indicatormatrix(clustered_regions)
pop_layout = xr.open_dataarray(snakemake.input.pop_layout) pop_layout = xr.open_dataarray(snakemake.input.pop_layout)
stacked_pop = pop_layout.stack(spatial=('y', 'x')) stacked_pop = pop_layout.stack(spatial=("y", "x"))
M = I.T.dot(np.diag(I.dot(stacked_pop))) M = I.T.dot(np.diag(I.dot(stacked_pop)))
nonzero_sum = M.sum(axis=0, keepdims=True) nonzero_sum = M.sum(axis=0, keepdims=True)
nonzero_sum[nonzero_sum == 0.] = 1. nonzero_sum[nonzero_sum == 0.0] = 1.0
M_tilde = M / nonzero_sum M_tilde = M / nonzero_sum
solar_thermal = cutout.solar_thermal(**config, matrix=M_tilde.T, solar_thermal = cutout.solar_thermal(
index=clustered_regions.index, **config,
dask_kwargs=dict(scheduler=client), matrix=M_tilde.T,
show_progress=False) index=clustered_regions.index,
dask_kwargs=dict(scheduler=client),
show_progress=False
)
solar_thermal.to_netcdf(snakemake.output.solar_thermal) solar_thermal.to_netcdf(snakemake.output.solar_thermal)
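The normalisation step above guards against empty regions: column sums of M that would be zero are forced to one, so the division never produces NaNs, and every region's cell weights sum to one (or stay zero). A small sketch of just that pattern with made-up numbers:

import numpy as np

M = np.array([[2.0, 0.0], [6.0, 0.0]])  # cell x region weights, invented
nonzero_sum = M.sum(axis=0, keepdims=True)
nonzero_sum[nonzero_sum == 0.0] = 1.0  # avoid 0/0 for regions without cells
M_tilde = M / nonzero_sum              # first column sums to 1, second stays 0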

scripts/build_temperature_profiles.py View File

@@ -1,18 +1,22 @@
"""Build temperature profiles.""" # -*- coding: utf-8 -*-
"""
Build temperature profiles.
"""
import geopandas as gpd
import atlite import atlite
import geopandas as gpd
import numpy as np
import pandas as pd import pandas as pd
import xarray as xr import xarray as xr
import numpy as np
from dask.distributed import Client, LocalCluster from dask.distributed import Client, LocalCluster
if __name__ == '__main__': if __name__ == "__main__":
if 'snakemake' not in globals(): if "snakemake" not in globals():
from helper import mock_snakemake from helper import mock_snakemake
snakemake = mock_snakemake( snakemake = mock_snakemake(
'build_temperature_profiles', "build_temperature_profiles",
simpl='', simpl="",
clusters=48, clusters=48,
) )
@ -20,34 +24,42 @@ if __name__ == '__main__':
cluster = LocalCluster(n_workers=nprocesses, threads_per_worker=1) cluster = LocalCluster(n_workers=nprocesses, threads_per_worker=1)
client = Client(cluster, asynchronous=True) client = Client(cluster, asynchronous=True)
time = pd.date_range(freq='h', **snakemake.config['snapshots']) time = pd.date_range(freq="h", **snakemake.config["snapshots"])
cutout_config = snakemake.config['atlite']['cutout'] cutout_config = snakemake.config["atlite"]["cutout"]
cutout = atlite.Cutout(cutout_config).sel(time=time) cutout = atlite.Cutout(cutout_config).sel(time=time)
clustered_regions = gpd.read_file( clustered_regions = (
snakemake.input.regions_onshore).set_index('name').buffer(0).squeeze() gpd.read_file(snakemake.input.regions_onshore)
.set_index("name")
.buffer(0)
.squeeze()
)
I = cutout.indicatormatrix(clustered_regions) I = cutout.indicatormatrix(clustered_regions)
pop_layout = xr.open_dataarray(snakemake.input.pop_layout) pop_layout = xr.open_dataarray(snakemake.input.pop_layout)
stacked_pop = pop_layout.stack(spatial=('y', 'x')) stacked_pop = pop_layout.stack(spatial=("y", "x"))
M = I.T.dot(np.diag(I.dot(stacked_pop))) M = I.T.dot(np.diag(I.dot(stacked_pop)))
nonzero_sum = M.sum(axis=0, keepdims=True) nonzero_sum = M.sum(axis=0, keepdims=True)
nonzero_sum[nonzero_sum == 0.] = 1. nonzero_sum[nonzero_sum == 0.0] = 1.0
M_tilde = M / nonzero_sum M_tilde = M / nonzero_sum
temp_air = cutout.temperature( temp_air = cutout.temperature(
matrix=M_tilde.T, index=clustered_regions.index, matrix=M_tilde.T,
index=clustered_regions.index,
dask_kwargs=dict(scheduler=client), dask_kwargs=dict(scheduler=client),
show_progress=False) show_progress=False,
)
temp_air.to_netcdf(snakemake.output.temp_air) temp_air.to_netcdf(snakemake.output.temp_air)
temp_soil = cutout.soil_temperature( temp_soil = cutout.soil_temperature(
matrix=M_tilde.T, index=clustered_regions.index, matrix=M_tilde.T,
index=clustered_regions.index,
dask_kwargs=dict(scheduler=client), dask_kwargs=dict(scheduler=client),
show_progress=False) show_progress=False,
)
temp_soil.to_netcdf(snakemake.output.temp_soil) temp_soil.to_netcdf(snakemake.output.temp_soil)

scripts/build_transport_demand.py View File

@@ -1,13 +1,15 @@
"""Build transport demand.""" # -*- coding: utf-8 -*-
"""
Build transport demand.
"""
import pandas as pd
import numpy as np import numpy as np
import pandas as pd
import xarray as xr import xarray as xr
from helper import generate_periodic_profiles from helper import generate_periodic_profiles
def build_nodal_transport_data(fn, pop_layout): def build_nodal_transport_data(fn, pop_layout):
transport_data = pd.read_csv(fn, index_col=0) transport_data = pd.read_csv(fn, index_col=0)
nodal_transport_data = transport_data.loc[pop_layout.ct].fillna(0.0) nodal_transport_data = transport_data.loc[pop_layout.ct].fillna(0.0)
@ -24,12 +26,9 @@ def build_nodal_transport_data(fn, pop_layout):
def build_transport_demand(traffic_fn, airtemp_fn, nodes, nodal_transport_data): def build_transport_demand(traffic_fn, airtemp_fn, nodes, nodal_transport_data):
## Get overall demand curve for all vehicles ## Get overall demand curve for all vehicles
traffic = pd.read_csv( traffic = pd.read_csv(traffic_fn, skiprows=2, usecols=["count"]).squeeze("columns")
traffic_fn, skiprows=2, usecols=["count"]
).squeeze("columns")
transport_shape = generate_periodic_profiles( transport_shape = generate_periodic_profiles(
dt_index=snapshots, dt_index=snapshots,
@ -94,9 +93,11 @@ def transport_degree_factor(
upper_degree_factor=1.6, upper_degree_factor=1.6,
): ):
""" """
Work out how much energy demand in vehicles increases due to heating and cooling. Work out how much energy demand in vehicles increases due to heating and
There is a deadband where there is no increase. cooling.
Degree factors are % increase in demand compared to no heating/cooling fuel consumption.
There is a deadband where there is no increase. Degree factors are %
increase in demand compared to no heating/cooling fuel consumption.
Returns per unit increase in demand for each place and time Returns per unit increase in demand for each place and time
""" """
@ -137,7 +138,6 @@ def bev_availability_profile(fn, snapshots, nodes, options):
def bev_dsm_profile(snapshots, nodes, options): def bev_dsm_profile(snapshots, nodes, options):
dsm_week = np.zeros((24 * 7,)) dsm_week = np.zeros((24 * 7,))
dsm_week[(np.arange(0, 7, 1) * 24 + options["bev_dsm_restriction_time"])] = options[ dsm_week[(np.arange(0, 7, 1) * 24 + options["bev_dsm_restriction_time"])] = options[
@ -173,24 +173,23 @@ if __name__ == "__main__":
options = snakemake.config["sector"] options = snakemake.config["sector"]
snapshots = pd.date_range(freq='h', **snakemake.config["snapshots"], tz="UTC") snapshots = pd.date_range(freq="h", **snakemake.config["snapshots"], tz="UTC")
Nyears = 1 Nyears = 1
nodal_transport_data = build_nodal_transport_data( nodal_transport_data = build_nodal_transport_data(
snakemake.input.transport_data, snakemake.input.transport_data, pop_layout
pop_layout
) )
transport_demand = build_transport_demand( transport_demand = build_transport_demand(
snakemake.input.traffic_data_KFZ, snakemake.input.traffic_data_KFZ,
snakemake.input.temp_air_total, snakemake.input.temp_air_total,
nodes, nodal_transport_data nodes,
nodal_transport_data,
) )
avail_profile = bev_availability_profile( avail_profile = bev_availability_profile(
snakemake.input.traffic_data_Pkw, snakemake.input.traffic_data_Pkw, snapshots, nodes, options
snapshots, nodes, options
) )
dsm_profile = bev_dsm_profile(snapshots, nodes, options) dsm_profile = bev_dsm_profile(snapshots, nodes, options)
@ -198,4 +197,4 @@ if __name__ == "__main__":
nodal_transport_data.to_csv(snakemake.output.transport_data) nodal_transport_data.to_csv(snakemake.output.transport_data)
transport_demand.to_csv(snakemake.output.transport_demand) transport_demand.to_csv(snakemake.output.transport_demand)
avail_profile.to_csv(snakemake.output.avail_profile) avail_profile.to_csv(snakemake.output.avail_profile)
dsm_profile.to_csv(snakemake.output.dsm_profile) dsm_profile.to_csv(snakemake.output.dsm_profile)
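The degree-factor logic described in the docstring applies no correction inside the temperature deadband and a linear per-degree uplift outside it. A paraphrased sketch, not the script's actual body; the lower-threshold defaults are assumed for illustration:

import numpy as np

def degree_factor_sketch(
    temperature, deadband_lower=15.0, deadband_upper=20.0,
    lower_degree_factor=0.5, upper_degree_factor=1.6,
):
    # per-unit demand increase: zero inside the deadband, linear outside
    dd = np.where(
        temperature < deadband_lower,
        (deadband_lower - temperature) * lower_degree_factor / 100.0, 0.0,
    )
    dd += np.where(
        temperature > deadband_upper,
        (temperature - deadband_upper) * upper_degree_factor / 100.0, 0.0,
    )
    return dd

degree_factor_sketch(np.array([5.0, 17.0, 30.0]))  # -> 0.05, 0.0, 0.16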

scripts/cluster_gas_network.py View File

@@ -1,45 +1,57 @@
"""Cluster gas network.""" # -*- coding: utf-8 -*-
"""
Cluster gas network.
"""
import logging import logging
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
import pandas as pd
import geopandas as gpd import geopandas as gpd
import pandas as pd
from shapely import wkt
from pypsa.geo import haversine_pts
from packaging.version import Version, parse from packaging.version import Version, parse
from pypsa.geo import haversine_pts
from shapely import wkt
def concat_gdf(gdf_list, crs='EPSG:4326'): def concat_gdf(gdf_list, crs="EPSG:4326"):
"""Concatenate multiple geopandas dataframes with common coordinate reference system (crs).""" """
Concatenate multiple geopandas dataframes with common coordinate reference
system (crs).
"""
return gpd.GeoDataFrame(pd.concat(gdf_list), crs=crs) return gpd.GeoDataFrame(pd.concat(gdf_list), crs=crs)
def load_bus_regions(onshore_path, offshore_path): def load_bus_regions(onshore_path, offshore_path):
"""Load pypsa-eur on- and offshore regions and concat.""" """
Load pypsa-eur on- and offshore regions and concat.
"""
bus_regions_offshore = gpd.read_file(offshore_path) bus_regions_offshore = gpd.read_file(offshore_path)
bus_regions_onshore = gpd.read_file(onshore_path) bus_regions_onshore = gpd.read_file(onshore_path)
bus_regions = concat_gdf([bus_regions_offshore, bus_regions_onshore]) bus_regions = concat_gdf([bus_regions_offshore, bus_regions_onshore])
bus_regions = bus_regions.dissolve(by='name', aggfunc='sum') bus_regions = bus_regions.dissolve(by="name", aggfunc="sum")
return bus_regions return bus_regions
def build_clustered_gas_network(df, bus_regions, length_factor=1.25): def build_clustered_gas_network(df, bus_regions, length_factor=1.25):
for i in [0, 1]:
for i in [0,1]:
gdf = gpd.GeoDataFrame(geometry=df[f"point{i}"], crs="EPSG:4326") gdf = gpd.GeoDataFrame(geometry=df[f"point{i}"], crs="EPSG:4326")
kws = dict(op="within") if parse(gpd.__version__) < Version('0.10') else dict(predicate="within") kws = (
dict(op="within")
if parse(gpd.__version__) < Version("0.10")
else dict(predicate="within")
)
bus_mapping = gpd.sjoin(gdf, bus_regions, how="left", **kws).index_right bus_mapping = gpd.sjoin(gdf, bus_regions, how="left", **kws).index_right
bus_mapping = bus_mapping.groupby(bus_mapping.index).first() bus_mapping = bus_mapping.groupby(bus_mapping.index).first()
df[f"bus{i}"] = bus_mapping df[f"bus{i}"] = bus_mapping
df[f"point{i}"] = df[f"bus{i}"].map(bus_regions.to_crs(3035).centroid.to_crs(4326)) df[f"point{i}"] = df[f"bus{i}"].map(
bus_regions.to_crs(3035).centroid.to_crs(4326)
)
# drop pipes where not both buses are inside regions # drop pipes where not both buses are inside regions
df = df.loc[~df.bus0.isna() & ~df.bus1.isna()] df = df.loc[~df.bus0.isna() & ~df.bus1.isna()]
@ -49,10 +61,9 @@ def build_clustered_gas_network(df, bus_regions, length_factor=1.25):
# recalculate lengths as center to center * length factor # recalculate lengths as center to center * length factor
df["length"] = df.apply( df["length"] = df.apply(
lambda p: length_factor * haversine_pts( lambda p: length_factor
[p.point0.x, p.point0.y], * haversine_pts([p.point0.x, p.point0.y], [p.point1.x, p.point1.y]),
[p.point1.x, p.point1.y] axis=1,
), axis=1
) )
# tidy and create new numbered index # tidy and create new numbered index
@ -63,7 +74,6 @@ def build_clustered_gas_network(df, bus_regions, length_factor=1.25):
def reindex_pipes(df): def reindex_pipes(df):
def make_index(x): def make_index(x):
connector = " <-> " if x.bidirectional else " -> " connector = " <-> " if x.bidirectional else " -> "
return "gas pipeline " + x.bus0 + connector + x.bus1 return "gas pipeline " + x.bus0 + connector + x.bus1
@ -77,33 +87,28 @@ def reindex_pipes(df):
def aggregate_parallel_pipes(df): def aggregate_parallel_pipes(df):
strategies = { strategies = {
'bus0': 'first', "bus0": "first",
'bus1': 'first', "bus1": "first",
"p_nom": 'sum', "p_nom": "sum",
"p_nom_diameter": 'sum', "p_nom_diameter": "sum",
"max_pressure_bar": "mean", "max_pressure_bar": "mean",
"build_year": "mean", "build_year": "mean",
"diameter_mm": "mean", "diameter_mm": "mean",
"length": 'mean', "length": "mean",
'name': ' '.join, "name": " ".join,
"p_min_pu": 'min', "p_min_pu": "min",
} }
return df.groupby(df.index).agg(strategies) return df.groupby(df.index).agg(strategies)
if __name__ == "__main__": if __name__ == "__main__":
if "snakemake" not in globals():
if 'snakemake' not in globals():
from helper import mock_snakemake from helper import mock_snakemake
snakemake = mock_snakemake(
'cluster_gas_network',
simpl='',
clusters='37'
)
logging.basicConfig(level=snakemake.config['logging_level']) snakemake = mock_snakemake("cluster_gas_network", simpl="", clusters="37")
logging.basicConfig(level=snakemake.config["logging_level"])
fn = snakemake.input.cleaned_gas_network fn = snakemake.input.cleaned_gas_network
df = pd.read_csv(fn, index_col=0) df = pd.read_csv(fn, index_col=0)
@ -111,8 +116,7 @@ if __name__ == "__main__":
df[col] = df[col].apply(wkt.loads) df[col] = df[col].apply(wkt.loads)
bus_regions = load_bus_regions( bus_regions = load_bus_regions(
snakemake.input.regions_onshore, snakemake.input.regions_onshore, snakemake.input.regions_offshore
snakemake.input.regions_offshore
) )
gas_network = build_clustered_gas_network(df, bus_regions) gas_network = build_clustered_gas_network(df, bus_regions)
@ -120,4 +124,4 @@ if __name__ == "__main__":
reindex_pipes(gas_network) reindex_pipes(gas_network)
gas_network = aggregate_parallel_pipes(gas_network) gas_network = aggregate_parallel_pipes(gas_network)
gas_network.to_csv(snakemake.output.clustered_gas_network) gas_network.to_csv(snakemake.output.clustered_gas_network)
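A note on the version guard in build_clustered_gas_network above: geopandas renamed the op keyword of sjoin to predicate in release 0.10, so the script selects the right keyword at runtime. A minimal standalone sketch of the same guard, using made-up regions and points:

import geopandas as gpd
from packaging.version import Version, parse
from shapely.geometry import Point, box

# hypothetical data: one square region and one point that lies inside it
regions = gpd.GeoDataFrame({"name": ["A"]}, geometry=[box(0, 0, 1, 1)], crs="EPSG:4326")
points = gpd.GeoDataFrame(geometry=[Point(0.5, 0.5)], crs="EPSG:4326")

# geopandas < 0.10 expects op="within"; newer releases expect predicate="within"
kws = (
    dict(op="within")
    if parse(gpd.__version__) < Version("0.10")
    else dict(predicate="within")
)
print(gpd.sjoin(points, regions, how="left", **kws))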


@@ -1,5 +1,7 @@
# -*- coding: utf-8 -*-
from shutil import copy

import yaml

files = {
@@ -7,24 +9,27 @@ files = {
    "Snakefile": "Snakefile",
    "scripts/solve_network.py": "solve_network.py",
    "scripts/prepare_sector_network.py": "prepare_sector_network.py",
    "../pypsa-eur/config.yaml": "config.pypsaeur.yaml",
}

if __name__ == "__main__":
    if "snakemake" not in globals():
        from helper import mock_snakemake

        snakemake = mock_snakemake("copy_config")

    basepath = (
        snakemake.config["summary_dir"] + "/" + snakemake.config["run"] + "/configs/"
    )

    for f, name in files.items():
        copy(f, basepath + name)

    with open(basepath + "config.snakemake.yaml", "w") as yaml_file:
        yaml.dump(
            snakemake.config,
            yaml_file,
            default_flow_style=False,
            allow_unicode=True,
            sort_keys=False,
        )
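For context on the yaml.dump call above: sort_keys=False writes the keys in insertion order rather than alphabetically, so the dumped snapshot mirrors the layout of the original config. A small illustration with an invented config dict:

import yaml

config = {"run": "test", "logging_level": "INFO", "scenario": {"clusters": [37]}}

# sort_keys=False preserves insertion order; default_flow_style=False uses block style
print(yaml.dump(config, default_flow_style=False, sort_keys=False))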


@@ -1,26 +1,28 @@
# -*- coding: utf-8 -*-
import contextlib
import logging
import os
import sys
from pathlib import Path

import pandas as pd
import pytz
import yaml
from pypsa.components import component_attrs, components
from pypsa.descriptors import Dict
from snakemake.utils import update_config

logger = logging.getLogger(__name__)


# Define a context manager to temporarily mute print statements
@contextlib.contextmanager
def mute_print():
    with open(os.devnull, "w") as devnull:
        with contextlib.redirect_stdout(devnull):
            yield
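mute_print above silences anything written to stdout inside the with block by redirecting it to os.devnull, which is useful around chatty third-party calls. A self-contained sketch of the same pattern:

import contextlib
import os


@contextlib.contextmanager
def mute_print():
    with open(os.devnull, "w") as devnull:
        with contextlib.redirect_stdout(devnull):
            yield


with mute_print():
    print("this goes to os.devnull")  # suppressed
print("this is visible again")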
def override_component_attrs(directory):
    """Tell PyPSA that links can have multiple outputs by
    overriding the component_attrs. This can be done for
@@ -30,7 +32,7 @@ def override_component_attrs(directory):
    Parameters
    ----------
    directory : string
        Folder where component attributes to override are stored
        analogous to ``pypsa/component_attrs``, e.g. `links.csv`.

    Returns
@@ -38,7 +40,7 @@ def override_component_attrs(directory):
    Dictionary of overridden component attributes.
    """
    attrs = Dict({k: v.copy() for k, v in component_attrs.items()})

    for component, list_name in components.list_name.items():
        fn = f"{directory}/{list_name}.csv"
@@ -66,15 +68,17 @@ def mock_snakemake(rulename, **wildcards):
    keyword arguments fixing the wildcards. Only necessary if wildcards are
    needed.
    """
    import os

    import snakemake as sm
    from packaging.version import Version, parse
    from pypsa.descriptors import Dict
    from snakemake.script import Snakemake

    script_dir = Path(__file__).parent.resolve()
    assert (
        Path.cwd().resolve() == script_dir
    ), f"mock_snakemake has to be run from the repository scripts directory {script_dir}"
    os.chdir(script_dir.parent)
    for p in sm.SNAKEFILE_CHOICES:
        if os.path.exists(p):
@@ -95,9 +99,18 @@ def mock_snakemake(rulename, **wildcards):
            io[i] = os.path.abspath(io[i])

    make_accessable(job.input, job.output, job.log)
    snakemake = Snakemake(
        job.input,
        job.output,
        job.params,
        job.wildcards,
        job.threads,
        job.resources,
        job.log,
        job.dag.workflow.config,
        job.rule.name,
        None,
    )
    # create log and output dir if not existent
    for path in list(snakemake.log) + list(snakemake.output):
        Path(path).parent.mkdir(parents=True, exist_ok=True)
@@ -105,15 +118,17 @@ def mock_snakemake(rulename, **wildcards):
    os.chdir(script_dir)
    return snakemake

# from pypsa-eur/_helpers.py
def progress_retrieve(url, file):
    import urllib

    from progressbar import ProgressBar

    pbar = ProgressBar(0, 100)

    def dlProgress(count, blockSize, totalSize):
        pbar.update(int(count * blockSize * 100 / totalSize))

    urllib.request.urlretrieve(url, file, reporthook=dlProgress)
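urllib.request.urlretrieve invokes the reporthook after each downloaded block with (count, block_size, total_size), which is what drives the progress bar in progress_retrieve above. The same callback shape without the progressbar dependency (the URL and filename are placeholders):

import urllib.request


def dl_progress(count, block_size, total_size):
    # fraction of the download completed so far, clipped to 100 %
    percent = min(int(count * block_size * 100 / total_size), 100)
    print(f"\r{percent}%", end="")


# hypothetical call; uncomment with a real URL and target path
# urllib.request.urlretrieve("https://example.com/data.csv", "data.csv", reporthook=dl_progress)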
@@ -121,10 +136,11 @@ def progress_retrieve(url, file):
def generate_periodic_profiles(dt_index, nodes, weekly_profile, localize=None):
    """
    Given a 24*7 long list of weekly hourly profiles, generate this for each
    country for the period dt_index, taking account of time zones and summer
    time.
    """
    weekly_profile = pd.Series(weekly_profile, range(24 * 7))

    week_df = pd.DataFrame(index=dt_index, columns=nodes)
@@ -150,4 +166,4 @@ def update_config_with_sector_opts(config, sector_opts):
    for o in sector_opts.split("-"):
        if o.startswith("CF+"):
            l = o.split("+")[1:]
            update_config(config, parse(l))
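A hedged usage sketch for generate_periodic_profiles as defined above: it expects an hourly, timezone-aware DatetimeIndex, node names whose first two characters are a country code, and a 168-entry weekly profile (24 hours x 7 days). The nodes and profile values below are invented, and the import assumes the scripts directory is on the Python path:

import pandas as pd

from helper import generate_periodic_profiles  # assumes helper.py is importable

# one week of hourly UTC snapshots and two made-up nodes
dt_index = pd.date_range("2013-01-01", periods=168, freq="H", tz="UTC")
nodes = ["DE0 0", "FR0 0"]

# flat weekday/weekend split: 1.0 on weekdays, 0.5 at the weekend
weekly_profile = [1.0] * (24 * 5) + [0.5] * (24 * 2)

print(generate_periodic_profiles(dt_index, nodes, weekly_profile).head())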


@@ -1,23 +1,20 @@
# -*- coding: utf-8 -*-
import logging

logger = logging.getLogger(__name__)

import sys

import numpy as np
import pandas as pd
import pypsa
import yaml
from helper import override_component_attrs
from prepare_sector_network import prepare_costs

idx = pd.IndexSlice

opt_name = {"Store": "e", "Line": "s", "Transformer": "s"}
def assign_carriers(n):
@@ -26,20 +23,25 @@ def assign_carriers(n):
def assign_locations(n):
    for c in n.iterate_components(n.one_port_components | n.branch_components):
        ifind = pd.Series(c.df.index.str.find(" ", start=4), c.df.index)

        for i in ifind.unique():
            names = ifind.index[ifind == i]

            if i == -1:
                c.df.loc[names, "location"] = ""
            else:
                c.df.loc[names, "location"] = names.str[:i]


def calculate_nodal_cfs(n, label, nodal_cfs):
    # Beware this also has extraneous locations for country (e.g. biomass) or continent-wide (e.g. fossil gas/oil) stuff
    for c in n.iterate_components(
        (n.branch_components ^ {"Line", "Transformer"})
        | n.controllable_one_port_components ^ {"Load", "StorageUnit"}
    ):
        capacities_c = c.df.groupby(["location", "carrier"])[
            opt_name.get(c.name, "p") + "_nom_opt"
        ].sum()

        if c.name == "Link":
            p = c.pnl.p0.abs().mean()
@@ -53,21 +55,27 @@ def calculate_nodal_cfs(n, label, nodal_cfs):
            c.df["p"] = p

        p_c = c.df.groupby(["location", "carrier"])["p"].sum()

        cf_c = p_c / capacities_c

        index = pd.MultiIndex.from_tuples(
            [(c.list_name,) + t for t in cf_c.index.to_list()]
        )
        nodal_cfs = nodal_cfs.reindex(index.union(nodal_cfs.index))
        nodal_cfs.loc[index, label] = cf_c.values

    return nodal_cfs


def calculate_cfs(n, label, cfs):
    for c in n.iterate_components(
        n.branch_components
        | n.controllable_one_port_components ^ {"Load", "StorageUnit"}
    ):
        capacities_c = (
            c.df[opt_name.get(c.name, "p") + "_nom_opt"].groupby(c.df.carrier).sum()
        )

        if c.name in ["Link", "Line", "Transformer"]:
            p = c.pnl.p0.abs().mean()
        elif c.name == "Store":
            p = c.pnl.e.abs().mean()
@@ -76,25 +84,31 @@ def calculate_cfs(n, label, cfs):
        p_c = p.groupby(c.df.carrier).sum()

        cf_c = p_c / capacities_c

        cf_c = pd.concat([cf_c], keys=[c.list_name])

        cfs = cfs.reindex(cf_c.index.union(cfs.index))

        cfs.loc[cf_c.index, label] = cf_c

    return cfs


def calculate_nodal_costs(n, label, nodal_costs):
    # Beware this also has extraneous locations for country (e.g. biomass) or continent-wide (e.g. fossil gas/oil) stuff
    for c in n.iterate_components(
        n.branch_components | n.controllable_one_port_components ^ {"Load"}
    ):
        c.df["capital_costs"] = (
            c.df.capital_cost * c.df[opt_name.get(c.name, "p") + "_nom_opt"]
        )
        capital_costs = c.df.groupby(["location", "carrier"])["capital_costs"].sum()
        index = pd.MultiIndex.from_tuples(
            [(c.list_name, "capital") + t for t in capital_costs.index.to_list()]
        )
        nodal_costs = nodal_costs.reindex(index.union(nodal_costs.index))
        nodal_costs.loc[index, label] = capital_costs.values

        if c.name == "Link":
            p = c.pnl.p0.multiply(n.snapshot_weightings.generators, axis=0).sum()
@@ -102,19 +116,23 @@ def calculate_nodal_costs(n, label, nodal_costs):
            continue
        elif c.name == "StorageUnit":
            p_all = c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0)
            p_all[p_all < 0.0] = 0.0
            p = p_all.sum()
        else:
            p = c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0).sum()

        # correct sequestration cost
        if c.name == "Store":
            items = c.df.index[
                (c.df.carrier == "co2 stored") & (c.df.marginal_cost <= -100.0)
            ]
            c.df.loc[items, "marginal_cost"] = -20.0

        c.df["marginal_costs"] = p * c.df.marginal_cost
        marginal_costs = c.df.groupby(["location", "carrier"])["marginal_costs"].sum()
        index = pd.MultiIndex.from_tuples(
            [(c.list_name, "marginal") + t for t in marginal_costs.index.to_list()]
        )
        nodal_costs = nodal_costs.reindex(index.union(nodal_costs.index))
        nodal_costs.loc[index, label] = marginal_costs.values
@@ -122,9 +140,10 @@ def calculate_nodal_costs(n, label, nodal_costs):
def calculate_costs(n, label, costs):
    for c in n.iterate_components(
        n.branch_components | n.controllable_one_port_components ^ {"Load"}
    ):
        capital_costs = c.df.capital_cost * c.df[opt_name.get(c.name, "p") + "_nom_opt"]
        capital_costs_grouped = capital_costs.groupby(c.df.carrier).sum()

        capital_costs_grouped = pd.concat([capital_costs_grouped], keys=["capital"])
@@ -140,17 +159,19 @@ def calculate_costs(n, label, costs):
            continue
        elif c.name == "StorageUnit":
            p_all = c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0)
            p_all[p_all < 0.0] = 0.0
            p = p_all.sum()
        else:
            p = c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0).sum()

        # correct sequestration cost
        if c.name == "Store":
            items = c.df.index[
                (c.df.carrier == "co2 stored") & (c.df.marginal_cost <= -100.0)
            ]
            c.df.loc[items, "marginal_cost"] = -20.0

        marginal_costs = p * c.df.marginal_cost

        marginal_costs_grouped = marginal_costs.groupby(c.df.carrier).sum()
@@ -159,54 +180,79 @@ def calculate_costs(n, label, costs):
        costs = costs.reindex(marginal_costs_grouped.index.union(costs.index))

        costs.loc[marginal_costs_grouped.index, label] = marginal_costs_grouped

    # add back in all hydro
    # costs.loc[("storage_units", "capital", "hydro"),label] = (0.01)*2e6*n.storage_units.loc[n.storage_units.group=="hydro", "p_nom"].sum()
    # costs.loc[("storage_units", "capital", "PHS"),label] = (0.01)*2e6*n.storage_units.loc[n.storage_units.group=="PHS", "p_nom"].sum()
    # costs.loc[("generators", "capital", "ror"),label] = (0.02)*3e6*n.generators.loc[n.generators.group=="ror", "p_nom"].sum()

    return costs


def calculate_cumulative_cost():
    planning_horizons = snakemake.config["scenario"]["planning_horizons"]

    cumulative_cost = pd.DataFrame(
        index=df["costs"].sum().index,
        columns=pd.Series(data=np.arange(0, 0.1, 0.01), name="social discount rate"),
    )

    # discount cost and express them in money value of planning_horizons[0]
    for r in cumulative_cost.columns:
        cumulative_cost[r] = [
            df["costs"].sum()[index] / ((1 + r) ** (index[-1] - planning_horizons[0]))
            for index in cumulative_cost.index
        ]

    # integrate cost throughout the transition path
    for r in cumulative_cost.columns:
        for cluster in cumulative_cost.index.get_level_values(level=0).unique():
            for lv in cumulative_cost.index.get_level_values(level=1).unique():
                for sector_opts in cumulative_cost.index.get_level_values(
                    level=2
                ).unique():
                    cumulative_cost.loc[
                        (cluster, lv, sector_opts, "cumulative cost"), r
                    ] = np.trapz(
                        cumulative_cost.loc[
                            idx[cluster, lv, sector_opts, planning_horizons], r
                        ].values,
                        x=planning_horizons,
                    )

    return cumulative_cost
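The discounting step in calculate_cumulative_cost above applies the usual present-value formula: a cost incurred in year y is worth cost / (1 + r) ** (y - y0) in money of the first planning horizon y0, and np.trapz then integrates the discounted annual costs along the transition path. A worked miniature with invented numbers:

import numpy as np

planning_horizons = [2030, 2040, 2050]
annual_costs = np.array([100.0, 90.0, 80.0])  # hypothetical system cost per horizon
r = 0.02  # social discount rate

# express each cost in money value of the first horizon
discounted = annual_costs / (1 + r) ** (np.array(planning_horizons) - planning_horizons[0])

# integrate over the path with the trapezoidal rule
cumulative = np.trapz(discounted, x=planning_horizons)
print(discounted.round(2), round(cumulative, 1))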

def calculate_nodal_capacities(n, label, nodal_capacities):
    # Beware this also has extraneous locations for country (e.g. biomass) or continent-wide (e.g. fossil gas/oil) stuff
    for c in n.iterate_components(
        n.branch_components | n.controllable_one_port_components ^ {"Load"}
    ):
        nodal_capacities_c = c.df.groupby(["location", "carrier"])[
            opt_name.get(c.name, "p") + "_nom_opt"
        ].sum()
        index = pd.MultiIndex.from_tuples(
            [(c.list_name,) + t for t in nodal_capacities_c.index.to_list()]
        )
        nodal_capacities = nodal_capacities.reindex(index.union(nodal_capacities.index))
        nodal_capacities.loc[index, label] = nodal_capacities_c.values

    return nodal_capacities


def calculate_capacities(n, label, capacities):
    for c in n.iterate_components(
        n.branch_components | n.controllable_one_port_components ^ {"Load"}
    ):
        capacities_grouped = (
            c.df[opt_name.get(c.name, "p") + "_nom_opt"].groupby(c.df.carrier).sum()
        )
        capacities_grouped = pd.concat([capacities_grouped], keys=[c.list_name])

        capacities = capacities.reindex(
            capacities_grouped.index.union(capacities.index)
        )

        capacities.loc[capacities_grouped.index, label] = capacities_grouped
@@ -214,28 +260,42 @@ def calculate_capacities(n, label, capacities):
def calculate_curtailment(n, label, curtailment):
    avail = (
        n.generators_t.p_max_pu.multiply(n.generators.p_nom_opt)
        .sum()
        .groupby(n.generators.carrier)
        .sum()
    )
    used = n.generators_t.p.sum().groupby(n.generators.carrier).sum()

    curtailment[label] = (((avail - used) / avail) * 100).round(3)

    return curtailment
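Curtailment as computed above is the share of available energy that was not dispatched, in percent: 100 * (avail - used) / avail. For instance, with made-up numbers:

avail, used = 120.0, 90.0  # available vs. actually dispatched energy for one carrier
print(round(((avail - used) / avail) * 100, 3))  # -> 25.0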

def calculate_energy(n, label, energy):
    for c in n.iterate_components(n.one_port_components | n.branch_components):
        if c.name in n.one_port_components:
            c_energies = (
                c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0)
                .sum()
                .multiply(c.df.sign)
                .groupby(c.df.carrier)
                .sum()
            )
        else:
            c_energies = pd.Series(0.0, c.df.carrier.unique())

            for port in [col[3:] for col in c.df.columns if col[:3] == "bus"]:
                totals = (
                    c.pnl["p" + port]
                    .multiply(n.snapshot_weightings.generators, axis=0)
                    .sum()
                )
                # remove values where bus is missing (bug in nomopyomo)
                no_bus = c.df.index[c.df["bus" + port] == ""]
                totals.loc[no_bus] = n.component_attrs[c.name].loc[
                    "p" + port, "default"
                ]
                c_energies -= totals.groupby(c.df.carrier).sum()

        c_energies = pd.concat([c_energies], keys=[c.list_name])
@@ -248,40 +308,47 @@ def calculate_energy(n, label, energy):
def calculate_supply(n, label, supply):
    """
    Calculate the max dispatch of each component at the buses aggregated by
    carrier.
    """

    bus_carriers = n.buses.carrier.unique()

    for i in bus_carriers:
        bus_map = n.buses.carrier == i
        bus_map.at[""] = False

        for c in n.iterate_components(n.one_port_components):
            items = c.df.index[c.df.bus.map(bus_map).fillna(False)]

            if len(items) == 0:
                continue

            s = (
                c.pnl.p[items]
                .max()
                .multiply(c.df.loc[items, "sign"])
                .groupby(c.df.loc[items, "carrier"])
                .sum()
            )
            s = pd.concat([s], keys=[c.list_name])
            s = pd.concat([s], keys=[i])

            supply = supply.reindex(s.index.union(supply.index))
            supply.loc[s.index, label] = s

        for c in n.iterate_components(n.branch_components):
            for end in [col[3:] for col in c.df.columns if col[:3] == "bus"]:
                items = c.df.index[c.df["bus" + end].map(bus_map).fillna(False)]

                if len(items) == 0:
                    continue

                # lots of sign compensation for direction and to do maximums
                s = (-1) ** (1 - int(end)) * (
                    (-1) ** int(end) * c.pnl["p" + end][items]
                ).max().groupby(c.df.loc[items, "carrier"]).sum()
                s.index = s.index + end
                s = pd.concat([s], keys=[c.list_name])
                s = pd.concat([s], keys=[i])
@@ -291,46 +358,56 @@ def calculate_supply(n, label, supply):
    return supply


def calculate_supply_energy(n, label, supply_energy):
    """
    Calculate the total energy supply/consumption of each component at the buses
    aggregated by carrier.
    """

    bus_carriers = n.buses.carrier.unique()

    for i in bus_carriers:
        bus_map = n.buses.carrier == i
        bus_map.at[""] = False

        for c in n.iterate_components(n.one_port_components):
            items = c.df.index[c.df.bus.map(bus_map).fillna(False)]

            if len(items) == 0:
                continue

            s = (
                c.pnl.p[items]
                .multiply(n.snapshot_weightings.generators, axis=0)
                .sum()
                .multiply(c.df.loc[items, "sign"])
                .groupby(c.df.loc[items, "carrier"])
                .sum()
            )
            s = pd.concat([s], keys=[c.list_name])
            s = pd.concat([s], keys=[i])

            supply_energy = supply_energy.reindex(s.index.union(supply_energy.index))
            supply_energy.loc[s.index, label] = s

        for c in n.iterate_components(n.branch_components):
            for end in [col[3:] for col in c.df.columns if col[:3] == "bus"]:
                items = c.df.index[c.df["bus" + str(end)].map(bus_map).fillna(False)]

                if len(items) == 0:
                    continue

                s = (-1) * c.pnl["p" + end][items].multiply(
                    n.snapshot_weightings.generators, axis=0
                ).sum().groupby(c.df.loc[items, "carrier"]).sum()
                s.index = s.index + end
                s = pd.concat([s], keys=[c.list_name])
                s = pd.concat([s], keys=[i])

                supply_energy = supply_energy.reindex(
                    s.index.union(supply_energy.index)
                )
                supply_energy.loc[s.index, label] = s
@@ -338,21 +415,24 @@ def calculate_supply_energy(n, label, supply_energy):
def calculate_metrics(n, label, metrics):
    metrics_list = [
        "line_volume",
        "line_volume_limit",
        "line_volume_AC",
        "line_volume_DC",
        "line_volume_shadow",
        "co2_shadow",
    ]

    metrics = metrics.reindex(pd.Index(metrics_list).union(metrics.index))

    metrics.at["line_volume_DC", label] = (n.links.length * n.links.p_nom_opt)[
        n.links.carrier == "DC"
    ].sum()
    metrics.at["line_volume_AC", label] = (n.lines.length * n.lines.s_nom_opt).sum()
    metrics.at["line_volume", label] = metrics.loc[
        ["line_volume_AC", "line_volume_DC"], label
    ].sum()

    if hasattr(n, "line_volume_limit"):
        metrics.at["line_volume_limit", label] = n.line_volume_limit
@@ -365,10 +445,9 @@ def calculate_metrics(n, label, metrics):
def calculate_prices(n, label, prices):
    prices = prices.reindex(prices.index.union(n.buses.carrier.unique()))

    # WARNING: this is time-averaged, see weighted_prices for load-weighted average
    prices[label] = n.buses_t.marginal_price.mean().groupby(n.buses.carrier).mean()

    return prices
@@ -377,32 +456,42 @@ def calculate_prices(n, label, prices):
def calculate_weighted_prices(n, label, weighted_prices):
    # Warning: doesn't include storage units as loads

    weighted_prices = weighted_prices.reindex(
        pd.Index(
            [
                "electricity",
                "heat",
                "space heat",
                "urban heat",
                "space urban heat",
                "gas",
                "H2",
            ]
        )
    )

    link_loads = {
        "electricity": [
            "heat pump",
            "resistive heater",
            "battery charger",
            "H2 Electrolysis",
        ],
        "heat": ["water tanks charger"],
        "urban heat": ["water tanks charger"],
        "space heat": [],
        "space urban heat": [],
        "gas": ["OCGT", "gas boiler", "CHP electric", "CHP heat"],
        "H2": ["Sabatier", "H2 Fuel Cell"],
    }

    for carrier in link_loads:
        if carrier == "electricity":
            suffix = ""
        elif carrier[:5] == "space":
            suffix = carrier[5:]
        else:
            suffix = " " + carrier

        buses = n.buses.index[n.buses.index.str[2:] == suffix]
@@ -410,28 +499,33 @@ def calculate_weighted_prices(n, label, weighted_prices):
            continue

        if carrier in ["H2", "gas"]:
            load = pd.DataFrame(index=n.snapshots, columns=buses, data=0.0)
        elif carrier[:5] == "space":
            load = heat_demand_df[buses.str[:2]].rename(
                columns=lambda i: str(i) + suffix
            )
        else:
            load = n.loads_t.p_set[buses]

        for tech in link_loads[carrier]:
            names = n.links.index[n.links.index.to_series().str[-len(tech) :] == tech]

            if names.empty:
                continue

            load += (
                n.links_t.p0[names].groupby(n.links.loc[names, "bus0"], axis=1).sum()
            )

        # Add H2 Store when charging
        # if carrier == "H2":
        #     stores = n.stores_t.p[buses+ " Store"].groupby(n.stores.loc[buses+ " Store", "bus"],axis=1).sum(axis=1)
        #     stores[stores > 0.] = 0.
        #     load += -stores

        weighted_prices.loc[carrier, label] = (
            load * n.buses_t.marginal_price[buses]
        ).sum().sum() / load.sum().sum()

        # still have no idea what this is for, only for debug reasons.
        if carrier[:5] == "space":
@@ -455,21 +549,24 @@ def calculate_market_values(n, label, market_values):
    market_values = market_values.reindex(market_values.index.union(techs))

    for tech in techs:
        gens = generators[n.generators.loc[generators, "carrier"] == tech]

        dispatch = (
            n.generators_t.p[gens]
            .groupby(n.generators.loc[gens, "bus"], axis=1)
            .sum()
            .reindex(columns=buses, fill_value=0.0)
        )

        revenue = dispatch * n.buses_t.marginal_price[buses]

        market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum()

    ## Now do market value of links ##

    for i in ["0", "1"]:
        all_links = n.links.index[n.buses.loc[n.links["bus" + i], "carrier"] == carrier]

        techs = n.links.loc[all_links, "carrier"].value_counts().index
@@ -478,39 +575,51 @@ def calculate_market_values(n, label, market_values):
        for tech in techs:
            links = all_links[n.links.loc[all_links, "carrier"] == tech]

            dispatch = (
                n.links_t["p" + i][links]
                .groupby(n.links.loc[links, "bus" + i], axis=1)
                .sum()
                .reindex(columns=buses, fill_value=0.0)
            )

            revenue = dispatch * n.buses_t.marginal_price[buses]

            market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum()

    return market_values


def calculate_price_statistics(n, label, price_statistics):
    price_statistics = price_statistics.reindex(
        price_statistics.index.union(
            pd.Index(["zero_hours", "mean", "standard_deviation"])
        )
    )

    buses = n.buses.index[n.buses.carrier == "AC"]

    threshold = 0.1  # higher than phoney marginal_cost of wind/solar

    df = pd.DataFrame(data=0.0, columns=buses, index=n.snapshots)

    df[n.buses_t.marginal_price[buses] < threshold] = 1.0

    price_statistics.at["zero_hours", label] = df.sum().sum() / (
        df.shape[0] * df.shape[1]
    )

    price_statistics.at["mean", label] = (
        n.buses_t.marginal_price[buses].unstack().mean()
    )

    price_statistics.at["standard_deviation", label] = (
        n.buses_t.marginal_price[buses].unstack().std()
    )

    return price_statistics
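calculate_market_values above uses the standard definition of market value: total revenue divided by total dispatch, i.e. the generation-weighted average price. A minimal numeric sketch with invented data:

import pandas as pd

# dispatch and nodal price for two snapshots at one bus (made-up values)
dispatch = pd.DataFrame({"bus0": [10.0, 0.0]})
price = pd.DataFrame({"bus0": [50.0, 10.0]})

revenue = dispatch * price
print(revenue.sum().sum() / dispatch.sum().sum())  # 500 / 10 = 50.0 EUR/MWh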

def make_summaries(networks_dict):
    outputs = [
        "nodal_costs",
        "nodal_capacities",
@@ -530,8 +639,7 @@ def make_summaries(networks_dict):
    ]

    columns = pd.MultiIndex.from_tuples(
        networks_dict.keys(), names=["cluster", "lv", "opt", "planning_horizon"]
    )

    df = {}
@@ -560,41 +668,48 @@ def to_csv(df):
if __name__ == "__main__":
    if "snakemake" not in globals():
        from helper import mock_snakemake

        snakemake = mock_snakemake("make_summary")

    logging.basicConfig(level=snakemake.config["logging_level"])

    networks_dict = {
        (cluster, lv, opt + sector_opt, planning_horizon): snakemake.config[
            "results_dir"
        ]
        + snakemake.config["run"]
        + f"/postnetworks/elec_s{simpl}_{cluster}_lv{lv}_{opt}_{sector_opt}_{planning_horizon}.nc"
        for simpl in snakemake.config["scenario"]["simpl"]
        for cluster in snakemake.config["scenario"]["clusters"]
        for opt in snakemake.config["scenario"]["opts"]
        for sector_opt in snakemake.config["scenario"]["sector_opts"]
        for lv in snakemake.config["scenario"]["lv"]
        for planning_horizon in snakemake.config["scenario"]["planning_horizons"]
    }

    Nyears = 1

    costs_db = prepare_costs(
        snakemake.input.costs,
        snakemake.config["costs"]["USD2013_to_EUR2013"],
        snakemake.config["costs"]["discountrate"],
        Nyears,
        snakemake.config["costs"]["lifetime"],
    )

    df = make_summaries(networks_dict)

    df["metrics"].loc["total costs"] = df["costs"].sum()

    to_csv(df)

    if snakemake.config["foresight"] == "myopic":
        cumulative_cost = calculate_cumulative_cost()
        cumulative_cost.to_csv(
            snakemake.config["summary_dir"]
            + "/"
            + snakemake.config["run"]
            + "/csvs/cumulative_cost.csv"
        )
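The dict comprehension building networks_dict above enumerates every scenario combination and maps it to the matching post-network file. With hypothetical scenario values, a single entry expands like this:

# hypothetical scenario values, not taken from any real config
results_dir, run = "results/", "my-run"
simpl, cluster, lv = "", 37, "1.0"
opt, sector_opt, planning_horizon = "", "Co2L0-T-H", 2030

key = (cluster, lv, opt + sector_opt, planning_horizon)
path = (
    results_dir
    + run
    + f"/postnetworks/elec_s{simpl}_{cluster}_lv{lv}_{opt}_{sector_opt}_{planning_horizon}.nc"
)
print(key, path)
# (37, '1.0', 'Co2L0-T-H', 2030) results/my-run/postnetworks/elec_s_37_lv1.0__Co2L0-T-H_2030.nc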


@@ -1,20 +1,19 @@
# -*- coding: utf-8 -*-
import logging

logger = logging.getLogger(__name__)

import cartopy.crs as ccrs
import geopandas as gpd
import matplotlib.pyplot as plt
import pandas as pd
import pypsa
from helper import override_component_attrs
from make_summary import assign_carriers
from plot_summary import preferred_order, rename_techs
from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches

plt.style.use(["ggplot", "matplotlibrc"])
def rename_techs_tyndp(tech):
@@ -46,15 +45,20 @@ def assign_location(n):
        ifind = pd.Series(c.df.index.str.find(" ", start=4), c.df.index)
        for i in ifind.value_counts().index:
            # these have already been assigned defaults
            if i == -1:
                continue
            names = ifind.index[ifind == i]
            c.df.loc[names, "location"] = names.str[:i]


def plot_map(
    network,
    components=["links", "stores", "storage_units", "generators"],
    bus_size_factor=1.7e10,
    transmission=False,
    with_legend=True,
):
    tech_colors = snakemake.config["plotting"]["tech_colors"]

    n = network.copy()
    assign_location(n)
@@ -73,19 +77,24 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator
        attr = "e_nom_opt" if comp == "stores" else "p_nom_opt"

        costs_c = (
            (df_c.capital_cost * df_c[attr])
            .groupby([df_c.location, df_c.nice_group])
            .sum()
            .unstack()
            .fillna(0.0)
        )
        costs = pd.concat([costs, costs_c], axis=1)

        logger.debug(f"{comp}, {costs}")

    costs = costs.groupby(costs.columns, axis=1).sum()

    costs.drop(list(costs.columns[(costs == 0.0).all()]), axis=1, inplace=True)

    new_columns = preferred_order.intersection(costs.columns).append(
        costs.columns.difference(preferred_order)
    )
    costs = costs[new_columns]

    for item in new_columns:
@@ -95,12 +104,16 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator
    costs = costs.stack()  # .sort_index()

    # hack because impossible to drop buses...
    eu_location = snakemake.config["plotting"].get(
        "eu_node_location", dict(x=-5.5, y=46)
    )
    n.buses.loc["EU gas", "x"] = eu_location["x"]
    n.buses.loc["EU gas", "y"] = eu_location["y"]

    n.links.drop(
        n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")],
        inplace=True,
    )

    # drop non-bus
    to_drop = costs.index.levels[0].symmetric_difference(n.buses.index)
@@ -111,13 +124,13 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator
    # make sure they are removed from index
    costs.index = pd.MultiIndex.from_tuples(costs.index.values)

    threshold = 100e6  # 100 mEUR/a
    carriers = costs.groupby(level=1).sum()
    carriers = carriers.where(carriers > threshold).dropna()
    carriers = list(carriers.index)

    # PDF has minimum width, so set these to zero
    line_lower_threshold = 500.0
    line_upper_threshold = 1e4
    linewidth_factor = 4e3
    ac_color = "rosybrown"
@@ -133,7 +146,7 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator
            line_widths = n.lines.s_nom_opt
            link_widths = n.links.p_nom_opt
            linewidth_factor = 2e3
            line_lower_threshold = 0.0
            title = "current grid"
    else:
        line_widths = n.lines.s_nom_opt - n.lines.s_nom_min
@@ -144,12 +157,12 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator
            line_widths = n.lines.s_nom_opt
            link_widths = n.links.p_nom_opt
            title = "total grid"

    line_widths = line_widths.clip(line_lower_threshold, line_upper_threshold)
    link_widths = link_widths.clip(line_lower_threshold, line_upper_threshold)

    line_widths = line_widths.replace(line_lower_threshold, 0)
    link_widths = link_widths.replace(line_lower_threshold, 0)

    fig, ax = plt.subplots(subplot_kw={"projection": ccrs.EqualEarth()})
    fig.set_size_inches(7, 6)
@@ -161,12 +174,13 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator
        link_colors=dc_color,
        line_widths=line_widths / linewidth_factor,
        link_widths=link_widths / linewidth_factor,
        ax=ax,
        **map_opts,
    )

    sizes = [20, 10, 5]
    labels = [f"{s} bEUR/a" for s in sizes]
    sizes = [s / bus_size_factor * 1e9 for s in sizes]

    legend_kw = dict(
        loc="upper left",
@@ -174,7 +188,7 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator
        labelspacing=0.8,
        frameon=False,
        handletextpad=0,
        title="system cost",
    )

    add_legend_circles(
@@ -183,13 +197,13 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator
        labels,
        srid=n.srid,
        patch_kw=dict(facecolor="lightgrey"),
        legend_kw=legend_kw,
    )

    sizes = [10, 5]
    labels = [f"{s} GW" for s in sizes]
    scale = 1e3 / linewidth_factor
    sizes = [s * scale for s in sizes]

    legend_kw = dict(
        loc="upper left",
@@ -197,24 +211,19 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator
        frameon=False,
        labelspacing=0.8,
        handletextpad=1,
        title=title,
    )

    add_legend_lines(
        ax, sizes, labels, patch_kw=dict(color="lightgrey"), legend_kw=legend_kw
    )

    legend_kw = dict(
        bbox_to_anchor=(1.52, 1.04),
        frameon=False,
    )

    if with_legend:
        colors = [tech_colors[c] for c in carriers] + [ac_color, dc_color]
        labels = carriers + ["HVAC line", "HVDC link"]
@@ -225,14 +234,12 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator
            legend_kw=legend_kw,
        )

    fig.savefig(snakemake.output.map, transparent=True, bbox_inches="tight")


def group_pipes(df, drop_direction=False):
    """
    Group pipes which connect same buses and return overall capacity.
    """
    if drop_direction:
        positive_order = df.bus0 < df.bus1
@@ -244,16 +251,17 @@ def group_pipes(df, drop_direction=False):
    # there are pipes for each investment period rename to AC buses name for plotting
    df.index = df.apply(
        lambda x: f"H2 pipeline {x.bus0.replace(' H2', '')} -> {x.bus1.replace(' H2', '')}",
        axis=1,
    )
    # group pipe lines connecting the same buses and rename them for plotting
    pipe_capacity = df.groupby(level=0).agg(
        {"p_nom_opt": sum, "bus0": "first", "bus1": "first"}
    )

    return pipe_capacity
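group_pipes (and the retrofitted-pipeline handling further down) first normalises pipe direction so that a pipe A -> B and its reverse B -> A fall under the same key: buses are swapped wherever bus0 >= bus1, then capacities are summed per connection. A small standalone sketch with invented pipe data:

import pandas as pd

pipes = pd.DataFrame(
    {"bus0": ["DE0", "FR0"], "bus1": ["FR0", "DE0"], "p_nom_opt": [10.0, 5.0]}
)

# swap bus0/bus1 where the ordering is reversed so both rows describe the same edge
positive_order = pipes.bus0 < pipes.bus1
swap = {"bus0": "bus1", "bus1": "bus0"}
pipes = pd.concat([pipes[positive_order], pipes[~positive_order].rename(columns=swap)])

# one aggregate capacity per undirected connection
print(pipes.groupby(["bus0", "bus1"]).p_nom_opt.sum())  # DE0, FR0 -> 15.0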

def plot_h2_map(network, regions):
    n = network.copy()
    if "H2 pipeline" not in n.links.carrier.unique():
        return
@@ -261,7 +269,11 @@ def plot_h2_map(network, regions):
    assign_location(n)

    h2_storage = n.stores.query("carrier == 'H2'")
    regions["H2"] = h2_storage.rename(
        index=h2_storage.bus.map(n.buses.location)
    ).e_nom_opt.div(
        1e6
    )  # TWh
    regions["H2"] = regions["H2"].where(regions["H2"] > 0.1)

    bus_size_factor = 1e5
@@ -276,26 +288,33 @@ def plot_h2_map(network, regions):
    elec = n.links[n.links.carrier.isin(carriers)].index

    bus_sizes = (
        n.links.loc[elec, "p_nom_opt"].groupby([n.links["bus0"], n.links.carrier]).sum()
        / bus_size_factor
    )

    # make a fake MultiIndex so that area is correct for legend
    bus_sizes.rename(index=lambda x: x.replace(" H2", ""), level=0, inplace=True)

    # drop all links which are not H2 pipelines
    n.links.drop(
        n.links.index[~n.links.carrier.str.contains("H2 pipeline")], inplace=True
    )

    h2_new = n.links[n.links.carrier == "H2 pipeline"]
    h2_retro = n.links[n.links.carrier == "H2 pipeline retrofitted"]

    if snakemake.config["foresight"] == "myopic":
        # sum capacity for pipelines from different investment periods
        h2_new = group_pipes(h2_new)

        if not h2_retro.empty:
            h2_retro = (
                group_pipes(h2_retro, drop_direction=True)
                .reindex(h2_new.index)
                .fillna(0)
            )

    if not h2_retro.empty:
        positive_order = h2_retro.bus0 < h2_retro.bus1
        h2_retro_p = h2_retro[positive_order]
        swap_buses = {"bus0": "bus1", "bus1": "bus0"}
@ -305,7 +324,7 @@ def plot_h2_map(network, regions):
        h2_retro["index_orig"] = h2_retro.index
        h2_retro.index = h2_retro.apply(
            lambda x: f"H2 pipeline {x.bus0.replace(' H2', '')} -> {x.bus1.replace(' H2', '')}",
            axis=1,
        )

        retro_w_new_i = h2_retro.index.intersection(h2_new.index)

@@ -319,19 +338,20 @@ def plot_h2_map(network, regions):
        h2_total = pd.concat(to_concat).p_nom_opt.groupby(level=0).sum()

    else:
        h2_total = h2_new.p_nom_opt

    link_widths_total = h2_total / linewidth_factor

    n.links.rename(index=lambda x: x.split("-2")[0], inplace=True)
    n.links = n.links.groupby(level=0).first()
    link_widths_total = link_widths_total.reindex(n.links.index).fillna(0.0)
    link_widths_total[n.links.p_nom_opt < line_lower_threshold] = 0.0

    retro = n.links.p_nom_opt.where(
        n.links.carrier == "H2 pipeline retrofitted", other=0.0
    )
    link_widths_retro = retro / linewidth_factor
    link_widths_retro[n.links.p_nom_opt < line_lower_threshold] = 0.0

    n.links.bus0 = n.links.bus0.str.replace(" H2", "")
    n.links.bus1 = n.links.bus1.str.replace(" H2", "")
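
The positive_order / swap_buses logic above gives every pipeline a canonical direction, so "A -> B" and "B -> A" entries can be matched and summed. A hedged sketch of the same idea on a toy frame (the recombination step sits in an elided hunk; names here are illustrative):

    import pandas as pd

    links = pd.DataFrame(
        {
            "bus0": ["FR0 H2", "DE0 H2"],
            "bus1": ["DE0 H2", "FR0 H2"],
            "p_nom_opt": [3.0, 4.0],
        }
    )
    positive_order = links.bus0 < links.bus1
    swap_buses = {"bus0": "bus1", "bus1": "bus0"}
    # flip the rows that run in the "negative" direction, then recombine
    links = pd.concat(
        [links[positive_order], links[~positive_order].rename(columns=swap_buses)]
    )
    # both rows now read bus0 = "DE0 H2", bus1 = "FR0 H2" and can be grouped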

@@ -339,18 +359,12 @@ def plot_h2_map(network, regions):

    proj = ccrs.EqualEarth()
    regions = regions.to_crs(proj.proj4_init)

    fig, ax = plt.subplots(figsize=(7, 6), subplot_kw={"projection": proj})

    color_h2_pipe = "#b3f3f4"
    color_retrofit = "#499a9c"

    bus_colors = {"H2 Electrolysis": "#ff29d9", "H2 Fuel Cell": "#805394"}

    n.plot(
        geomap=True,

@@ -360,7 +374,7 @@ def plot_h2_map(network, regions):
        link_widths=link_widths_total,
        branch_components=["Link"],
        ax=ax,
        **map_opts,
    )

    n.plot(

@@ -371,13 +385,13 @@ def plot_h2_map(network, regions):
        branch_components=["Link"],
        ax=ax,
        color_geomap=False,
        boundaries=map_opts["boundaries"],
    )

    regions.plot(
        ax=ax,
        column="H2",
        cmap="Blues",
        linewidths=0,
        legend=True,
        vmax=6,

@@ -391,7 +405,7 @@ def plot_h2_map(network, regions):
    sizes = [50, 10]
    labels = [f"{s} GW" for s in sizes]
    sizes = [s / bus_size_factor * 1e3 for s in sizes]

    legend_kw = dict(
        loc="upper left",

@@ -401,16 +415,19 @@ def plot_h2_map(network, regions):
        frameon=False,
    )

    add_legend_circles(
        ax,
        sizes,
        labels,
        srid=n.srid,
        patch_kw=dict(facecolor="lightgrey"),
        legend_kw=legend_kw,
    )

    sizes = [30, 10]
    labels = [f"{s} GW" for s in sizes]
    scale = 1e3 / linewidth_factor
    sizes = [s * scale for s in sizes]

    legend_kw = dict(
        loc="upper left",

@@ -424,7 +441,7 @@ def plot_h2_map(network, regions):
        ax,
        sizes,
        labels,
        patch_kw=dict(color="lightgrey"),
        legend_kw=legend_kw,
    )

@@ -438,23 +455,16 @@ def plot_h2_map(network, regions):
        frameon=False,
    )

    add_legend_patches(ax, colors, labels, legend_kw=legend_kw)

    ax.set_facecolor("white")

    fig.savefig(
        snakemake.output.map.replace("-costs-all", "-h2_network"), bbox_inches="tight"
    )


def plot_ch4_map(network):
    n = network.copy()

    if "gas pipeline" not in n.links.carrier.unique():

@@ -470,22 +480,54 @@ def plot_ch4_map(network):
    # Drop non-electric buses so they don't clutter the plot
    n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True)

    fossil_gas_i = n.generators[n.generators.carrier == "gas"].index
    fossil_gas = (
        n.generators_t.p.loc[:, fossil_gas_i]
        .mul(n.snapshot_weightings.generators, axis=0)
        .sum()
        .groupby(n.generators.loc[fossil_gas_i, "bus"])
        .sum()
        / bus_size_factor
    )
    fossil_gas.rename(index=lambda x: x.replace(" gas", ""), inplace=True)
    fossil_gas = fossil_gas.reindex(n.buses.index).fillna(0)
    # make a fake MultiIndex so that area is correct for legend
    fossil_gas.index = pd.MultiIndex.from_product([fossil_gas.index, ["fossil gas"]])

    methanation_i = n.links[n.links.carrier.isin(["helmeth", "Sabatier"])].index
    methanation = (
        abs(
            n.links_t.p1.loc[:, methanation_i].mul(
                n.snapshot_weightings.generators, axis=0
            )
        )
        .sum()
        .groupby(n.links.loc[methanation_i, "bus1"])
        .sum()
        / bus_size_factor
    )
    methanation = (
        methanation.groupby(methanation.index)
        .sum()
        .rename(index=lambda x: x.replace(" gas", ""))
    )
    # make a fake MultiIndex so that area is correct for legend
    methanation.index = pd.MultiIndex.from_product([methanation.index, ["methanation"]])

    biogas_i = n.stores[n.stores.carrier == "biogas"].index
    biogas = (
        n.stores_t.p.loc[:, biogas_i]
        .mul(n.snapshot_weightings.generators, axis=0)
        .sum()
        .groupby(n.stores.loc[biogas_i, "bus"])
        .sum()
        / bus_size_factor
    )
    biogas = (
        biogas.groupby(biogas.index)
        .sum()
        .rename(index=lambda x: x.replace(" biogas", ""))
    )
    # make a fake MultiIndex so that area is correct for legend
    biogas.index = pd.MultiIndex.from_product([biogas.index, ["biogas"]])
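
The repeated "fake MultiIndex" trick above targets PyPSA's n.plot(bus_sizes=...) convention: a Series indexed by (bus, carrier) pairs is drawn as one circle per bus with a slice per carrier, so each gas source keeps a legend-consistent area. A toy sketch of how such a Series is assembled (the concat into bus_sizes happens in an elided hunk; the data here are illustrative):

    import pandas as pd

    fossil_gas = pd.Series(
        [10.0], index=pd.MultiIndex.from_product([["DE0"], ["fossil gas"]])
    )
    methanation = pd.Series(
        [2.0], index=pd.MultiIndex.from_product([["DE0"], ["methanation"]])
    )
    biogas = pd.Series([1.0], index=pd.MultiIndex.from_product([["DE0"], ["biogas"]]))

    bus_sizes = pd.concat([fossil_gas, methanation, biogas])  # (bus, carrier) -> size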

@@ -496,22 +538,22 @@ def plot_ch4_map(network):

    n.links.drop(to_remove, inplace=True)

    link_widths_rem = n.links.p_nom_opt / linewidth_factor
    link_widths_rem[n.links.p_nom_opt < line_lower_threshold] = 0.0

    link_widths_orig = n.links.p_nom / linewidth_factor
    link_widths_orig[n.links.p_nom < line_lower_threshold] = 0.0

    max_usage = n.links_t.p0.abs().max(axis=0)
    link_widths_used = max_usage / linewidth_factor
    link_widths_used[max_usage < line_lower_threshold] = 0.0

    tech_colors = snakemake.config["plotting"]["tech_colors"]

    pipe_colors = {
        "gas pipeline": "#f08080",
        "gas pipeline new": "#c46868",
        "gas pipeline (in 2020)": "lightgrey",
        "gas pipeline (available)": "#e8d1d1",
    }

    link_color_used = n.links.carrier.map(pipe_colors)

@@ -522,88 +564,88 @@ def plot_ch4_map(network):
    bus_colors = {
        "fossil gas": tech_colors["fossil gas"],
        "methanation": tech_colors["methanation"],
        "biogas": "seagreen",
    }

    fig, ax = plt.subplots(figsize=(7, 6), subplot_kw={"projection": ccrs.EqualEarth()})

    n.plot(
        bus_sizes=bus_sizes,
        bus_colors=bus_colors,
        link_colors=pipe_colors["gas pipeline (in 2020)"],
        link_widths=link_widths_orig,
        branch_components=["Link"],
        ax=ax,
        **map_opts,
    )

    n.plot(
        ax=ax,
        bus_sizes=0.0,
        link_colors=pipe_colors["gas pipeline (available)"],
        link_widths=link_widths_rem,
        branch_components=["Link"],
        color_geomap=False,
        boundaries=map_opts["boundaries"],
    )

    n.plot(
        ax=ax,
        bus_sizes=0.0,
        link_colors=link_color_used,
        link_widths=link_widths_used,
        branch_components=["Link"],
        color_geomap=False,
        boundaries=map_opts["boundaries"],
    )

    sizes = [100, 10]
    labels = [f"{s} TWh" for s in sizes]
    sizes = [s / bus_size_factor * 1e6 for s in sizes]

    legend_kw = dict(
        loc="upper left",
        bbox_to_anchor=(0, 1.03),
        labelspacing=0.8,
        frameon=False,
        handletextpad=1,
        title="gas sources",
    )

    add_legend_circles(
        ax,
        sizes,
        labels,
        srid=n.srid,
        patch_kw=dict(facecolor="lightgrey"),
        legend_kw=legend_kw,
    )

    sizes = [50, 10]
    labels = [f"{s} GW" for s in sizes]
    scale = 1e3 / linewidth_factor
    sizes = [s * scale for s in sizes]

    legend_kw = dict(
        loc="upper left",
        bbox_to_anchor=(0.25, 1.03),
        frameon=False,
        labelspacing=0.8,
        handletextpad=1,
        title="gas pipeline",
    )

    add_legend_lines(
        ax,
        sizes,
        labels,
        patch_kw=dict(color="lightgrey"),
        legend_kw=legend_kw,
    )

    colors = list(pipe_colors.values()) + list(bus_colors.values())
    labels = list(pipe_colors.keys()) + list(bus_colors.keys())

    # legend on the side
    # legend_kw = dict(
    #     bbox_to_anchor=(1.47, 1.04),

@@ -611,7 +653,7 @@ def plot_ch4_map(network):
    # )

    legend_kw = dict(
        loc="upper left",
        bbox_to_anchor=(0, 1.24),
        ncol=2,
        frameon=False,

@@ -625,26 +667,21 @@ def plot_ch4_map(network):
    )

    fig.savefig(
        snakemake.output.map.replace("-costs-all", "-ch4_network"), bbox_inches="tight"
    )


def plot_map_without(network):
    n = network.copy()
    assign_location(n)

    # Drop non-electric buses so they don't clutter the plot
    n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True)

    fig, ax = plt.subplots(figsize=(7, 6), subplot_kw={"projection": ccrs.EqualEarth()})

    # PDF has minimum width, so set these to zero
    line_lower_threshold = 200.0
    line_upper_threshold = 1e4
    linewidth_factor = 3e3
    ac_color = "rosybrown"

@@ -652,9 +689,11 @@ def plot_map_without(network):
    # hack because impossible to drop buses...
    if "EU gas" in n.buses.index:
        eu_location = snakemake.config["plotting"].get(
            "eu_node_location", dict(x=-5.5, y=46)
        )
        n.buses.loc["EU gas", "x"] = eu_location["x"]
        n.buses.loc["EU gas", "y"] = eu_location["y"]

    to_drop = n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")]
    n.links.drop(to_drop, inplace=True)

@@ -666,11 +705,11 @@ def plot_map_without(network):
    line_widths = n.lines.s_nom_min
    link_widths = n.links.p_nom_min

    line_widths = line_widths.clip(line_lower_threshold, line_upper_threshold)
    link_widths = link_widths.clip(line_lower_threshold, line_upper_threshold)

    line_widths = line_widths.replace(line_lower_threshold, 0)
    link_widths = link_widths.replace(line_lower_threshold, 0)

    n.plot(
        bus_colors="k",

@@ -678,32 +717,34 @@ def plot_map_without(network):
        link_colors=dc_color,
        line_widths=line_widths / linewidth_factor,
        link_widths=link_widths / linewidth_factor,
        ax=ax,
        **map_opts,
    )

    handles = []
    labels = []

    for s in (10, 5):
        handles.append(
            plt.Line2D([0], [0], color=ac_color, linewidth=s * 1e3 / linewidth_factor)
        )
        labels.append(f"{s} GW")
    l1_1 = ax.legend(
        handles,
        labels,
        loc="upper left",
        bbox_to_anchor=(0.05, 1.01),
        frameon=False,
        labelspacing=0.8,
        handletextpad=1.5,
        title="Today's transmission",
    )
    ax.add_artist(l1_1)

    fig.savefig(snakemake.output.today, transparent=True, bbox_inches="tight")


def plot_series(network, carrier="AC", name="test"):
    n = network.copy()
    assign_location(n)
    assign_carriers(n)

@@ -712,28 +753,41 @@ def plot_series(network, carrier="AC", name="test"):
    supply = pd.DataFrame(index=n.snapshots)
    for c in n.iterate_components(n.branch_components):
        n_port = 4 if c.name == "Link" else 2
        for i in range(n_port):
            supply = pd.concat(
                (
                    supply,
                    (-1)
                    * c.pnl["p" + str(i)]
                    .loc[:, c.df.index[c.df["bus" + str(i)].isin(buses)]]
                    .groupby(c.df.carrier, axis=1)
                    .sum(),
                ),
                axis=1,
            )

    for c in n.iterate_components(n.one_port_components):
        comps = c.df.index[c.df.bus.isin(buses)]
        supply = pd.concat(
            (
                supply,
                ((c.pnl["p"].loc[:, comps]).multiply(c.df.loc[comps, "sign"]))
                .groupby(c.df.carrier, axis=1)
                .sum(),
            ),
            axis=1,
        )

    supply = supply.groupby(rename_techs_tyndp, axis=1).sum()

    both = supply.columns[(supply < 0.0).any() & (supply > 0.0).any()]

    positive_supply = supply[both]
    negative_supply = supply[both]

    positive_supply[positive_supply < 0.0] = 0.0
    negative_supply[negative_supply > 0.0] = 0.0

    supply[both] = positive_supply
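
Splitting supply columns into positive and negative parts, as above, lets the stacked area chart show generation above the zero line and consumption below it for carriers that do both (e.g. storage). A compact sketch of the same split on toy data (the script uses boolean masking rather than clip, but the effect is identical):

    import pandas as pd

    s = pd.DataFrame({"battery": [2.0, -1.5, 0.5]})
    positive = s.clip(lower=0.0)  # keeps the generation side
    negative = s.clip(upper=0.0)  # keeps the consumption side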

@@ -761,48 +815,61 @@ def plot_series(network, carrier="AC", name="test"):

    supply = supply / 1e3

    supply.rename(
        columns={"electricity": "electric demand", "heat": "heat demand"}, inplace=True
    )
    supply.columns = supply.columns.str.replace("residential ", "")
    supply.columns = supply.columns.str.replace("services ", "")
    supply.columns = supply.columns.str.replace("urban decentral ", "decentral ")

    preferred_order = pd.Index(
        [
            "electric demand",
            "transmission lines",
            "hydroelectricity",
            "hydro reservoir",
            "run of river",
            "pumped hydro storage",
            "CHP",
            "onshore wind",
            "offshore wind",
            "solar PV",
            "solar thermal",
            "building retrofitting",
            "ground heat pump",
            "air heat pump",
            "resistive heater",
            "OCGT",
            "gas boiler",
            "gas",
            "natural gas",
            "methanation",
            "hydrogen storage",
            "battery storage",
            "hot water storage",
        ]
    )

    new_columns = preferred_order.intersection(supply.columns).append(
        supply.columns.difference(preferred_order)
    )

    supply = supply.groupby(supply.columns, axis=1).sum()
    fig, ax = plt.subplots()
    fig.set_size_inches((8, 5))

    (
        supply.loc[start:stop, new_columns].plot(
            ax=ax,
            kind="area",
            stacked=True,
            linewidth=0.0,
            color=[
                snakemake.config["plotting"]["tech_colors"][i.replace(suffix, "")]
                for i in new_columns
            ],
        )
    )

    handles, labels = ax.get_legend_handles_labels()

@@ -824,44 +891,53 @@ def plot_series(network, carrier="AC", name="test"):
    ax.set_ylabel("Power [GW]")
    fig.tight_layout()

    fig.savefig(
        "{}{}/maps/series-{}-{}-{}-{}-{}.pdf".format(
            snakemake.config["results_dir"],
            snakemake.config["run"],
            snakemake.wildcards["lv"],
            carrier,
            start,
            stop,
            name,
        ),
        transparent=True,
    )


if __name__ == "__main__":
    if "snakemake" not in globals():
        from helper import mock_snakemake

        snakemake = mock_snakemake(
            "plot_network",
            simpl="",
            clusters="181",
            lv="opt",
            opts="",
            sector_opts="Co2L0-730H-T-H-B-I-A-solar+p3-linemaxext10",
            planning_horizons="2050",
        )

    logging.basicConfig(level=snakemake.config["logging_level"])

    overrides = override_component_attrs(snakemake.input.overrides)
    n = pypsa.Network(snakemake.input.network, override_component_attrs=overrides)

    regions = gpd.read_file(snakemake.input.regions).set_index("name")

    map_opts = snakemake.config["plotting"]["map"]

    plot_map(
        n,
        components=["generators", "links", "stores", "storage_units"],
        bus_size_factor=2e10,
        transmission=False,
    )

    plot_h2_map(n, regions)
    plot_ch4_map(n)
    plot_map_without(n)

    # plot_series(n, carrier="AC", name=suffix)
    # plot_series(n, carrier="heat", name=suffix)
scripts/plot_summary.py

@@ -1,25 +1,27 @@

# -*- coding: utf-8 -*-
import logging

logger = logging.getLogger(__name__)

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

plt.style.use("ggplot")

from helper import update_config_with_sector_opts
from prepare_sector_network import co2_emissions_year


# consolidate and rename
def rename_techs(label):
    prefix_to_remove = [
        "residential ",
        "services ",
        "urban ",
        "rural ",
        "central ",
        "decentral ",
    ]

    rename_if_contains = [

@@ -30,7 +32,7 @@ def rename_techs(label):

        "air heat pump",
        "ground heat pump",
        "resistive heater",
        "Fischer-Tropsch",
    ]

    rename_if_contains_dict = {

@@ -58,151 +60,159 @@ def rename_techs(label):

        "co2 stored": "CO2 sequestration",
        "AC": "transmission lines",
        "DC": "transmission lines",
        "B2B": "transmission lines",
    }

    for ptr in prefix_to_remove:
        if label[: len(ptr)] == ptr:
            label = label[len(ptr) :]

    for rif in rename_if_contains:
        if rif in label:
            label = rif

    for old, new in rename_if_contains_dict.items():
        if old in label:
            label = new

    for old, new in rename.items():
        if old == label:
            label = new

    return label
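
The cascading rules in rename_techs first strip location prefixes, then coarsen labels by substring match and finally by exact match, so many raw carrier names collapse onto one plotted technology. Two illustrative calls, with behaviour inferred from the rules visible above:

    rename_techs("residential urban decentral air heat pump")  # -> "air heat pump"
    rename_techs("co2 stored")  # -> "CO2 sequestration"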

preferred_order = pd.Index(
    [
        "transmission lines",
        "hydroelectricity",
        "hydro reservoir",
        "run of river",
        "pumped hydro storage",
        "solid biomass",
        "biogas",
        "onshore wind",
        "offshore wind",
        "offshore wind (AC)",
        "offshore wind (DC)",
        "solar PV",
        "solar thermal",
        "solar rooftop",
        "solar",
        "building retrofitting",
        "ground heat pump",
        "air heat pump",
        "heat pump",
        "resistive heater",
        "power-to-heat",
        "gas-to-power/heat",
        "CHP",
        "OCGT",
        "gas boiler",
        "gas",
        "natural gas",
        "helmeth",
        "methanation",
        "ammonia",
        "hydrogen storage",
        "power-to-gas",
        "power-to-liquid",
        "battery storage",
        "hot water storage",
        "CO2 sequestration",
    ]
)

def plot_costs():
    cost_df = pd.read_csv(
        snakemake.input.costs, index_col=list(range(3)), header=list(range(n_header))
    )

    df = cost_df.groupby(cost_df.index.get_level_values(2)).sum()

    # convert to billions
    df = df / 1e9

    df = df.groupby(df.index.map(rename_techs)).sum()

    to_drop = df.index[df.max(axis=1) < snakemake.config["plotting"]["costs_threshold"]]

    logger.info(
        f"Dropping technology with costs below {snakemake.config['plotting']['costs_threshold']} EUR billion per year"
    )
    logger.debug(df.loc[to_drop])

    df = df.drop(to_drop)

    logger.info(f"Total system cost of {round(df.sum()[0])} EUR billion per year")

    new_index = preferred_order.intersection(df.index).append(
        df.index.difference(preferred_order)
    )

    new_columns = df.sum().sort_values().index

    fig, ax = plt.subplots(figsize=(12, 8))

    df.loc[new_index, new_columns].T.plot(
        kind="bar",
        ax=ax,
        stacked=True,
        color=[snakemake.config["plotting"]["tech_colors"][i] for i in new_index],
    )

    handles, labels = ax.get_legend_handles_labels()

    handles.reverse()
    labels.reverse()

    ax.set_ylim([0, snakemake.config["plotting"]["costs_max"]])

    ax.set_ylabel("System Cost [EUR billion per year]")

    ax.set_xlabel("")

    ax.grid(axis="x")

    ax.legend(
        handles, labels, ncol=1, loc="upper left", bbox_to_anchor=[1, 1], frameon=False
    )

    fig.savefig(snakemake.output.costs, bbox_inches="tight")


def plot_energy():
    energy_df = pd.read_csv(
        snakemake.input.energy, index_col=list(range(2)), header=list(range(n_header))
    )

    df = energy_df.groupby(energy_df.index.get_level_values(1)).sum()

    # convert MWh to TWh
    df = df / 1e6

    df = df.groupby(df.index.map(rename_techs)).sum()

    to_drop = df.index[
        df.abs().max(axis=1) < snakemake.config["plotting"]["energy_threshold"]
    ]

    logger.info(
        f"Dropping all technology with energy consumption or production below {snakemake.config['plotting']['energy_threshold']} TWh/a"
    )
    logger.debug(df.loc[to_drop])

    df = df.drop(to_drop)

    logger.info(f"Total energy of {round(df.sum()[0])} TWh/a")

    new_index = preferred_order.intersection(df.index).append(
        df.index.difference(preferred_order)
    )

    new_columns = df.columns.sort_values()

    fig, ax = plt.subplots(figsize=(12, 8))

    logger.debug(df.loc[new_index, new_columns])

@@ -210,15 +220,20 @@ def plot_energy():

        kind="bar",
        ax=ax,
        stacked=True,
        color=[snakemake.config["plotting"]["tech_colors"][i] for i in new_index],
    )

    handles, labels = ax.get_legend_handles_labels()

    handles.reverse()
    labels.reverse()

    ax.set_ylim(
        [
            snakemake.config["plotting"]["energy_min"],
            snakemake.config["plotting"]["energy_max"],
        ]
    )

    ax.set_ylabel("Energy [TWh/a]")

@@ -226,48 +241,56 @@ def plot_energy():

    ax.grid(axis="x")

    ax.legend(
        handles, labels, ncol=1, loc="upper left", bbox_to_anchor=[1, 1], frameon=False
    )

    fig.savefig(snakemake.output.energy, bbox_inches="tight")


def plot_balances():
    co2_carriers = ["co2", "co2 stored", "process emissions"]

    balances_df = pd.read_csv(
        snakemake.input.balances, index_col=list(range(3)), header=list(range(n_header))
    )

    balances = {i.replace(" ", "_"): [i] for i in balances_df.index.levels[0]}
    balances["energy"] = [
        i for i in balances_df.index.levels[0] if i not in co2_carriers
    ]

    fig, ax = plt.subplots(figsize=(12, 8))

    for k, v in balances.items():
        df = balances_df.loc[v]
        df = df.groupby(df.index.get_level_values(2)).sum()

        # convert MWh to TWh
        df = df / 1e6

        # remove trailing link ports
        df.index = [
            i[:-1]
            if ((i not in ["co2", "NH3"]) and (i[-1:] in ["0", "1", "2", "3"]))
            else i
            for i in df.index
        ]
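        # e.g. "gas boiler1" -> "gas boiler"; "co2" and "NH3" are exempt because
        # their final character belongs to the carrier name, not a link port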

        df = df.groupby(df.index.map(rename_techs)).sum()

        to_drop = df.index[
            df.abs().max(axis=1) < snakemake.config["plotting"]["energy_threshold"] / 10
        ]

        if v[0] in co2_carriers:
            units = "MtCO2/a"
        else:
            units = "TWh/a"

        logger.debug(
            f"Dropping technology energy balance smaller than {snakemake.config['plotting']['energy_threshold']/10} {units}"
        )
        logger.debug(df.loc[to_drop])

        df = df.drop(to_drop)

@@ -277,14 +300,20 @@ def plot_balances():
        if df.empty:
            continue

        new_index = preferred_order.intersection(df.index).append(
            df.index.difference(preferred_order)
        )

        new_columns = df.columns.sort_values()

        df.loc[new_index, new_columns].T.plot(
            kind="bar",
            ax=ax,
            stacked=True,
            color=[snakemake.config["plotting"]["tech_colors"][i] for i in new_index],
        )

        handles, labels = ax.get_legend_handles_labels()

        handles.reverse()
        labels.reverse()

@@ -298,153 +327,225 @@ def plot_balances():
        ax.grid(axis="x")

        ax.legend(
            handles,
            labels,
            ncol=1,
            loc="upper left",
            bbox_to_anchor=[1, 1],
            frameon=False,
        )

        fig.savefig(snakemake.output.balances[:-10] + k + ".pdf", bbox_inches="tight")

        plt.cla()


def historical_emissions(cts):
    """
    Read historical emissions to add them to the carbon budget plot.
    """
    # https://www.eea.europa.eu/data-and-maps/data/national-emissions-reported-to-the-unfccc-and-to-the-eu-greenhouse-gas-monitoring-mechanism-16
    # downloaded 201228 (modified by EEA last on 201221)
    fn = "data/eea/UNFCCC_v23.csv"
    df = pd.read_csv(fn, encoding="latin-1")
    df.loc[df["Year"] == "1985-1987", "Year"] = 1986
    df["Year"] = df["Year"].astype(int)
    df = df.set_index(
        ["Year", "Sector_name", "Country_code", "Pollutant_name"]
    ).sort_index()

    e = pd.Series()
    e["electricity"] = "1.A.1.a - Public Electricity and Heat Production"
    e["residential non-elec"] = "1.A.4.b - Residential"
    e["services non-elec"] = "1.A.4.a - Commercial/Institutional"
    e["rail non-elec"] = "1.A.3.c - Railways"
    e["road non-elec"] = "1.A.3.b - Road Transportation"
    e["domestic navigation"] = "1.A.3.d - Domestic Navigation"
    e["international navigation"] = "1.D.1.b - International Navigation"
    e["domestic aviation"] = "1.A.3.a - Domestic Aviation"
    e["international aviation"] = "1.D.1.a - International Aviation"
    e["total energy"] = "1 - Energy"
    e["industrial processes"] = "2 - Industrial Processes and Product Use"
    e["agriculture"] = "3 - Agriculture"
    e["LULUCF"] = "4 - Land Use, Land-Use Change and Forestry"
    e["waste management"] = "5 - Waste management"
    e["other"] = "6 - Other Sector"
    e["indirect"] = "ind_CO2 - Indirect CO2"
    e["total wL"] = "Total (with LULUCF)"
    e["total woL"] = "Total (without LULUCF)"

    pol = ["CO2"]  # ["All greenhouse gases - (CO2 equivalent)"]

    if "GB" in cts:
        cts.remove("GB")
        cts.append("UK")

    year = np.arange(1990, 2018).tolist()

    idx = pd.IndexSlice
    co2_totals = (
        df.loc[idx[year, e.values, cts, pol], "emissions"]
        .unstack("Year")
        .rename(index=pd.Series(e.index, e.values))
    )

    co2_totals = (1 / 1e6) * co2_totals.groupby(level=0, axis=0).sum()  # Gton CO2

    co2_totals.loc["industrial non-elec"] = (
        co2_totals.loc["total energy"]
        - co2_totals.loc[
            [
                "electricity",
                "services non-elec",
                "residential non-elec",
                "road non-elec",
                "rail non-elec",
                "domestic aviation",
                "international aviation",
                "domestic navigation",
                "international navigation",
            ]
        ].sum()
    )

    emissions = co2_totals.loc["electricity"]
    if "T" in opts:
        emissions += co2_totals.loc[[i + " non-elec" for i in ["rail", "road"]]].sum()
    if "H" in opts:
        emissions += co2_totals.loc[
            [i + " non-elec" for i in ["residential", "services"]]
        ].sum()
    if "I" in opts:
        emissions += co2_totals.loc[
            [
                "industrial non-elec",
                "industrial processes",
                "domestic aviation",
                "international aviation",
                "domestic navigation",
                "international navigation",
            ]
        ].sum()
    return emissions
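
The pd.IndexSlice lookup in historical_emissions pulls a (Year, Sector, Country, Pollutant) block out of the four-level index in one step before pivoting years into columns. A condensed, self-contained sketch of the pattern (toy labels, not EEA data):

    import pandas as pd

    idx = pd.IndexSlice
    mi = pd.MultiIndex.from_product(
        [[1990, 1991], ["1 - Energy"], ["DE"], ["CO2"]],
        names=["Year", "Sector_name", "Country_code", "Pollutant_name"],
    )
    df = pd.DataFrame({"emissions": [1000.0, 990.0]}, index=mi)
    block = df.loc[idx[[1990, 1991], ["1 - Energy"], ["DE"], ["CO2"]], "emissions"]
    wide = block.unstack("Year")  # sectors as rows, years as columns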

def plot_carbon_budget_distribution(input_eurostat):
    """
    Plot historical carbon emissions in the EU and decarbonization path.
    """
    import matplotlib.gridspec as gridspec

    import seaborn as sns

    sns.set()
    sns.set_style("ticks")
    plt.style.use("seaborn-ticks")
    plt.rcParams["xtick.direction"] = "in"
    plt.rcParams["ytick.direction"] = "in"
    plt.rcParams["xtick.labelsize"] = 20
    plt.rcParams["ytick.labelsize"] = 20

    plt.figure(figsize=(10, 7))
    gs1 = gridspec.GridSpec(1, 1)
    ax1 = plt.subplot(gs1[0, 0])
    ax1.set_ylabel("CO$_2$ emissions (Gt per year)", fontsize=22)
    ax1.set_ylim([0, 5])
    ax1.set_xlim([1990, snakemake.config["scenario"]["planning_horizons"][-1] + 1])

    path_cb = snakemake.config["results_dir"] + snakemake.config["run"] + "/csvs/"
    countries = pd.read_csv(snakemake.input.country_codes, index_col=1)
    cts = countries.index.to_list()
    e_1990 = co2_emissions_year(cts, input_eurostat, opts, year=1990)
    CO2_CAP = pd.read_csv(path_cb + "carbon_budget_distribution.csv", index_col=0)

    ax1.plot(e_1990 * CO2_CAP[o], linewidth=3, color="dodgerblue", label=None)

    emissions = historical_emissions(cts)

    ax1.plot(emissions, color="black", linewidth=3, label=None)

    # plot committed and under-discussion targets
    # (notice that historical emissions include all countries in the
    # network, but targets refer to EU)
    ax1.plot(
        [2020],
        [0.8 * emissions[1990]],
        marker="*",
        markersize=12,
        markerfacecolor="black",
        markeredgecolor="black",
    )

    ax1.plot(
        [2030],
        [0.45 * emissions[1990]],
        marker="*",
        markersize=12,
        markerfacecolor="white",
        markeredgecolor="black",
    )

    ax1.plot(
        [2030],
        [0.6 * emissions[1990]],
        marker="*",
        markersize=12,
        markerfacecolor="black",
        markeredgecolor="black",
    )

    ax1.plot(
        [2050, 2050],
        [x * emissions[1990] for x in [0.2, 0.05]],
        color="gray",
        linewidth=2,
        marker="_",
        alpha=0.5,
    )

    ax1.plot(
        [2050],
        [0.01 * emissions[1990]],
        marker="*",
        markersize=12,
        markerfacecolor="white",
        linewidth=0,
        markeredgecolor="black",
        label="EU under-discussion target",
        zorder=10,
        clip_on=False,
    )

    ax1.plot(
        [2050],
        [0.125 * emissions[1990]],
        "ro",
        marker="*",
        markersize=12,
        markerfacecolor="black",
        markeredgecolor="black",
        label="EU committed target",
    )

    ax1.legend(
        fancybox=True, fontsize=18, loc=(0.01, 0.01), facecolor="white", frameon=True
    )

    path_cb_plot = (
        snakemake.config["results_dir"] + snakemake.config["run"] + "/graphs/"
    )
    plt.savefig(path_cb_plot + "carbon_budget_plot.pdf", dpi=300)


if __name__ == "__main__":
    if "snakemake" not in globals():
        from helper import mock_snakemake

        snakemake = mock_snakemake("plot_summary")

    logging.basicConfig(level=snakemake.config["logging_level"])

    n_header = 4

@@ -454,8 +555,8 @@ if __name__ == "__main__":

    plot_balances()

    for sector_opts in snakemake.config["scenario"]["sector_opts"]:
        opts = sector_opts.split("-")
        for o in opts:
            if "cb" in o:
                plot_carbon_budget_distribution(snakemake.input.eurostat)
(File diff suppressed because it is too large.)

scripts/retrieve_gas_network_data.py

@@ -1,23 +1,26 @@

# -*- coding: utf-8 -*-
"""
Retrieve gas infrastructure data from
https://zenodo.org/record/4767098/files/IGGIELGN.zip.
"""

import logging
import zipfile
from pathlib import Path

from helper import progress_retrieve

logger = logging.getLogger(__name__)

if __name__ == "__main__":
    if "snakemake" not in globals():
        from helper import mock_snakemake

        snakemake = mock_snakemake("retrieve_gas_network_data")
        rootpath = ".."
    else:
        rootpath = "."

    url = "https://zenodo.org/record/4767098/files/IGGIELGN.zip"
scripts/retrieve_sector_databundle.py

@@ -1,8 +1,10 @@

# -*- coding: utf-8 -*-
"""
Retrieve and extract sector data bundle.
"""

import logging

logger = logging.getLogger(__name__)

import os

@@ -13,8 +15,7 @@ from pathlib import Path

# Add pypsa-eur scripts to path for import of _helpers
sys.path.insert(0, os.getcwd() + "/../pypsa-eur/scripts")

from _helpers import configure_logging, progress_retrieve

if __name__ == "__main__":
    configure_logging(snakemake)

@@ -32,4 +33,4 @@ if __name__ == "__main__":

        tarball_fn.unlink()

    logger.info(f"Databundle available in '{to_fn}'.")
scripts/solve_network.py

@@ -1,40 +1,51 @@

# -*- coding: utf-8 -*-
"""
Solve network.
"""

import logging

import numpy as np
import pypsa
from helper import override_component_attrs, update_config_with_sector_opts
from vresutils.benchmark import memory_logger

logger = logging.getLogger(__name__)
pypsa.pf.logger.setLevel(logging.WARNING)


def add_land_use_constraint(n):
    if "m" in snakemake.wildcards.clusters:
        _add_land_use_constraint_m(n)
    else:
        _add_land_use_constraint(n)


def _add_land_use_constraint(n):
    # warning: this will miss existing offwind which is not classed AC-DC and has carrier 'offwind'

    for carrier in ["solar", "onwind", "offwind-ac", "offwind-dc"]:
        ext_i = (n.generators.carrier == carrier) & ~n.generators.p_nom_extendable
        existing = (
            n.generators.loc[ext_i, "p_nom"]
            .groupby(n.generators.bus.map(n.buses.location))
            .sum()
        )
        existing.index += " " + carrier + "-" + snakemake.wildcards.planning_horizons
        n.generators.loc[existing.index, "p_nom_max"] -= existing

    # check if existing capacities are larger than technical potential
    existing_large = n.generators[
        n.generators["p_nom_min"] > n.generators["p_nom_max"]
    ].index
    if len(existing_large):
        logger.warning(
            f"Existing capacities larger than technical potential for {existing_large},\
                        adjust technical potential to existing capacities"
        )
        n.generators.loc[existing_large, "p_nom_max"] = n.generators.loc[
            existing_large, "p_nom_min"
        ]

    n.generators.p_nom_max.clip(lower=0, inplace=True)

@@ -46,80 +57,109 @@ def _add_land_use_constraint_m(n):

    grouping_years = snakemake.config["existing_capacities"]["grouping_years"]
    current_horizon = snakemake.wildcards.planning_horizons

    for carrier in ["solar", "onwind", "offwind-ac", "offwind-dc"]:
        existing = n.generators.loc[n.generators.carrier == carrier, "p_nom"]
        ind = list(
            set(
                [
                    i.split(sep=" ")[0] + " " + i.split(sep=" ")[1]
                    for i in existing.index
                ]
            )
        )

        previous_years = [
            str(y)
            for y in planning_horizons + grouping_years
            if y < int(snakemake.wildcards.planning_horizons)
        ]

        for p_year in previous_years:
            ind2 = [
                i for i in ind if i + " " + carrier + "-" + p_year in existing.index
            ]
            sel_current = [i + " " + carrier + "-" + current_horizon for i in ind2]
            sel_p_year = [i + " " + carrier + "-" + p_year for i in ind2]
            n.generators.loc[sel_current, "p_nom_max"] -= existing.loc[
                sel_p_year
            ].rename(lambda x: x[:-4] + current_horizon)

    n.generators.p_nom_max.clip(lower=0, inplace=True)


def add_co2_sequestration_limit(n, limit=200):
    """
    Add a global constraint on the amount of Mt CO2 that can be sequestered.
    """
    n.carriers.loc["co2 stored", "co2_absorptions"] = -1
    n.carriers.co2_absorptions = n.carriers.co2_absorptions.fillna(0)

    limit = limit * 1e6

    for o in opts:
        if not "seq" in o:
            continue
        limit = float(o[o.find("seq") + 3 :]) * 1e6
        break

    n.add(
        "GlobalConstraint",
        "co2_sequestration_limit",
        sense="<=",
        constant=limit,
        type="primary_energy",
        carrier_attribute="co2_absorptions",
    )
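
Note how the default 200 Mt cap is overridden from the scenario options: any option of the form seq<number> replaces it. For example:

    o = "seq400"
    limit = float(o[o.find("seq") + 3 :]) * 1e6  # 400 Mt -> 4.0e8 t CO2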

def prepare_network(n, solve_opts=None, config=None):
    if "clip_p_max_pu" in solve_opts:
        for df in (
            n.generators_t.p_max_pu,
            n.generators_t.p_min_pu,
            n.storage_units_t.inflow,
        ):
            df.where(df > solve_opts["clip_p_max_pu"], other=0.0, inplace=True)

    if solve_opts.get("load_shedding"):
        # intersect between macroeconomic and survey-based willingness to pay
        # http://journal.frontiersin.org/article/10.3389/fenrg.2015.00055/full
        n.add("Carrier", "Load")
        n.madd(
            "Generator",
            n.buses.index,
            " load",
            bus=n.buses.index,
            carrier="load",
            sign=1e-3,  # Adjust sign to measure p and p_nom in kW instead of MW
            marginal_cost=1e2,  # Eur/kWh
            p_nom=1e9,  # kW
        )
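        # with sign = 1e-3, one unit of p corresponds to 1 kW at the bus, so
        # marginal_cost = 1e2 EUR/kWh is an effective 1e5 EUR/MWh shedding cost,
        # while keeping the optimisation coefficients well scaled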

    if solve_opts.get("noisy_costs"):
        for t in n.iterate_components():
            # if 'capital_cost' in t.df:
            #     t.df['capital_cost'] += 1e1 + 2.*(np.random.random(len(t.df)) - 0.5)
            if "marginal_cost" in t.df:
                np.random.seed(174)
                t.df["marginal_cost"] += 1e-2 + 2e-3 * (
                    np.random.random(len(t.df)) - 0.5
                )

        for t in n.iterate_components(["Line", "Link"]):
            np.random.seed(123)
            t.df["capital_cost"] += (
                1e-1 + 2e-2 * (np.random.random(len(t.df)) - 0.5)
            ) * t.df["length"]

    if solve_opts.get("nhours"):
        nhours = solve_opts["nhours"]
        n.set_snapshots(n.snapshots[:nhours])
        n.snapshot_weightings[:] = 8760.0 / nhours

    if snakemake.config["foresight"] == "myopic":
        add_land_use_constraint(n)

    if n.stores.carrier.eq("co2 stored").any():
        limit = config["sector"].get("co2_sequestration_potential", 200)
        add_co2_sequestration_limit(n, limit=limit)
@ -134,23 +174,29 @@ def add_battery_constraints(n):
    discharger_bool = n.links.index.str.contains("battery discharger")
    charger_bool = n.links.index.str.contains("battery charger")

    dischargers_ext = n.links[discharger_bool].query("p_nom_extendable").index
    chargers_ext = n.links[charger_bool].query("p_nom_extendable").index

    eff = n.links.efficiency[dischargers_ext].values
    lhs = (
        n.model["Link-p_nom"].loc[chargers_ext]
        - n.model["Link-p_nom"].loc[dischargers_ext] * eff
    )

    n.model.add_constraints(lhs == 0, name="Link-charger_ratio")
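
    # This ties both sides of each extendable battery together:
    # p_nom(charger) == efficiency * p_nom(discharger).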


def add_chp_constraints(n):
    electric = (
        n.links.index.str.contains("urban central")
        & n.links.index.str.contains("CHP")
        & n.links.index.str.contains("electric")
    )
    heat = (
        n.links.index.str.contains("urban central")
        & n.links.index.str.contains("CHP")
        & n.links.index.str.contains("heat")
    )

    electric_ext = n.links[electric].query("p_nom_extendable").index
    heat_ext = n.links[heat].query("p_nom_extendable").index
@@ -158,38 +204,50 @@ def add_chp_constraints(n):
    electric_fix = n.links[electric].query("~p_nom_extendable").index
    heat_fix = n.links[heat].query("~p_nom_extendable").index

    p = n.model["Link-p"]  # dimension: [time, link]
    # output ratio between heat and electricity and top_iso_fuel_line for extendable
    if not electric_ext.empty:
        p_nom = n.model["Link-p_nom"]

        lhs = (
            p_nom.loc[electric_ext]
            * (n.links.p_nom_ratio * n.links.efficiency)[electric_ext].values
            - p_nom.loc[heat_ext] * n.links.efficiency[heat_ext].values
        )
        n.model.add_constraints(lhs == 0, name="chplink-fix_p_nom_ratio")

        rename = {"Link-ext": "Link"}
        lhs = (
            p.loc[:, electric_ext]
            + p.loc[:, heat_ext]
            - p_nom.rename(rename).loc[electric_ext]
        )
        n.model.add_constraints(lhs <= 0, name="chplink-top_iso_fuel_line_ext")

    # top_iso_fuel_line for fixed
    if not electric_fix.empty:
        lhs = p.loc[:, electric_fix] + p.loc[:, heat_fix]
        rhs = n.links.p_nom[electric_fix]
        n.model.add_constraints(lhs <= rhs, name="chplink-top_iso_fuel_line_fix")

    # back-pressure
    if not electric.empty:
        lhs = (
            p.loc[:, heat] * (n.links.efficiency[heat] * n.links.c_b[electric].values)
            - p.loc[:, electric] * n.links.efficiency[electric]
        )
        # bound by zero, not the rhs of the fixed-capacity block above, which
        # is undefined whenever electric_fix is empty
        n.model.add_constraints(lhs <= 0, name="chplink-backpressure")
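
    # i.e. heat extraction weighted by eff_heat * c_b may never exceed the
    # simultaneous electric output weighted by eff_electric.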


def add_pipe_retrofit_constraint(n):
"""Add constraint for retrofitting existing CH4 pipelines to H2 pipelines.""" """
Add constraint for retrofitting existing CH4 pipelines to H2 pipelines.
"""
    gas_pipes_i = n.links.query("carrier == 'gas pipeline' and p_nom_extendable").index
    h2_retrofitted_i = n.links.query(
        "carrier == 'H2 pipeline retrofitted' and p_nom_extendable"
    ).index

    if h2_retrofitted_i.empty or gas_pipes_i.empty:
        return
@@ -200,7 +258,7 @@ def add_pipe_retrofit_constraint(n):
    lhs = p_nom.loc[gas_pipes_i] + CH4_per_H2 * p_nom.loc[h2_retrofitted_i]
    rhs = n.links.p_nom[gas_pipes_i].rename_axis("Link-ext")

    n.model.add_constraints(lhs == rhs, name="Link-pipe_retrofit")
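
    # Retrofitted H2 capacity consumes the corresponding share (CH4_per_H2)
    # of the existing pipe, so gas plus converted H2 capacity always adds up
    # to the original gas pipeline rating.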


def extra_functionality(n, snapshots):
@@ -209,9 +267,11 @@ def extra_functionality(n, snapshots):
def solve_network(n, config, opts="", **kwargs):
    set_of_options = config["solving"]["solver"]["options"]
    solver_options = (
        config["solving"]["solver_options"][set_of_options] if set_of_options else {}
    )
    solver_name = config["solving"]["solver"]["name"]
    cf_solving = config["solving"]["options"]
    track_iterations = cf_solving.get("track_iterations", False)
    min_iterations = cf_solving.get("min_iterations", 4)
@@ -245,46 +305,52 @@ def solve_network(n, config, opts="", **kwargs):
    )

    if status != "ok":
        logger.warning(
            f"Solving status '{status}' with termination condition '{condition}'"
        )

    return n


# %%
if __name__ == "__main__":
    if "snakemake" not in globals():
        from helper import mock_snakemake

        snakemake = mock_snakemake(
            "solve_network_myopic",
            simpl="",
            opts="",
            clusters="45",
            lv=1.0,
            sector_opts="8760H-T-H-B-I-A-solar+p3-dist1",
            planning_horizons="2020",
        )
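        # mock_snakemake emulates the snakemake object with the given rule
        # name and wildcards, so the script can also be run interactively.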

    logging.basicConfig(
        filename=snakemake.log.python, level=snakemake.config["logging_level"]
    )

    update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts)

    tmpdir = snakemake.config["solving"].get("tmpdir")
    if tmpdir is not None:
        from pathlib import Path

        Path(tmpdir).mkdir(parents=True, exist_ok=True)
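        # Some solvers write temporary files during optimisation; make sure
        # the configured directory exists before solving.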

    opts = snakemake.wildcards.sector_opts.split("-")
    solve_opts = snakemake.config["solving"]["options"]

    fn = getattr(snakemake.log, "memory", None)
    with memory_logger(filename=fn, interval=30.0) as mem:
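        # Samples the memory consumption of the solve every 30 seconds and
        # writes it to the rule's memory log, if one is configured.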
        overrides = override_component_attrs(snakemake.input.overrides)
        n = pypsa.Network(snakemake.input.network, override_component_attrs=overrides)

        n = prepare_network(n, solve_opts, config=snakemake.config)
        n = solve_network(
            n, config=snakemake.config, opts=opts, log_fn=snakemake.log.solver
        )
if "lv_limit" in n.global_constraints.index: if "lv_limit" in n.global_constraints.index:
n.line_volume_limit = n.global_constraints.at["lv_limit", "constant"] n.line_volume_limit = n.global_constraints.at["lv_limit", "constant"]
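            # record the applied line volume limit as a plain network
            # attribute for convenient later inspection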

View File

@@ -3,15 +3,15 @@ foresight: myopic
scenario:
  lv:
  - 1.5
  clusters:
  - 5
  sector_opts:
  - 191H-T-H-B-I-A-solar+p3-dist1
  planning_horizons:
  - 2030
  - 2040
  - 2050
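  # several horizons exercise the sequential myopic pathway optimisation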

snapshots:
  start: "2013-03-01"
@@ -25,4 +25,3 @@ solving:
    name: cbc
    options: cbc-default
  mem: 4000

View File

@@ -3,13 +3,13 @@ foresight: overnight
scenario:
  lv:
  - 1.5
  clusters:
  - 5
  sector_opts:
  - CO2L0-191H-T-H-B-I-A-solar+p3-dist1
  planning_horizons:
  - 2030

snapshots:
  start: "2013-03-01"