commit 5611817513
1  .git-blame-ignore-revs  Normal file
@@ -0,0 +1 @@
+13769f90af4500948b0376d57df4cceaa13e78b5
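This file lists commit hashes that `git blame` should skip; pointing git at it with `git config blame.ignoreRevsFile .git-blame-ignore-revs` keeps the bulk reformatting commit below out of line-by-line history.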
92  .pre-commit-config.yaml  Normal file
@@ -0,0 +1,92 @@
+# SPDX-FileCopyrightText: : 2022 The PyPSA-Eur Authors
+#
+# SPDX-License-Identifier: CC0-1.0
+
+exclude: "^LICENSES"
+
+repos:
+- repo: https://github.com/pre-commit/pre-commit-hooks
+  rev: v4.4.0
+  hooks:
+  - id: check-merge-conflict
+  - id: end-of-file-fixer
+  - id: fix-encoding-pragma
+  - id: mixed-line-ending
+  - id: trailing-whitespace
+  - id: check-added-large-files
+    args: ["--maxkb=2000"]
+
+# Sort package imports alphabetically
+- repo: https://github.com/PyCQA/isort
+  rev: 5.12.0
+  hooks:
+  - id: isort
+    args: ["--profile", "black", "--filter-files"]
+
+# Convert relative imports to absolute imports
+- repo: https://github.com/MarcoGorelli/absolufy-imports
+  rev: v0.3.1
+  hooks:
+  - id: absolufy-imports
+
+# Find common spelling mistakes in comments and docstrings
+- repo: https://github.com/codespell-project/codespell
+  rev: v2.2.2
+  hooks:
+  - id: codespell
+    args: ['--ignore-regex="(\b[A-Z]+\b)"', '--ignore-words-list=fom,appartment,bage,ore,setis,tabacco']   # Ignore capital case words, e.g. country codes
+    types_or: [python, rst, markdown]
+    files: ^(scripts|doc)/
+
+# Make docstrings PEP 257 compliant
+- repo: https://github.com/PyCQA/docformatter
+  rev: v1.5.1
+  hooks:
+  - id: docformatter
+    args: ["--in-place", "--make-summary-multi-line", "--pre-summary-newline"]
+
+- repo: https://github.com/keewis/blackdoc
+  rev: v0.3.8
+  hooks:
+  - id: blackdoc
+
+# Formatting with "black" coding style
+- repo: https://github.com/psf/black
+  rev: 23.1.0
+  hooks:
+  # Format Python files
+  - id: black
+  # Format Jupyter Python notebooks
+  - id: black-jupyter
+
+# Remove output from Jupyter notebooks
+- repo: https://github.com/aflc/pre-commit-jupyter
+  rev: v1.2.1
+  hooks:
+  - id: jupyter-notebook-cleanup
+    args: ["--remove-kernel-metadata"]
+
+# Do YAML formatting (before the linter checks it for misses)
+- repo: https://github.com/macisamuele/language-formatters-pre-commit-hooks
+  rev: v2.7.0
+  hooks:
+  - id: pretty-format-yaml
+    args: [--autofix, --indent, "2", --preserve-quotes]
+
+# Format Snakemake rule / workflow files
+# - repo: https://github.com/snakemake/snakefmt
+#   rev: v0.8.1
+#   hooks:
+#   - id: snakefmt
+
+# For cleaning jupyter notebooks
+- repo: https://github.com/aflc/pre-commit-jupyter
+  rev: v1.2.1
+  hooks:
+  - id: jupyter-notebook-cleanup
+    exclude: examples/solve-on-remote.ipynb
+
+# Check for FSFE REUSE compliance (licensing)
+# - repo: https://github.com/fsfe/reuse-tool
+#   rev: v1.1.2
+#   hooks:
+#   - id: reuse
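The hooks above are driven by the pre-commit framework. A minimal sketch of wiring them up locally, assuming the pre-commit package is installed and the commands are run from the repository root (standard pre-commit CLI usage, not part of this commit):

# Sketch: register the git hook, then apply all configured hooks once.
import subprocess

subprocess.run(["pre-commit", "install"], check=True)             # set up the git commit hook
subprocess.run(["pre-commit", "run", "--all-files"], check=True)  # run every hook on the whole tree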
97  doc/conf.py
@@ -12,14 +12,14 @@
 # All configuration values have a default; values that are commented out
 # serve to show the default.

-import sys
 import os
 import shlex
+import sys

 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
-sys.path.insert(0, os.path.abspath('../scripts'))
+sys.path.insert(0, os.path.abspath("../scripts"))

 # -- General configuration ------------------------------------------------

@@ -32,48 +32,48 @@ sys.path.insert(0, os.path.abspath('../scripts'))
 extensions = [
     #'sphinx.ext.autodoc',
     #'sphinx.ext.autosummary',
-    'sphinx.ext.autosectionlabel',
-    'sphinx.ext.intersphinx',
-    'sphinx.ext.todo',
-    'sphinx.ext.mathjax',
-    'sphinx.ext.napoleon',
-    'sphinx.ext.graphviz',
+    "sphinx.ext.autosectionlabel",
+    "sphinx.ext.intersphinx",
+    "sphinx.ext.todo",
+    "sphinx.ext.mathjax",
+    "sphinx.ext.napoleon",
+    "sphinx.ext.graphviz",
     #'sphinx.ext.pngmath',
     #'sphinxcontrib.tikz',
     #'rinoh.frontend.sphinx',
-    'sphinx.ext.imgconverter', # for SVG conversion
+    "sphinx.ext.imgconverter",  # for SVG conversion
 ]

-autodoc_default_flags = ['members']
+autodoc_default_flags = ["members"]
 autosummary_generate = True

 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]

 # The suffix(es) of source filenames.
 # You can specify multiple suffix as a list of string:
 # source_suffix = ['.rst', '.md']
-source_suffix = '.rst'
+source_suffix = ".rst"

 # The encoding of source files.
 # source_encoding = 'utf-8-sig'

 # The master toctree document.
-master_doc = 'index'
+master_doc = "index"

 # General information about the project.
-project = u'PyPSA-Eur-Sec'
-copyright = u'2019-2023 Tom Brown (KIT, TUB), Marta Victoria (Aarhus University), Lisa Zeyen (KIT, TUB), Fabian Neumann (TUB)'
-author = u'2019-2023 Tom Brown (KIT, TUB), Marta Victoria (Aarhus University), Lisa Zeyen (KIT, TUB), Fabian Neumann (TUB)'
+project = "PyPSA-Eur-Sec"
+copyright = "2019-2023 Tom Brown (KIT, TUB), Marta Victoria (Aarhus University), Lisa Zeyen (KIT, TUB), Fabian Neumann (TUB)"
+author = "2019-2023 Tom Brown (KIT, TUB), Marta Victoria (Aarhus University), Lisa Zeyen (KIT, TUB), Fabian Neumann (TUB)"

 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
 # built documents.
 #
 # The short X.Y version.
-version = u'0.7'
+version = "0.7"
 # The full version, including alpha/beta/rc tags.
-release = u'0.7.0'
+release = "0.7.0"

 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.

@@ -90,7 +90,7 @@ language = None

 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
-exclude_patterns = ['_build']
+exclude_patterns = ["_build"]

 # The reST default role (used for this markup: `text`) to use for all
 # documents.

@@ -108,7 +108,7 @@ exclude_patterns = ['_build']
 # show_authors = False

 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = "sphinx"

 # A list of ignored prefixes for module index sorting.
 # modindex_common_prefix = []

@@ -124,14 +124,14 @@ todo_include_todos = True

 # The theme to use for HTML and HTML Help pages. See the documentation for
 # a list of builtin themes.
-html_theme = 'sphinx_rtd_theme'
+html_theme = "sphinx_rtd_theme"

 # Theme options are theme-specific and customize the look and feel of a theme
 # further. For a list of options available for each theme, see the
 # documentation.
 html_theme_options = {
-    'display_version': True,
-    'sticky_navigation': True,
+    "display_version": True,
+    "sticky_navigation": True,
 }


@@ -157,11 +157,11 @@ html_theme_options = {
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+html_static_path = ["_static"]

 html_context = {
-    'css_files': [
-        '_static/theme_overrides.css', # override wide tables in RTD theme
+    "css_files": [
+        "_static/theme_overrides.css",  # override wide tables in RTD theme
     ],
 }


@@ -226,20 +226,17 @@ html_context = {
 # html_search_scorer = 'scorer.js'

 # Output file base name for HTML help builder.
-htmlhelp_basename = 'PyPSAEurSecdoc'
+htmlhelp_basename = "PyPSAEurSecdoc"

 # -- Options for LaTeX output ---------------------------------------------

 latex_elements = {
     # The paper size ('letterpaper' or 'a4paper').
     #'papersize': 'letterpaper',
-
     # The font size ('10pt', '11pt' or '12pt').
     #'pointsize': '10pt',
-
     # Additional stuff for the LaTeX preamble.
     #'preamble': '',
-
     # Latex figure (float) alignment
     #'figure_align': 'htbp',
 }

@@ -248,16 +245,25 @@ latex_elements = {
 # (source start file, target name, title,
 #  author, documentclass [howto, manual, or own class]).
 latex_documents = [
-    (master_doc, 'PyPSA-Eur-Sec.tex', u'PyPSA-Eur-Sec Documentation',
-     u'author', 'manual'),
+    (
+        master_doc,
+        "PyPSA-Eur-Sec.tex",
+        "PyPSA-Eur-Sec Documentation",
+        "author",
+        "manual",
+    ),
 ]


 # Added for rinoh http://www.mos6581.org/rinohtype/quickstart.html
-rinoh_documents = [(master_doc, # top-level file (index.rst)
-                    'PyPSA-Eur-Sec', # output (target.pdf)
-                    'PyPSA-Eur-Sec Documentation', # document title
-                    'author')] # document author
+rinoh_documents = [
+    (
+        master_doc,  # top-level file (index.rst)
+        "PyPSA-Eur-Sec",  # output (target.pdf)
+        "PyPSA-Eur-Sec Documentation",  # document title
+        "author",
+    )
+]  # document author


 # The name of an image file (relative to this directory) to place at the top of

@@ -285,10 +291,7 @@ rinoh_documents = [(master_doc, # top-level file (index.rst)

 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
-man_pages = [
-    (master_doc, 'pypsa-eur-sec', u'PyPSA-Eur-Sec Documentation',
-     [author], 1)
-]
+man_pages = [(master_doc, "pypsa-eur-sec", "PyPSA-Eur-Sec Documentation", [author], 1)]

 # If true, show URL addresses after external links.
 # man_show_urls = False

@@ -300,9 +303,15 @@ man_pages = [
 # (source start file, target name, title, author,
 #  dir menu entry, description, category)
 texinfo_documents = [
-    (master_doc, 'PyPSA-Eur-Sec', u'PyPSA-Eur-Sec Documentation',
-     author, 'PyPSA-Eur-Sec', 'One line description of project.',
-     'Miscellaneous'),
+    (
+        master_doc,
+        "PyPSA-Eur-Sec",
+        "PyPSA-Eur-Sec Documentation",
+        author,
+        "PyPSA-Eur-Sec",
+        "One line description of project.",
+        "Miscellaneous",
+    ),
 ]

 # Documents to append as an appendix to all manuals.

@@ -319,4 +328,4 @@ texinfo_documents = [


 # Example configuration for intersphinx: refer to the Python standard library.
-intersphinx_mapping = {'https://docs.python.org/': None}
+intersphinx_mapping = {"https://docs.python.org/": None}
@@ -27,4 +27,3 @@ Building topologies and corresponding standard values,tabula-calculator-calcsetb
 Retrofitting thermal envelope costs for Germany,retro_cost_germany.csv,unknown,https://www.iwu.de/forschung/handlungslogiken/kosten-energierelevanter-bau-und-anlagenteile-bei-modernisierung/
 District heating most countries,jrc-idees-2015/,CC BY 4.0,https://ec.europa.eu/jrc/en/potencia/jrc-idees,,
 District heating missing countries,district_heat_share.csv,unknown,https://www.euroheat.org/knowledge-hub/country-profiles,,
-
@@ -495,7 +495,7 @@ The production of glass is assumed to be fully electrified based on the current

 **Non-ferrous Metals**

-The non-ferrous metal subsector includes the manufacturing of base metals (aluminium, copper, lead, zink), precious metals (gold, silver), and technology metals (molybdenum, cobalt, silicon).
+The non-ferrous metal subsector includes the manufacturing of base metals (aluminium, copper, lead, zinc), precious metals (gold, silver), and technology metals (molybdenum, cobalt, silicon).

 The manufacturing of aluminium accounts for more than half of the final energy consumption of this subsector. Two alternative processing routes are used today to manufacture aluminium in Europe. The primary route represents 40% of the aluminium production, while the secondary route represents the remaining 60%.

@@ -613,6 +613,3 @@ Captured :math:`CO_2` can also be sequestered underground up to an annual seques
 *Carbon dioxide transport*

 Carbon dioxide can be modelled as a single node for Europe (in this case, :math:`CO_2` transport constraints are neglected). A network for modelling the transport of :math:`CO_2` among the different nodes can also be created if selected in the `config file <https://github.com/PyPSA/pypsa-eur-sec/blob/3daff49c9999ba7ca7534df4e587e1d516044fc3/config.default.yaml#L248>`_.
-
-
-
@ -1,21 +1,21 @@
|
||||
# coding: utf-8
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
import pandas as pd
|
||||
|
||||
idx = pd.IndexSlice
|
||||
|
||||
import numpy as np
|
||||
import pypsa
|
||||
import yaml
|
||||
import numpy as np
|
||||
|
||||
from add_existing_baseyear import add_build_year_to_new_assets
|
||||
from helper import override_component_attrs, update_config_with_sector_opts
|
||||
|
||||
|
||||
def add_brownfield(n, n_p, year):
|
||||
|
||||
logger.info(f"Preparing brownfield for the year {year}")
|
||||
|
||||
# electric transmission grid set optimised capacities of previous as minimum
|
||||
@@ -24,47 +24,47 @@ def add_brownfield(n, n_p, year):
     n.links.loc[dc_i, "p_nom_min"] = n_p.links.loc[dc_i, "p_nom_opt"]

     for c in n_p.iterate_components(["Link", "Generator", "Store"]):
-
         attr = "e" if c.name == "Store" else "p"

         # first, remove generators, links and stores that track
         # CO2 or global EU values since these are already in n
-        n_p.mremove(
-            c.name,
-            c.df.index[c.df.lifetime==np.inf]
-        )
+        n_p.mremove(c.name, c.df.index[c.df.lifetime == np.inf])

         # remove assets whose build_year + lifetime < year
-        n_p.mremove(
-            c.name,
-            c.df.index[c.df.build_year + c.df.lifetime < year]
-        )
+        n_p.mremove(c.name, c.df.index[c.df.build_year + c.df.lifetime < year])

         # remove assets if their optimized nominal capacity is lower than a threshold
         # since CHP heat Link is proportional to CHP electric Link, make sure threshold is compatible
-        chp_heat = c.df.index[(
+        chp_heat = c.df.index[
+            (
                 c.df[attr + "_nom_extendable"]
                 & c.df.index.str.contains("urban central")
                 & c.df.index.str.contains("CHP")
                 & c.df.index.str.contains("heat")
-        )]
+            )
+        ]

-        threshold = snakemake.config['existing_capacities']['threshold_capacity']
+        threshold = snakemake.config["existing_capacities"]["threshold_capacity"]

         if not chp_heat.empty:
-            threshold_chp_heat = (threshold
+            threshold_chp_heat = (
+                threshold
                 * c.df.efficiency[chp_heat.str.replace("heat", "electric")].values
                 * c.df.p_nom_ratio[chp_heat.str.replace("heat", "electric")].values
                 / c.df.efficiency[chp_heat].values
             )
             n_p.mremove(
                 c.name,
-                chp_heat[c.df.loc[chp_heat, attr + "_nom_opt"] < threshold_chp_heat]
+                chp_heat[c.df.loc[chp_heat, attr + "_nom_opt"] < threshold_chp_heat],
             )

         n_p.mremove(
             c.name,
-            c.df.index[c.df[attr + "_nom_extendable"] & ~c.df.index.isin(chp_heat) & (c.df[attr + "_nom_opt"] < threshold)]
+            c.df.index[
+                c.df[attr + "_nom_extendable"]
+                & ~c.df.index.isin(chp_heat)
+                & (c.df[attr + "_nom_opt"] < threshold)
+            ],
         )

         # copy over assets but fix their capacity
@@ -74,55 +74,67 @@ def add_brownfield(n, n_p, year):
         n.import_components_from_dataframe(c.df, c.name)

         # copy time-dependent
-        selection = (
-            n.component_attrs[c.name].type.str.contains("series")
-            & n.component_attrs[c.name].status.str.contains("Input")
-        )
+        selection = n.component_attrs[c.name].type.str.contains(
+            "series"
+        ) & n.component_attrs[c.name].status.str.contains("Input")
         for tattr in n.component_attrs[c.name].index[selection]:
             n.import_series_from_dataframe(c.pnl[tattr], c.name, tattr)

     # deal with gas network
-    pipe_carrier = ['gas pipeline']
-    if snakemake.config["sector"]['H2_retrofit']:
+    pipe_carrier = ["gas pipeline"]
+    if snakemake.config["sector"]["H2_retrofit"]:
         # drop capacities of previous year to avoid duplicating
         to_drop = n.links.carrier.isin(pipe_carrier) & (n.links.build_year != year)
         n.mremove("Link", n.links.loc[to_drop].index)

         # subtract the already retrofitted from today's gas grid capacity
-        h2_retrofitted_fixed_i = n.links[(n.links.carrier=='H2 pipeline retrofitted') & (n.links.build_year!=year)].index
+        h2_retrofitted_fixed_i = n.links[
+            (n.links.carrier == "H2 pipeline retrofitted")
+            & (n.links.build_year != year)
+        ].index
         gas_pipes_i = n.links[n.links.carrier.isin(pipe_carrier)].index
         CH4_per_H2 = 1 / snakemake.config["sector"]["H2_retrofit_capacity_per_CH4"]
         fr = "H2 pipeline retrofitted"
         to = "gas pipeline"
         # today's pipe capacity
-        pipe_capacity = n.links.loc[gas_pipes_i, 'p_nom']
+        pipe_capacity = n.links.loc[gas_pipes_i, "p_nom"]
         # already retrofitted capacity from gas -> H2
-        already_retrofitted = (n.links.loc[h2_retrofitted_fixed_i, 'p_nom']
-            .rename(lambda x: x.split("-2")[0].replace(fr, to)).groupby(level=0).sum())
-        remaining_capacity = pipe_capacity - CH4_per_H2 * already_retrofitted.reindex(index=pipe_capacity.index).fillna(0)
+        already_retrofitted = (
+            n.links.loc[h2_retrofitted_fixed_i, "p_nom"]
+            .rename(lambda x: x.split("-2")[0].replace(fr, to))
+            .groupby(level=0)
+            .sum()
+        )
+        remaining_capacity = (
+            pipe_capacity
+            - CH4_per_H2
+            * already_retrofitted.reindex(index=pipe_capacity.index).fillna(0)
+        )
         n.links.loc[gas_pipes_i, "p_nom"] = remaining_capacity
     else:
-        new_pipes = n.links.carrier.isin(pipe_carrier) & (n.links.build_year==year)
-        n.links.loc[new_pipes, "p_nom"] = 0.
-        n.links.loc[new_pipes, "p_nom_min"] = 0.
+        new_pipes = n.links.carrier.isin(pipe_carrier) & (
+            n.links.build_year == year
+        )
+        n.links.loc[new_pipes, "p_nom"] = 0.0
+        n.links.loc[new_pipes, "p_nom_min"] = 0.0

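To illustrate the retrofit bookkeeping above with invented numbers (the value 0.6 for H2_retrofit_capacity_per_CH4 is only an assumption for this sketch, not taken from the config):

# Toy check of remaining_capacity = p_nom - CH4_per_H2 * already_retrofitted
import pandas as pd

pipe_capacity = pd.Series({"gas pipeline A-B": 1000.0})       # MW CH4 today (invented)
already_retrofitted = pd.Series({"gas pipeline A-B": 120.0})  # MW H2 already converted (invented)
CH4_per_H2 = 1 / 0.6                                          # 1 MW of H2 pipeline displaces ~1.67 MW of CH4
remaining = pipe_capacity - CH4_per_H2 * already_retrofitted.reindex(pipe_capacity.index).fillna(0)
print(remaining["gas pipeline A-B"])                          # 800.0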
 # %%
 if __name__ == "__main__":
-    if 'snakemake' not in globals():
+    if "snakemake" not in globals():
         from helper import mock_snakemake

         snakemake = mock_snakemake(
-            'add_brownfield',
-            simpl='',
+            "add_brownfield",
+            simpl="",
             clusters="37",
             opts="",
             lv=1.0,
-            sector_opts='168H-T-H-B-I-solar+p3-dist1',
+            sector_opts="168H-T-H-B-I-solar+p3-dist1",
             planning_horizons=2030,
         )

-    logging.basicConfig(level=snakemake.config['logging_level'])
+    logging.basicConfig(level=snakemake.config["logging_level"])

     update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts)

@@ -1,23 +1,25 @@
-# coding: utf-8
+# -*- coding: utf-8 -*-

 import logging

 logger = logging.getLogger(__name__)

 import pandas as pd

 idx = pd.IndexSlice

-import numpy as np
-import xarray as xr
-
-import pypsa
-import yaml
-
-from prepare_sector_network import prepare_costs, define_spatial, cluster_heat_buses
-from helper import override_component_attrs, update_config_with_sector_opts
-
 from types import SimpleNamespace

+import numpy as np
+import pypsa
+import xarray as xr
+import yaml
+from helper import override_component_attrs, update_config_with_sector_opts
+from prepare_sector_network import cluster_heat_buses, define_spatial, prepare_costs
+
 spatial = SimpleNamespace()


 def add_build_year_to_new_assets(n, baseyear):
     """
     Parameters
@@ -29,7 +31,6 @@ def add_build_year_to_new_assets(n, baseyear):

     # Give assets with lifetimes and no build year the build year baseyear
     for c in n.iterate_components(["Link", "Generator", "Store"]):
-
         assets = c.df.index[(c.df.lifetime != np.inf) & (c.df.build_year == 0)]
         c.df.loc[assets, "build_year"] = baseyear

@@ -39,40 +40,34 @@ def add_build_year_to_new_assets(n, baseyear):
         c.df.rename(index=rename, inplace=True)

         # rename time-dependent
-        selection = (
-            n.component_attrs[c.name].type.str.contains("series")
-            & n.component_attrs[c.name].status.str.contains("Input")
-        )
+        selection = n.component_attrs[c.name].type.str.contains(
+            "series"
+        ) & n.component_attrs[c.name].status.str.contains("Input")
         for attr in n.component_attrs[c.name].index[selection]:
             c.pnl[attr].rename(columns=rename, inplace=True)


 def add_existing_renewables(df_agg):
     """
-    Append existing renewables to the df_agg pd.DataFrame
-    with the conventional power plants.
+    Append existing renewables to the df_agg pd.DataFrame with the conventional
+    power plants.
     """

     cc = pd.read_csv(snakemake.input.country_codes, index_col=0)

-    carriers = {
-        "solar": "solar",
-        "onwind": "onwind",
-        "offwind": "offwind-ac"
-    }
-
-    for tech in ['solar', 'onwind', 'offwind']:
+    carriers = {"solar": "solar", "onwind": "onwind", "offwind": "offwind-ac"}
+
+    for tech in ["solar", "onwind", "offwind"]:
         carrier = carriers[tech]

-        df = pd.read_csv(snakemake.input[f"existing_{tech}"], index_col=0).fillna(0.)
+        df = pd.read_csv(snakemake.input[f"existing_{tech}"], index_col=0).fillna(0.0)
         df.columns = df.columns.astype(int)

         rename_countries = {
-            'Czechia': 'Czech Republic',
-            'UK': 'United Kingdom',
-            'Bosnia Herzg': 'Bosnia Herzegovina',
-            'North Macedonia': 'Macedonia'
+            "Czechia": "Czech Republic",
+            "UK": "United Kingdom",
+            "Bosnia Herzg": "Bosnia Herzegovina",
+            "North Macedonia": "Macedonia",
         }

         df.rename(index=rename_countries, inplace=True)
@@ -80,16 +75,21 @@ def add_existing_renewables(df_agg):
         df.rename(index=cc["2 letter code (ISO-3166-2)"], inplace=True)

         # calculate yearly differences
-        df.insert(loc=0, value=.0, column='1999')
-        df = df.diff(axis=1).drop('1999', axis=1).clip(lower=0)
+        df.insert(loc=0, value=0.0, column="1999")
+        df = df.diff(axis=1).drop("1999", axis=1).clip(lower=0)

         # distribute capacities among nodes according to capacity factor
         # weighting with nodal_fraction
-        elec_buses = n.buses.index[n.buses.carrier == "AC"].union(n.buses.index[n.buses.carrier == "DC"])
-        nodal_fraction = pd.Series(0., elec_buses)
+        elec_buses = n.buses.index[n.buses.carrier == "AC"].union(
+            n.buses.index[n.buses.carrier == "DC"]
+        )
+        nodal_fraction = pd.Series(0.0, elec_buses)

         for country in n.buses.loc[elec_buses, "country"].unique():
-            gens = n.generators.index[(n.generators.index.str[:2] == country) & (n.generators.carrier == carrier)]
+            gens = n.generators.index[
+                (n.generators.index.str[:2] == country)
+                & (n.generators.carrier == carrier)
+            ]
             cfs = n.generators_t.p_max_pu[gens].mean()
             cfs_key = cfs / cfs.sum()
             nodal_fraction.loc[n.generators.loc[gens, "bus"]] = cfs_key.values
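A toy run of the cumulative-to-yearly conversion above (numbers invented): the statistics give cumulative installed capacity per year, and the insert/diff/clip chain turns that into yearly additions while discarding net decommissioning.

import pandas as pd

cum = pd.DataFrame({"2000": [5.0], "2001": [8.0], "2002": [7.5]}, index=["DE"])  # cumulative GW
cum.insert(loc=0, column="1999", value=0.0)
yearly = cum.diff(axis=1).drop("1999", axis=1).clip(lower=0)
# DE: 2000 -> 5.0, 2001 -> 3.0, 2002 -> 0.0 (the -0.5 decommissioning is clipped to zero)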
@@ -102,7 +102,7 @@ def add_existing_renewables(df_agg):
         for node in nodal_df.index:
             name = f"{node}-{tech}-{year}"
             capacity = nodal_df.loc[node, year]
-            if capacity > 0.:
+            if capacity > 0.0:
                 df_agg.at[name, "Fueltype"] = tech
                 df_agg.at[name, "Capacity"] = capacity
                 df_agg.at[name, "DateIn"] = year
@@ -120,35 +120,34 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas
         to read lifetime to estimate YearDecomissioning
     baseyear : int
     """
-    logger.debug(f"Adding power capacities installed before {baseyear} from powerplants.csv")
+    logger.debug(
+        f"Adding power capacities installed before {baseyear} from powerplants.csv"
+    )

     df_agg = pd.read_csv(snakemake.input.powerplants, index_col=0)

     rename_fuel = {
-        'Hard Coal': 'coal',
-        'Lignite': 'lignite',
-        'Nuclear': 'nuclear',
-        'Oil': 'oil',
-        'OCGT': 'OCGT',
-        'CCGT': 'CCGT',
-        'Natural Gas': 'gas',
-        'Bioenergy': 'urban central solid biomass CHP',
+        "Hard Coal": "coal",
+        "Lignite": "lignite",
+        "Nuclear": "nuclear",
+        "Oil": "oil",
+        "OCGT": "OCGT",
+        "CCGT": "CCGT",
+        "Natural Gas": "gas",
+        "Bioenergy": "urban central solid biomass CHP",
     }

     fueltype_to_drop = [
-        'Hydro',
-        'Wind',
-        'Solar',
-        'Geothermal',
-        'Waste',
-        'Other',
-        'CCGT, Thermal'
+        "Hydro",
+        "Wind",
+        "Solar",
+        "Geothermal",
+        "Waste",
+        "Other",
+        "CCGT, Thermal",
     ]

-    technology_to_drop = [
-        'Pv',
-        'Storage Technologies'
-    ]
+    technology_to_drop = ["Pv", "Storage Technologies"]

     # drop unused fueltyps and technologies
     df_agg.drop(df_agg.index[df_agg.Fueltype.isin(fueltype_to_drop)], inplace=True)
@@ -157,13 +156,12 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas

     # Intermediate fix for DateIn & DateOut
     # Fill missing DateIn
-    biomass_i = df_agg.loc[df_agg.Fueltype=='urban central solid biomass CHP'].index
-    mean = df_agg.loc[biomass_i, 'DateIn'].mean()
-    df_agg.loc[biomass_i, 'DateIn'] = df_agg.loc[biomass_i, 'DateIn'].fillna(int(mean))
+    biomass_i = df_agg.loc[df_agg.Fueltype == "urban central solid biomass CHP"].index
+    mean = df_agg.loc[biomass_i, "DateIn"].mean()
+    df_agg.loc[biomass_i, "DateIn"] = df_agg.loc[biomass_i, "DateIn"].fillna(int(mean))
     # Fill missing DateOut
-    dateout = df_agg.loc[biomass_i, 'DateIn'] + snakemake.config['costs']['lifetime']
-    df_agg.loc[biomass_i, 'DateOut'] = df_agg.loc[biomass_i, 'DateOut'].fillna(dateout)
-
+    dateout = df_agg.loc[biomass_i, "DateIn"] + snakemake.config["costs"]["lifetime"]
+    df_agg.loc[biomass_i, "DateOut"] = df_agg.loc[biomass_i, "DateOut"].fillna(dateout)

     # drop assets which are already phased out / decommissioned
     phased_out = df_agg[df_agg["DateOut"] < baseyear].index
@@ -190,22 +188,21 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas
     add_existing_renewables(df_agg)

     df_agg["grouping_year"] = np.take(
-        grouping_years,
-        np.digitize(df_agg.DateIn, grouping_years, right=True)
+        grouping_years, np.digitize(df_agg.DateIn, grouping_years, right=True)
     )

     df = df_agg.pivot_table(
-        index=["grouping_year", 'Fueltype'],
-        columns='cluster_bus',
-        values='Capacity',
-        aggfunc='sum'
+        index=["grouping_year", "Fueltype"],
+        columns="cluster_bus",
+        values="Capacity",
+        aggfunc="sum",
     )

     lifetime = df_agg.pivot_table(
-        index=["grouping_year", 'Fueltype'],
-        columns='cluster_bus',
-        values='lifetime',
-        aggfunc='mean' # currently taken mean for clustering lifetimes
+        index=["grouping_year", "Fueltype"],
+        columns="cluster_bus",
+        values="lifetime",
+        aggfunc="mean",  # currently taken mean for clustering lifetimes
     )

     carrier = {
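How the np.digitize/np.take pair above bins commissioning years into grouping years, on invented data:

import numpy as np

grouping_years = [1980, 1990, 2000, 2010, 2020]
date_in = np.array([1975, 1990, 2005])
idx = np.digitize(date_in, grouping_years, right=True)  # array([0, 1, 3])
print(np.take(grouping_years, idx))                     # [1980 1990 2010]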
@@ -215,78 +212,89 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas
         "oil": "oil",
         "lignite": "lignite",
         "nuclear": "uranium",
-        'urban central solid biomass CHP': "biomass",
+        "urban central solid biomass CHP": "biomass",
     }

     for grouping_year, generator in df.index:
-
-
         # capacity is the capacity in MW at each node for this
         capacity = df.loc[grouping_year, generator]
         capacity = capacity[~capacity.isna()]
-        capacity = capacity[capacity > snakemake.config['existing_capacities']['threshold_capacity']]
-        suffix = '-ac' if generator == 'offwind' else ''
-        name_suffix = f' {generator}{suffix}-{grouping_year}'
+        capacity = capacity[
+            capacity > snakemake.config["existing_capacities"]["threshold_capacity"]
+        ]
+        suffix = "-ac" if generator == "offwind" else ""
+        name_suffix = f" {generator}{suffix}-{grouping_year}"
         asset_i = capacity.index + name_suffix
-        if generator in ['solar', 'onwind', 'offwind']:
-
+        if generator in ["solar", "onwind", "offwind"]:
             # to consider electricity grid connection costs or a split between
             # solar utility and rooftop as well, rather take cost assumptions
             # from existing network than from the cost database
-            capital_cost = n.generators.loc[n.generators.carrier==generator+suffix, "capital_cost"].mean()
-            marginal_cost = n.generators.loc[n.generators.carrier==generator+suffix, "marginal_cost"].mean()
+            capital_cost = n.generators.loc[
+                n.generators.carrier == generator + suffix, "capital_cost"
+            ].mean()
+            marginal_cost = n.generators.loc[
+                n.generators.carrier == generator + suffix, "marginal_cost"
+            ].mean()
             # check if assets are already in network (e.g. for 2020)
             already_build = n.generators.index.intersection(asset_i)
             new_build = asset_i.difference(n.generators.index)

             # this is for the year 2020
             if not already_build.empty:
-                n.generators.loc[already_build, "p_nom_min"] = capacity.loc[already_build.str.replace(name_suffix, "")].values
+                n.generators.loc[already_build, "p_nom_min"] = capacity.loc[
+                    already_build.str.replace(name_suffix, "")
+                ].values
             new_capacity = capacity.loc[new_build.str.replace(name_suffix, "")]

-            if 'm' in snakemake.wildcards.clusters:
-
+            if "m" in snakemake.wildcards.clusters:
                 for ind in new_capacity.index:
-
                     # existing capacities are split evenly among regions in every country
                     inv_ind = [i for i in inv_busmap[ind]]

                     # for offshore the splitting only includes coastal regions
-                    inv_ind = [i for i in inv_ind if (i + name_suffix) in n.generators.index]
+                    inv_ind = [
+                        i for i in inv_ind if (i + name_suffix) in n.generators.index
+                    ]

-                    p_max_pu = n.generators_t.p_max_pu[[i + name_suffix for i in inv_ind]]
+                    p_max_pu = n.generators_t.p_max_pu[
+                        [i + name_suffix for i in inv_ind]
+                    ]
                     p_max_pu.columns = [i + name_suffix for i in inv_ind]

-                    n.madd("Generator",
+                    n.madd(
+                        "Generator",
                         [i + name_suffix for i in inv_ind],
                         bus=ind,
                         carrier=generator,
-                        p_nom=new_capacity[ind] / len(inv_ind), # split among regions in a country
+                        p_nom=new_capacity[ind]
+                        / len(inv_ind),  # split among regions in a country
                         marginal_cost=marginal_cost,
                         capital_cost=capital_cost,
-                        efficiency=costs.at[generator, 'efficiency'],
+                        efficiency=costs.at[generator, "efficiency"],
                         p_max_pu=p_max_pu,
                         build_year=grouping_year,
-                        lifetime=costs.at[generator,'lifetime']
+                        lifetime=costs.at[generator, "lifetime"],
                     )

             else:
-
-                p_max_pu = n.generators_t.p_max_pu[capacity.index + f' {generator}{suffix}-{baseyear}']
+                p_max_pu = n.generators_t.p_max_pu[
+                    capacity.index + f" {generator}{suffix}-{baseyear}"
+                ]

                 if not new_build.empty:
-                    n.madd("Generator",
+                    n.madd(
+                        "Generator",
                         new_capacity.index,
-                        suffix=' ' + name_suffix,
+                        suffix=" " + name_suffix,
                         bus=new_capacity.index,
                         carrier=generator,
                         p_nom=new_capacity,
                         marginal_cost=marginal_cost,
                         capital_cost=capital_cost,
-                        efficiency=costs.at[generator, 'efficiency'],
+                        efficiency=costs.at[generator, "efficiency"],
                         p_max_pu=p_max_pu.rename(columns=n.generators.bus),
                         build_year=grouping_year,
-                        lifetime=costs.at[generator, 'lifetime']
+                        lifetime=costs.at[generator, "lifetime"],
                     )

         else:
@@ -300,52 +308,75 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas

             # this is for the year 2020
             if not already_build.empty:
-                n.links.loc[already_build, "p_nom_min"] = capacity.loc[already_build.str.replace(name_suffix, "")].values
+                n.links.loc[already_build, "p_nom_min"] = capacity.loc[
+                    already_build.str.replace(name_suffix, "")
+                ].values

             if not new_build.empty:
                 new_capacity = capacity.loc[new_build.str.replace(name_suffix, "")]

                 if generator != "urban central solid biomass CHP":
-                    n.madd("Link",
+                    n.madd(
+                        "Link",
                         new_capacity.index,
                         suffix=name_suffix,
                         bus0=bus0,
                         bus1=new_capacity.index,
                         bus2="co2 atmosphere",
                         carrier=generator,
-                        marginal_cost=costs.at[generator, 'efficiency'] * costs.at[generator, 'VOM'], #NB: VOM is per MWel
-                        capital_cost=costs.at[generator, 'efficiency'] * costs.at[generator, 'fixed'], #NB: fixed cost is per MWel
-                        p_nom=new_capacity / costs.at[generator, 'efficiency'],
-                        efficiency=costs.at[generator, 'efficiency'],
-                        efficiency2=costs.at[carrier[generator], 'CO2 intensity'],
+                        marginal_cost=costs.at[generator, "efficiency"]
+                        * costs.at[generator, "VOM"],  # NB: VOM is per MWel
+                        capital_cost=costs.at[generator, "efficiency"]
+                        * costs.at[generator, "fixed"],  # NB: fixed cost is per MWel
+                        p_nom=new_capacity / costs.at[generator, "efficiency"],
+                        efficiency=costs.at[generator, "efficiency"],
+                        efficiency2=costs.at[carrier[generator], "CO2 intensity"],
                         build_year=grouping_year,
                         lifetime=lifetime_assets.loc[new_capacity.index],
                     )
                 else:
-                    key = 'central solid biomass CHP'
-                    n.madd("Link",
+                    key = "central solid biomass CHP"
+                    n.madd(
+                        "Link",
                         new_capacity.index,
                         suffix=name_suffix,
                         bus0=spatial.biomass.df.loc[new_capacity.index]["nodes"].values,
                         bus1=new_capacity.index,
                         bus2=new_capacity.index + " urban central heat",
                         carrier=generator,
-                        p_nom=new_capacity / costs.at[key, 'efficiency'],
-                        capital_cost=costs.at[key, 'fixed'] * costs.at[key, 'efficiency'],
-                        marginal_cost=costs.at[key, 'VOM'],
-                        efficiency=costs.at[key, 'efficiency'],
+                        p_nom=new_capacity / costs.at[key, "efficiency"],
+                        capital_cost=costs.at[key, "fixed"]
+                        * costs.at[key, "efficiency"],
+                        marginal_cost=costs.at[key, "VOM"],
+                        efficiency=costs.at[key, "efficiency"],
                         build_year=grouping_year,
-                        efficiency2=costs.at[key, 'efficiency-heat'],
-                        lifetime=lifetime_assets.loc[new_capacity.index]
+                        efficiency2=costs.at[key, "efficiency-heat"],
+                        lifetime=lifetime_assets.loc[new_capacity.index],
                     )
     # check if existing capacities are larger than technical potential
-    existing_large = n.generators[n.generators["p_nom_min"] > n.generators["p_nom_max"]].index
+    existing_large = n.generators[
+        n.generators["p_nom_min"] > n.generators["p_nom_max"]
+    ].index
    if len(existing_large):
-        logger.warning(f"Existing capacities larger than technical potential for {existing_large},\
-                       adjust technical potential to existing capacities")
-        n.generators.loc[existing_large, "p_nom_max"] = n.generators.loc[existing_large, "p_nom_min"]
+        logger.warning(
+            f"Existing capacities larger than technical potential for {existing_large},\
+                       adjust technical potential to existing capacities"
+        )
+        n.generators.loc[existing_large, "p_nom_max"] = n.generators.loc[
+            existing_large, "p_nom_min"
+        ]


-def add_heating_capacities_installed_before_baseyear(n, baseyear, grouping_years, ashp_cop, gshp_cop, time_dep_hp_cop, costs, default_lifetime):
-
+def add_heating_capacities_installed_before_baseyear(
+    n,
+    baseyear,
+    grouping_years,
+    ashp_cop,
+    gshp_cop,
+    time_dep_hp_cop,
+    costs,
+    default_lifetime,
+):
     """
     Parameters
     ----------
@@ -368,20 +399,20 @@ def add_heating_capacities_installed_before_baseyear(n, baseyear, grouping_years

     # retrieve existing heating capacities
     techs = [
-        'gas boiler',
-        'oil boiler',
-        'resistive heater',
-        'air heat pump',
-        'ground heat pump'
+        "gas boiler",
+        "oil boiler",
+        "resistive heater",
+        "air heat pump",
+        "ground heat pump",
     ]
     df = pd.read_csv(snakemake.input.existing_heating, index_col=0, header=0)

     # data for Albania, Montenegro and Macedonia not included in database
-    df.loc['Albania'] = np.nan
-    df.loc['Montenegro'] = np.nan
-    df.loc['Macedonia'] = np.nan
+    df.loc["Albania"] = np.nan
+    df.loc["Montenegro"] = np.nan
+    df.loc["Macedonia"] = np.nan

-    df.fillna(0., inplace=True)
+    df.fillna(0.0, inplace=True)

     # convert GW to MW
     df *= 1e3
|
||||
df.rename(index=cc["2 letter code (ISO-3166-2)"], inplace=True)
|
||||
|
||||
# coal and oil boilers are assimilated to oil boilers
|
||||
df['oil boiler'] = df['oil boiler'] + df['coal boiler']
|
||||
df.drop(['coal boiler'], axis=1, inplace=True)
|
||||
df["oil boiler"] = df["oil boiler"] + df["coal boiler"]
|
||||
df.drop(["coal boiler"], axis=1, inplace=True)
|
||||
|
||||
# distribute technologies to nodes by population
|
||||
pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0)
|
||||
@@ -403,36 +434,54 @@ def add_heating_capacities_installed_before_baseyear(n, baseyear, grouping_years

     # split existing capacities between residential and services
     # proportional to energy demand
-    ratio_residential=pd.Series([(n.loads_t.p_set.sum()['{} residential rural heat'.format(node)] /
-                        (n.loads_t.p_set.sum()['{} residential rural heat'.format(node)] +
-                        n.loads_t.p_set.sum()['{} services rural heat'.format(node)] ))
-                        for node in nodal_df.index], index=nodal_df.index)
+    ratio_residential = pd.Series(
+        [
+            (
+                n.loads_t.p_set.sum()["{} residential rural heat".format(node)]
+                / (
+                    n.loads_t.p_set.sum()["{} residential rural heat".format(node)]
+                    + n.loads_t.p_set.sum()["{} services rural heat".format(node)]
+                )
+            )
+            for node in nodal_df.index
+        ],
+        index=nodal_df.index,
+    )

     for tech in techs:
-        nodal_df['residential ' + tech] = nodal_df[tech] * ratio_residential
-        nodal_df['services ' + tech] = nodal_df[tech] * (1 - ratio_residential)
+        nodal_df["residential " + tech] = nodal_df[tech] * ratio_residential
+        nodal_df["services " + tech] = nodal_df[tech] * (1 - ratio_residential)

     names = [
         "residential rural",
         "services rural",
         "residential urban decentral",
         "services urban decentral",
-        "urban central"
+        "urban central",
     ]

     nodes = {}
     p_nom = {}
     for name in names:
-
         name_type = "central" if name == "urban central" else "decentral"
-        nodes[name] = pd.Index([n.buses.at[index, "location"] for index in n.buses.index[n.buses.index.str.contains(name) & n.buses.index.str.contains('heat')]])
+        nodes[name] = pd.Index(
+            [
+                n.buses.at[index, "location"]
+                for index in n.buses.index[
+                    n.buses.index.str.contains(name)
+                    & n.buses.index.str.contains("heat")
+                ]
+            ]
+        )
         heat_pump_type = "air" if "urban" in name else "ground"
         heat_type = "residential" if "residential" in name else "services"

         if name == "urban central":
-            p_nom[name] = nodal_df['air heat pump'][nodes[name]]
+            p_nom[name] = nodal_df["air heat pump"][nodes[name]]
         else:
-            p_nom[name] = nodal_df[f'{heat_type} {heat_pump_type} heat pump'][nodes[name]]
+            p_nom[name] = nodal_df[f"{heat_type} {heat_pump_type} heat pump"][
+                nodes[name]
+            ]

         # Add heat pumps
         costs_name = f"decentral {heat_pump_type}-sourced heat pump"
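The residential/services split above reduces to proportional scaling; with invented demand sums:

res, srv = 60.0, 40.0                  # annual residential / services heat demand at one node (invented)
ratio_residential = res / (res + srv)  # 0.6
# 'residential gas boiler' capacity = 0.6 * nodal gas boiler capacity,
# 'services gas boiler' capacity    = 0.4 * nodal gas boiler capacity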
@@ -442,104 +491,135 @@ def add_heating_capacities_installed_before_baseyear(n, baseyear, grouping_years
         if time_dep_hp_cop:
             efficiency = cop[heat_pump_type][nodes[name]]
         else:
-            efficiency = costs.at[costs_name, 'efficiency']
+            efficiency = costs.at[costs_name, "efficiency"]

         for i, grouping_year in enumerate(grouping_years):
-
             if int(grouping_year) + default_lifetime <= int(baseyear):
                 continue

             # installation is assumed to be linear for the past 25 years (default lifetime)
             ratio = (int(grouping_year) - int(grouping_years[i - 1])) / default_lifetime

-            n.madd("Link",
+            n.madd(
+                "Link",
                 nodes[name],
                 suffix=f" {name} {heat_pump_type} heat pump-{grouping_year}",
                 bus0=nodes[name],
                 bus1=nodes[name] + " " + name + " heat",
                 carrier=f"{name} {heat_pump_type} heat pump",
                 efficiency=efficiency,
-                capital_cost=costs.at[costs_name, 'efficiency'] * costs.at[costs_name, 'fixed'],
-                p_nom=p_nom[name] * ratio / costs.at[costs_name, 'efficiency'],
+                capital_cost=costs.at[costs_name, "efficiency"]
+                * costs.at[costs_name, "fixed"],
+                p_nom=p_nom[name] * ratio / costs.at[costs_name, "efficiency"],
                 build_year=int(grouping_year),
-                lifetime=costs.at[costs_name, 'lifetime']
+                lifetime=costs.at[costs_name, "lifetime"],
             )

             # add resistive heater, gas boilers and oil boilers
             # (50% capacities to rural buses, 50% to urban buses)
-            n.madd("Link",
+            n.madd(
+                "Link",
                 nodes[name],
                 suffix=f" {name} resistive heater-{grouping_year}",
                 bus0=nodes[name],
                 bus1=nodes[name] + " " + name + " heat",
                 carrier=name + " resistive heater",
-                efficiency=costs.at[name_type + ' resistive heater', 'efficiency'],
-                capital_cost=costs.at[name_type + ' resistive heater', 'efficiency'] * costs.at[name_type + ' resistive heater', 'fixed'],
-                p_nom=0.5 * nodal_df[f'{heat_type} resistive heater'][nodes[name]] * ratio / costs.at[name_type + ' resistive heater', 'efficiency'],
+                efficiency=costs.at[name_type + " resistive heater", "efficiency"],
+                capital_cost=costs.at[name_type + " resistive heater", "efficiency"]
+                * costs.at[name_type + " resistive heater", "fixed"],
+                p_nom=0.5
+                * nodal_df[f"{heat_type} resistive heater"][nodes[name]]
+                * ratio
+                / costs.at[name_type + " resistive heater", "efficiency"],
                 build_year=int(grouping_year),
-                lifetime=costs.at[costs_name, 'lifetime']
+                lifetime=costs.at[costs_name, "lifetime"],
             )

-
-            n.madd("Link",
+            n.madd(
+                "Link",
                 nodes[name],
                 suffix=f" {name} gas boiler-{grouping_year}",
                 bus0=spatial.gas.nodes,
                 bus1=nodes[name] + " " + name + " heat",
                 bus2="co2 atmosphere",
                 carrier=name + " gas boiler",
-                efficiency=costs.at[name_type + ' gas boiler', 'efficiency'],
-                efficiency2=costs.at['gas', 'CO2 intensity'],
-                capital_cost=costs.at[name_type + ' gas boiler', 'efficiency'] * costs.at[name_type + ' gas boiler', 'fixed'],
-                p_nom=0.5*nodal_df[f'{heat_type} gas boiler'][nodes[name]] * ratio / costs.at[name_type + ' gas boiler', 'efficiency'],
+                efficiency=costs.at[name_type + " gas boiler", "efficiency"],
+                efficiency2=costs.at["gas", "CO2 intensity"],
+                capital_cost=costs.at[name_type + " gas boiler", "efficiency"]
+                * costs.at[name_type + " gas boiler", "fixed"],
+                p_nom=0.5
+                * nodal_df[f"{heat_type} gas boiler"][nodes[name]]
+                * ratio
+                / costs.at[name_type + " gas boiler", "efficiency"],
                 build_year=int(grouping_year),
-                lifetime=costs.at[name_type + ' gas boiler', 'lifetime']
+                lifetime=costs.at[name_type + " gas boiler", "lifetime"],
             )

-            n.madd("Link",
+            n.madd(
+                "Link",
                 nodes[name],
                 suffix=f" {name} oil boiler-{grouping_year}",
                 bus0=spatial.oil.nodes,
                 bus1=nodes[name] + " " + name + " heat",
                 bus2="co2 atmosphere",
                 carrier=name + " oil boiler",
-                efficiency=costs.at['decentral oil boiler', 'efficiency'],
-                efficiency2=costs.at['oil', 'CO2 intensity'],
-                capital_cost=costs.at['decentral oil boiler', 'efficiency'] * costs.at['decentral oil boiler', 'fixed'],
-                p_nom=0.5 * nodal_df[f'{heat_type} oil boiler'][nodes[name]] * ratio / costs.at['decentral oil boiler', 'efficiency'],
+                efficiency=costs.at["decentral oil boiler", "efficiency"],
+                efficiency2=costs.at["oil", "CO2 intensity"],
+                capital_cost=costs.at["decentral oil boiler", "efficiency"]
+                * costs.at["decentral oil boiler", "fixed"],
+                p_nom=0.5
+                * nodal_df[f"{heat_type} oil boiler"][nodes[name]]
+                * ratio
+                / costs.at["decentral oil boiler", "efficiency"],
                 build_year=int(grouping_year),
-                lifetime=costs.at[name_type + ' gas boiler', 'lifetime']
+                lifetime=costs.at[name_type + " gas boiler", "lifetime"],
             )

             # delete links with p_nom=nan corresponding to extra nodes in country
-            n.mremove("Link", [index for index in n.links.index.to_list() if str(grouping_year) in index and np.isnan(n.links.p_nom[index])])
+            n.mremove(
+                "Link",
+                [
+                    index
+                    for index in n.links.index.to_list()
+                    if str(grouping_year) in index and np.isnan(n.links.p_nom[index])
+                ],
+            )

             # delete links with capacities below threshold
-            threshold = snakemake.config['existing_capacities']['threshold_capacity']
-            n.mremove("Link", [index for index in n.links.index.to_list() if str(grouping_year) in index and n.links.p_nom[index] < threshold])
+            threshold = snakemake.config["existing_capacities"]["threshold_capacity"]
+            n.mremove(
+                "Link",
+                [
+                    index
+                    for index in n.links.index.to_list()
+                    if str(grouping_year) in index and n.links.p_nom[index] < threshold
+                ],
+            )

 # %%
 if __name__ == "__main__":
-    if 'snakemake' not in globals():
+    if "snakemake" not in globals():
         from helper import mock_snakemake

         snakemake = mock_snakemake(
-            'add_existing_baseyear',
-            simpl='',
+            "add_existing_baseyear",
+            simpl="",
             clusters="45",
             lv=1.0,
-            opts='',
-            sector_opts='8760H-T-H-B-I-A-solar+p3-dist1',
+            opts="",
+            sector_opts="8760H-T-H-B-I-A-solar+p3-dist1",
             planning_horizons=2020,
         )

-    logging.basicConfig(level=snakemake.config['logging_level'])
+    logging.basicConfig(level=snakemake.config["logging_level"])

     update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts)

     options = snakemake.config["sector"]
-    opts = snakemake.wildcards.sector_opts.split('-')
+    opts = snakemake.wildcards.sector_opts.split("-")

-    baseyear = snakemake.config['scenario']["planning_horizons"][0]
+    baseyear = snakemake.config["scenario"]["planning_horizons"][0]

     overrides = override_component_attrs(snakemake.input.overrides)
     n = pypsa.Network(snakemake.input.network, override_component_attrs=overrides)
@@ -547,26 +627,46 @@ if __name__ == "__main__":
     spatial = define_spatial(n.buses[n.buses.carrier == "AC"].index, options)
     add_build_year_to_new_assets(n, baseyear)

-    Nyears = n.snapshot_weightings.generators.sum() / 8760.
+    Nyears = n.snapshot_weightings.generators.sum() / 8760.0
     costs = prepare_costs(
         snakemake.input.costs,
-        snakemake.config['costs']['USD2013_to_EUR2013'],
-        snakemake.config['costs']['discountrate'],
+        snakemake.config["costs"]["USD2013_to_EUR2013"],
+        snakemake.config["costs"]["discountrate"],
         Nyears,
-        snakemake.config['costs']['lifetime']
+        snakemake.config["costs"]["lifetime"],
     )

-    grouping_years_power = snakemake.config['existing_capacities']['grouping_years_power']
-    grouping_years_heat = snakemake.config['existing_capacities']['grouping_years_heat']
-    add_power_capacities_installed_before_baseyear(n, grouping_years_power, costs, baseyear)
+    grouping_years_power = snakemake.config["existing_capacities"][
+        "grouping_years_power"
+    ]
+    grouping_years_heat = snakemake.config["existing_capacities"]["grouping_years_heat"]
+    add_power_capacities_installed_before_baseyear(
+        n, grouping_years_power, costs, baseyear
+    )

     if "H" in opts:
         time_dep_hp_cop = options["time_dep_hp_cop"]
-        ashp_cop = xr.open_dataarray(snakemake.input.cop_air_total).to_pandas().reindex(index=n.snapshots)
-        gshp_cop = xr.open_dataarray(snakemake.input.cop_soil_total).to_pandas().reindex(index=n.snapshots)
-        default_lifetime = snakemake.config['costs']['lifetime']
-        add_heating_capacities_installed_before_baseyear(n, baseyear, grouping_years_heat,
-                                                         ashp_cop, gshp_cop, time_dep_hp_cop, costs, default_lifetime)
+        ashp_cop = (
+            xr.open_dataarray(snakemake.input.cop_air_total)
+            .to_pandas()
+            .reindex(index=n.snapshots)
+        )
+        gshp_cop = (
+            xr.open_dataarray(snakemake.input.cop_soil_total)
+            .to_pandas()
+            .reindex(index=n.snapshots)
+        )
+        default_lifetime = snakemake.config["costs"]["lifetime"]
+        add_heating_capacities_installed_before_baseyear(
+            n,
+            baseyear,
+            grouping_years_heat,
+            ashp_cop,
+            gshp_cop,
+            time_dep_hp_cop,
+            costs,
+            default_lifetime,
+        )

     if options.get("cluster_heat_buses", False):
         cluster_heat_buses(n)
@@ -1,4 +1,7 @@
-"""Build ammonia production."""
+# -*- coding: utf-8 -*-
+"""
+Build ammonia production.
+"""

 import pandas as pd

@@ -27,17 +30,20 @@ country_to_alpha2 = {
     "United Kingdom": "GB",
 }

-if __name__ == '__main__':
-    if 'snakemake' not in globals():
+if __name__ == "__main__":
+    if "snakemake" not in globals():
         from helper import mock_snakemake
-        snakemake = mock_snakemake('build_ammonia_production')
-
-    ammonia = pd.read_excel(snakemake.input.usgs,
+        snakemake = mock_snakemake("build_ammonia_production")
+
+    ammonia = pd.read_excel(
+        snakemake.input.usgs,
         sheet_name="T12",
         skiprows=5,
         header=0,
         index_col=0,
-        skipfooter=19)
+        skipfooter=19,
+    )

     ammonia.rename(country_to_alpha2, inplace=True)

@@ -1,15 +1,15 @@
-import pandas as pd
+# -*- coding: utf-8 -*-
 import geopandas as gpd
+import pandas as pd


 def build_nuts_population_data(year=2013):
-
     pop = pd.read_csv(
         snakemake.input.nuts3_population,
-        sep=r'\,| \t|\t',
-        engine='python',
+        sep=r"\,| \t|\t",
+        engine="python",
         na_values=[":"],
-        index_col=1
+        index_col=1,
     )[str(year)]

     # only countries
@@ -18,10 +18,12 @@ def build_nuts_population_data(year=2013):
     # mapping from Cantons to NUTS3
     cantons = pd.read_csv(snakemake.input.swiss_cantons)
     cantons = cantons.set_index(cantons.HASC.str[3:]).NUTS
-    cantons = cantons.str.pad(5, side='right', fillchar='0')
+    cantons = cantons.str.pad(5, side="right", fillchar="0")

     # get population by NUTS3
-    swiss = pd.read_excel(snakemake.input.swiss_population, skiprows=3, index_col=0).loc["Residents in 1000"]
+    swiss = pd.read_excel(
+        snakemake.input.swiss_population, skiprows=3, index_col=0
+    ).loc["Residents in 1000"]
     swiss = swiss.rename(cantons).filter(like="CH")

     # aggregate also to higher order NUTS levels
@@ -64,13 +66,13 @@ def enspreso_biomass_potentials(year=2020, scenario="ENS_Low"):
         sheet_name="Glossary",
         usecols="B:D",
         skiprows=1,
-        index_col=0
+        index_col=0,
     )

     df = pd.read_excel(
         str(snakemake.input.enspreso_biomass),
         sheet_name="ENER - NUTS2 BioCom E",
-        usecols="A:H"
+        usecols="A:H",
     )

     df["group"] = df["E-Comm"].map(glossary.group)
|
||||
df.rename(columns=to_rename, inplace=True)
|
||||
|
||||
# fill up with NUTS0 if NUTS2 is not given
|
||||
df.NUTS2 = df.apply(lambda x: x.NUTS0 if x.NUTS2 == '-' else x.NUTS2, axis=1)
|
||||
df.NUTS2 = df.apply(lambda x: x.NUTS0 if x.NUTS2 == "-" else x.NUTS2, axis=1)
|
||||
|
||||
# convert PJ to TWh
|
||||
df.potential /= 3.6
|
||||
@ -102,9 +104,8 @@ def enspreso_biomass_potentials(year=2020, scenario="ENS_Low"):
|
||||
|
||||
def disaggregate_nuts0(bio):
|
||||
"""
|
||||
Some commodities are only given on NUTS0 level.
|
||||
These are disaggregated here using the NUTS2
|
||||
population as distribution key.
|
||||
Some commodities are only given on NUTS0 level. These are disaggregated
|
||||
here using the NUTS2 population as distribution key.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
@@ -141,9 +142,11 @@ def build_nuts2_shapes():
     - consistently name ME, MK
     """

-    nuts2 = gpd.GeoDataFrame(gpd.read_file(snakemake.input.nuts2).set_index('id').geometry)
+    nuts2 = gpd.GeoDataFrame(
+        gpd.read_file(snakemake.input.nuts2).set_index("id").geometry
+    )

-    countries = gpd.read_file(snakemake.input.country_shapes).set_index('name')
+    countries = gpd.read_file(snakemake.input.country_shapes).set_index("name")
     missing_iso2 = countries.index.intersection(["AL", "RS", "BA"])
     missing = countries.loc[missing_iso2]

@@ -153,14 +156,16 @@ def build_nuts2_shapes():


 def area(gdf):
-    """Returns area of GeoDataFrame geometries in square kilometers."""
+    """
+    Returns area of GeoDataFrame geometries in square kilometers.
+    """
     return gdf.to_crs(epsg=3035).area.div(1e6)


 def convert_nuts2_to_regions(bio_nuts2, regions):
     """
-    Converts biomass potentials given in NUTS2 to PyPSA-Eur regions based on the
-    overlay of both GeoDataFrames in proportion to the area.
+    Converts biomass potentials given in NUTS2 to PyPSA-Eur regions based on
+    the overlay of both GeoDataFrames in proportion to the area.

     Parameters
     ----------
@@ -183,7 +188,9 @@ def convert_nuts2_to_regions(bio_nuts2, regions):
     overlay["share"] = area(overlay) / overlay["area_nuts2"]

     # multiply all nuts2-level values with share of nuts2 inside region
-    adjust_cols = overlay.columns.difference({"name", "area_nuts2", "geometry", "share"})
+    adjust_cols = overlay.columns.difference(
+        {"name", "area_nuts2", "geometry", "share"}
+    )
     overlay[adjust_cols] = overlay[adjust_cols].multiply(overlay["share"], axis=0)

     bio_regions = overlay.groupby("name").sum()
@ -194,11 +201,12 @@ def convert_nuts2_to_regions(bio_nuts2, regions):
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
if 'snakemake' not in globals():
|
||||
if "snakemake" not in globals():
|
||||
from helper import mock_snakemake
|
||||
snakemake = mock_snakemake('build_biomass_potentials', simpl='', clusters='5')
|
||||
|
||||
config = snakemake.config['biomass']
|
||||
snakemake = mock_snakemake("build_biomass_potentials", simpl="", clusters="5")
|
||||
|
||||
config = snakemake.config["biomass"]
|
||||
year = config["year"]
|
||||
scenario = config["scenario"]
|
||||
|
||||
|
@ -1,5 +1,6 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Reads biomass transport costs for different countries of the JRC report
|
||||
Reads biomass transport costs for different countries from the JRC report.
|
||||
|
||||
"The JRC-EU-TIMES model.
|
||||
Bioenergy potentials
|
||||
@ -18,29 +19,24 @@ import tabula as tbl
|
||||
|
||||
ENERGY_CONTENT = 4.8 # unit MWh/t (wood pellets)
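|
||||
# A plausible use, not shown in this hunk: dividing the report's EUR/(t km)
|
||||
# figures by this 4.8 MWh/t to obtain the EUR/km/MWh series built below
|
||||
# (an assumption about the omitted code).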
|
||||
|
||||
def get_countries():
|
||||
|
||||
pandas_options = dict(
|
||||
skiprows=range(6),
|
||||
header=None,
|
||||
index_col=0
|
||||
)
|
||||
def get_countries():
|
||||
pandas_options = dict(skiprows=range(6), header=None, index_col=0)
|
||||
|
||||
return tbl.read_pdf(
|
||||
str(snakemake.input.transport_cost_data),
|
||||
pages="145",
|
||||
multiple_tables=False,
|
||||
pandas_options=pandas_options
|
||||
pandas_options=pandas_options,
|
||||
)[0].index
|
||||
|
||||
|
||||
def get_cost_per_tkm(page, countries):
|
||||
|
||||
pandas_options = dict(
|
||||
skiprows=range(6),
|
||||
header=0,
|
||||
sep=' |,',
|
||||
engine='python',
|
||||
sep=" |,",
|
||||
engine="python",
|
||||
index_col=False,
|
||||
)
|
||||
|
||||
@ -48,7 +44,7 @@ def get_cost_per_tkm(page, countries):
|
||||
str(snakemake.input.transport_cost_data),
|
||||
pages=page,
|
||||
multiple_tables=False,
|
||||
pandas_options=pandas_options
|
||||
pandas_options=pandas_options,
|
||||
)[0]
|
||||
sc.index = countries
|
||||
sc.columns = sc.columns.str.replace("€", "EUR")
|
||||
@ -57,7 +53,6 @@ def get_cost_per_tkm(page, countries):
|
||||
|
||||
|
||||
def build_biomass_transport_costs():
|
||||
|
||||
countries = get_countries()
|
||||
|
||||
sc1 = get_cost_per_tkm(146, countries)
|
||||
@ -72,11 +67,7 @@ def build_biomass_transport_costs():
|
||||
transport_costs.name = "EUR/km/MWh"
|
||||
|
||||
# rename country names
|
||||
to_rename = {
|
||||
"UK": "GB",
|
||||
"XK": "KO",
|
||||
"EL": "GR"
|
||||
}
|
||||
to_rename = {"UK": "GB", "XK": "KO", "EL": "GR"}
|
||||
transport_costs.rename(to_rename, inplace=True)
|
||||
|
||||
# add missing Norway with data from Sweden
|
||||
@ -86,5 +77,4 @@ def build_biomass_transport_costs():
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
build_biomass_transport_costs()
|
||||
|
@ -1,31 +1,38 @@
|
||||
"""Build clustered population layouts."""
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Build clustered population layouts.
|
||||
"""
|
||||
|
||||
import geopandas as gpd
|
||||
import xarray as xr
|
||||
import pandas as pd
|
||||
import atlite
|
||||
import geopandas as gpd
|
||||
import pandas as pd
|
||||
import xarray as xr
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if 'snakemake' not in globals():
|
||||
if __name__ == "__main__":
|
||||
if "snakemake" not in globals():
|
||||
from helper import mock_snakemake
|
||||
|
||||
snakemake = mock_snakemake(
|
||||
'build_clustered_population_layouts',
|
||||
simpl='',
|
||||
"build_clustered_population_layouts",
|
||||
simpl="",
|
||||
clusters=48,
|
||||
)
|
||||
|
||||
cutout = atlite.Cutout(snakemake.config['atlite']['cutout'])
|
||||
cutout = atlite.Cutout(snakemake.config["atlite"]["cutout"])
|
||||
|
||||
clustered_regions = gpd.read_file(
|
||||
snakemake.input.regions_onshore).set_index('name').buffer(0).squeeze()
|
||||
clustered_regions = (
|
||||
gpd.read_file(snakemake.input.regions_onshore)
|
||||
.set_index("name")
|
||||
.buffer(0)
|
||||
.squeeze()
|
||||
)
|
||||
|
||||
I = cutout.indicatormatrix(clustered_regions)
|
||||
|
||||
pop = {}
|
||||
for item in ["total", "urban", "rural"]:
|
||||
pop_layout = xr.open_dataarray(snakemake.input[f'pop_layout_{item}'])
|
||||
pop[item] = I.dot(pop_layout.stack(spatial=('y', 'x')))
|
||||
pop_layout = xr.open_dataarray(snakemake.input[f"pop_layout_{item}"])
|
||||
pop[item] = I.dot(pop_layout.stack(spatial=("y", "x")))
|
||||
|
||||
pop = pd.DataFrame(pop, index=clustered_regions.index)
|
||||
|
||||
|
@ -1,39 +1,41 @@
|
||||
"""Build COP time series for air- or ground-sourced heat pumps."""
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Build COP time series for air- or ground-sourced heat pumps.
|
||||
"""
|
||||
|
||||
import xarray as xr
|
||||
|
||||
|
||||
def coefficient_of_performance(delta_T, source='air'):
|
||||
def coefficient_of_performance(delta_T, source="air"):
|
||||
"""
|
||||
COP is a function of the temperature difference between heat source and sink.
|
||||
|
||||
The quadratic regression is based on Staffell et al. (2012)
|
||||
https://doi.org/10.1039/C2EE22653G.
|
||||
"""
|
||||
if source == 'air':
|
||||
if source == "air":
|
||||
return 6.81 - 0.121 * delta_T + 0.000630 * delta_T**2
|
||||
elif source == 'soil':
|
||||
elif source == "soil":
|
||||
return 8.77 - 0.150 * delta_T + 0.000734 * delta_T**2
|
||||
else:
|
||||
raise NotImplementedError("'source' must be one of ['air', 'soil']")
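|
||||
# Illustrative check of the air-source regression (example values, not from
|
||||
# the commit): at delta_T = 50 K,
|
||||
# COP = 6.81 - 0.121 * 50 + 0.000630 * 50**2 = 2.335.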
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if 'snakemake' not in globals():
|
||||
if __name__ == "__main__":
|
||||
if "snakemake" not in globals():
|
||||
from helper import mock_snakemake
|
||||
|
||||
snakemake = mock_snakemake(
|
||||
'build_cop_profiles',
|
||||
simpl='',
|
||||
"build_cop_profiles",
|
||||
simpl="",
|
||||
clusters=48,
|
||||
)
|
||||
|
||||
for area in ["total", "urban", "rural"]:
|
||||
|
||||
for source in ["air", "soil"]:
|
||||
source_T = xr.open_dataarray(snakemake.input[f"temp_{source}_{area}"])
|
||||
|
||||
source_T = xr.open_dataarray(
|
||||
snakemake.input[f"temp_{source}_{area}"])
|
||||
|
||||
delta_T = snakemake.config['sector']['heat_pump_sink_T'] - source_T
|
||||
delta_T = snakemake.config["sector"]["heat_pump_sink_T"] - source_T
|
||||
|
||||
cop = coefficient_of_performance(delta_T, source)
|
||||
|
||||
|
@ -1,25 +1,31 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
from functools import partial
|
||||
from tqdm import tqdm
|
||||
from helper import mute_print
|
||||
|
||||
import multiprocessing as mp
|
||||
import pandas as pd
|
||||
from functools import partial
|
||||
|
||||
import geopandas as gpd
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
from helper import mute_print
|
||||
from tqdm import tqdm
|
||||
|
||||
idx = pd.IndexSlice
|
||||
|
||||
|
||||
def cartesian(s1, s2):
|
||||
"""Cartesian product of two pd.Series"""
|
||||
"""
|
||||
Cartesian product of two pd.Series.
|
||||
"""
|
||||
return pd.DataFrame(np.outer(s1, s2), index=s1.index, columns=s2.index)
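|
||||
# Example: cartesian(pd.Series({"a": 2}), pd.Series({"x": 3, "y": 4}))
|
||||
# returns a 1x2 DataFrame whose row "a" holds [6, 8].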
|
||||
|
||||
|
||||
def reverse(dictionary):
|
||||
"""reverses a keys and values of a dictionary"""
|
||||
"""
|
||||
Reverses the keys and values of a dictionary.
|
||||
"""
|
||||
return {v: k for k, v in dictionary.items()}
|
||||
|
||||
|
||||
@ -122,7 +128,7 @@ to_ipcc = {
|
||||
"total energy": "1 - Energy",
|
||||
"industrial processes": "2 - Industrial Processes and Product Use",
|
||||
"agriculture": "3 - Agriculture",
|
||||
"agriculture, forestry and fishing": '1.A.4.c - Agriculture/Forestry/Fishing',
|
||||
"agriculture, forestry and fishing": "1.A.4.c - Agriculture/Forestry/Fishing",
|
||||
"LULUCF": "4 - Land Use, Land-Use Change and Forestry",
|
||||
"waste management": "5 - Waste management",
|
||||
"other": "6 - Other Sector",
|
||||
@ -131,12 +137,15 @@ to_ipcc = {
|
||||
"total woL": "Total (without LULUCF)",
|
||||
}
|
||||
|
||||
|
||||
def build_eurostat(input_eurostat, countries, report_year, year):
|
||||
"""Return multi-index for all countries' energy data in TWh/a."""
|
||||
"""
|
||||
Return multi-index for all countries' energy data in TWh/a.
|
||||
"""
|
||||
|
||||
filenames = {
|
||||
2016: f"/{year}-Energy-Balances-June2016edition.xlsx",
|
||||
2017: f"/{year}-ENERGY-BALANCES-June2017edition.xlsx"
|
||||
2017: f"/{year}-ENERGY-BALANCES-June2017edition.xlsx",
|
||||
}
|
||||
|
||||
with mute_print():
|
||||
@ -149,9 +158,11 @@ def build_eurostat(input_eurostat, countries, report_year, year):
|
||||
|
||||
# sorted_index necessary for slicing
|
||||
lookup = eurostat_country_to_alpha2
|
||||
labelled_dfs = {lookup[df.columns[0]]: df
|
||||
labelled_dfs = {
|
||||
lookup[df.columns[0]]: df
|
||||
for df in dfs.values()
|
||||
if lookup[df.columns[0]] in countries}
|
||||
if lookup[df.columns[0]] in countries
|
||||
}
|
||||
df = pd.concat(labelled_dfs, sort=True).sort_index()
|
||||
|
||||
# drop non-numeric and country columns
|
||||
@ -167,7 +178,9 @@ def build_eurostat(input_eurostat, countries, report_year, year):
|
||||
|
||||
|
||||
def build_swiss(year):
|
||||
"""Return a pd.Series of Swiss energy data in TWh/a"""
|
||||
"""
|
||||
Return a pd.Series of Swiss energy data in TWh/a.
|
||||
"""
|
||||
|
||||
fn = snakemake.input.swiss
|
||||
|
||||
@ -180,7 +193,6 @@ def build_swiss(year):
|
||||
|
||||
|
||||
def idees_per_country(ct, year):
|
||||
|
||||
base_dir = snakemake.input.idees
|
||||
|
||||
ct_totals = {}
|
||||
@ -220,7 +232,7 @@ def idees_per_country(ct, year):
|
||||
assert df.index[46] == "Derived heat"
|
||||
ct_totals["derived heat residential"] = df[46]
|
||||
|
||||
assert df.index[50] == 'Thermal uses'
|
||||
assert df.index[50] == "Thermal uses"
|
||||
ct_totals["thermal uses residential"] = df[50]
|
||||
|
||||
# services
|
||||
@ -253,10 +265,9 @@ def idees_per_country(ct, year):
|
||||
assert df.index[49] == "Derived heat"
|
||||
ct_totals["derived heat services"] = df[49]
|
||||
|
||||
assert df.index[53] == 'Thermal uses'
|
||||
assert df.index[53] == "Thermal uses"
|
||||
ct_totals["thermal uses services"] = df[53]
|
||||
|
||||
|
||||
# agriculture, forestry and fishing
|
||||
|
||||
start = "Detailed split of energy consumption (ktoe)"
|
||||
@ -268,7 +279,7 @@ def idees_per_country(ct, year):
|
||||
"Lighting",
|
||||
"Ventilation",
|
||||
"Specific electricity uses",
|
||||
"Pumping devices (electric)"
|
||||
"Pumping devices (electric)",
|
||||
]
|
||||
ct_totals["total agriculture electricity"] = df[rows].sum()
|
||||
|
||||
@ -360,11 +371,15 @@ def idees_per_country(ct, year):
|
||||
assert df.index[12] == "International - Extra-EU"
|
||||
ct_totals["total international aviation freight"] = df[12]
|
||||
|
||||
ct_totals["total domestic aviation"] = ct_totals["total domestic aviation freight"] \
|
||||
ct_totals["total domestic aviation"] = (
|
||||
ct_totals["total domestic aviation freight"]
|
||||
+ ct_totals["total domestic aviation passenger"]
|
||||
)
|
||||
|
||||
ct_totals["total international aviation"] = ct_totals["total international aviation freight"] \
|
||||
ct_totals["total international aviation"] = (
|
||||
ct_totals["total international aviation freight"]
|
||||
+ ct_totals["total international aviation passenger"]
|
||||
)
|
||||
|
||||
df = pd.read_excel(fn_transport, "TrNavi_ene", index_col=0)[year]
|
||||
|
||||
@ -380,17 +395,19 @@ def idees_per_country(ct, year):
|
||||
|
||||
|
||||
def build_idees(countries, year):
|
||||
|
||||
nprocesses = snakemake.threads
|
||||
|
||||
func = partial(idees_per_country, year=year)
|
||||
tqdm_kwargs = dict(ascii=False, unit=' country', total=len(countries),
|
||||
desc='Build from IDEES database')
|
||||
tqdm_kwargs = dict(
|
||||
ascii=False,
|
||||
unit=" country",
|
||||
total=len(countries),
|
||||
desc="Build from IDEES database",
|
||||
)
|
||||
with mute_print():
|
||||
with mp.Pool(processes=nprocesses) as pool:
|
||||
totals_list = list(tqdm(pool.imap(func, countries), **tqdm_kwargs))
|
||||
|
||||
|
||||
totals = pd.concat(totals_list, axis=1)
|
||||
|
||||
# convert ktoe to TWh
|
||||
@ -401,19 +418,17 @@ def build_idees(countries, year):
|
||||
totals.loc["passenger car efficiency"] *= 10
|
||||
|
||||
# district heating share
|
||||
district_heat = totals.loc[["derived heat residential",
|
||||
"derived heat services"]].sum()
|
||||
total_heat = totals.loc[["thermal uses residential",
|
||||
"thermal uses services"]].sum()
|
||||
district_heat = totals.loc[
|
||||
["derived heat residential", "derived heat services"]
|
||||
].sum()
|
||||
total_heat = totals.loc[["thermal uses residential", "thermal uses services"]].sum()
|
||||
totals.loc["district heat share"] = district_heat.div(total_heat)
|
||||
|
||||
return totals.T
|
||||
|
||||
|
||||
def build_energy_totals(countries, eurostat, swiss, idees):
|
||||
|
||||
eurostat_fuels = {"electricity": "Electricity",
|
||||
"total": "Total all products"}
|
||||
eurostat_fuels = {"electricity": "Electricity", "total": "Total all products"}
|
||||
|
||||
to_drop = ["passenger cars", "passenger car efficiency"]
|
||||
df = idees.reindex(countries).drop(to_drop, axis=1)
|
||||
@ -439,36 +454,47 @@ def build_energy_totals(countries, eurostat, swiss, idees):
|
||||
uses = ["space", "cooking", "water"]
|
||||
|
||||
for sector in ["residential", "services", "road", "rail"]:
|
||||
|
||||
eurostat_sector = sector.capitalize()
|
||||
|
||||
# fuel use
|
||||
|
||||
for fuel in ["electricity", "total"]:
|
||||
slicer = idx[to_fill, :, :, eurostat_sector]
|
||||
fill_values = eurostat.loc[slicer, eurostat_fuels[fuel]].groupby(level=0).sum()
|
||||
fill_values = (
|
||||
eurostat.loc[slicer, eurostat_fuels[fuel]].groupby(level=0).sum()
|
||||
)
|
||||
df.loc[to_fill, f"{fuel} {sector}"] = fill_values
|
||||
|
||||
for sector in ["residential", "services"]:
|
||||
|
||||
# electric use
|
||||
|
||||
for use in uses:
|
||||
fuel_use = df[f"electricity {sector} {use}"]
|
||||
fuel = df[f"electricity {sector}"]
|
||||
avg = fuel_use.div(fuel).mean()
|
||||
logger.debug(f"{sector}: average fraction of electricity for {use} is {avg:.3f}")
|
||||
df.loc[to_fill, f"electricity {sector} {use}"] = avg * df.loc[to_fill, f"electricity {sector}"]
|
||||
logger.debug(
|
||||
f"{sector}: average fraction of electricity for {use} is {avg:.3f}"
|
||||
)
|
||||
df.loc[to_fill, f"electricity {sector} {use}"] = (
|
||||
avg * df.loc[to_fill, f"electricity {sector}"]
|
||||
)
|
||||
|
||||
# non-electric use
|
||||
|
||||
for use in uses:
|
||||
nonelectric_use = df[f"total {sector} {use}"] - df[f"electricity {sector} {use}"]
|
||||
nonelectric_use = (
|
||||
df[f"total {sector} {use}"] - df[f"electricity {sector} {use}"]
|
||||
)
|
||||
nonelectric = df[f"total {sector}"] - df[f"electricity {sector}"]
|
||||
avg = nonelectric_use.div(nonelectric).mean()
|
||||
logger.debug(f"{sector}: average fraction of non-electric for {use} is {avg:.3f}")
|
||||
logger.debug(
|
||||
f"{sector}: average fraction of non-electric for {use} is {avg:.3f}"
|
||||
)
|
||||
electric_use = df.loc[to_fill, f"electricity {sector} {use}"]
|
||||
nonelectric = df.loc[to_fill, f"total {sector}"] - df.loc[to_fill, f"electricity {sector}"]
|
||||
nonelectric = (
|
||||
df.loc[to_fill, f"total {sector}"]
|
||||
- df.loc[to_fill, f"electricity {sector}"]
|
||||
)
|
||||
df.loc[to_fill, f"total {sector} {use}"] = electric_use + avg * nonelectric
|
||||
|
||||
# Fix Norway space and water heating fractions
|
||||
@ -480,17 +506,25 @@ def build_energy_totals(countries, eurostat, swiss, idees):
|
||||
no_norway = df.drop("NO")
|
||||
|
||||
for sector in ["residential", "services"]:
|
||||
|
||||
# assume non-electric is heating
|
||||
nonelectric = df.loc["NO", f"total {sector}"] - df.loc["NO", f"electricity {sector}"]
|
||||
nonelectric = (
|
||||
df.loc["NO", f"total {sector}"] - df.loc["NO", f"electricity {sector}"]
|
||||
)
|
||||
total_heating = nonelectric / (1 - elec_fraction)
|
||||
|
||||
for use in uses:
|
||||
nonelectric_use = no_norway[f"total {sector} {use}"] - no_norway[f"electricity {sector} {use}"]
|
||||
nonelectric = no_norway[f"total {sector}"] - no_norway[f"electricity {sector}"]
|
||||
nonelectric_use = (
|
||||
no_norway[f"total {sector} {use}"]
|
||||
- no_norway[f"electricity {sector} {use}"]
|
||||
)
|
||||
nonelectric = (
|
||||
no_norway[f"total {sector}"] - no_norway[f"electricity {sector}"]
|
||||
)
|
||||
fraction = nonelectric_use.div(nonelectric).mean()
|
||||
df.loc["NO", f"total {sector} {use}"] = total_heating * fraction
|
||||
df.loc["NO", f"electricity {sector} {use}"] = total_heating * fraction * elec_fraction
|
||||
df.loc["NO", f"electricity {sector} {use}"] = (
|
||||
total_heating * fraction * elec_fraction
|
||||
)
|
||||
|
||||
# Missing aviation
|
||||
|
||||
@ -517,10 +551,7 @@ def build_energy_totals(countries, eurostat, swiss, idees):
|
||||
f"{fuel} light duty road freight",
|
||||
]
|
||||
if fuel == "total":
|
||||
selection.extend([
|
||||
f"{fuel} two-wheel",
|
||||
f"{fuel} heavy duty road freight"
|
||||
])
|
||||
selection.extend([f"{fuel} two-wheel", f"{fuel} heavy duty road freight"])
|
||||
road = df[selection].sum()
|
||||
road_fraction = road / road.sum()
|
||||
fill_values = cartesian(df.loc[missing, f"{fuel} road"], road_fraction)
|
||||
@ -544,33 +575,40 @@ def build_energy_totals(countries, eurostat, swiss, idees):
|
||||
]
|
||||
aviation = df[selection].sum()
|
||||
aviation_fraction = aviation / aviation.sum()
|
||||
fill_values = cartesian(df.loc[missing, f"total {destination} aviation"], aviation_fraction)
|
||||
fill_values = cartesian(
|
||||
df.loc[missing, f"total {destination} aviation"], aviation_fraction
|
||||
)
|
||||
df.loc[missing, aviation_fraction.index] = fill_values
|
||||
|
||||
for purpose in ["passenger", "freight"]:
|
||||
attrs = [f"total domestic aviation {purpose}", f"total international aviation {purpose}"]
|
||||
df.loc[missing, f"total aviation {purpose}"] = df.loc[missing, attrs].sum(axis=1)
|
||||
attrs = [
|
||||
f"total domestic aviation {purpose}",
|
||||
f"total international aviation {purpose}",
|
||||
]
|
||||
df.loc[missing, f"total aviation {purpose}"] = df.loc[missing, attrs].sum(
|
||||
axis=1
|
||||
)
|
||||
|
||||
if "BA" in df.index:
|
||||
# fill missing data for BA (services and road energy data)
|
||||
# proportional to RS with ratio of total residential demand
|
||||
missing = df.loc["BA"] == 0.0
|
||||
ratio = df.at["BA", "total residential"] / df.at["RS", "total residential"]
|
||||
df.loc['BA', missing] = ratio * df.loc["RS", missing]
|
||||
df.loc["BA", missing] = ratio * df.loc["RS", missing]
|
||||
|
||||
# Missing district heating share
|
||||
dh_share = pd.read_csv(snakemake.input.district_heat_share,
|
||||
index_col=0, usecols=[0, 1])
|
||||
dh_share = pd.read_csv(
|
||||
snakemake.input.district_heat_share, index_col=0, usecols=[0, 1]
|
||||
)
|
||||
# make conservative assumption and take minimum from both data sets
|
||||
df["district heat share"] = (pd.concat([df["district heat share"],
|
||||
dh_share.reindex(index=df.index)/100],
|
||||
axis=1).min(axis=1))
|
||||
df["district heat share"] = pd.concat(
|
||||
[df["district heat share"], dh_share.reindex(index=df.index) / 100], axis=1
|
||||
).min(axis=1)
|
||||
|
||||
return df
|
||||
|
||||
|
||||
def build_eea_co2(input_co2, year=1990, emissions_scope="CO2"):
|
||||
|
||||
# https://www.eea.europa.eu/data-and-maps/data/national-emissions-reported-to-the-unfccc-and-to-the-eu-greenhouse-gas-monitoring-mechanism-16
|
||||
# downloaded 201228 (modified by EEA last on 201221)
|
||||
df = pd.read_csv(input_co2, encoding="latin-1", low_memory=False)
|
||||
@ -604,13 +642,20 @@ def build_eea_co2(input_co2, year=1990, emissions_scope="CO2"):
|
||||
"international aviation",
|
||||
"domestic navigation",
|
||||
"international navigation",
|
||||
"agriculture, forestry and fishing"
|
||||
"agriculture, forestry and fishing",
|
||||
]
|
||||
emissions["industrial non-elec"] = emissions["total energy"] - emissions[to_subtract].sum(axis=1)
|
||||
emissions["industrial non-elec"] = emissions["total energy"] - emissions[
|
||||
to_subtract
|
||||
].sum(axis=1)
|
||||
|
||||
emissions["agriculture"] += emissions["agriculture, forestry and fishing"]
|
||||
|
||||
to_drop = ["total energy", "total wL", "total woL", "agriculture, forestry and fishing"]
|
||||
to_drop = [
|
||||
"total energy",
|
||||
"total wL",
|
||||
"total woL",
|
||||
"agriculture, forestry and fishing",
|
||||
]
|
||||
emissions.drop(columns=to_drop, inplace=True)
|
||||
|
||||
# convert from Gg to Mt
|
||||
@ -618,7 +663,6 @@ def build_eea_co2(input_co2, year=1990, emissions_scope="CO2"):
|
||||
|
||||
|
||||
def build_eurostat_co2(input_eurostat, countries, report_year, year=1990):
|
||||
|
||||
eurostat = build_eurostat(input_eurostat, countries, report_year, year)
|
||||
|
||||
specific_emissions = pd.Series(index=eurostat.columns, dtype=float)
|
||||
@ -637,13 +681,16 @@ def build_eurostat_co2(input_eurostat, countries, report_year, year=1990):
|
||||
|
||||
|
||||
def build_co2_totals(countries, eea_co2, eurostat_co2):
|
||||
|
||||
co2 = eea_co2.reindex(countries)
|
||||
|
||||
for ct in countries.intersection(["BA", "RS", "AL", "ME", "MK"]):
|
||||
|
||||
mappings = {
|
||||
"electricity": (ct, "+", "Conventional Thermal Power Stations", "of which From Coal"),
|
||||
"electricity": (
|
||||
ct,
|
||||
"+",
|
||||
"Conventional Thermal Power Stations",
|
||||
"of which From Coal",
|
||||
),
|
||||
"residential non-elec": (ct, "+", "+", "Residential"),
|
||||
"services non-elec": (ct, "+", "+", "Services"),
|
||||
"road non-elec": (ct, "+", "+", "Road"),
|
||||
@ -655,7 +702,8 @@ def build_co2_totals(countries, eea_co2, eurostat_co2):
|
||||
# does not include industrial process emissions or fuel processing/refining
|
||||
"industrial non-elec": (ct, "+", "Industry"),
|
||||
# does not include non-energy emissions
|
||||
"agriculture": (eurostat_co2.index.get_level_values(0) == ct) & eurostat_co2.index.isin(["Agriculture / Forestry", "Fishing"], level=3),
|
||||
"agriculture": (eurostat_co2.index.get_level_values(0) == ct)
|
||||
& eurostat_co2.index.isin(["Agriculture / Forestry", "Fishing"], level=3),
|
||||
}
|
||||
|
||||
for i, mi in mappings.items():
|
||||
@ -665,7 +713,6 @@ def build_co2_totals(countries, eea_co2, eurostat_co2):
|
||||
|
||||
|
||||
def build_transport_data(countries, population, idees):
|
||||
|
||||
transport_data = pd.DataFrame(index=countries)
|
||||
|
||||
# collect number of cars
|
||||
@ -676,7 +723,9 @@ def build_transport_data(countries, population, idees):
|
||||
transport_data.at["CH", "number cars"] = 4.136e6
|
||||
|
||||
missing = transport_data.index[transport_data["number cars"].isna()]
|
||||
logger.info(f"Missing data on cars from:\n{list(missing)}\nFilling gaps with averaged data.")
|
||||
logger.info(
|
||||
f"Missing data on cars from:\n{list(missing)}\nFilling gaps with averaged data."
|
||||
)
|
||||
|
||||
cars_pp = transport_data["number cars"] / population
|
||||
transport_data.loc[missing, "number cars"] = cars_pp.mean() * population
|
||||
@ -686,7 +735,9 @@ def build_transport_data(countries, population, idees):
|
||||
transport_data["average fuel efficiency"] = idees["passenger car efficiency"]
|
||||
|
||||
missing = transport_data.index[transport_data["average fuel efficiency"].isna()]
|
||||
logger.info(f"Missing data on fuel efficiency from:\n{list(missing)}\nFilling gapswith averaged data.")
|
||||
logger.info(
|
||||
f"Missing data on fuel efficiency from:\n{list(missing)}\nFilling gapswith averaged data."
|
||||
)
|
||||
|
||||
fill_values = transport_data["average fuel efficiency"].mean()
|
||||
transport_data.loc[missing, "average fuel efficiency"] = fill_values
|
||||
@ -695,11 +746,12 @@ def build_transport_data(countries, population, idees):
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
if 'snakemake' not in globals():
|
||||
if "snakemake" not in globals():
|
||||
from helper import mock_snakemake
|
||||
snakemake = mock_snakemake('build_energy_totals')
|
||||
|
||||
logging.basicConfig(level=snakemake.config['logging_level'])
|
||||
snakemake = mock_snakemake("build_energy_totals")
|
||||
|
||||
logging.basicConfig(level=snakemake.config["logging_level"])
|
||||
|
||||
config = snakemake.config["energy"]
|
||||
|
||||
@ -722,7 +774,9 @@ if __name__ == "__main__":
|
||||
base_year_emissions = config["base_emissions_year"]
|
||||
emissions_scope = snakemake.config["energy"]["emissions"]
|
||||
eea_co2 = build_eea_co2(snakemake.input.co2, base_year_emissions, emissions_scope)
|
||||
eurostat_co2 = build_eurostat_co2(input_eurostat, countries, report_year, base_year_emissions)
|
||||
eurostat_co2 = build_eurostat_co2(
|
||||
input_eurostat, countries, report_year, base_year_emissions
|
||||
)
|
||||
|
||||
co2 = build_co2_totals(countries, eea_co2, eurostat_co2)
|
||||
co2.to_csv(snakemake.output.co2_name)
|
||||
|
@ -1,15 +1,17 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Build import locations for fossil gas from entry-points, LNG terminals and production sites.
|
||||
Build import locations for fossil gas from entry-points, LNG terminals and
|
||||
production sites.
|
||||
"""
|
||||
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
import pandas as pd
|
||||
import geopandas as gpd
|
||||
from shapely import wkt
|
||||
|
||||
import pandas as pd
|
||||
from cluster_gas_network import load_bus_regions
|
||||
from shapely import wkt
|
||||
|
||||
|
||||
def read_scigrid_gas(fn):
|
||||
@ -20,24 +22,25 @@ def read_scigrid_gas(fn):
|
||||
|
||||
|
||||
def build_gem_lng_data(lng_fn):
|
||||
df = pd.read_excel(lng_fn[0], sheet_name='LNG terminals - data')
|
||||
df = pd.read_excel(lng_fn[0], sheet_name="LNG terminals - data")
|
||||
df = df.set_index("ComboID")
|
||||
|
||||
remove_status = ['Cancelled']
|
||||
remove_country = ['Cyprus','Turkey']
|
||||
remove_terminal = ['Puerto de la Luz LNG Terminal', 'Gran Canaria LNG Terminal']
|
||||
remove_status = ["Cancelled"]
|
||||
remove_country = ["Cyprus", "Turkey"]
|
||||
remove_terminal = ["Puerto de la Luz LNG Terminal", "Gran Canaria LNG Terminal"]
|
||||
|
||||
df = df.query("Status != 'Cancelled' \
|
||||
df = df.query(
|
||||
"Status != 'Cancelled' \
|
||||
& Country != @remove_country \
|
||||
& TerminalName != @remove_terminal \
|
||||
& CapacityInMtpa != '--'")
|
||||
& CapacityInMtpa != '--'"
|
||||
)
|
||||
|
||||
geometry = gpd.points_from_xy(df['Longitude'], df['Latitude'])
|
||||
geometry = gpd.points_from_xy(df["Longitude"], df["Latitude"])
|
||||
return gpd.GeoDataFrame(df, geometry=geometry, crs="EPSG:4326")
|
||||
|
||||
|
||||
def build_gas_input_locations(lng_fn, entry_fn, prod_fn, countries):
|
||||
|
||||
# LNG terminals
|
||||
lng = build_gem_lng_data(lng_fn)
|
||||
|
||||
@ -45,17 +48,15 @@ def build_gas_input_locations(lng_fn, entry_fn, prod_fn, countries):
|
||||
entry = read_scigrid_gas(entry_fn)
|
||||
entry["from_country"] = entry.from_country.str.rstrip()
|
||||
entry = entry.loc[
|
||||
~(entry.from_country.isin(countries) & entry.to_country.isin(countries)) & # only take non-EU entries
|
||||
~entry.name.str.contains("Tegelen") | # malformed datapoint
|
||||
(entry.from_country == "NO") # entries from NO to GB
|
||||
~(entry.from_country.isin(countries) & entry.to_country.isin(countries)) # only take non-EU entries
|
||||
& ~entry.name.str.contains("Tegelen") # malformed datapoint
|
||||
| (entry.from_country == "NO") # entries from NO to GB
|
||||
]
|
||||
|
||||
# production sites inside the model scope
|
||||
prod = read_scigrid_gas(prod_fn)
|
||||
prod = prod.loc[
|
||||
(prod.geometry.y > 35) &
|
||||
(prod.geometry.x < 30) &
|
||||
(prod.country_code != "DE")
|
||||
(prod.geometry.y > 35) & (prod.geometry.x < 30) & (prod.country_code != "DE")
|
||||
]
|
||||
|
||||
mcm_per_day_to_mw = 437.5 # MCM/day to MWh/h
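|
||||
# Consistency check, assuming a CH4 lower heating value of ~10.5 kWh/m³:
|
||||
# 1e6 m³/day * 10.5 kWh/m³ / 24 h = 437,500 kWh/h = 437.5 MW.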
|
||||
@ -74,28 +75,29 @@ def build_gas_input_locations(lng_fn, entry_fn, prod_fn, countries):
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
if 'snakemake' not in globals():
|
||||
if "snakemake" not in globals():
|
||||
from helper import mock_snakemake
|
||||
|
||||
snakemake = mock_snakemake(
|
||||
'build_gas_input_locations',
|
||||
simpl='',
|
||||
clusters='37',
|
||||
"build_gas_input_locations",
|
||||
simpl="",
|
||||
clusters="37",
|
||||
)
|
||||
|
||||
logging.basicConfig(level=snakemake.config['logging_level'])
|
||||
logging.basicConfig(level=snakemake.config["logging_level"])
|
||||
|
||||
regions = load_bus_regions(
|
||||
snakemake.input.regions_onshore,
|
||||
snakemake.input.regions_offshore
|
||||
snakemake.input.regions_onshore, snakemake.input.regions_offshore
|
||||
)
|
||||
|
||||
# add a buffer to eastern countries because some
|
||||
# entry points are still in Russian or Ukrainian territory.
|
||||
buffer = 9000 # meters
|
||||
eastern_countries = ['FI', 'EE', 'LT', 'LV', 'PL', 'SK', 'HU', 'RO']
|
||||
eastern_countries = ["FI", "EE", "LT", "LV", "PL", "SK", "HU", "RO"]
|
||||
add_buffer_b = regions.index.str[:2].isin(eastern_countries)
|
||||
regions.loc[add_buffer_b] = regions[add_buffer_b].to_crs(3035).buffer(buffer).to_crs(4326)
|
||||
regions.loc[add_buffer_b] = (
|
||||
regions[add_buffer_b].to_crs(3035).buffer(buffer).to_crs(4326)
|
||||
)
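|
||||
# The round trip through EPSG:3035 matters: buffering by 9000 needs a
|
||||
# projected CRS in metres, after which the shapes are returned to EPSG:4326.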
|
||||
|
||||
countries = regions.index.str[:2].unique().str.replace("GB", "UK")
|
||||
|
||||
@ -103,16 +105,18 @@ if __name__ == "__main__":
|
||||
snakemake.input.lng,
|
||||
snakemake.input.entry,
|
||||
snakemake.input.production,
|
||||
countries
|
||||
countries,
|
||||
)
|
||||
|
||||
gas_input_nodes = gpd.sjoin(gas_input_locations, regions, how='left')
|
||||
gas_input_nodes = gpd.sjoin(gas_input_locations, regions, how="left")
|
||||
|
||||
gas_input_nodes.rename(columns={"index_right": "bus"}, inplace=True)
|
||||
|
||||
gas_input_nodes.to_file(snakemake.output.gas_input_nodes, driver='GeoJSON')
|
||||
gas_input_nodes.to_file(snakemake.output.gas_input_nodes, driver="GeoJSON")
|
||||
|
||||
gas_input_nodes_s = gas_input_nodes.groupby(["bus", "type"])["p_nom"].sum().unstack()
|
||||
gas_input_nodes_s = (
|
||||
gas_input_nodes.groupby(["bus", "type"])["p_nom"].sum().unstack()
|
||||
)
|
||||
gas_input_nodes_s.columns.name = "p_nom"
|
||||
|
||||
gas_input_nodes_s.to_csv(snakemake.output.gas_input_nodes_simplified)
|
@ -1,16 +1,22 @@
|
||||
"""Preprocess gas network based on data from bthe SciGRID Gas project (https://www.gas.scigrid.de/)."""
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Preprocess gas network based on data from the SciGRID Gas project
|
||||
(https://www.gas.scigrid.de/).
|
||||
"""
|
||||
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
import pandas as pd
|
||||
import geopandas as gpd
|
||||
from shapely.geometry import Point
|
||||
import pandas as pd
|
||||
from pypsa.geo import haversine_pts
|
||||
from shapely.geometry import Point
|
||||
|
||||
|
||||
def diameter_to_capacity(pipe_diameter_mm):
|
||||
"""Calculate pipe capacity in MW based on diameter in mm.
|
||||
"""
|
||||
Calculate pipe capacity in MW based on diameter in mm.
|
||||
|
||||
20 inch (500 mm) 50 bar -> 1.5 GW CH4 pipe capacity (LHV)
|
||||
24 inch (600 mm) 50 bar -> 5 GW CH4 pipe capacity (LHV)
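|
||||
A sketch of one way such a mapping can be realised (illustrative
|
||||
assumption only: linear interpolation between the two anchor points
|
||||
above; the actual function body is not shown in this hunk):
|
||||
|
||||
    import numpy as np
|
||||
    def capacity_from_diameter(diameter_mm):
|
||||
        return np.interp(diameter_mm, [500, 600], [1500, 5000])  # MW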
|
||||
@ -59,9 +65,8 @@ def prepare_dataset(
|
||||
length_factor=1.5,
|
||||
correction_threshold_length=4,
|
||||
correction_threshold_p_nom=8,
|
||||
bidirectional_below=10
|
||||
bidirectional_below=10,
|
||||
):
|
||||
|
||||
# extract start and end from LineString
|
||||
df["point0"] = df.geometry.apply(lambda x: Point(x.coords[0]))
|
||||
df["point1"] = df.geometry.apply(lambda x: Point(x.coords[-1]))
|
||||
@ -70,11 +75,21 @@ def prepare_dataset(
|
||||
df["p_nom"] = df.max_cap_M_m3_per_d * conversion_factor
|
||||
|
||||
# for inferred diameters, assume 500 mm rather than 900 mm (more conservative)
|
||||
df.loc[df.diameter_mm_method != 'raw', "diameter_mm"] = 500.
|
||||
df.loc[df.diameter_mm_method != "raw", "diameter_mm"] = 500.0
|
||||
|
||||
keep = ["name", "diameter_mm", "is_H_gas", "is_bothDirection",
|
||||
"length_km", "p_nom", "max_pressure_bar",
|
||||
"start_year", "point0", "point1", "geometry"]
|
||||
keep = [
|
||||
"name",
|
||||
"diameter_mm",
|
||||
"is_H_gas",
|
||||
"is_bothDirection",
|
||||
"length_km",
|
||||
"p_nom",
|
||||
"max_pressure_bar",
|
||||
"start_year",
|
||||
"point0",
|
||||
"point1",
|
||||
"geometry",
|
||||
]
|
||||
to_rename = {
|
||||
"is_bothDirection": "bidirectional",
|
||||
"is_H_gas": "H_gas",
|
||||
@ -96,37 +111,40 @@ def prepare_dataset(
|
||||
df["p_nom_diameter"] = df.diameter_mm.apply(diameter_to_capacity)
|
||||
ratio = df.p_nom / df.p_nom_diameter
|
||||
not_nordstream = df.max_pressure_bar < 220
|
||||
df.p_nom.update(df.p_nom_diameter.where(
|
||||
(df.p_nom <= 500) |
|
||||
((ratio > correction_threshold_p_nom) & not_nordstream) |
|
||||
((ratio < 1 / correction_threshold_p_nom) & not_nordstream)
|
||||
))
|
||||
df.p_nom.update(
|
||||
df.p_nom_diameter.where(
|
||||
(df.p_nom <= 500)
|
||||
| ((ratio > correction_threshold_p_nom) & not_nordstream)
|
||||
| ((ratio < 1 / correction_threshold_p_nom) & not_nordstream)
|
||||
)
|
||||
)
|
||||
|
||||
# lines which have way too discrepant line lengths
|
||||
# get assigned haversine length * length factor
|
||||
df["length_haversine"] = df.apply(
|
||||
lambda p: length_factor * haversine_pts(
|
||||
[p.point0.x, p.point0.y],
|
||||
[p.point1.x, p.point1.y]
|
||||
), axis=1
|
||||
lambda p: length_factor
|
||||
* haversine_pts([p.point0.x, p.point0.y], [p.point1.x, p.point1.y]),
|
||||
axis=1,
|
||||
)
|
||||
ratio = df.eval("length / length_haversine")
|
||||
df["length"].update(df.length_haversine.where(
|
||||
(df["length"] < 20) |
|
||||
(ratio > correction_threshold_length) |
|
||||
(ratio < 1 / correction_threshold_length)
|
||||
))
|
||||
df["length"].update(
|
||||
df.length_haversine.where(
|
||||
(df["length"] < 20)
|
||||
| (ratio > correction_threshold_length)
|
||||
| (ratio < 1 / correction_threshold_length)
|
||||
)
|
||||
)
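|
||||
# e.g. with the default correction_threshold_length=4, a reported length more
|
||||
# than 4x (or below 1/4 of) the haversine estimate is replaced by
|
||||
# 1.5 * haversine distance (the default length_factor).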
|
||||
|
||||
return df
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
if 'snakemake' not in globals():
|
||||
if "snakemake" not in globals():
|
||||
from helper import mock_snakemake
|
||||
snakemake = mock_snakemake('build_gas_network')
|
||||
|
||||
logging.basicConfig(level=snakemake.config['logging_level'])
|
||||
snakemake = mock_snakemake("build_gas_network")
|
||||
|
||||
logging.basicConfig(level=snakemake.config["logging_level"])
|
||||
|
||||
gas_network = load_dataset(snakemake.input.gas_network)
|
||||
|
||||
|
@ -1,18 +1,22 @@
|
||||
"""Build heat demand time series."""
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Build heat demand time series.
|
||||
"""
|
||||
|
||||
import geopandas as gpd
|
||||
import atlite
|
||||
import geopandas as gpd
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
import xarray as xr
|
||||
import numpy as np
|
||||
from dask.distributed import Client, LocalCluster
|
||||
|
||||
if __name__ == '__main__':
|
||||
if 'snakemake' not in globals():
|
||||
if __name__ == "__main__":
|
||||
if "snakemake" not in globals():
|
||||
from helper import mock_snakemake
|
||||
|
||||
snakemake = mock_snakemake(
|
||||
'build_heat_demands',
|
||||
simpl='',
|
||||
"build_heat_demands",
|
||||
simpl="",
|
||||
clusters=48,
|
||||
)
|
||||
|
||||
@ -20,23 +24,29 @@ if __name__ == '__main__':
|
||||
cluster = LocalCluster(n_workers=nprocesses, threads_per_worker=1)
|
||||
client = Client(cluster, asynchronous=True)
|
||||
|
||||
time = pd.date_range(freq='h', **snakemake.config['snapshots'])
|
||||
cutout_config = snakemake.config['atlite']['cutout']
|
||||
time = pd.date_range(freq="h", **snakemake.config["snapshots"])
|
||||
cutout_config = snakemake.config["atlite"]["cutout"]
|
||||
cutout = atlite.Cutout(cutout_config).sel(time=time)
|
||||
|
||||
clustered_regions = gpd.read_file(
|
||||
snakemake.input.regions_onshore).set_index('name').buffer(0).squeeze()
|
||||
clustered_regions = (
|
||||
gpd.read_file(snakemake.input.regions_onshore)
|
||||
.set_index("name")
|
||||
.buffer(0)
|
||||
.squeeze()
|
||||
)
|
||||
|
||||
I = cutout.indicatormatrix(clustered_regions)
|
||||
|
||||
pop_layout = xr.open_dataarray(snakemake.input.pop_layout)
|
||||
|
||||
stacked_pop = pop_layout.stack(spatial=('y', 'x'))
|
||||
stacked_pop = pop_layout.stack(spatial=("y", "x"))
|
||||
M = I.T.dot(np.diag(I.dot(stacked_pop)))
|
||||
|
||||
heat_demand = cutout.heat_demand(
|
||||
matrix=M.T, index=clustered_regions.index,
|
||||
matrix=M.T,
|
||||
index=clustered_regions.index,
|
||||
dask_kwargs=dict(scheduler=client),
|
||||
show_progress=False)
|
||||
show_progress=False,
|
||||
)
|
||||
|
||||
heat_demand.to_netcdf(snakemake.output.heat_demand)
|
||||
|
@ -1,40 +1,47 @@
|
||||
"""Build industrial distribution keys from hotmaps database."""
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Build industrial distribution keys from hotmaps database.
|
||||
"""
|
||||
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
import uuid
|
||||
import pandas as pd
|
||||
import geopandas as gpd
|
||||
|
||||
from itertools import product
|
||||
|
||||
import geopandas as gpd
|
||||
import pandas as pd
|
||||
from packaging.version import Version, parse
|
||||
|
||||
|
||||
def locate_missing_industrial_sites(df):
|
||||
"""
|
||||
Locate industrial sites without valid locations based on
|
||||
city and countries. Should only be used if the model's
|
||||
spatial resolution is coarser than individual cities.
|
||||
Locate industrial sites without valid locations based on city and
|
||||
country names.
|
||||
|
||||
Should only be used if the model's spatial resolution is coarser
|
||||
than individual cities.
|
||||
"""
|
||||
|
||||
try:
|
||||
from geopy.geocoders import Nominatim
|
||||
from geopy.extra.rate_limiter import RateLimiter
|
||||
from geopy.geocoders import Nominatim
|
||||
except ImportError:
|
||||
raise ModuleNotFoundError("Optional dependency 'geopy' not found."
|
||||
raise ModuleNotFoundError(
|
||||
"Optional dependency 'geopy' not found."
|
||||
"Install via 'conda install -c conda-forge geopy'"
|
||||
"or set 'industry: hotmaps_locate_missing: false'.")
|
||||
"or set 'industry: hotmaps_locate_missing: false'."
|
||||
)
|
||||
|
||||
locator = Nominatim(user_agent=str(uuid.uuid4()))
|
||||
geocode = RateLimiter(locator.geocode, min_delay_seconds=2)
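|
||||
# The 2 s delay keeps the geocoder within Nominatim's usage policy of at
|
||||
# most one request per second, with some margin.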
|
||||
|
||||
def locate_missing(s):
|
||||
|
||||
if pd.isna(s.City) or s.City == "CONFIDENTIAL":
|
||||
return None
|
||||
|
||||
loc = geocode([s.City, s.Country], geometry='wkt')
|
||||
loc = geocode([s.City, s.Country], geometry="wkt")
|
||||
if loc is not None:
|
||||
logger.debug(f"Found:\t{loc}\nFor:\t{s['City']}, {s['Country']}\n")
|
||||
return f"POINT({loc.longitude} {loc.latitude})"
|
||||
@ -42,14 +49,16 @@ def locate_missing_industrial_sites(df):
|
||||
return None
|
||||
|
||||
missing = df.index[df.geom.isna()]
|
||||
df.loc[missing, 'coordinates'] = df.loc[missing].apply(locate_missing, axis=1)
|
||||
df.loc[missing, "coordinates"] = df.loc[missing].apply(locate_missing, axis=1)
|
||||
|
||||
# report stats
|
||||
num_still_missing = df.coordinates.isna().sum()
|
||||
num_found = len(missing) - num_still_missing
|
||||
share_missing = len(missing) / len(df) * 100
|
||||
share_still_missing = num_still_missing / len(df) * 100
|
||||
logger.warning(f"Found {num_found} missing locations. \nShare of missing locations reduced from {share_missing:.2f}% to {share_still_missing:.2f}%.")
|
||||
logger.warning(
|
||||
f"Found {num_found} missing locations. \nShare of missing locations reduced from {share_missing:.2f}% to {share_still_missing:.2f}%."
|
||||
)
|
||||
|
||||
return df
|
||||
|
||||
@ -61,19 +70,23 @@ def prepare_hotmaps_database(regions):
|
||||
|
||||
df = pd.read_csv(snakemake.input.hotmaps_industrial_database, sep=";", index_col=0)
|
||||
|
||||
df[["srid", "coordinates"]] = df.geom.str.split(';', expand=True)
|
||||
df[["srid", "coordinates"]] = df.geom.str.split(";", expand=True)
|
||||
|
||||
if snakemake.config['industry'].get('hotmaps_locate_missing', False):
|
||||
if snakemake.config["industry"].get("hotmaps_locate_missing", False):
|
||||
df = locate_missing_industrial_sites(df)
|
||||
|
||||
# remove those sites without valid locations
|
||||
df.drop(df.index[df.coordinates.isna()], inplace=True)
|
||||
|
||||
df['coordinates'] = gpd.GeoSeries.from_wkt(df['coordinates'])
|
||||
df["coordinates"] = gpd.GeoSeries.from_wkt(df["coordinates"])
|
||||
|
||||
gdf = gpd.GeoDataFrame(df, geometry='coordinates', crs="EPSG:4326")
|
||||
gdf = gpd.GeoDataFrame(df, geometry="coordinates", crs="EPSG:4326")
|
||||
|
||||
kws = dict(op="within") if parse(gpd.__version__) < Version('0.10') else dict(predicate="within")
|
||||
kws = (
|
||||
dict(op="within")
|
||||
if parse(gpd.__version__) < Version("0.10")
|
||||
else dict(predicate="within")
|
||||
)
|
||||
gdf = gpd.sjoin(gdf, regions, how="inner", **kws)
|
||||
|
||||
gdf.rename(columns={"index_right": "bus"}, inplace=True)
|
||||
@ -83,7 +96,9 @@ def prepare_hotmaps_database(regions):
|
||||
|
||||
|
||||
def build_nodal_distribution_key(hotmaps, regions):
|
||||
"""Build nodal distribution keys for each sector."""
|
||||
"""
|
||||
Build nodal distribution keys for each sector.
|
||||
"""
|
||||
|
||||
sectors = hotmaps.Subsector.unique()
|
||||
countries = regions.index.str[:2].unique()
|
||||
@ -91,12 +106,11 @@ def build_nodal_distribution_key(hotmaps, regions):
|
||||
keys = pd.DataFrame(index=regions.index, columns=sectors, dtype=float)
|
||||
|
||||
pop = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0)
|
||||
pop['country'] = pop.index.str[:2]
|
||||
ct_total = pop.total.groupby(pop['country']).sum()
|
||||
keys['population'] = pop.total / pop.country.map(ct_total)
|
||||
pop["country"] = pop.index.str[:2]
|
||||
ct_total = pop.total.groupby(pop["country"]).sum()
|
||||
keys["population"] = pop.total / pop.country.map(ct_total)
|
||||
|
||||
for sector, country in product(sectors, countries):
|
||||
|
||||
regions_ct = regions.index[regions.index.str.contains(country)]
|
||||
|
||||
facilities = hotmaps.query("country == @country and Subsector == @sector")
|
||||
@ -109,9 +123,9 @@ def build_nodal_distribution_key(hotmaps, regions):
|
||||
# BEWARE: this is a strong assumption
|
||||
emissions = emissions.fillna(emissions.mean())
|
||||
key = emissions / emissions.sum()
|
||||
key = key.groupby(facilities.bus).sum().reindex(regions_ct, fill_value=0.)
|
||||
key = key.groupby(facilities.bus).sum().reindex(regions_ct, fill_value=0.0)
|
||||
else:
|
||||
key = keys.loc[regions_ct, 'population']
|
||||
key = keys.loc[regions_ct, "population"]
|
||||
|
||||
keys.loc[regions_ct, sector] = key
|
||||
|
||||
@ -119,17 +133,18 @@ def build_nodal_distribution_key(hotmaps, regions):
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
if 'snakemake' not in globals():
|
||||
if "snakemake" not in globals():
|
||||
from helper import mock_snakemake
|
||||
|
||||
snakemake = mock_snakemake(
|
||||
'build_industrial_distribution_key',
|
||||
simpl='',
|
||||
"build_industrial_distribution_key",
|
||||
simpl="",
|
||||
clusters=48,
|
||||
)
|
||||
|
||||
logging.basicConfig(level=snakemake.config['logging_level'])
|
||||
logging.basicConfig(level=snakemake.config["logging_level"])
|
||||
|
||||
regions = gpd.read_file(snakemake.input.regions_onshore).set_index('name')
|
||||
regions = gpd.read_file(snakemake.input.regions_onshore).set_index("name")
|
||||
|
||||
hotmaps = prepare_hotmaps_database(regions)
|
||||
|
||||
|
@ -1,84 +1,116 @@
|
||||
"""Build industrial energy demand per country."""
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Build industrial energy demand per country.
|
||||
"""
|
||||
|
||||
import multiprocessing as mp
|
||||
|
||||
import pandas as pd
|
||||
import multiprocessing as mp
|
||||
from tqdm import tqdm
|
||||
|
||||
ktoe_to_twh = 0.011630
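|
||||
# 1 toe = 41.868 GJ = 11.630 MWh, hence 1 ktoe = 11.630 GWh = 0.011630 TWh.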
|
||||
|
||||
# name in JRC-IDEES Energy Balances
|
||||
sector_sheets = {'Integrated steelworks': 'cisb',
|
||||
'Electric arc': 'cise',
|
||||
'Alumina production': 'cnfa',
|
||||
'Aluminium - primary production': 'cnfp',
|
||||
'Aluminium - secondary production': 'cnfs',
|
||||
'Other non-ferrous metals': 'cnfo',
|
||||
'Basic chemicals': 'cbch',
|
||||
'Other chemicals': 'coch',
|
||||
'Pharmaceutical products etc.': 'cpha',
|
||||
'Basic chemicals feedstock': 'cpch',
|
||||
'Cement': 'ccem',
|
||||
'Ceramics & other NMM': 'ccer',
|
||||
'Glass production': 'cgla',
|
||||
'Pulp production': 'cpul',
|
||||
'Paper production': 'cpap',
|
||||
'Printing and media reproduction': 'cprp',
|
||||
'Food, beverages and tobacco': 'cfbt',
|
||||
'Transport Equipment': 'ctre',
|
||||
'Machinery Equipment': 'cmae',
|
||||
'Textiles and leather': 'ctel',
|
||||
'Wood and wood products': 'cwwp',
|
||||
'Mining and quarrying': 'cmiq',
|
||||
'Construction': 'ccon',
|
||||
'Non-specified': 'cnsi',
|
||||
sector_sheets = {
|
||||
"Integrated steelworks": "cisb",
|
||||
"Electric arc": "cise",
|
||||
"Alumina production": "cnfa",
|
||||
"Aluminium - primary production": "cnfp",
|
||||
"Aluminium - secondary production": "cnfs",
|
||||
"Other non-ferrous metals": "cnfo",
|
||||
"Basic chemicals": "cbch",
|
||||
"Other chemicals": "coch",
|
||||
"Pharmaceutical products etc.": "cpha",
|
||||
"Basic chemicals feedstock": "cpch",
|
||||
"Cement": "ccem",
|
||||
"Ceramics & other NMM": "ccer",
|
||||
"Glass production": "cgla",
|
||||
"Pulp production": "cpul",
|
||||
"Paper production": "cpap",
|
||||
"Printing and media reproduction": "cprp",
|
||||
"Food, beverages and tobacco": "cfbt",
|
||||
"Transport Equipment": "ctre",
|
||||
"Machinery Equipment": "cmae",
|
||||
"Textiles and leather": "ctel",
|
||||
"Wood and wood products": "cwwp",
|
||||
"Mining and quarrying": "cmiq",
|
||||
"Construction": "ccon",
|
||||
"Non-specified": "cnsi",
|
||||
}
|
||||
|
||||
|
||||
fuels = {'All Products': 'all',
|
||||
'Solid Fuels': 'solid',
|
||||
'Total petroleum products (without biofuels)': 'liquid',
|
||||
'Gases': 'gas',
|
||||
'Nuclear heat': 'heat',
|
||||
'Derived heat': 'heat',
|
||||
'Biomass and Renewable wastes': 'biomass',
|
||||
'Wastes (non-renewable)': 'waste',
|
||||
'Electricity': 'electricity'
|
||||
fuels = {
|
||||
"All Products": "all",
|
||||
"Solid Fuels": "solid",
|
||||
"Total petroleum products (without biofuels)": "liquid",
|
||||
"Gases": "gas",
|
||||
"Nuclear heat": "heat",
|
||||
"Derived heat": "heat",
|
||||
"Biomass and Renewable wastes": "biomass",
|
||||
"Wastes (non-renewable)": "waste",
|
||||
"Electricity": "electricity",
|
||||
}
|
||||
|
||||
eu28 = ['FR', 'DE', 'GB', 'IT', 'ES', 'PL', 'SE', 'NL', 'BE', 'FI',
|
||||
'DK', 'PT', 'RO', 'AT', 'BG', 'EE', 'GR', 'LV', 'CZ',
|
||||
'HU', 'IE', 'SK', 'LT', 'HR', 'LU', 'SI', 'CY', 'MT']
|
||||
eu28 = [
|
||||
"FR",
|
||||
"DE",
|
||||
"GB",
|
||||
"IT",
|
||||
"ES",
|
||||
"PL",
|
||||
"SE",
|
||||
"NL",
|
||||
"BE",
|
||||
"FI",
|
||||
"DK",
|
||||
"PT",
|
||||
"RO",
|
||||
"AT",
|
||||
"BG",
|
||||
"EE",
|
||||
"GR",
|
||||
"LV",
|
||||
"CZ",
|
||||
"HU",
|
||||
"IE",
|
||||
"SK",
|
||||
"LT",
|
||||
"HR",
|
||||
"LU",
|
||||
"SI",
|
||||
"CY",
|
||||
"MT",
|
||||
]
|
||||
|
||||
jrc_names = {"GR": "EL", "GB": "UK"}
|
||||
|
||||
|
||||
def industrial_energy_demand_per_country(country):
|
||||
|
||||
jrc_dir = snakemake.input.jrc
|
||||
jrc_country = jrc_names.get(country, country)
|
||||
fn = f'{jrc_dir}/JRC-IDEES-2015_EnergyBalance_{jrc_country}.xlsx'
|
||||
fn = f"{jrc_dir}/JRC-IDEES-2015_EnergyBalance_{jrc_country}.xlsx"
|
||||
|
||||
sheets = list(sector_sheets.values())
|
||||
df_dict = pd.read_excel(fn, sheet_name=sheets, index_col=0)
|
||||
|
||||
def get_subsector_data(sheet):
|
||||
|
||||
df = df_dict[sheet][year].groupby(fuels).sum()
|
||||
|
||||
df["ammonia"] = 0.
|
||||
df["ammonia"] = 0.0
|
||||
|
||||
df['other'] = df['all'] - df.loc[df.index != 'all'].sum()
|
||||
df["other"] = df["all"] - df.loc[df.index != "all"].sum()
|
||||
|
||||
return df
|
||||
|
||||
df = pd.concat({sub: get_subsector_data(sheet)
|
||||
for sub, sheet in sector_sheets.items()}, axis=1)
|
||||
df = pd.concat(
|
||||
{sub: get_subsector_data(sheet) for sub, sheet in sector_sheets.items()}, axis=1
|
||||
)
|
||||
|
||||
sel = ['Mining and quarrying', 'Construction', 'Non-specified']
|
||||
df['Other Industrial Sectors'] = df[sel].sum(axis=1)
|
||||
df['Basic chemicals'] += df['Basic chemicals feedstock']
|
||||
sel = ["Mining and quarrying", "Construction", "Non-specified"]
|
||||
df["Other Industrial Sectors"] = df[sel].sum(axis=1)
|
||||
df["Basic chemicals"] += df["Basic chemicals feedstock"]
|
||||
|
||||
df.drop(columns=sel+['Basic chemicals feedstock'], index='all', inplace=True)
|
||||
df.drop(columns=sel + ["Basic chemicals feedstock"], index="all", inplace=True)
|
||||
|
||||
df *= ktoe_to_twh
|
||||
|
||||
@ -86,36 +118,39 @@ def industrial_energy_demand_per_country(country):
|
||||
|
||||
|
||||
def add_ammonia_energy_demand(demand):
|
||||
|
||||
# MtNH3/a
|
||||
fn = snakemake.input.ammonia_production
|
||||
ammonia = pd.read_csv(fn, index_col=0)[str(year)] / 1e3
|
||||
|
||||
def get_ammonia_by_fuel(x):
|
||||
|
||||
fuels = {'gas': config['MWh_CH4_per_tNH3_SMR'],
|
||||
'electricity': config['MWh_elec_per_tNH3_SMR']}
|
||||
fuels = {
|
||||
"gas": config["MWh_CH4_per_tNH3_SMR"],
|
||||
"electricity": config["MWh_elec_per_tNH3_SMR"],
|
||||
}
|
||||
|
||||
return pd.Series({k: x * v for k, v in fuels.items()})
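|
||||
# For a production of x MtNH3/a this gives the implied gas and electricity
|
||||
# demand of steam methane reforming, scaled by the configured MWh-per-tNH3 factors.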
|
||||
|
||||
ammonia_by_fuel = ammonia.apply(get_ammonia_by_fuel).T
|
||||
ammonia_by_fuel = ammonia_by_fuel.unstack().reindex(index=demand.index, fill_value=0.)
|
||||
ammonia_by_fuel = ammonia_by_fuel.unstack().reindex(
|
||||
index=demand.index, fill_value=0.0
|
||||
)
|
||||
|
||||
ammonia = pd.DataFrame({"ammonia": ammonia * config['MWh_NH3_per_tNH3']}).T
|
||||
ammonia = pd.DataFrame({"ammonia": ammonia * config["MWh_NH3_per_tNH3"]}).T
|
||||
|
||||
demand['Ammonia'] = ammonia.unstack().reindex(index=demand.index, fill_value=0.)
|
||||
demand["Ammonia"] = ammonia.unstack().reindex(index=demand.index, fill_value=0.0)
|
||||
|
||||
demand['Basic chemicals (without ammonia)'] = demand["Basic chemicals"] - ammonia_by_fuel
|
||||
demand["Basic chemicals (without ammonia)"] = (
|
||||
demand["Basic chemicals"] - ammonia_by_fuel
|
||||
)
|
||||
|
||||
demand['Basic chemicals (without ammonia)'].clip(lower=0, inplace=True)
|
||||
demand["Basic chemicals (without ammonia)"].clip(lower=0, inplace=True)
|
||||
|
||||
demand.drop(columns='Basic chemicals', inplace=True)
|
||||
demand.drop(columns="Basic chemicals", inplace=True)
|
||||
|
||||
return demand
|
||||
|
||||
|
||||
def add_non_eu28_industrial_energy_demand(demand):
|
||||
|
||||
# output in MtMaterial/a
|
||||
fn = snakemake.input.industrial_production_per_country
|
||||
production = pd.read_csv(fn, index_col=0) / 1e3
|
||||
@ -131,18 +166,22 @@ def add_non_eu28_industrial_energy_demand(demand):
|
||||
|
||||
non_eu28 = production.index.symmetric_difference(eu28)
|
||||
|
||||
demand_non_eu28 = pd.concat({k: v * eu28_averages
|
||||
for k, v in production.loc[non_eu28].iterrows()})
|
||||
demand_non_eu28 = pd.concat(
|
||||
{k: v * eu28_averages for k, v in production.loc[non_eu28].iterrows()}
|
||||
)
|
||||
|
||||
return pd.concat([demand, demand_non_eu28])
|
||||
|
||||
|
||||
def industrial_energy_demand(countries):
|
||||
|
||||
nprocesses = snakemake.threads
|
||||
func = industrial_energy_demand_per_country
|
||||
tqdm_kwargs = dict(ascii=False, unit=' country', total=len(countries),
|
||||
desc="Build industrial energy demand")
|
||||
tqdm_kwargs = dict(
|
||||
ascii=False,
|
||||
unit=" country",
|
||||
total=len(countries),
|
||||
desc="Build industrial energy demand",
|
||||
)
|
||||
with mp.Pool(processes=nprocesses) as pool:
|
||||
demand_l = list(tqdm(pool.imap(func, countries), **tqdm_kwargs))
|
||||
|
||||
@ -151,13 +190,14 @@ def industrial_energy_demand(countries):
|
||||
return demand
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if 'snakemake' not in globals():
|
||||
if __name__ == "__main__":
|
||||
if "snakemake" not in globals():
|
||||
from helper import mock_snakemake
|
||||
snakemake = mock_snakemake('build_industrial_energy_demand_per_country_today')
|
||||
|
||||
config = snakemake.config['industry']
|
||||
year = config.get('reference_year', 2015)
|
||||
snakemake = mock_snakemake("build_industrial_energy_demand_per_country_today")
|
||||
|
||||
config = snakemake.config["industry"]
|
||||
year = config.get("reference_year", 2015)
|
||||
|
||||
demand = industrial_energy_demand(eu28)
|
||||
|
||||
@ -169,7 +209,7 @@ if __name__ == '__main__':
|
||||
demand = demand.stack(dropna=False).unstack(level=[0, 2])
|
||||
|
||||
# style and annotation
|
||||
demand.index.name = 'TWh/a'
|
||||
demand.index.name = "TWh/a"
|
||||
demand.sort_index(axis=1, inplace=True)
|
||||
|
||||
fn = snakemake.output.industrial_energy_demand_per_country_today
|
||||
|
@ -1,13 +1,17 @@
|
||||
"""Build industrial energy demand per node."""
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Build industrial energy demand per node.
|
||||
"""
|
||||
|
||||
import pandas as pd
|
||||
|
||||
if __name__ == '__main__':
|
||||
if 'snakemake' not in globals():
|
||||
if __name__ == "__main__":
|
||||
if "snakemake" not in globals():
|
||||
from helper import mock_snakemake
|
||||
|
||||
snakemake = mock_snakemake(
|
||||
'build_industrial_energy_demand_per_node',
|
||||
simpl='',
|
||||
"build_industrial_energy_demand_per_node",
|
||||
simpl="",
|
||||
clusters=48,
|
||||
planning_horizons=2030,
|
||||
)
|
||||
@ -31,9 +35,9 @@ if __name__ == '__main__':
|
||||
nodal_df *= 0.001
|
||||
|
||||
rename_sectors = {
|
||||
'elec': 'electricity',
|
||||
'biomass': 'solid biomass',
|
||||
'heat': 'low-temperature heat'
|
||||
"elec": "electricity",
|
||||
"biomass": "solid biomass",
|
||||
"heat": "low-temperature heat",
|
||||
}
|
||||
nodal_df.rename(columns=rename_sectors, inplace=True)
|
||||
|
||||
@ -42,4 +46,4 @@ if __name__ == '__main__':
|
||||
nodal_df.index.name = "TWh/a (MtCO2/a)"
|
||||
|
||||
fn = snakemake.output.industrial_energy_demand_per_node
|
||||
nodal_df.to_csv(fn, float_format='%.2f')
|
||||
nodal_df.to_csv(fn, float_format="%.2f")
|
||||
|
@ -1,33 +1,36 @@
"""Build industrial energy demand per node."""
# -*- coding: utf-8 -*-
"""
Build industrial energy demand per node.
"""

import pandas as pd
import numpy as np
from itertools import product

import numpy as np
import pandas as pd

# map JRC/our sectors to hotmaps sectors, where a mapping exists
sector_mapping = {
    'Electric arc': 'Iron and steel',
    'Integrated steelworks': 'Iron and steel',
    'DRI + Electric arc': 'Iron and steel',
    'Ammonia': 'Chemical industry',
    'Basic chemicals (without ammonia)': 'Chemical industry',
    'Other chemicals': 'Chemical industry',
    'Pharmaceutical products etc.': 'Chemical industry',
    'Cement': 'Cement',
    'Ceramics & other NMM': 'Non-metallic mineral products',
    'Glass production': 'Glass',
    'Pulp production': 'Paper and printing',
    'Paper production': 'Paper and printing',
    'Printing and media reproduction': 'Paper and printing',
    'Alumina production': 'Non-ferrous metals',
    'Aluminium - primary production': 'Non-ferrous metals',
    'Aluminium - secondary production': 'Non-ferrous metals',
    'Other non-ferrous metals': 'Non-ferrous metals',
    "Electric arc": "Iron and steel",
    "Integrated steelworks": "Iron and steel",
    "DRI + Electric arc": "Iron and steel",
    "Ammonia": "Chemical industry",
    "Basic chemicals (without ammonia)": "Chemical industry",
    "Other chemicals": "Chemical industry",
    "Pharmaceutical products etc.": "Chemical industry",
    "Cement": "Cement",
    "Ceramics & other NMM": "Non-metallic mineral products",
    "Glass production": "Glass",
    "Pulp production": "Paper and printing",
    "Paper production": "Paper and printing",
    "Printing and media reproduction": "Paper and printing",
    "Alumina production": "Non-ferrous metals",
    "Aluminium - primary production": "Non-ferrous metals",
    "Aluminium - secondary production": "Non-ferrous metals",
    "Other non-ferrous metals": "Non-ferrous metals",
}


def build_nodal_industrial_energy_demand():

    fn = snakemake.input.industrial_energy_demand_per_country_today
    industrial_demand = pd.read_csv(fn, header=[0, 1], index_col=0)

@ -35,24 +38,23 @@ def build_nodal_industrial_energy_demand():
    keys = pd.read_csv(fn, index_col=0)
    keys["country"] = keys.index.str[:2]

    nodal_demand = pd.DataFrame(0., dtype=float,
                                index=keys.index,
                                columns=industrial_demand.index)
    nodal_demand = pd.DataFrame(
        0.0, dtype=float, index=keys.index, columns=industrial_demand.index
    )

    countries = keys.country.unique()
    sectors = industrial_demand.columns.levels[1]

    for country, sector in product(countries, sectors):

        buses = keys.index[keys.country == country]
        mapping = sector_mapping.get(sector, 'population')
        mapping = sector_mapping.get(sector, "population")

        key = keys.loc[buses, mapping]
        demand = industrial_demand[country, sector]

        outer = pd.DataFrame(np.outer(key, demand),
                             index=key.index,
                             columns=demand.index)
        outer = pd.DataFrame(
            np.outer(key, demand), index=key.index, columns=demand.index
        )

        nodal_demand.loc[buses] += outer
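
Editor's note: the loop above spreads one country's per-sector energy demand over that country's buses with an outer product of a per-bus distribution key and the per-carrier demand vector. A minimal sketch of that step, with invented bus names and numbers (not part of the commit):

import numpy as np
import pandas as pd

key = pd.Series([0.7, 0.3], index=["DE0 0", "DE0 1"])    # per-bus weights, sum to 1
demand = pd.Series([10.0, 5.0], index=["elec", "heat"])  # country totals in TWh/a

# one row per bus, one column per carrier; rows scale with the key
outer = pd.DataFrame(np.outer(key, demand), index=key.index, columns=demand.index)
# DE0 0 gets 7.0 TWh/a elec and 3.5 TWh/a heat; DE0 1 gets the remaining 30 %
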
@ -62,11 +64,12 @@ def build_nodal_industrial_energy_demand():


if __name__ == "__main__":
    if 'snakemake' not in globals():
    if "snakemake" not in globals():
        from helper import mock_snakemake

        snakemake = mock_snakemake(
            'build_industrial_energy_demand_per_node_today',
            simpl='',
            "build_industrial_energy_demand_per_node_today",
            simpl="",
            clusters=48,
        )

@ -1,132 +1,204 @@
"""Build industrial production per country."""
# -*- coding: utf-8 -*-
"""
Build industrial production per country.
"""

import logging

logger = logging.getLogger(__name__)

import pandas as pd
import numpy as np
import multiprocessing as mp
from tqdm import tqdm

import numpy as np
import pandas as pd
from helper import mute_print
from tqdm import tqdm

tj_to_ktoe = 0.0238845
ktoe_to_twh = 0.01163

sub_sheet_name_dict = {'Iron and steel': 'ISI',
                       'Chemicals Industry': 'CHI',
                       'Non-metallic mineral products': 'NMM',
                       'Pulp, paper and printing': 'PPA',
                       'Food, beverages and tobacco': 'FBT',
                       'Non Ferrous Metals': 'NFM',
                       'Transport Equipment': 'TRE',
                       'Machinery Equipment': 'MAE',
                       'Textiles and leather': 'TEL',
                       'Wood and wood products': 'WWP',
                       'Other Industrial Sectors': 'OIS'}
sub_sheet_name_dict = {
    "Iron and steel": "ISI",
    "Chemicals Industry": "CHI",
    "Non-metallic mineral products": "NMM",
    "Pulp, paper and printing": "PPA",
    "Food, beverages and tobacco": "FBT",
    "Non Ferrous Metals": "NFM",
    "Transport Equipment": "TRE",
    "Machinery Equipment": "MAE",
    "Textiles and leather": "TEL",
    "Wood and wood products": "WWP",
    "Other Industrial Sectors": "OIS",
}

non_EU = ['NO', 'CH', 'ME', 'MK', 'RS', 'BA', 'AL']
non_EU = ["NO", "CH", "ME", "MK", "RS", "BA", "AL"]

jrc_names = {"GR": "EL", "GB": "UK"}

eu28 = ['FR', 'DE', 'GB', 'IT', 'ES', 'PL', 'SE', 'NL', 'BE', 'FI',
        'DK', 'PT', 'RO', 'AT', 'BG', 'EE', 'GR', 'LV', 'CZ',
        'HU', 'IE', 'SK', 'LT', 'HR', 'LU', 'SI', 'CY', 'MT']
eu28 = [
    "FR",
    "DE",
    "GB",
    "IT",
    "ES",
    "PL",
    "SE",
    "NL",
    "BE",
    "FI",
    "DK",
    "PT",
    "RO",
    "AT",
    "BG",
    "EE",
    "GR",
    "LV",
    "CZ",
    "HU",
    "IE",
    "SK",
    "LT",
    "HR",
    "LU",
    "SI",
    "CY",
    "MT",
]

sect2sub = {'Iron and steel': ['Electric arc', 'Integrated steelworks'],
            'Chemicals Industry': ['Basic chemicals', 'Other chemicals', 'Pharmaceutical products etc.'],
            'Non-metallic mineral products': ['Cement', 'Ceramics & other NMM', 'Glass production'],
            'Pulp, paper and printing': ['Pulp production', 'Paper production', 'Printing and media reproduction'],
            'Food, beverages and tobacco': ['Food, beverages and tobacco'],
            'Non Ferrous Metals': ['Alumina production', 'Aluminium - primary production', 'Aluminium - secondary production', 'Other non-ferrous metals'],
            'Transport Equipment': ['Transport Equipment'],
            'Machinery Equipment': ['Machinery Equipment'],
            'Textiles and leather': ['Textiles and leather'],
            'Wood and wood products': ['Wood and wood products'],
            'Other Industrial Sectors': ['Other Industrial Sectors']}
sect2sub = {
    "Iron and steel": ["Electric arc", "Integrated steelworks"],
    "Chemicals Industry": [
        "Basic chemicals",
        "Other chemicals",
        "Pharmaceutical products etc.",
    ],
    "Non-metallic mineral products": [
        "Cement",
        "Ceramics & other NMM",
        "Glass production",
    ],
    "Pulp, paper and printing": [
        "Pulp production",
        "Paper production",
        "Printing and media reproduction",
    ],
    "Food, beverages and tobacco": ["Food, beverages and tobacco"],
    "Non Ferrous Metals": [
        "Alumina production",
        "Aluminium - primary production",
        "Aluminium - secondary production",
        "Other non-ferrous metals",
    ],
    "Transport Equipment": ["Transport Equipment"],
    "Machinery Equipment": ["Machinery Equipment"],
    "Textiles and leather": ["Textiles and leather"],
    "Wood and wood products": ["Wood and wood products"],
    "Other Industrial Sectors": ["Other Industrial Sectors"],
}

sub2sect = {v: k for k, vv in sect2sub.items() for v in vv}

fields = {'Electric arc': 'Electric arc',
          'Integrated steelworks': 'Integrated steelworks',
          'Basic chemicals': 'Basic chemicals (kt ethylene eq.)',
          'Other chemicals': 'Other chemicals (kt ethylene eq.)',
          'Pharmaceutical products etc.': 'Pharmaceutical products etc. (kt ethylene eq.)',
          'Cement': 'Cement (kt)',
          'Ceramics & other NMM': 'Ceramics & other NMM (kt bricks eq.)',
          'Glass production': 'Glass production (kt)',
          'Pulp production': 'Pulp production (kt)',
          'Paper production': 'Paper production (kt)',
          'Printing and media reproduction': 'Printing and media reproduction (kt paper eq.)',
          'Food, beverages and tobacco': 'Physical output (index)',
          'Alumina production': 'Alumina production (kt)',
          'Aluminium - primary production': 'Aluminium - primary production',
          'Aluminium - secondary production': 'Aluminium - secondary production',
          'Other non-ferrous metals': 'Other non-ferrous metals (kt lead eq.)',
          'Transport Equipment': 'Physical output (index)',
          'Machinery Equipment': 'Physical output (index)',
          'Textiles and leather': 'Physical output (index)',
          'Wood and wood products': 'Physical output (index)',
          'Other Industrial Sectors': 'Physical output (index)'}
fields = {
    "Electric arc": "Electric arc",
    "Integrated steelworks": "Integrated steelworks",
    "Basic chemicals": "Basic chemicals (kt ethylene eq.)",
    "Other chemicals": "Other chemicals (kt ethylene eq.)",
    "Pharmaceutical products etc.": "Pharmaceutical products etc. (kt ethylene eq.)",
    "Cement": "Cement (kt)",
    "Ceramics & other NMM": "Ceramics & other NMM (kt bricks eq.)",
    "Glass production": "Glass production (kt)",
    "Pulp production": "Pulp production (kt)",
    "Paper production": "Paper production (kt)",
    "Printing and media reproduction": "Printing and media reproduction (kt paper eq.)",
    "Food, beverages and tobacco": "Physical output (index)",
    "Alumina production": "Alumina production (kt)",
    "Aluminium - primary production": "Aluminium - primary production",
    "Aluminium - secondary production": "Aluminium - secondary production",
    "Other non-ferrous metals": "Other non-ferrous metals (kt lead eq.)",
    "Transport Equipment": "Physical output (index)",
    "Machinery Equipment": "Physical output (index)",
    "Textiles and leather": "Physical output (index)",
    "Wood and wood products": "Physical output (index)",
    "Other Industrial Sectors": "Physical output (index)",
}

eb_names = {'NO': 'Norway', 'AL': 'Albania', 'BA': 'Bosnia and Herzegovina',
            'MK': 'FYR of Macedonia', 'GE': 'Georgia', 'IS': 'Iceland',
            'KO': 'Kosovo', 'MD': 'Moldova', 'ME': 'Montenegro', 'RS': 'Serbia',
            'UA': 'Ukraine', 'TR': 'Turkey', }
eb_names = {
    "NO": "Norway",
    "AL": "Albania",
    "BA": "Bosnia and Herzegovina",
    "MK": "FYR of Macedonia",
    "GE": "Georgia",
    "IS": "Iceland",
    "KO": "Kosovo",
    "MD": "Moldova",
    "ME": "Montenegro",
    "RS": "Serbia",
    "UA": "Ukraine",
    "TR": "Turkey",
}

eb_sectors = {'Iron & steel industry': 'Iron and steel',
              'Chemical and Petrochemical industry': 'Chemicals Industry',
              'Non-ferrous metal industry': 'Non-metallic mineral products',
              'Paper, Pulp and Print': 'Pulp, paper and printing',
              'Food and Tabacco': 'Food, beverages and tobacco',
              'Non-metallic Minerals (Glass, pottery & building mat. Industry)': 'Non Ferrous Metals',
              'Transport Equipment': 'Transport Equipment',
              'Machinery': 'Machinery Equipment',
              'Textile and Leather': 'Textiles and leather',
              'Wood and Wood Products': 'Wood and wood products',
              'Non-specified (Industry)': 'Other Industrial Sectors'}
eb_sectors = {
    "Iron & steel industry": "Iron and steel",
    "Chemical and Petrochemical industry": "Chemicals Industry",
    "Non-ferrous metal industry": "Non-metallic mineral products",
    "Paper, Pulp and Print": "Pulp, paper and printing",
    "Food and Tabacco": "Food, beverages and tobacco",
    "Non-metallic Minerals (Glass, pottery & building mat. Industry)": "Non Ferrous Metals",
    "Transport Equipment": "Transport Equipment",
    "Machinery": "Machinery Equipment",
    "Textile and Leather": "Textiles and leather",
    "Wood and Wood Products": "Wood and wood products",
    "Non-specified (Industry)": "Other Industrial Sectors",
}

# TODO: this should go in a csv in `data`
# Annual energy consumption in Switzerland by sector in 2015 (in TJ)
# From: Energieverbrauch in der Industrie und im Dienstleistungssektor, Der Bundesrat
# http://www.bfe.admin.ch/themen/00526/00541/00543/index.html?lang=de&dossier_id=00775
e_switzerland = pd.Series({'Iron and steel': 7889.,
                           'Chemicals Industry': 26871.,
                           'Non-metallic mineral products': 15513.+3820.,
                           'Pulp, paper and printing': 12004.,
                           'Food, beverages and tobacco': 17728.,
                           'Non Ferrous Metals': 3037.,
                           'Transport Equipment': 14993.,
                           'Machinery Equipment': 4724.,
                           'Textiles and leather': 1742.,
                           'Wood and wood products': 0.,
                           'Other Industrial Sectors': 10825.,
                           'current electricity': 53760.})
e_switzerland = pd.Series(
    {
        "Iron and steel": 7889.0,
        "Chemicals Industry": 26871.0,
        "Non-metallic mineral products": 15513.0 + 3820.0,
        "Pulp, paper and printing": 12004.0,
        "Food, beverages and tobacco": 17728.0,
        "Non Ferrous Metals": 3037.0,
        "Transport Equipment": 14993.0,
        "Machinery Equipment": 4724.0,
        "Textiles and leather": 1742.0,
        "Wood and wood products": 0.0,
        "Other Industrial Sectors": 10825.0,
        "current electricity": 53760.0,
    }
)


def find_physical_output(df):
    start = np.where(df.index.str.contains('Physical output', na=''))[0][0]
    start = np.where(df.index.str.contains("Physical output", na=""))[0][0]
    empty_row = np.where(df.index.isnull())[0]
    end = empty_row[np.argmax(empty_row > start)]
    return slice(start, end)


def get_energy_ratio(country):

    if country == 'CH':
    if country == "CH":
        e_country = e_switzerland * tj_to_ktoe
    else:
        # estimate physical output, energy consumption in the sector and country
        fn = f"{eurostat_dir}/{eb_names[country]}.XLSX"
        with mute_print():
            df = pd.read_excel(fn, sheet_name='2016', index_col=2,
                               header=0, skiprows=1).squeeze('columns')
        e_country = df.loc[eb_sectors.keys(
        ), 'Total all products'].rename(eb_sectors)
            df = pd.read_excel(
                fn, sheet_name="2016", index_col=2, header=0, skiprows=1
            ).squeeze("columns")
        e_country = df.loc[eb_sectors.keys(), "Total all products"].rename(eb_sectors)

    fn = f'{jrc_dir}/JRC-IDEES-2015_Industry_EU28.xlsx'
    fn = f"{jrc_dir}/JRC-IDEES-2015_Industry_EU28.xlsx"

    with mute_print():
        df = pd.read_excel(fn, sheet_name='Ind_Summary',
                           index_col=0, header=0).squeeze('columns')
        df = pd.read_excel(fn, sheet_name="Ind_Summary", index_col=0, header=0).squeeze(
            "columns"
        )

    assert df.index[48] == "by sector"
    year_i = df.columns.get_loc(year)
@ -139,15 +211,14 @@ def get_energy_ratio(country):


def industry_production_per_country(country):

    def get_sector_data(sector, country):

        jrc_country = jrc_names.get(country, country)
        fn = f'{jrc_dir}/JRC-IDEES-2015_Industry_{jrc_country}.xlsx'
        fn = f"{jrc_dir}/JRC-IDEES-2015_Industry_{jrc_country}.xlsx"
        sheet = sub_sheet_name_dict[sector]
        with mute_print():
            df = pd.read_excel(fn, sheet_name=sheet,
                               index_col=0, header=0).squeeze('columns')
            df = pd.read_excel(fn, sheet_name=sheet, index_col=0, header=0).squeeze(
                "columns"
            )

        year_i = df.columns.get_loc(year)
        df = df.iloc[find_physical_output(df), year_i]
@ -169,11 +240,14 @@ def industry_production_per_country(country):


def industry_production(countries):

    nprocesses = snakemake.threads
    func = industry_production_per_country
    tqdm_kwargs = dict(ascii=False, unit=' country', total=len(countries),
                       desc="Build industry production")
    tqdm_kwargs = dict(
        ascii=False,
        unit=" country",
        total=len(countries),
        desc="Build industry production",
    )
    with mp.Pool(processes=nprocesses) as pool:
        demand_l = list(tqdm(pool.imap(func, countries), **tqdm_kwargs))
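
Editor's note: the function above is the standard pool-plus-progress-bar pattern: pool.imap yields per-country results as they complete, so tqdm can advance once per country. A self-contained sketch of the same pattern with a toy worker (the real worker reads JRC-IDEES spreadsheets):

import multiprocessing as mp

from tqdm import tqdm


def work(country):
    # stand-in for industry_production_per_country
    return country.lower()


if __name__ == "__main__":
    countries = ["DE", "FR", "PL"]
    tqdm_kwargs = dict(ascii=False, unit=" country", total=len(countries))
    with mp.Pool(processes=2) as pool:
        results = list(tqdm(pool.imap(work, countries), **tqdm_kwargs))
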
@ -185,7 +259,9 @@ def industry_production(countries):


def separate_basic_chemicals(demand):
    """Separate basic chemicals into ammonia, chlorine, methanol and HVC."""
    """
    Separate basic chemicals into ammonia, chlorine, methanol and HVC.
    """

    ammonia = pd.read_csv(snakemake.input.ammonia_production, index_col=0)

@ -194,14 +270,14 @@ def separate_basic_chemicals(demand):

    logger.info(f"Following countries have no ammonia demand: {missing.tolist()}")

    demand["Ammonia"] = 0.
    demand["Ammonia"] = 0.0

    demand.loc[there, "Ammonia"] = ammonia.loc[there, str(year)]

    demand["Basic chemicals"] -= demand["Ammonia"]

    # EE, HR and LT got negative demand through subtraction - poor data
    demand['Basic chemicals'].clip(lower=0., inplace=True)
    demand["Basic chemicals"].clip(lower=0.0, inplace=True)

    # assume HVC, methanol, chlorine production proportional to non-ammonia basic chemicals
    distribution_key = demand["Basic chemicals"] / demand["Basic chemicals"].sum()
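
Editor's note: the function above first carves reported ammonia production out of the JRC "Basic chemicals" totals, clips the negatives that poor source data produce, and then uses the remainder as the key for distributing HVC, chlorine and methanol. A toy illustration with invented numbers:

import pandas as pd

demand = pd.DataFrame({"Basic chemicals": [100.0, 20.0]}, index=["DE", "EE"])
ammonia = pd.Series([30.0, 25.0], index=["DE", "EE"])

demand["Ammonia"] = ammonia
demand["Basic chemicals"] -= demand["Ammonia"]           # EE becomes -5: poor data
demand["Basic chemicals"].clip(lower=0.0, inplace=True)  # negatives floored at zero

distribution_key = demand["Basic chemicals"] / demand["Basic chemicals"].sum()
# DE -> 1.0, EE -> 0.0: all non-ammonia basic chemicals are assigned to DE
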
@ -211,16 +287,18 @@ def separate_basic_chemicals(demand):

    demand.drop(columns=["Basic chemicals"], inplace=True)

if __name__ == '__main__':
    if 'snakemake' not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake('build_industrial_production_per_country')

    logging.basicConfig(level=snakemake.config['logging_level'])
if __name__ == "__main__":
    if "snakemake" not in globals():
        from helper import mock_snakemake

        snakemake = mock_snakemake("build_industrial_production_per_country")

    logging.basicConfig(level=snakemake.config["logging_level"])

    countries = non_EU + eu28

    year = snakemake.config['industry']['reference_year']
    year = snakemake.config["industry"]["reference_year"]

    config = snakemake.config["industry"]

@ -232,4 +310,4 @@ if __name__ == '__main__':
    separate_basic_chemicals(demand)

    fn = snakemake.output.industrial_production_per_country
    demand.to_csv(fn, float_format='%.2f')
    demand.to_csv(fn, float_format="%.2f")

@ -1,13 +1,16 @@
"""Build future industrial production per country."""
# -*- coding: utf-8 -*-
"""
Build future industrial production per country.
"""

import pandas as pd

from prepare_sector_network import get

if __name__ == '__main__':
    if 'snakemake' not in globals():
if __name__ == "__main__":
    if "snakemake" not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake('build_industrial_production_per_country_tomorrow')

        snakemake = mock_snakemake("build_industrial_production_per_country_tomorrow")

    config = snakemake.config["industry"]

@ -24,12 +27,20 @@ if __name__ == '__main__':
    int_steel = production["Integrated steelworks"].sum()
    fraction_persistent_primary = st_primary_fraction * total_steel.sum() / int_steel

    dri = dri_fraction * fraction_persistent_primary * production["Integrated steelworks"]
    dri = (
        dri_fraction * fraction_persistent_primary * production["Integrated steelworks"]
    )
    production.insert(2, "DRI + Electric arc", dri)

    not_dri = (1 - dri_fraction)
    production["Integrated steelworks"] = not_dri * fraction_persistent_primary * production["Integrated steelworks"]
    production["Electric arc"] = total_steel - production["DRI + Electric arc"] - production["Integrated steelworks"]
    not_dri = 1 - dri_fraction
    production["Integrated steelworks"] = (
        not_dri * fraction_persistent_primary * production["Integrated steelworks"]
    )
    production["Electric arc"] = (
        total_steel
        - production["DRI + Electric arc"]
        - production["Integrated steelworks"]
    )

    keys = ["Aluminium - primary production", "Aluminium - secondary production"]
    total_aluminium = production[keys].sum(axis=1)
@ -38,15 +49,23 @@ if __name__ == '__main__':
    key_sec = "Aluminium - secondary production"

    al_primary_fraction = get(config["Al_primary_fraction"], investment_year)
    fraction_persistent_primary = al_primary_fraction * total_aluminium.sum() / production[key_pri].sum()
    fraction_persistent_primary = (
        al_primary_fraction * total_aluminium.sum() / production[key_pri].sum()
    )

    production[key_pri] = fraction_persistent_primary * production[key_pri]
    production[key_sec] = total_aluminium - production[key_pri]

    production["HVC (mechanical recycling)"] = get(config["HVC_mechanical_recycling_fraction"], investment_year) * production["HVC"]
    production["HVC (chemical recycling)"] = get(config["HVC_chemical_recycling_fraction"], investment_year) * production["HVC"]
    production["HVC (mechanical recycling)"] = (
        get(config["HVC_mechanical_recycling_fraction"], investment_year)
        * production["HVC"]
    )
    production["HVC (chemical recycling)"] = (
        get(config["HVC_chemical_recycling_fraction"], investment_year)
        * production["HVC"]
    )

    production["HVC"] *= get(config['HVC_primary_fraction'], investment_year)
    production["HVC"] *= get(config["HVC_primary_fraction"], investment_year)

    fn = snakemake.output.industrial_production_per_country_tomorrow
    production.to_csv(fn, float_format='%.2f')
    production.to_csv(fn, float_format="%.2f")
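
Editor's note: the steel block above keeps a fraction of today's integrated steelworks alive as "persistent" primary production, routes a share of that to DRI + electric arc, and lets electric arc pick up the rest of total steel. A worked example with invented inputs (the config values below are assumptions, not from the commit):

# 100 units of steel today, 60 of them from integrated steelworks
total_steel, int_steel = 100.0, 60.0
st_primary_fraction = 0.3  # assumed config value for the investment year
dri_fraction = 0.5         # assumed config value

fraction_persistent_primary = st_primary_fraction * total_steel / int_steel  # 0.5
dri = dri_fraction * fraction_persistent_primary * int_steel                 # 15.0
integrated = (1 - dri_fraction) * fraction_persistent_primary * int_steel    # 15.0
electric_arc = total_steel - dri - integrated                                # 70.0
# the primary routes (15 + 15) land at exactly 30 % of total steel, as configured
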
@ -1,36 +1,39 @@
"""Build industrial production per node."""
# -*- coding: utf-8 -*-
"""
Build industrial production per node.
"""

from itertools import product

import pandas as pd
from itertools import product

# map JRC/our sectors to hotmaps sectors, where a mapping exists
sector_mapping = {
    'Electric arc': 'Iron and steel',
    'Integrated steelworks': 'Iron and steel',
    'DRI + Electric arc': 'Iron and steel',
    'Ammonia': 'Chemical industry',
    'HVC': 'Chemical industry',
    'HVC (mechanical recycling)': 'Chemical industry',
    'HVC (chemical recycling)': 'Chemical industry',
    'Methanol': 'Chemical industry',
    'Chlorine': 'Chemical industry',
    'Other chemicals': 'Chemical industry',
    'Pharmaceutical products etc.': 'Chemical industry',
    'Cement': 'Cement',
    'Ceramics & other NMM': 'Non-metallic mineral products',
    'Glass production': 'Glass',
    'Pulp production': 'Paper and printing',
    'Paper production': 'Paper and printing',
    'Printing and media reproduction': 'Paper and printing',
    'Alumina production': 'Non-ferrous metals',
    'Aluminium - primary production': 'Non-ferrous metals',
    'Aluminium - secondary production': 'Non-ferrous metals',
    'Other non-ferrous metals': 'Non-ferrous metals',
    "Electric arc": "Iron and steel",
    "Integrated steelworks": "Iron and steel",
    "DRI + Electric arc": "Iron and steel",
    "Ammonia": "Chemical industry",
    "HVC": "Chemical industry",
    "HVC (mechanical recycling)": "Chemical industry",
    "HVC (chemical recycling)": "Chemical industry",
    "Methanol": "Chemical industry",
    "Chlorine": "Chemical industry",
    "Other chemicals": "Chemical industry",
    "Pharmaceutical products etc.": "Chemical industry",
    "Cement": "Cement",
    "Ceramics & other NMM": "Non-metallic mineral products",
    "Glass production": "Glass",
    "Pulp production": "Paper and printing",
    "Paper production": "Paper and printing",
    "Printing and media reproduction": "Paper and printing",
    "Alumina production": "Non-ferrous metals",
    "Aluminium - primary production": "Non-ferrous metals",
    "Aluminium - secondary production": "Non-ferrous metals",
    "Other non-ferrous metals": "Non-ferrous metals",
}


def build_nodal_industrial_production():

    fn = snakemake.input.industrial_production_per_country_tomorrow
    industrial_production = pd.read_csv(fn, index_col=0)

@ -38,29 +41,32 @@ def build_nodal_industrial_production():
    keys = pd.read_csv(fn, index_col=0)
    keys["country"] = keys.index.str[:2]

    nodal_production = pd.DataFrame(index=keys.index,
                                    columns=industrial_production.columns,
                                    dtype=float)
    nodal_production = pd.DataFrame(
        index=keys.index, columns=industrial_production.columns, dtype=float
    )

    countries = keys.country.unique()
    sectors = industrial_production.columns

    for country, sector in product(countries, sectors):

        buses = keys.index[keys.country == country]
        mapping = sector_mapping.get(sector, "population")

        key = keys.loc[buses, mapping]
        nodal_production.loc[buses, sector] = industrial_production.at[country, sector] * key
        nodal_production.loc[buses, sector] = (
            industrial_production.at[country, sector] * key
        )

    nodal_production.to_csv(snakemake.output.industrial_production_per_node)


if __name__ == "__main__":
    if 'snakemake' not in globals():
    if "snakemake" not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake('build_industrial_production_per_node',
                                   simpl='',

        snakemake = mock_snakemake(
            "build_industrial_production_per_node",
            simpl="",
            clusters=48,
        )

@ -1,4 +1,7 @@
"""Build industry sector ratios."""
# -*- coding: utf-8 -*-
"""
Build industry sector ratios.
"""

import pandas as pd
from helper import mute_print
@ -68,7 +71,6 @@ index = [


def load_idees_data(sector, country="EU28"):

    suffixes = {"out": "", "fec": "_fec", "ued": "_ued", "emi": "_emi"}
    sheets = {k: sheet_names[sector] + v for k, v in suffixes.items()}

@ -91,7 +93,6 @@ def load_idees_data(sector, country="EU28"):


def iron_and_steel():

    # There are two different approaches to produce iron and steel:
    # i.e., integrated steelworks and electric arc.
    # The electric arc approach has higher efficiency and relies more on electricity.
@ -602,7 +603,6 @@ def chemicals_industry():


def nonmetalic_mineral_products():

    # This includes cement, ceramic and glass production.
    # This includes process emissions related to the fabrication of clinker.

@ -789,7 +789,6 @@ def nonmetalic_mineral_products():


def pulp_paper_printing():

    # Pulp, paper and printing can be completely electrified.
    # There are no process emissions associated with this sector.

@ -942,7 +941,6 @@ def pulp_paper_printing():


def food_beverages_tobacco():

    # Food, beverages and tobacco can be completely electrified.
    # There are no process emissions associated with this sector.

@ -1002,7 +1000,6 @@ def food_beverages_tobacco():


def non_ferrous_metals():

    sector = "Non Ferrous Metals"
    idees = load_idees_data(sector)

@ -1205,7 +1202,6 @@ def non_ferrous_metals():


def transport_equipment():

    sector = "Transport Equipment"
    idees = load_idees_data(sector)

@ -1256,7 +1252,6 @@ def transport_equipment():


def machinery_equipment():

    sector = "Machinery Equipment"

    idees = load_idees_data(sector)
@ -1309,7 +1304,6 @@ def machinery_equipment():


def textiles_and_leather():

    sector = "Textiles and leather"

    idees = load_idees_data(sector)
@ -1358,7 +1352,6 @@ def textiles_and_leather():


def wood_and_wood_products():

    sector = "Wood and wood products"

    idees = load_idees_data(sector)
@ -1404,7 +1397,6 @@ def wood_and_wood_products():


def other_industrial_sectors():

    sector = "Other Industrial Sectors"

    idees = load_idees_data(sector)
@ -1465,9 +1457,10 @@ def other_industrial_sectors():


if __name__ == "__main__":
    if 'snakemake' not in globals():
    if "snakemake" not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake('build_industry_sector_ratios')

        snakemake = mock_snakemake("build_industry_sector_ratios")

    # TODO make config option
    year = 2015

@ -1,29 +1,35 @@
"""Build mapping between grid cells and population (total, urban, rural)"""
# -*- coding: utf-8 -*-
"""
Build mapping between grid cells and population (total, urban, rural)
"""

import logging

logger = logging.getLogger(__name__)

import multiprocessing as mp

import atlite
import geopandas as gpd
import numpy as np
import pandas as pd
import xarray as xr
import geopandas as gpd

if __name__ == '__main__':
    if 'snakemake' not in globals():
if __name__ == "__main__":
    if "snakemake" not in globals():
        from helper import mock_snakemake

        snakemake = mock_snakemake('build_population_layouts')

    logging.basicConfig(level=snakemake.config['logging_level'])
        snakemake = mock_snakemake("build_population_layouts")

    cutout = atlite.Cutout(snakemake.config['atlite']['cutout'])
    logging.basicConfig(level=snakemake.config["logging_level"])

    cutout = atlite.Cutout(snakemake.config["atlite"]["cutout"])

    grid_cells = cutout.grid.geometry

    # nuts3 has columns country, gdp, pop, geometry
    # population is given in dimensions of 1e3=k
    nuts3 = gpd.read_file(snakemake.input.nuts3_shapes).set_index('index')
    nuts3 = gpd.read_file(snakemake.input.nuts3_shapes).set_index("index")

    # Indicator matrix NUTS3 -> grid cells
    I = atlite.cutout.compute_indicatormatrix(nuts3.geometry, grid_cells)
@ -34,9 +40,12 @@ if __name__ == '__main__':

    countries = np.sort(nuts3.country.unique())

    urban_fraction = pd.read_csv(snakemake.input.urban_percent,
                                 header=None, index_col=0,
                                 names=['fraction']).squeeze() / 100.
    urban_fraction = (
        pd.read_csv(
            snakemake.input.urban_percent, header=None, index_col=0, names=["fraction"]
        ).squeeze()
        / 100.0
    )

    # fill missing Balkans values
    missing = ["AL", "ME", "MK"]
@ -46,7 +55,7 @@ if __name__ == '__main__':
    urban_fraction = pd.concat([urban_fraction, fill_values])

    # population in each grid cell
    pop_cells = pd.Series(I.dot(nuts3['pop']))
    pop_cells = pd.Series(I.dot(nuts3["pop"]))

    # in km^2
    cell_areas = grid_cells.to_crs(3035).area / 1e6
@ -55,13 +64,15 @@ if __name__ == '__main__':
    density_cells = pop_cells / cell_areas

    # rural or urban population in grid cell
    pop_rural = pd.Series(0., density_cells.index)
    pop_urban = pd.Series(0., density_cells.index)
    pop_rural = pd.Series(0.0, density_cells.index)
    pop_urban = pd.Series(0.0, density_cells.index)

    for ct in countries:
        logger.debug(f"The urbanization rate for {ct} is {round(urban_fraction[ct]*100)}%")
        logger.debug(
            f"The urbanization rate for {ct} is {round(urban_fraction[ct]*100)}%"
        )

        indicator_nuts3_ct = nuts3.country.apply(lambda x: 1. if x == ct else 0.)
        indicator_nuts3_ct = nuts3.country.apply(lambda x: 1.0 if x == ct else 0.0)

        indicator_cells_ct = pd.Series(Iinv.T.dot(indicator_nuts3_ct))

@ -70,7 +81,7 @@ if __name__ == '__main__':
        pop_cells_ct = indicator_cells_ct * pop_cells

        # correct for imprecision of Iinv*I
        pop_ct = nuts3.loc[nuts3.country==ct,'pop'].sum()
        pop_ct = nuts3.loc[nuts3.country == ct, "pop"].sum()
        pop_cells_ct *= pop_ct / pop_cells_ct.sum()

        # The first low density grid cells to reach rural fraction are rural
@ -80,20 +91,19 @@ if __name__ == '__main__':
        pop_ct_rural_b = asc_density_cumsum < rural_fraction_ct
        pop_ct_urban_b = ~pop_ct_rural_b

        pop_ct_rural_b[indicator_cells_ct == 0.] = False
        pop_ct_urban_b[indicator_cells_ct == 0.] = False
        pop_ct_rural_b[indicator_cells_ct == 0.0] = False
        pop_ct_urban_b[indicator_cells_ct == 0.0] = False

        pop_rural += pop_cells_ct.where(pop_ct_rural_b, 0.)
        pop_urban += pop_cells_ct.where(pop_ct_urban_b, 0.)
        pop_rural += pop_cells_ct.where(pop_ct_rural_b, 0.0)
        pop_urban += pop_cells_ct.where(pop_ct_urban_b, 0.0)

    pop_cells = {"total": pop_cells}
    pop_cells["rural"] = pop_rural
    pop_cells["urban"] = pop_urban

    for key, pop in pop_cells.items():

        ycoords = ('y', cutout.coords['y'].data)
        xcoords = ('x', cutout.coords['x'].data)
        ycoords = ("y", cutout.coords["y"].data)
        xcoords = ("x", cutout.coords["x"].data)
        values = pop.values.reshape(cutout.shape)
        layout = xr.DataArray(values, [ycoords, xcoords])
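
Editor's note: the script rests on an indicator matrix that maps NUTS3 regions to cutout grid cells, so region-level population turns into a cell-level layout with a single dot product. A dense toy version of that idea (the real matrix from atlite is sparse; the values below are invented):

import numpy as np

# rows = 3 grid cells, columns = 2 regions;
# entry = fraction of the region falling into that cell
I = np.array([[0.8, 0.0],
              [0.2, 0.5],
              [0.0, 0.5]])
pop_regions = np.array([1000.0, 400.0])

pop_cells = I.dot(pop_regions)  # array([800., 400., 200.])
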
@ -1,13 +1,17 @@
"""Build population-weighted energy totals."""
# -*- coding: utf-8 -*-
"""
Build population-weighted energy totals.
"""

import pandas as pd

if __name__ == '__main__':
    if 'snakemake' not in globals():
if __name__ == "__main__":
    if "snakemake" not in globals():
        from helper import mock_snakemake

        snakemake = mock_snakemake(
            'build_population_weighted_energy_totals',
            simpl='',
            "build_population_weighted_energy_totals",
            simpl="",
            clusters=48,
        )

@ -15,7 +19,7 @@ if __name__ == '__main__':

    energy_totals = pd.read_csv(snakemake.input.energy_totals, index_col=0)

    nodal_energy_totals = energy_totals.loc[pop_layout.ct].fillna(0.)
    nodal_energy_totals = energy_totals.loc[pop_layout.ct].fillna(0.0)
    nodal_energy_totals.index = pop_layout.index
    nodal_energy_totals = nodal_energy_totals.multiply(pop_layout.fraction, axis=0)

File diff suppressed because it is too large
@ -1,3 +1,4 @@
# -*- coding: utf-8 -*-
"""
Build salt cavern potentials for hydrogen storage.

@ -22,29 +23,35 @@ import geopandas as gpd
import pandas as pd


def concat_gdf(gdf_list, crs='EPSG:4326'):
    """Concatenate multiple geopandas dataframes with common coordinate reference system (crs)."""
def concat_gdf(gdf_list, crs="EPSG:4326"):
    """
    Concatenate multiple geopandas dataframes with common coordinate reference
    system (crs).
    """
    return gpd.GeoDataFrame(pd.concat(gdf_list), crs=crs)


def load_bus_regions(onshore_path, offshore_path):
    """Load pypsa-eur on- and offshore regions and concat."""
    """
    Load pypsa-eur on- and offshore regions and concat.
    """

    bus_regions_offshore = gpd.read_file(offshore_path)
    bus_regions_onshore = gpd.read_file(onshore_path)
    bus_regions = concat_gdf([bus_regions_offshore, bus_regions_onshore])
    bus_regions = bus_regions.dissolve(by='name', aggfunc='sum')
    bus_regions = bus_regions.dissolve(by="name", aggfunc="sum")

    return bus_regions


def area(gdf):
    """Returns area of GeoDataFrame geometries in square kilometers."""
    """
    Returns area of GeoDataFrame geometries in square kilometers.
    """
    return gdf.to_crs(epsg=3035).area.div(1e6)


def salt_cavern_potential_by_region(caverns, regions):

    # calculate area of caverns shapes
    caverns["area_caverns"] = area(caverns)

@ -53,18 +60,24 @@ def salt_cavern_potential_by_region(caverns, regions):
    # calculate share of cavern area inside region
    overlay["share"] = area(overlay) / overlay["area_caverns"]

    overlay["e_nom"] = overlay.eval("capacity_per_area * share * area_caverns / 1000")  # TWh
    overlay["e_nom"] = overlay.eval(
        "capacity_per_area * share * area_caverns / 1000"
    )  # TWh

    caverns_regions = overlay.groupby(['name', "storage_type"]).e_nom.sum().unstack("storage_type")
    caverns_regions = (
        overlay.groupby(["name", "storage_type"]).e_nom.sum().unstack("storage_type")
    )

    return caverns_regions
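
Editor's note: cavern storage potential is allocated by intersecting cavern polygons with bus regions and weighting capacity by each region's share of the cavern's area, which is what the overlay/share/e_nom lines above compute. A minimal sketch with toy planar geometries (the real script measures areas in km2 via EPSG:3035; the shapes and numbers below are invented):

import geopandas as gpd
from shapely.geometry import box

regions = gpd.GeoDataFrame(
    {"name": ["A", "B"]}, geometry=[box(0, 0, 1, 1), box(1, 0, 2, 1)], crs=4326
)
caverns = gpd.GeoDataFrame(
    {"capacity_per_area": [10.0]}, geometry=[box(0.5, 0, 1.5, 1)], crs=4326
)
caverns["area_caverns"] = caverns.area  # toy units; the real code reprojects first

overlay = gpd.overlay(regions, caverns)  # splits the cavern at the region border
overlay["share"] = overlay.area / overlay["area_caverns"]  # 0.5 for A, 0.5 for B
overlay["e_nom"] = overlay["capacity_per_area"] * overlay["share"] * overlay["area_caverns"]
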

if __name__ == '__main__':
    if 'snakemake' not in globals():
if __name__ == "__main__":
    if "snakemake" not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake('build_salt_cavern_potentials', simpl='', clusters='37')

        snakemake = mock_snakemake(
            "build_salt_cavern_potentials", simpl="", clusters="37"
        )

    fn_onshore = snakemake.input.regions_onshore
    fn_offshore = snakemake.input.regions_offshore

@ -1,12 +1,18 @@
import pandas as pd
# -*- coding: utf-8 -*-
import geopandas as gpd
import pandas as pd


def area(gdf):
    """Returns area of GeoDataFrame geometries in square kilometers."""
    """
    Returns area of GeoDataFrame geometries in square kilometers.
    """
    return gdf.to_crs(epsg=3035).area.div(1e6)


def allocate_sequestration_potential(gdf, regions, attr='conservative estimate Mt', threshold=3):
def allocate_sequestration_potential(
    gdf, regions, attr="conservative estimate Mt", threshold=3
):
    gdf = gdf.loc[gdf[attr] > threshold, [attr, "geometry"]]
    gdf["area_sqkm"] = area(gdf)
    overlay = gpd.overlay(regions, gdf, keep_geom_type=True)
@ -19,12 +25,11 @@ def allocate_sequestration_potential(gdf, regions, attr='conservative estimate M


if __name__ == "__main__":
    if 'snakemake' not in globals():
    if "snakemake" not in globals():
        from helper import mock_snakemake

        snakemake = mock_snakemake(
            'build_sequestration_potentials',
            simpl='',
            clusters="181"
            "build_sequestration_potentials", simpl="", clusters="181"
        )

    cf = snakemake.config["sector"]["regional_co2_sequestration_potential"]
@ -34,9 +39,11 @@ if __name__ == "__main__":
    regions = gpd.read_file(snakemake.input.regions_offshore)
    if cf["include_onshore"]:
        onregions = gpd.read_file(snakemake.input.regions_onshore)
        regions = pd.concat([regions, onregions]).dissolve(by='name').reset_index()
        regions = pd.concat([regions, onregions]).dissolve(by="name").reset_index()

    s = allocate_sequestration_potential(gdf, regions, attr=cf["attribute"], threshold=cf["min_size"])
    s = allocate_sequestration_potential(
        gdf, regions, attr=cf["attribute"], threshold=cf["min_size"]
    )

    s = s.where(s > cf["min_size"]).dropna()

@ -1,24 +1,32 @@
"""Build regional demand for international navigation based on outflow volume of ports."""
# -*- coding: utf-8 -*-
"""
Build regional demand for international navigation based on outflow volume of
ports.
"""

import pandas as pd
import geopandas as gpd
import json

if __name__ == '__main__':
    if 'snakemake' not in globals():
import geopandas as gpd
import pandas as pd

if __name__ == "__main__":
    if "snakemake" not in globals():
        from helper import mock_snakemake

        snakemake = mock_snakemake(
            'build_shipping_demand_per_node',
            simpl='',
            "build_shipping_demand_per_node",
            simpl="",
            clusters=48,
        )

    scope = gpd.read_file(snakemake.input.scope).geometry[0]
    regions = gpd.read_file(snakemake.input.regions).set_index('name')
    demand = pd.read_csv(snakemake.input.demand, index_col=0)["total international navigation"]
    regions = gpd.read_file(snakemake.input.regions).set_index("name")
    demand = pd.read_csv(snakemake.input.demand, index_col=0)[
        "total international navigation"
    ]

    # read port data into GeoDataFrame
    with open(snakemake.input.ports, 'r', encoding='latin_1') as f:
    with open(snakemake.input.ports, "r", encoding="latin_1") as f:
        ports = json.load(f)
    ports = pd.json_normalize(ports, "features", sep="_")
    coordinates = ports.geometry_coordinates
@ -31,7 +39,9 @@ if __name__ == '__main__':
    # assign ports to nearest region
    p = european_ports.to_crs(3857)
    r = regions.to_crs(3857)
    outflows = p.sjoin_nearest(r).groupby("index_right").properties_outflows.sum().div(1e3)
    outflows = (
        p.sjoin_nearest(r).groupby("index_right").properties_outflows.sum().div(1e3)
    )

    # calculate fraction of each country's port outflows
    countries = outflows.index.str[:2]
@ -39,7 +49,7 @@ if __name__ == '__main__':
    fraction = outflows / countries.map(outflows_per_country)

    # distribute per-country demands to nodes based on these fractions
    nodal_demand = demand.loc[countries].fillna(0.)
    nodal_demand = demand.loc[countries].fillna(0.0)
    nodal_demand.index = fraction.index
    nodal_demand = nodal_demand.multiply(fraction, axis=0)
    nodal_demand = nodal_demand.reindex(regions.index, fill_value=0)
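
Editor's note: after ports are snapped to their nearest bus region, each region's share of its country's total port outflow becomes the key that splits national shipping demand. The key construction in isolation, with invented outflow volumes:

import pandas as pd

# outflow per region after the sjoin_nearest/groupby step (assumed values)
outflows = pd.Series({"DE0 0": 300.0, "DE0 1": 100.0, "FR0 0": 50.0})

countries = outflows.index.str[:2]
outflows_per_country = outflows.groupby(countries).sum()  # DE 400, FR 50
fraction = outflows / countries.map(outflows_per_country)
# DE0 0 -> 0.75, DE0 1 -> 0.25, FR0 0 -> 1.0
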
@ -1,18 +1,22 @@
"""Build solar thermal collector time series."""
# -*- coding: utf-8 -*-
"""
Build solar thermal collector time series.
"""

import geopandas as gpd
import atlite
import geopandas as gpd
import numpy as np
import pandas as pd
import xarray as xr
import numpy as np
from dask.distributed import Client, LocalCluster

if __name__ == '__main__':
    if 'snakemake' not in globals():
if __name__ == "__main__":
    if "snakemake" not in globals():
        from helper import mock_snakemake

        snakemake = mock_snakemake(
            'build_solar_thermal_profiles',
            simpl='',
            "build_solar_thermal_profiles",
            simpl="",
            clusters=48,
        )

@ -20,29 +24,36 @@ if __name__ == '__main__':
    cluster = LocalCluster(n_workers=nprocesses, threads_per_worker=1)
    client = Client(cluster, asynchronous=True)

    config = snakemake.config['solar_thermal']
    config = snakemake.config["solar_thermal"]

    time = pd.date_range(freq='h', **snakemake.config['snapshots'])
    cutout_config = snakemake.config['atlite']['cutout']
    time = pd.date_range(freq="h", **snakemake.config["snapshots"])
    cutout_config = snakemake.config["atlite"]["cutout"]
    cutout = atlite.Cutout(cutout_config).sel(time=time)

    clustered_regions = gpd.read_file(
        snakemake.input.regions_onshore).set_index('name').buffer(0).squeeze()
    clustered_regions = (
        gpd.read_file(snakemake.input.regions_onshore)
        .set_index("name")
        .buffer(0)
        .squeeze()
    )

    I = cutout.indicatormatrix(clustered_regions)

    pop_layout = xr.open_dataarray(snakemake.input.pop_layout)

    stacked_pop = pop_layout.stack(spatial=('y', 'x'))
    stacked_pop = pop_layout.stack(spatial=("y", "x"))
    M = I.T.dot(np.diag(I.dot(stacked_pop)))

    nonzero_sum = M.sum(axis=0, keepdims=True)
    nonzero_sum[nonzero_sum == 0.] = 1.
    nonzero_sum[nonzero_sum == 0.0] = 1.0
    M_tilde = M / nonzero_sum

    solar_thermal = cutout.solar_thermal(**config, matrix=M_tilde.T,
    solar_thermal = cutout.solar_thermal(
        **config,
        matrix=M_tilde.T,
        index=clustered_regions.index,
        dask_kwargs=dict(scheduler=client),
        show_progress=False)
        show_progress=False
    )

    solar_thermal.to_netcdf(snakemake.output.solar_thermal)
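
Editor's note: before the cutout data is aggregated per region, each column of the cell-to-region weighting matrix M is normalised to sum to one; columns with zero weight get a divisor of one so the division stays defined. That guard in isolation, on a toy matrix (invented values):

import numpy as np

M = np.array([[30.0, 0.0],
              [10.0, 0.0],
              [0.0, 0.0]])  # second region has no populated cells

nonzero_sum = M.sum(axis=0, keepdims=True)  # [[40., 0.]]
nonzero_sum[nonzero_sum == 0.0] = 1.0       # avoid division by zero
M_tilde = M / nonzero_sum                   # first column sums to 1, second stays 0
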
@ -1,18 +1,22 @@
"""Build temperature profiles."""
# -*- coding: utf-8 -*-
"""
Build temperature profiles.
"""

import geopandas as gpd
import atlite
import geopandas as gpd
import numpy as np
import pandas as pd
import xarray as xr
import numpy as np
from dask.distributed import Client, LocalCluster

if __name__ == '__main__':
    if 'snakemake' not in globals():
if __name__ == "__main__":
    if "snakemake" not in globals():
        from helper import mock_snakemake

        snakemake = mock_snakemake(
            'build_temperature_profiles',
            simpl='',
            "build_temperature_profiles",
            simpl="",
            clusters=48,
        )

@ -20,34 +24,42 @@ if __name__ == '__main__':
    cluster = LocalCluster(n_workers=nprocesses, threads_per_worker=1)
    client = Client(cluster, asynchronous=True)

    time = pd.date_range(freq='h', **snakemake.config['snapshots'])
    cutout_config = snakemake.config['atlite']['cutout']
    time = pd.date_range(freq="h", **snakemake.config["snapshots"])
    cutout_config = snakemake.config["atlite"]["cutout"]
    cutout = atlite.Cutout(cutout_config).sel(time=time)

    clustered_regions = gpd.read_file(
        snakemake.input.regions_onshore).set_index('name').buffer(0).squeeze()
    clustered_regions = (
        gpd.read_file(snakemake.input.regions_onshore)
        .set_index("name")
        .buffer(0)
        .squeeze()
    )

    I = cutout.indicatormatrix(clustered_regions)

    pop_layout = xr.open_dataarray(snakemake.input.pop_layout)

    stacked_pop = pop_layout.stack(spatial=('y', 'x'))
    stacked_pop = pop_layout.stack(spatial=("y", "x"))
    M = I.T.dot(np.diag(I.dot(stacked_pop)))

    nonzero_sum = M.sum(axis=0, keepdims=True)
    nonzero_sum[nonzero_sum == 0.] = 1.
    nonzero_sum[nonzero_sum == 0.0] = 1.0
    M_tilde = M / nonzero_sum

    temp_air = cutout.temperature(
        matrix=M_tilde.T, index=clustered_regions.index,
        matrix=M_tilde.T,
        index=clustered_regions.index,
        dask_kwargs=dict(scheduler=client),
        show_progress=False)
        show_progress=False,
    )

    temp_air.to_netcdf(snakemake.output.temp_air)

    temp_soil = cutout.soil_temperature(
        matrix=M_tilde.T, index=clustered_regions.index,
        matrix=M_tilde.T,
        index=clustered_regions.index,
        dask_kwargs=dict(scheduler=client),
        show_progress=False)
        show_progress=False,
    )

    temp_soil.to_netcdf(snakemake.output.temp_soil)
@ -1,13 +1,15 @@
"""Build transport demand."""
# -*- coding: utf-8 -*-
"""
Build transport demand.
"""

import pandas as pd
import numpy as np
import pandas as pd
import xarray as xr
from helper import generate_periodic_profiles


def build_nodal_transport_data(fn, pop_layout):

    transport_data = pd.read_csv(fn, index_col=0)

    nodal_transport_data = transport_data.loc[pop_layout.ct].fillna(0.0)
@ -24,12 +26,9 @@ def build_nodal_transport_data(fn, pop_layout):


def build_transport_demand(traffic_fn, airtemp_fn, nodes, nodal_transport_data):

    ## Get overall demand curve for all vehicles

    traffic = pd.read_csv(
        traffic_fn, skiprows=2, usecols=["count"]
    ).squeeze("columns")
    traffic = pd.read_csv(traffic_fn, skiprows=2, usecols=["count"]).squeeze("columns")

    transport_shape = generate_periodic_profiles(
        dt_index=snapshots,
@ -94,9 +93,11 @@ def transport_degree_factor(
    upper_degree_factor=1.6,
):
    """
    Work out how much energy demand in vehicles increases due to heating and cooling.
    There is a deadband where there is no increase.
    Degree factors are % increase in demand compared to no heating/cooling fuel consumption.
    Work out how much energy demand in vehicles increases due to heating and
    cooling.

    There is a deadband where there is no increase. Degree factors are %
    increase in demand compared to no heating/cooling fuel consumption.
    Returns per unit increase in demand for each place and time
    """

@ -137,7 +138,6 @@ def bev_availability_profile(fn, snapshots, nodes, options):


def bev_dsm_profile(snapshots, nodes, options):

    dsm_week = np.zeros((24 * 7,))

    dsm_week[(np.arange(0, 7, 1) * 24 + options["bev_dsm_restriction_time"])] = options[
@ -173,24 +173,23 @@ if __name__ == "__main__":

    options = snakemake.config["sector"]

    snapshots = pd.date_range(freq='h', **snakemake.config["snapshots"], tz="UTC")
    snapshots = pd.date_range(freq="h", **snakemake.config["snapshots"], tz="UTC")

    Nyears = 1

    nodal_transport_data = build_nodal_transport_data(
        snakemake.input.transport_data,
        pop_layout
        snakemake.input.transport_data, pop_layout
    )

    transport_demand = build_transport_demand(
        snakemake.input.traffic_data_KFZ,
        snakemake.input.temp_air_total,
        nodes, nodal_transport_data
        nodes,
        nodal_transport_data,
    )

    avail_profile = bev_availability_profile(
        snakemake.input.traffic_data_Pkw,
        snapshots, nodes, options
        snakemake.input.traffic_data_Pkw, snapshots, nodes, options
    )

    dsm_profile = bev_dsm_profile(snapshots, nodes, options)
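
Editor's note: transport_degree_factor implements a deadband model: within a comfortable temperature band demand is unchanged, and each degree outside it adds a percentage on top. A scalar sketch of that logic (parameter values other than upper_degree_factor=1.6 are assumptions; the real function operates on a DataFrame of temperatures):

def degree_factor(t, deadband_lower=15.0, deadband_upper=20.0,
                  lower_degree_factor=0.5, upper_degree_factor=1.6):
    # per-unit increase in vehicle energy demand at air temperature t (degC)
    if t < deadband_lower:
        return (deadband_lower - t) * lower_degree_factor / 100.0
    if t > deadband_upper:
        return (t - deadband_upper) * upper_degree_factor / 100.0
    return 0.0

degree_factor(10.0)  # 0.025, i.e. 2.5 % extra fuel demand for cabin heating
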
@ -1,45 +1,57 @@
"""Cluster gas network."""
# -*- coding: utf-8 -*-
"""
Cluster gas network.
"""

import logging

logger = logging.getLogger(__name__)

import pandas as pd
import geopandas as gpd

from shapely import wkt
from pypsa.geo import haversine_pts
import pandas as pd
from packaging.version import Version, parse
from pypsa.geo import haversine_pts
from shapely import wkt


def concat_gdf(gdf_list, crs='EPSG:4326'):
    """Concatenate multiple geopandas dataframes with common coordinate reference system (crs)."""
def concat_gdf(gdf_list, crs="EPSG:4326"):
    """
    Concatenate multiple geopandas dataframes with common coordinate reference
    system (crs).
    """
    return gpd.GeoDataFrame(pd.concat(gdf_list), crs=crs)


def load_bus_regions(onshore_path, offshore_path):
    """Load pypsa-eur on- and offshore regions and concat."""
    """
    Load pypsa-eur on- and offshore regions and concat.
    """

    bus_regions_offshore = gpd.read_file(offshore_path)
    bus_regions_onshore = gpd.read_file(onshore_path)
    bus_regions = concat_gdf([bus_regions_offshore, bus_regions_onshore])
    bus_regions = bus_regions.dissolve(by='name', aggfunc='sum')
    bus_regions = bus_regions.dissolve(by="name", aggfunc="sum")

    return bus_regions


def build_clustered_gas_network(df, bus_regions, length_factor=1.25):

    for i in [0, 1]:

        gdf = gpd.GeoDataFrame(geometry=df[f"point{i}"], crs="EPSG:4326")

        kws = dict(op="within") if parse(gpd.__version__) < Version('0.10') else dict(predicate="within")
        kws = (
            dict(op="within")
            if parse(gpd.__version__) < Version("0.10")
            else dict(predicate="within")
        )
        bus_mapping = gpd.sjoin(gdf, bus_regions, how="left", **kws).index_right
        bus_mapping = bus_mapping.groupby(bus_mapping.index).first()

        df[f"bus{i}"] = bus_mapping

        df[f"point{i}"] = df[f"bus{i}"].map(bus_regions.to_crs(3035).centroid.to_crs(4326))
        df[f"point{i}"] = df[f"bus{i}"].map(
            bus_regions.to_crs(3035).centroid.to_crs(4326)
        )

    # drop pipes where not both buses are inside regions
    df = df.loc[~df.bus0.isna() & ~df.bus1.isna()]
@ -49,10 +61,9 @@ def build_clustered_gas_network(df, bus_regions, length_factor=1.25):

    # recalculate lengths as center to center * length factor
    df["length"] = df.apply(
        lambda p: length_factor * haversine_pts(
            [p.point0.x, p.point0.y],
            [p.point1.x, p.point1.y]
        ), axis=1
        lambda p: length_factor
        * haversine_pts([p.point0.x, p.point0.y], [p.point1.x, p.point1.y]),
        axis=1,
    )

    # tidy and create new numbered index
@ -63,7 +74,6 @@ def build_clustered_gas_network(df, bus_regions, length_factor=1.25):


def reindex_pipes(df):

    def make_index(x):
        connector = " <-> " if x.bidirectional else " -> "
        return "gas pipeline " + x.bus0 + connector + x.bus1
@ -77,33 +87,28 @@ def reindex_pipes(df):


def aggregate_parallel_pipes(df):

    strategies = {
        'bus0': 'first',
        'bus1': 'first',
        "p_nom": 'sum',
        "p_nom_diameter": 'sum',
        "bus0": "first",
        "bus1": "first",
        "p_nom": "sum",
        "p_nom_diameter": "sum",
        "max_pressure_bar": "mean",
        "build_year": "mean",
        "diameter_mm": "mean",
        "length": 'mean',
        'name': ' '.join,
        "p_min_pu": 'min',
        "length": "mean",
        "name": " ".join,
        "p_min_pu": "min",
    }
    return df.groupby(df.index).agg(strategies)


if __name__ == "__main__":

    if 'snakemake' not in globals():
    if "snakemake" not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake(
            'cluster_gas_network',
            simpl='',
            clusters='37'
        )

    logging.basicConfig(level=snakemake.config['logging_level'])
        snakemake = mock_snakemake("cluster_gas_network", simpl="", clusters="37")

    logging.basicConfig(level=snakemake.config["logging_level"])

    fn = snakemake.input.cleaned_gas_network
    df = pd.read_csv(fn, index_col=0)
@ -111,8 +116,7 @@ if __name__ == "__main__":
        df[col] = df[col].apply(wkt.loads)

    bus_regions = load_bus_regions(
        snakemake.input.regions_onshore,
        snakemake.input.regions_offshore
        snakemake.input.regions_onshore, snakemake.input.regions_offshore
    )

    gas_network = build_clustered_gas_network(df, bus_regions)
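
Editor's note: reindex_pipes gives parallel pipes of the same corridor an identical index, so aggregate_parallel_pipes can merge them with a single groupby: capacities add up, physical attributes are averaged, and p_min_pu keeps the most restrictive value. A toy run with invented pipe data:

import pandas as pd

pipes = pd.DataFrame(
    {"p_nom": [10.0, 5.0], "diameter_mm": [500.0, 300.0], "p_min_pu": [-1.0, 0.0]},
    index=["gas pipeline A <-> B"] * 2,  # two parallel pipes on one corridor
)
strategies = {"p_nom": "sum", "diameter_mm": "mean", "p_min_pu": "min"}
merged = pipes.groupby(pipes.index).agg(strategies)
# p_nom 15.0, diameter_mm 400.0, p_min_pu -1.0
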
@ -1,5 +1,7 @@
# -*- coding: utf-8 -*-

from shutil import copy

import yaml

files = {
@ -7,24 +9,27 @@ files = {
    "Snakefile": "Snakefile",
    "scripts/solve_network.py": "solve_network.py",
    "scripts/prepare_sector_network.py": "prepare_sector_network.py",
    "../pypsa-eur/config.yaml": "config.pypsaeur.yaml"
    "../pypsa-eur/config.yaml": "config.pypsaeur.yaml",
}

if __name__ == '__main__':
    if 'snakemake' not in globals():
if __name__ == "__main__":
    if "snakemake" not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake('copy_config')

    basepath = snakemake.config['summary_dir'] + '/' + snakemake.config['run'] + '/configs/'
        snakemake = mock_snakemake("copy_config")

    basepath = (
        snakemake.config["summary_dir"] + "/" + snakemake.config["run"] + "/configs/"
    )

    for f, name in files.items():
        copy(f, basepath + name)

    with open(basepath + 'config.snakemake.yaml', 'w') as yaml_file:
    with open(basepath + "config.snakemake.yaml", "w") as yaml_file:
        yaml.dump(
            snakemake.config,
            yaml_file,
            default_flow_style=False,
            allow_unicode=True,
            sort_keys=False
            sort_keys=False,
        )
@@ -1,22 +1,24 @@
# -*- coding: utf-8 -*-
import contextlib
import logging
import os
import sys
import contextlib
import yaml
import pytz
import pandas as pd
from pathlib import Path
from snakemake.utils import update_config
from pypsa.descriptors import Dict
from pypsa.components import components, component_attrs

import logging
import pandas as pd
import pytz
import yaml
from pypsa.components import component_attrs, components
from pypsa.descriptors import Dict
from snakemake.utils import update_config

logger = logging.getLogger(__name__)


# Define a context manager to temporarily mute print statements
@contextlib.contextmanager
def mute_print():
    with open(os.devnull, 'w') as devnull:
    with open(os.devnull, "w") as devnull:
        with contextlib.redirect_stdout(devnull):
            yield

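A usage sketch for the mute_print context manager defined above; noisy_function is a hypothetical stand-in for any chatty callee:

def noisy_function():
    print("this would otherwise clutter the log")

with mute_print():
    noisy_function()  # stdout is redirected to os.devnull inside the block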
@@ -66,15 +68,17 @@ def mock_snakemake(rulename, **wildcards):
    keyword arguments fixing the wildcards. Only necessary if wildcards are
    needed.
    """
    import snakemake as sm
    import os

    import snakemake as sm
    from packaging.version import Version, parse
    from pypsa.descriptors import Dict
    from snakemake.script import Snakemake
    from packaging.version import Version, parse

    script_dir = Path(__file__).parent.resolve()
    assert Path.cwd().resolve() == script_dir, \
        f'mock_snakemake has to be run from the repository scripts directory {script_dir}'
    assert (
        Path.cwd().resolve() == script_dir
    ), f"mock_snakemake has to be run from the repository scripts directory {script_dir}"
    os.chdir(script_dir.parent)
    for p in sm.SNAKEFILE_CHOICES:
        if os.path.exists(p):
@@ -95,9 +99,18 @@ def mock_snakemake(rulename, **wildcards):
            io[i] = os.path.abspath(io[i])

    make_accessable(job.input, job.output, job.log)
    snakemake = Snakemake(job.input, job.output, job.params, job.wildcards,
                          job.threads, job.resources, job.log,
                          job.dag.workflow.config, job.rule.name, None,)
    snakemake = Snakemake(
        job.input,
        job.output,
        job.params,
        job.wildcards,
        job.threads,
        job.resources,
        job.log,
        job.dag.workflow.config,
        job.rule.name,
        None,
    )
    # create log and output dir if not existent
    for path in list(snakemake.log) + list(snakemake.output):
        Path(path).parent.mkdir(parents=True, exist_ok=True)
@@ -105,9 +118,11 @@ def mock_snakemake(rulename, **wildcards):
    os.chdir(script_dir)
    return snakemake


# from pypsa-eur/_helpers.py
def progress_retrieve(url, file):
    import urllib

    from progressbar import ProgressBar

    pbar = ProgressBar(0, 100)
@@ -121,7 +136,8 @@ def progress_retrieve(url, file):
def generate_periodic_profiles(dt_index, nodes, weekly_profile, localize=None):
    """
    Give a 24*7 long list of weekly hourly profiles, generate this for each
    country for the period dt_index, taking account of time zones and summer time.
    country for the period dt_index, taking account of time zones and summer
    time.
    """

    weekly_profile = pd.Series(weekly_profile, range(24 * 7))
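A sketch of how generate_periodic_profiles might be called, assuming (per the docstring) that it tiles a 168-value hour-of-week profile over a timezone-aware hourly index; node names must start with a two-letter country code, and the values here are dummies:

import pandas as pd

dt_index = pd.date_range("2013-01-01", periods=24 * 14, freq="H", tz="UTC")
weekly = list(range(24 * 7))  # dummy hour-of-week values

profiles = generate_periodic_profiles(dt_index, ["DE0 0"], weekly)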
@@ -1,23 +1,20 @@
# -*- coding: utf-8 -*-
import logging

logger = logging.getLogger(__name__)

import sys
import yaml
import pypsa

import numpy as np
import pandas as pd

from prepare_sector_network import prepare_costs
import pypsa
import yaml
from helper import override_component_attrs
from prepare_sector_network import prepare_costs

idx = pd.IndexSlice

opt_name = {
    "Store": "e",
    "Line": "s",
    "Transformer": "s"
}
opt_name = {"Store": "e", "Line": "s", "Transformer": "s"}


def assign_carriers(n):
@@ -31,15 +28,20 @@ def assign_locations(n):
        for i in ifind.unique():
            names = ifind.index[ifind == i]
            if i == -1:
                c.df.loc[names, 'location'] = ""
                c.df.loc[names, "location"] = ""
            else:
                c.df.loc[names, 'location'] = names.str[:i]
                c.df.loc[names, "location"] = names.str[:i]


def calculate_nodal_cfs(n, label, nodal_cfs):
    # Beware this also has extraneous locations for country (e.g. biomass) or continent-wide (e.g. fossil gas/oil) stuff
    for c in n.iterate_components((n.branch_components^{"Line","Transformer"})|n.controllable_one_port_components^{"Load","StorageUnit"}):
        capacities_c = c.df.groupby(["location","carrier"])[opt_name.get(c.name,"p") + "_nom_opt"].sum()
    for c in n.iterate_components(
        (n.branch_components ^ {"Line", "Transformer"})
        | n.controllable_one_port_components ^ {"Load", "StorageUnit"}
    ):
        capacities_c = c.df.groupby(["location", "carrier"])[
            opt_name.get(c.name, "p") + "_nom_opt"
        ].sum()

        if c.name == "Link":
            p = c.pnl.p0.abs().mean()
@@ -55,7 +57,9 @@ def calculate_nodal_cfs(n, label, nodal_cfs):

        cf_c = p_c / capacities_c

        index = pd.MultiIndex.from_tuples([(c.list_name,) + t for t in cf_c.index.to_list()])
        index = pd.MultiIndex.from_tuples(
            [(c.list_name,) + t for t in cf_c.index.to_list()]
        )
        nodal_cfs = nodal_cfs.reindex(index.union(nodal_cfs.index))
        nodal_cfs.loc[index, label] = cf_c.values

@@ -63,9 +67,13 @@ def calculate_nodal_cfs(n, label, nodal_cfs):


def calculate_cfs(n, label, cfs):

    for c in n.iterate_components(n.branch_components|n.controllable_one_port_components^{"Load","StorageUnit"}):
        capacities_c = c.df[opt_name.get(c.name,"p") + "_nom_opt"].groupby(c.df.carrier).sum()
    for c in n.iterate_components(
        n.branch_components
        | n.controllable_one_port_components ^ {"Load", "StorageUnit"}
    ):
        capacities_c = (
            c.df[opt_name.get(c.name, "p") + "_nom_opt"].groupby(c.df.carrier).sum()
        )

        if c.name in ["Link", "Line", "Transformer"]:
            p = c.pnl.p0.abs().mean()
@@ -89,10 +97,16 @@ def calculate_cfs(n, label, cfs):

def calculate_nodal_costs(n, label, nodal_costs):
    # Beware this also has extraneous locations for country (e.g. biomass) or continent-wide (e.g. fossil gas/oil) stuff
    for c in n.iterate_components(n.branch_components|n.controllable_one_port_components^{"Load"}):
        c.df["capital_costs"] = c.df.capital_cost * c.df[opt_name.get(c.name, "p") + "_nom_opt"]
    for c in n.iterate_components(
        n.branch_components | n.controllable_one_port_components ^ {"Load"}
    ):
        c.df["capital_costs"] = (
            c.df.capital_cost * c.df[opt_name.get(c.name, "p") + "_nom_opt"]
        )
        capital_costs = c.df.groupby(["location", "carrier"])["capital_costs"].sum()
        index = pd.MultiIndex.from_tuples([(c.list_name, "capital") + t for t in capital_costs.index.to_list()])
        index = pd.MultiIndex.from_tuples(
            [(c.list_name, "capital") + t for t in capital_costs.index.to_list()]
        )
        nodal_costs = nodal_costs.reindex(index.union(nodal_costs.index))
        nodal_costs.loc[index, label] = capital_costs.values

@@ -102,19 +116,23 @@ def calculate_nodal_costs(n, label, nodal_costs):
            continue
        elif c.name == "StorageUnit":
            p_all = c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0)
            p_all[p_all < 0.] = 0.
            p_all[p_all < 0.0] = 0.0
            p = p_all.sum()
        else:
            p = c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0).sum()

        # correct sequestration cost
        if c.name == "Store":
            items = c.df.index[(c.df.carrier == "co2 stored") & (c.df.marginal_cost <= -100.)]
            c.df.loc[items, "marginal_cost"] = -20.
            items = c.df.index[
                (c.df.carrier == "co2 stored") & (c.df.marginal_cost <= -100.0)
            ]
            c.df.loc[items, "marginal_cost"] = -20.0

        c.df["marginal_costs"] = p * c.df.marginal_cost
        marginal_costs = c.df.groupby(["location", "carrier"])["marginal_costs"].sum()
        index = pd.MultiIndex.from_tuples([(c.list_name, "marginal") + t for t in marginal_costs.index.to_list()])
        index = pd.MultiIndex.from_tuples(
            [(c.list_name, "marginal") + t for t in marginal_costs.index.to_list()]
        )
        nodal_costs = nodal_costs.reindex(index.union(nodal_costs.index))
        nodal_costs.loc[index, label] = marginal_costs.values

@@ -122,8 +140,9 @@ def calculate_nodal_costs(n, label, nodal_costs):


def calculate_costs(n, label, costs):

    for c in n.iterate_components(n.branch_components|n.controllable_one_port_components^{"Load"}):
    for c in n.iterate_components(
        n.branch_components | n.controllable_one_port_components ^ {"Load"}
    ):
        capital_costs = c.df.capital_cost * c.df[opt_name.get(c.name, "p") + "_nom_opt"]
        capital_costs_grouped = capital_costs.groupby(c.df.carrier).sum()

@@ -140,15 +159,17 @@ def calculate_costs(n, label, costs):
            continue
        elif c.name == "StorageUnit":
            p_all = c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0)
            p_all[p_all < 0.] = 0.
            p_all[p_all < 0.0] = 0.0
            p = p_all.sum()
        else:
            p = c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0).sum()

        # correct sequestration cost
        if c.name == "Store":
            items = c.df.index[(c.df.carrier == "co2 stored") & (c.df.marginal_cost <= -100.)]
            c.df.loc[items, "marginal_cost"] = -20.
            items = c.df.index[
                (c.df.carrier == "co2 stored") & (c.df.marginal_cost <= -100.0)
            ]
            c.df.loc[items, "marginal_cost"] = -20.0

        marginal_costs = p * c.df.marginal_cost

@@ -170,30 +191,50 @@ def calculate_costs(n, label, costs):


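The capital-cost pattern in calculate_costs above is just unit cost times optimised capacity, totalled per carrier; a toy stand-in for a component dataframe c.df:

import pandas as pd

df = pd.DataFrame(
    {
        "carrier": ["solar", "solar", "onwind"],
        "capital_cost": [50.0, 50.0, 100.0],  # EUR/MW/a (made up)
        "p_nom_opt": [10.0, 20.0, 5.0],  # MW (made up)
    }
)

capital_costs = df.capital_cost * df.p_nom_opt
print(capital_costs.groupby(df.carrier).sum())  # onwind 500.0, solar 1500.0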
def calculate_cumulative_cost():
    planning_horizons = snakemake.config['scenario']['planning_horizons']
    planning_horizons = snakemake.config["scenario"]["planning_horizons"]

    cumulative_cost = pd.DataFrame(index = df["costs"].sum().index,
                                   columns=pd.Series(data=np.arange(0,0.1, 0.01), name='social discount rate'))
    cumulative_cost = pd.DataFrame(
        index=df["costs"].sum().index,
        columns=pd.Series(data=np.arange(0, 0.1, 0.01), name="social discount rate"),
    )

    # discount cost and express them in money value of planning_horizons[0]
    for r in cumulative_cost.columns:
        cumulative_cost[r]=[df["costs"].sum()[index]/((1+r)**(index[-1]-planning_horizons[0])) for index in cumulative_cost.index]
        cumulative_cost[r] = [
            df["costs"].sum()[index] / ((1 + r) ** (index[-1] - planning_horizons[0]))
            for index in cumulative_cost.index
        ]

    # integrate cost throughout the transition path
    for r in cumulative_cost.columns:
        for cluster in cumulative_cost.index.get_level_values(level=0).unique():
            for lv in cumulative_cost.index.get_level_values(level=1).unique():
                for sector_opts in cumulative_cost.index.get_level_values(level=2).unique():
                    cumulative_cost.loc[(cluster, lv, sector_opts, 'cumulative cost'),r] = np.trapz(cumulative_cost.loc[idx[cluster, lv, sector_opts,planning_horizons],r].values, x=planning_horizons)
                for sector_opts in cumulative_cost.index.get_level_values(
                    level=2
                ).unique():
                    cumulative_cost.loc[
                        (cluster, lv, sector_opts, "cumulative cost"), r
                    ] = np.trapz(
                        cumulative_cost.loc[
                            idx[cluster, lv, sector_opts, planning_horizons], r
                        ].values,
                        x=planning_horizons,
                    )

    return cumulative_cost


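A worked miniature of the two steps in calculate_cumulative_cost above: discount each horizon's system cost back to the first planning year, then integrate along the transition path with the trapezoidal rule. The cost figures are invented:

import numpy as np

planning_horizons = [2030, 2040, 2050]
costs = np.array([100.0, 80.0, 60.0])  # bn EUR/a per horizon (made up)
r = 0.02  # social discount rate

# step 1: express all horizons in money value of planning_horizons[0]
years = np.array(planning_horizons)
discounted = costs / (1 + r) ** (years - planning_horizons[0])

# step 2: integrate over the transition path
print(np.trapz(discounted, x=planning_horizons))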
def calculate_nodal_capacities(n, label, nodal_capacities):
    # Beware this also has extraneous locations for country (e.g. biomass) or continent-wide (e.g. fossil gas/oil) stuff
    for c in n.iterate_components(n.branch_components|n.controllable_one_port_components^{"Load"}):
        nodal_capacities_c = c.df.groupby(["location","carrier"])[opt_name.get(c.name,"p") + "_nom_opt"].sum()
        index = pd.MultiIndex.from_tuples([(c.list_name,) + t for t in nodal_capacities_c.index.to_list()])
    for c in n.iterate_components(
        n.branch_components | n.controllable_one_port_components ^ {"Load"}
    ):
        nodal_capacities_c = c.df.groupby(["location", "carrier"])[
            opt_name.get(c.name, "p") + "_nom_opt"
        ].sum()
        index = pd.MultiIndex.from_tuples(
            [(c.list_name,) + t for t in nodal_capacities_c.index.to_list()]
        )
        nodal_capacities = nodal_capacities.reindex(index.union(nodal_capacities.index))
        nodal_capacities.loc[index, label] = nodal_capacities_c.values

@@ -201,12 +242,17 @@ def calculate_nodal_capacities(n, label, nodal_capacities):


def calculate_capacities(n, label, capacities):

    for c in n.iterate_components(n.branch_components|n.controllable_one_port_components^{"Load"}):
        capacities_grouped = c.df[opt_name.get(c.name,"p") + "_nom_opt"].groupby(c.df.carrier).sum()
    for c in n.iterate_components(
        n.branch_components | n.controllable_one_port_components ^ {"Load"}
    ):
        capacities_grouped = (
            c.df[opt_name.get(c.name, "p") + "_nom_opt"].groupby(c.df.carrier).sum()
        )
        capacities_grouped = pd.concat([capacities_grouped], keys=[c.list_name])

        capacities = capacities.reindex(capacities_grouped.index.union(capacities.index))
        capacities = capacities.reindex(
            capacities_grouped.index.union(capacities.index)
        )

        capacities.loc[capacities_grouped.index, label] = capacities_grouped

@@ -214,8 +260,12 @@ def calculate_capacities(n, label, capacities):


def calculate_curtailment(n, label, curtailment):

    avail = n.generators_t.p_max_pu.multiply(n.generators.p_nom_opt).sum().groupby(n.generators.carrier).sum()
    avail = (
        n.generators_t.p_max_pu.multiply(n.generators.p_nom_opt)
        .sum()
        .groupby(n.generators.carrier)
        .sum()
    )
    used = n.generators_t.p.sum().groupby(n.generators.carrier).sum()

    curtailment[label] = (((avail - used) / avail) * 100).round(3)
@@ -224,18 +274,28 @@ def calculate_curtailment(n, label, curtailment):


def calculate_energy(n, label, energy):

    for c in n.iterate_components(n.one_port_components | n.branch_components):

        if c.name in n.one_port_components:
            c_energies = c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0).sum().multiply(c.df.sign).groupby(c.df.carrier).sum()
            c_energies = (
                c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0)
                .sum()
                .multiply(c.df.sign)
                .groupby(c.df.carrier)
                .sum()
            )
        else:
            c_energies = pd.Series(0., c.df.carrier.unique())
            c_energies = pd.Series(0.0, c.df.carrier.unique())
            for port in [col[3:] for col in c.df.columns if col[:3] == "bus"]:
                totals = c.pnl["p" + port].multiply(n.snapshot_weightings.generators, axis=0).sum()
                totals = (
                    c.pnl["p" + port]
                    .multiply(n.snapshot_weightings.generators, axis=0)
                    .sum()
                )
                # remove values where bus is missing (bug in nomopyomo)
                no_bus = c.df.index[c.df["bus" + port] == ""]
                totals.loc[no_bus] = n.component_attrs[c.name].loc["p" + port, "default"]
                totals.loc[no_bus] = n.component_attrs[c.name].loc[
                    "p" + port, "default"
                ]
                c_energies -= totals.groupby(c.df.carrier).sum()

        c_energies = pd.concat([c_energies], keys=[c.list_name])
@@ -248,40 +308,47 @@ def calculate_energy(n, label, energy):


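The one-port branch of calculate_energy above weights dispatch by the hours each snapshot represents and flips loads negative via the component sign; a toy version with invented numbers:

import pandas as pd

weightings = pd.Series([3.0, 3.0])  # hours per snapshot
p = pd.DataFrame({"load1": [5.0, 7.0], "gen1": [6.0, 6.0]})  # MW
sign = pd.Series({"load1": -1.0, "gen1": 1.0})
carrier = pd.Series({"load1": "electricity", "gen1": "solar"})

energy = p.multiply(weightings, axis=0).sum().multiply(sign).groupby(carrier).sum()
print(energy)  # electricity -36.0 MWh, solar 36.0 MWh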
def calculate_supply(n, label, supply):
    """calculate the max dispatch of each component at the buses aggregated by carrier"""
    """
    Calculate the max dispatch of each component at the buses aggregated by
    carrier.
    """

    bus_carriers = n.buses.carrier.unique()

    for i in bus_carriers:
        bus_map = (n.buses.carrier == i)
        bus_map = n.buses.carrier == i
        bus_map.at[""] = False

        for c in n.iterate_components(n.one_port_components):

            items = c.df.index[c.df.bus.map(bus_map).fillna(False)]

            if len(items) == 0:
                continue

            s = c.pnl.p[items].max().multiply(c.df.loc[items, 'sign']).groupby(c.df.loc[items, 'carrier']).sum()
            s = (
                c.pnl.p[items]
                .max()
                .multiply(c.df.loc[items, "sign"])
                .groupby(c.df.loc[items, "carrier"])
                .sum()
            )
            s = pd.concat([s], keys=[c.list_name])
            s = pd.concat([s], keys=[i])

            supply = supply.reindex(s.index.union(supply.index))
            supply.loc[s.index, label] = s

        for c in n.iterate_components(n.branch_components):

            for end in [col[3:] for col in c.df.columns if col[:3] == "bus"]:

                items = c.df.index[c.df["bus" + end].map(bus_map).fillna(False)]

                if len(items) == 0:
                    continue

                # lots of sign compensation for direction and to do maximums
                s = (-1)**(1-int(end))*((-1)**int(end)*c.pnl["p"+end][items]).max().groupby(c.df.loc[items, 'carrier']).sum()
                s = (-1) ** (1 - int(end)) * (
                    (-1) ** int(end) * c.pnl["p" + end][items]
                ).max().groupby(c.df.loc[items, "carrier"]).sum()
                s.index = s.index + end
                s = pd.concat([s], keys=[c.list_name])
                s = pd.concat([s], keys=[i])
@@ -291,46 +358,56 @@ def calculate_supply(n, label, supply):

    return supply

def calculate_supply_energy(n, label, supply_energy):
    """calculate the total energy supply/consuption of each component at the buses aggregated by carrier"""

def calculate_supply_energy(n, label, supply_energy):
    """
    Calculate the total energy supply/consuption of each component at the buses
    aggregated by carrier.
    """

    bus_carriers = n.buses.carrier.unique()

    for i in bus_carriers:
        bus_map = (n.buses.carrier == i)
        bus_map = n.buses.carrier == i
        bus_map.at[""] = False

        for c in n.iterate_components(n.one_port_components):

            items = c.df.index[c.df.bus.map(bus_map).fillna(False)]

            if len(items) == 0:
                continue

            s = c.pnl.p[items].multiply(n.snapshot_weightings.generators,axis=0).sum().multiply(c.df.loc[items, 'sign']).groupby(c.df.loc[items, 'carrier']).sum()
            s = (
                c.pnl.p[items]
                .multiply(n.snapshot_weightings.generators, axis=0)
                .sum()
                .multiply(c.df.loc[items, "sign"])
                .groupby(c.df.loc[items, "carrier"])
                .sum()
            )
            s = pd.concat([s], keys=[c.list_name])
            s = pd.concat([s], keys=[i])

            supply_energy = supply_energy.reindex(s.index.union(supply_energy.index))
            supply_energy.loc[s.index, label] = s

        for c in n.iterate_components(n.branch_components):

            for end in [col[3:] for col in c.df.columns if col[:3] == "bus"]:

                items = c.df.index[c.df["bus" + str(end)].map(bus_map).fillna(False)]

                if len(items) == 0:
                    continue

                s = (-1)*c.pnl["p"+end][items].multiply(n.snapshot_weightings.generators,axis=0).sum().groupby(c.df.loc[items, 'carrier']).sum()
                s = (-1) * c.pnl["p" + end][items].multiply(
                    n.snapshot_weightings.generators, axis=0
                ).sum().groupby(c.df.loc[items, "carrier"]).sum()
                s.index = s.index + end
                s = pd.concat([s], keys=[c.list_name])
                s = pd.concat([s], keys=[i])

                supply_energy = supply_energy.reindex(s.index.union(supply_energy.index))
                supply_energy = supply_energy.reindex(
                    s.index.union(supply_energy.index)
                )

                supply_energy.loc[s.index, label] = s

@@ -338,21 +415,24 @@ def calculate_supply_energy(n, label, supply_energy):


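The line-volume metrics assembled in calculate_metrics below are plain capacity-length products; a minimal sketch with invented line data:

import pandas as pd

lines = pd.DataFrame({"length": [100.0, 200.0], "s_nom_opt": [2e3, 1e3]})  # km, MW
line_volume_ac = (lines.length * lines.s_nom_opt).sum()
print(line_volume_ac)  # 400000.0 MWkm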
def calculate_metrics(n, label, metrics):

    metrics_list = [
        "line_volume",
        "line_volume_limit",
        "line_volume_AC",
        "line_volume_DC",
        "line_volume_shadow",
        "co2_shadow"
        "co2_shadow",
    ]

    metrics = metrics.reindex(pd.Index(metrics_list).union(metrics.index))

    metrics.at["line_volume_DC",label] = (n.links.length * n.links.p_nom_opt)[n.links.carrier == "DC"].sum()
    metrics.at["line_volume_DC", label] = (n.links.length * n.links.p_nom_opt)[
        n.links.carrier == "DC"
    ].sum()
    metrics.at["line_volume_AC", label] = (n.lines.length * n.lines.s_nom_opt).sum()
    metrics.at["line_volume",label] = metrics.loc[["line_volume_AC", "line_volume_DC"], label].sum()
    metrics.at["line_volume", label] = metrics.loc[
        ["line_volume_AC", "line_volume_DC"], label
    ].sum()

    if hasattr(n, "line_volume_limit"):
        metrics.at["line_volume_limit", label] = n.line_volume_limit
@@ -365,7 +445,6 @@ def calculate_metrics(n, label, metrics):


def calculate_prices(n, label, prices):

    prices = prices.reindex(prices.index.union(n.buses.carrier.unique()))

    # WARNING: this is time-averaged, see weighted_prices for load-weighted average
@@ -377,26 +456,36 @@ def calculate_prices(n, label, prices):
def calculate_weighted_prices(n, label, weighted_prices):
    # Warning: doesn't include storage units as loads

    weighted_prices = weighted_prices.reindex(pd.Index([
    weighted_prices = weighted_prices.reindex(
        pd.Index(
            [
                "electricity",
                "heat",
                "space heat",
                "urban heat",
                "space urban heat",
                "gas",
                "H2"
    ]))
                "H2",
            ]
        )
    )

    link_loads = {"electricity": ["heat pump", "resistive heater", "battery charger", "H2 Electrolysis"],
    link_loads = {
        "electricity": [
            "heat pump",
            "resistive heater",
            "battery charger",
            "H2 Electrolysis",
        ],
        "heat": ["water tanks charger"],
        "urban heat": ["water tanks charger"],
        "space heat": [],
        "space urban heat": [],
        "gas": ["OCGT", "gas boiler", "CHP electric", "CHP heat"],
        "H2": ["Sabatier", "H2 Fuel Cell"]}
        "H2": ["Sabatier", "H2 Fuel Cell"],
    }

    for carrier in link_loads:

        if carrier == "electricity":
            suffix = ""
        elif carrier[:5] == "space":
@@ -410,20 +499,23 @@ def calculate_weighted_prices(n, label, weighted_prices):
            continue

        if carrier in ["H2", "gas"]:
            load = pd.DataFrame(index=n.snapshots, columns=buses, data=0.)
            load = pd.DataFrame(index=n.snapshots, columns=buses, data=0.0)
        elif carrier[:5] == "space":
            load = heat_demand_df[buses.str[:2]].rename(columns=lambda i: str(i)+suffix)
            load = heat_demand_df[buses.str[:2]].rename(
                columns=lambda i: str(i) + suffix
            )
        else:
            load = n.loads_t.p_set[buses]

        for tech in link_loads[carrier]:

            names = n.links.index[n.links.index.to_series().str[-len(tech) :] == tech]

            if names.empty:
                continue

            load += n.links_t.p0[names].groupby(n.links.loc[names, "bus0"],axis=1).sum()
            load += (
                n.links_t.p0[names].groupby(n.links.loc[names, "bus0"], axis=1).sum()
            )

        # Add H2 Store when charging
        # if carrier == "H2":
@@ -431,7 +523,9 @@ def calculate_weighted_prices(n, label, weighted_prices):
        #     stores[stores > 0.] = 0.
        #     load += -stores

        weighted_prices.loc[carrier,label] = (load * n.buses_t.marginal_price[buses]).sum().sum() / load.sum().sum()
        weighted_prices.loc[carrier, label] = (
            load * n.buses_t.marginal_price[buses]
        ).sum().sum() / load.sum().sum()

        # still have no idea what this is for, only for debug reasons.
        if carrier[:5] == "space":
@@ -455,17 +549,20 @@ def calculate_market_values(n, label, market_values):

    market_values = market_values.reindex(market_values.index.union(techs))


    for tech in techs:
        gens = generators[n.generators.loc[generators, "carrier"] == tech]

        dispatch = n.generators_t.p[gens].groupby(n.generators.loc[gens, "bus"], axis=1).sum().reindex(columns=buses, fill_value=0.)
        dispatch = (
            n.generators_t.p[gens]
            .groupby(n.generators.loc[gens, "bus"], axis=1)
            .sum()
            .reindex(columns=buses, fill_value=0.0)
        )

        revenue = dispatch * n.buses_t.marginal_price[buses]

        market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum()


    ## Now do market value of links ##

    for i in ["0", "1"]:
@@ -478,7 +575,12 @@ def calculate_market_values(n, label, market_values):
        for tech in techs:
            links = all_links[n.links.loc[all_links, "carrier"] == tech]

            dispatch = n.links_t["p"+i][links].groupby(n.links.loc[links, "bus"+i], axis=1).sum().reindex(columns=buses, fill_value=0.)
            dispatch = (
                n.links_t["p" + i][links]
                .groupby(n.links.loc[links, "bus" + i], axis=1)
                .sum()
                .reindex(columns=buses, fill_value=0.0)
            )

            revenue = dispatch * n.buses_t.marginal_price[buses]

@@ -488,29 +590,36 @@ def calculate_market_values(n, label, market_values):


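The market-value computation above reduces to total revenue over total dispatch; a toy version with two buses, two snapshots, and invented prices:

import pandas as pd

dispatch = pd.DataFrame({"bus1": [1.0, 3.0], "bus2": [2.0, 0.0]})  # MW
price = pd.DataFrame({"bus1": [50.0, 10.0], "bus2": [30.0, 10.0]})  # EUR/MWh

revenue = dispatch * price
print(revenue.sum().sum() / dispatch.sum().sum())  # 140 / 6 ≈ 23.33 EUR/MWh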
def calculate_price_statistics(n, label, price_statistics):


    price_statistics = price_statistics.reindex(price_statistics.index.union(pd.Index(["zero_hours", "mean", "standard_deviation"])))
    price_statistics = price_statistics.reindex(
        price_statistics.index.union(
            pd.Index(["zero_hours", "mean", "standard_deviation"])
        )
    )

    buses = n.buses.index[n.buses.carrier == "AC"]

    threshold = 0.1  # higher than phoney marginal_cost of wind/solar

    df = pd.DataFrame(data=0., columns=buses, index=n.snapshots)
    df = pd.DataFrame(data=0.0, columns=buses, index=n.snapshots)

    df[n.buses_t.marginal_price[buses] < threshold] = 1.
    df[n.buses_t.marginal_price[buses] < threshold] = 1.0

    price_statistics.at["zero_hours", label] = df.sum().sum() / (df.shape[0] * df.shape[1])
    price_statistics.at["zero_hours", label] = df.sum().sum() / (
        df.shape[0] * df.shape[1]
    )

    price_statistics.at["mean", label] = n.buses_t.marginal_price[buses].unstack().mean()
    price_statistics.at["mean", label] = (
        n.buses_t.marginal_price[buses].unstack().mean()
    )

    price_statistics.at["standard_deviation", label] = n.buses_t.marginal_price[buses].unstack().std()
    price_statistics.at["standard_deviation", label] = (
        n.buses_t.marginal_price[buses].unstack().std()
    )

    return price_statistics


def make_summaries(networks_dict):

    outputs = [
        "nodal_costs",
        "nodal_capacities",
@@ -530,8 +639,7 @@ def make_summaries(networks_dict):
    ]

    columns = pd.MultiIndex.from_tuples(
        networks_dict.keys(),
        names=["cluster", "lv", "opt", "planning_horizon"]
        networks_dict.keys(), names=["cluster", "lv", "opt", "planning_horizon"]
    )

    df = {}
@@ -560,31 +668,35 @@ def to_csv(df):


if __name__ == "__main__":
    if 'snakemake' not in globals():
    if "snakemake" not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake('make_summary')

    logging.basicConfig(level=snakemake.config['logging_level'])
        snakemake = mock_snakemake("make_summary")

    logging.basicConfig(level=snakemake.config["logging_level"])

    networks_dict = {
        (cluster, lv, opt+sector_opt, planning_horizon) :
        snakemake.config['results_dir'] + snakemake.config['run'] + f'/postnetworks/elec_s{simpl}_{cluster}_lv{lv}_{opt}_{sector_opt}_{planning_horizon}.nc' \
        for simpl in snakemake.config['scenario']['simpl'] \
        for cluster in snakemake.config['scenario']['clusters'] \
        for opt in snakemake.config['scenario']['opts'] \
        for sector_opt in snakemake.config['scenario']['sector_opts'] \
        for lv in snakemake.config['scenario']['lv'] \
        for planning_horizon in snakemake.config['scenario']['planning_horizons']
        (cluster, lv, opt + sector_opt, planning_horizon): snakemake.config[
            "results_dir"
        ]
        + snakemake.config["run"]
        + f"/postnetworks/elec_s{simpl}_{cluster}_lv{lv}_{opt}_{sector_opt}_{planning_horizon}.nc"
        for simpl in snakemake.config["scenario"]["simpl"]
        for cluster in snakemake.config["scenario"]["clusters"]
        for opt in snakemake.config["scenario"]["opts"]
        for sector_opt in snakemake.config["scenario"]["sector_opts"]
        for lv in snakemake.config["scenario"]["lv"]
        for planning_horizon in snakemake.config["scenario"]["planning_horizons"]
    }

    Nyears = 1

    costs_db = prepare_costs(
        snakemake.input.costs,
        snakemake.config['costs']['USD2013_to_EUR2013'],
        snakemake.config['costs']['discountrate'],
        snakemake.config["costs"]["USD2013_to_EUR2013"],
        snakemake.config["costs"]["discountrate"],
        Nyears,
        snakemake.config['costs']['lifetime']
        snakemake.config["costs"]["lifetime"],
    )

    df = make_summaries(networks_dict)
@@ -593,8 +705,11 @@ if __name__ == "__main__":

    to_csv(df)

    if snakemake.config["foresight"]=='myopic':
    if snakemake.config["foresight"] == "myopic":
        cumulative_cost = calculate_cumulative_cost()
        cumulative_cost.to_csv(snakemake.config['summary_dir'] + '/' + snakemake.config['run'] + '/csvs/cumulative_cost.csv')


        cumulative_cost.to_csv(
            snakemake.config["summary_dir"]
            + "/"
            + snakemake.config["run"]
            + "/csvs/cumulative_cost.csv"
        )
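The networks_dict comprehension above keys each results file by its scenario tuple, which then becomes the column MultiIndex of the summaries; a sketch with hypothetical paths:

import pandas as pd

networks_dict = {
    ("37", "opt", "Co2L0", "2030"): "results/run/postnetworks/elec_2030.nc",
    ("37", "opt", "Co2L0", "2050"): "results/run/postnetworks/elec_2050.nc",
}

columns = pd.MultiIndex.from_tuples(
    networks_dict.keys(), names=["cluster", "lv", "opt", "planning_horizon"]
)
print(columns)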
@@ -1,20 +1,19 @@
# -*- coding: utf-8 -*-
import logging

logger = logging.getLogger(__name__)

import pypsa

import pandas as pd
import cartopy.crs as ccrs
import geopandas as gpd
import matplotlib.pyplot as plt
import cartopy.crs as ccrs

from pypsa.plot import add_legend_circles, add_legend_patches, add_legend_lines

from make_summary import assign_carriers
from plot_summary import rename_techs, preferred_order
import pandas as pd
import pypsa
from helper import override_component_attrs
from make_summary import assign_carriers
from plot_summary import preferred_order, rename_techs
from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches

plt.style.use(['ggplot', "matplotlibrc"])
plt.style.use(["ggplot", "matplotlibrc"])


def rename_techs_tyndp(tech):
@@ -46,15 +45,20 @@ def assign_location(n):
        ifind = pd.Series(c.df.index.str.find(" ", start=4), c.df.index)
        for i in ifind.value_counts().index:
            # these have already been assigned defaults
            if i == -1: continue
            if i == -1:
                continue
            names = ifind.index[ifind == i]
            c.df.loc[names, 'location'] = names.str[:i]
            c.df.loc[names, "location"] = names.str[:i]


def plot_map(network, components=["links", "stores", "storage_units", "generators"],
             bus_size_factor=1.7e10, transmission=False, with_legend=True):

    tech_colors = snakemake.config['plotting']['tech_colors']
def plot_map(
    network,
    components=["links", "stores", "storage_units", "generators"],
    bus_size_factor=1.7e10,
    transmission=False,
    with_legend=True,
):
    tech_colors = snakemake.config["plotting"]["tech_colors"]

    n = network.copy()
    assign_location(n)
@@ -73,19 +77,24 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator

        attr = "e_nom_opt" if comp == "stores" else "p_nom_opt"

        costs_c = ((df_c.capital_cost * df_c[attr])
                   .groupby([df_c.location, df_c.nice_group]).sum()
                   .unstack().fillna(0.))
        costs_c = (
            (df_c.capital_cost * df_c[attr])
            .groupby([df_c.location, df_c.nice_group])
            .sum()
            .unstack()
            .fillna(0.0)
        )
        costs = pd.concat([costs, costs_c], axis=1)

        logger.debug(f"{comp}, {costs}")

    costs = costs.groupby(costs.columns, axis=1).sum()

    costs.drop(list(costs.columns[(costs == 0.).all()]), axis=1, inplace=True)
    costs.drop(list(costs.columns[(costs == 0.0).all()]), axis=1, inplace=True)

    new_columns = (preferred_order.intersection(costs.columns)
                   .append(costs.columns.difference(preferred_order)))
    new_columns = preferred_order.intersection(costs.columns).append(
        costs.columns.difference(preferred_order)
    )
    costs = costs[new_columns]

    for item in new_columns:
@@ -95,12 +104,16 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator
    costs = costs.stack()  # .sort_index()

    # hack because impossible to drop buses...
    eu_location = snakemake.config["plotting"].get("eu_node_location", dict(x=-5.5, y=46))
    eu_location = snakemake.config["plotting"].get(
        "eu_node_location", dict(x=-5.5, y=46)
    )
    n.buses.loc["EU gas", "x"] = eu_location["x"]
    n.buses.loc["EU gas", "y"] = eu_location["y"]

    n.links.drop(n.links.index[(n.links.carrier != "DC") & (
        n.links.carrier != "B2B")], inplace=True)
    n.links.drop(
        n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")],
        inplace=True,
    )

    # drop non-bus
    to_drop = costs.index.levels[0].symmetric_difference(n.buses.index)
@@ -117,7 +130,7 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator
    carriers = list(carriers.index)

    # PDF has minimum width, so set these to zero
    line_lower_threshold = 500.
    line_lower_threshold = 500.0
    line_upper_threshold = 1e4
    linewidth_factor = 4e3
    ac_color = "rosybrown"
@@ -133,7 +146,7 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator
        line_widths = n.lines.s_nom_opt
        link_widths = n.links.p_nom_opt
        linewidth_factor = 2e3
        line_lower_threshold = 0.
        line_lower_threshold = 0.0
        title = "current grid"
    else:
        line_widths = n.lines.s_nom_opt - n.lines.s_nom_min
@@ -161,7 +174,8 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator
        link_colors=dc_color,
        line_widths=line_widths / linewidth_factor,
        link_widths=link_widths / linewidth_factor,
        ax=ax, **map_opts
        ax=ax,
        **map_opts,
    )

    sizes = [20, 10, 5]
@@ -174,7 +188,7 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator
        labelspacing=0.8,
        frameon=False,
        handletextpad=0,
        title='system cost',
        title="system cost",
    )

    add_legend_circles(
@@ -183,7 +197,7 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator
        labels,
        srid=n.srid,
        patch_kw=dict(facecolor="lightgrey"),
        legend_kw=legend_kw
        legend_kw=legend_kw,
    )

    sizes = [10, 5]
@@ -197,15 +211,11 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator
        frameon=False,
        labelspacing=0.8,
        handletextpad=1,
        title=title
        title=title,
    )

    add_legend_lines(
        ax,
        sizes,
        labels,
        patch_kw=dict(color='lightgrey'),
        legend_kw=legend_kw
        ax, sizes, labels, patch_kw=dict(color="lightgrey"), legend_kw=legend_kw
    )

    legend_kw = dict(
@@ -214,7 +224,6 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator
    )

    if with_legend:

        colors = [tech_colors[c] for c in carriers] + [ac_color, dc_color]
        labels = carriers + ["HVAC line", "HVDC link"]

@@ -225,14 +234,12 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator
        legend_kw=legend_kw,
    )

    fig.savefig(
        snakemake.output.map,
        transparent=True,
        bbox_inches="tight"
    )
    fig.savefig(snakemake.output.map, transparent=True, bbox_inches="tight")


def group_pipes(df, drop_direction=False):
    """Group pipes which connect same buses and return overall capacity.
    """
    Group pipes which connect same buses and return overall capacity.
    """
    if drop_direction:
        positive_order = df.bus0 < df.bus1
@@ -244,16 +251,17 @@ def group_pipes(df, drop_direction=False):
    # there are pipes for each investment period rename to AC buses name for plotting
    df.index = df.apply(
        lambda x: f"H2 pipeline {x.bus0.replace(' H2', '')} -> {x.bus1.replace(' H2', '')}",
        axis=1
        axis=1,
    )
    # group pipe lines connecting the same buses and rename them for plotting
    pipe_capacity = df.groupby(level=0).agg({"p_nom_opt": sum, "bus0": "first", "bus1": "first"})
    pipe_capacity = df.groupby(level=0).agg(
        {"p_nom_opt": sum, "bus0": "first", "bus1": "first"}
    )

    return pipe_capacity


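A sketch of the intended effect of group_pipes above, assuming drop_direction=True normalises bus order before renaming and summing; the toy links are invented:

import pandas as pd

links = pd.DataFrame(
    {
        "bus0": ["DE0 0 H2", "FR0 0 H2"],
        "bus1": ["FR0 0 H2", "DE0 0 H2"],
        "p_nom_opt": [400.0, 600.0],
    },
    index=["H2 pipeline DE-FR-2030", "H2 pipeline FR-DE-2040"],
)

# both directions should collapse onto "H2 pipeline DE0 0 -> FR0 0"
# with p_nom_opt 1000.0
print(group_pipes(links, drop_direction=True))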
def plot_h2_map(network, regions):

    n = network.copy()
    if "H2 pipeline" not in n.links.carrier.unique():
        return
@@ -261,7 +269,11 @@ def plot_h2_map(network, regions):
    assign_location(n)

    h2_storage = n.stores.query("carrier == 'H2'")
    regions["H2"] = h2_storage.rename(index=h2_storage.bus.map(n.buses.location)).e_nom_opt.div(1e6)  # TWh
    regions["H2"] = h2_storage.rename(
        index=h2_storage.bus.map(n.buses.location)
    ).e_nom_opt.div(
        1e6
    )  # TWh
    regions["H2"] = regions["H2"].where(regions["H2"] > 0.1)

    bus_size_factor = 1e5
@@ -276,26 +288,33 @@ def plot_h2_map(network, regions):

    elec = n.links[n.links.carrier.isin(carriers)].index

    bus_sizes = n.links.loc[elec,"p_nom_opt"].groupby([n.links["bus0"], n.links.carrier]).sum() / bus_size_factor
    bus_sizes = (
        n.links.loc[elec, "p_nom_opt"].groupby([n.links["bus0"], n.links.carrier]).sum()
        / bus_size_factor
    )

    # make a fake MultiIndex so that area is correct for legend
    bus_sizes.rename(index=lambda x: x.replace(" H2", ""), level=0, inplace=True)
    # drop all links which are not H2 pipelines
    n.links.drop(n.links.index[~n.links.carrier.str.contains("H2 pipeline")], inplace=True)
    n.links.drop(
        n.links.index[~n.links.carrier.str.contains("H2 pipeline")], inplace=True
    )

    h2_new = n.links[n.links.carrier == "H2 pipeline"]
    h2_retro = n.links[n.links.carrier=='H2 pipeline retrofitted']
    h2_retro = n.links[n.links.carrier == "H2 pipeline retrofitted"]

    if snakemake.config['foresight'] == 'myopic':
    if snakemake.config["foresight"] == "myopic":
        # sum capacitiy for pipelines from different investment periods
        h2_new = group_pipes(h2_new)

        if not h2_retro.empty:
            h2_retro = group_pipes(h2_retro, drop_direction=True).reindex(h2_new.index).fillna(0)

            h2_retro = (
                group_pipes(h2_retro, drop_direction=True)
                .reindex(h2_new.index)
                .fillna(0)
            )

    if not h2_retro.empty:

        positive_order = h2_retro.bus0 < h2_retro.bus1
        h2_retro_p = h2_retro[positive_order]
        swap_buses = {"bus0": "bus1", "bus1": "bus0"}
@@ -305,7 +324,7 @@ def plot_h2_map(network, regions):
        h2_retro["index_orig"] = h2_retro.index
        h2_retro.index = h2_retro.apply(
            lambda x: f"H2 pipeline {x.bus0.replace(' H2', '')} -> {x.bus1.replace(' H2', '')}",
            axis=1
            axis=1,
        )

        retro_w_new_i = h2_retro.index.intersection(h2_new.index)
@@ -319,19 +338,20 @@ def plot_h2_map(network, regions):
        h2_total = pd.concat(to_concat).p_nom_opt.groupby(level=0).sum()

    else:

        h2_total = h2_new.p_nom_opt

    link_widths_total = h2_total / linewidth_factor

    n.links.rename(index=lambda x: x.split("-2")[0], inplace=True)
    n.links = n.links.groupby(level=0).first()
    link_widths_total = link_widths_total.reindex(n.links.index).fillna(0.)
    link_widths_total[n.links.p_nom_opt < line_lower_threshold] = 0.
    link_widths_total = link_widths_total.reindex(n.links.index).fillna(0.0)
    link_widths_total[n.links.p_nom_opt < line_lower_threshold] = 0.0

    retro = n.links.p_nom_opt.where(n.links.carrier=='H2 pipeline retrofitted', other=0.)
    retro = n.links.p_nom_opt.where(
        n.links.carrier == "H2 pipeline retrofitted", other=0.0
    )
    link_widths_retro = retro / linewidth_factor
    link_widths_retro[n.links.p_nom_opt < line_lower_threshold] = 0.
    link_widths_retro[n.links.p_nom_opt < line_lower_threshold] = 0.0

    n.links.bus0 = n.links.bus0.str.replace(" H2", "")
    n.links.bus1 = n.links.bus1.str.replace(" H2", "")
@@ -339,18 +359,12 @@ def plot_h2_map(network, regions):
    proj = ccrs.EqualEarth()
    regions = regions.to_crs(proj.proj4_init)

    fig, ax = plt.subplots(
        figsize=(7, 6),
        subplot_kw={"projection": proj}
    )
    fig, ax = plt.subplots(figsize=(7, 6), subplot_kw={"projection": proj})

    color_h2_pipe = '#b3f3f4'
    color_retrofit = '#499a9c'
    color_h2_pipe = "#b3f3f4"
    color_retrofit = "#499a9c"

    bus_colors = {
        "H2 Electrolysis": "#ff29d9",
        "H2 Fuel Cell": '#805394'
    }
    bus_colors = {"H2 Electrolysis": "#ff29d9", "H2 Fuel Cell": "#805394"}

    n.plot(
        geomap=True,
@@ -360,7 +374,7 @@ def plot_h2_map(network, regions):
        link_widths=link_widths_total,
        branch_components=["Link"],
        ax=ax,
        **map_opts
        **map_opts,
    )

    n.plot(
@@ -371,13 +385,13 @@ def plot_h2_map(network, regions):
        branch_components=["Link"],
        ax=ax,
        color_geomap=False,
        boundaries=map_opts["boundaries"]
        boundaries=map_opts["boundaries"],
    )

    regions.plot(
        ax=ax,
        column="H2",
        cmap='Blues',
        cmap="Blues",
        linewidths=0,
        legend=True,
        vmax=6,
@@ -401,10 +415,13 @@ def plot_h2_map(network, regions):
        frameon=False,
    )

    add_legend_circles(ax, sizes, labels,
    add_legend_circles(
        ax,
        sizes,
        labels,
        srid=n.srid,
        patch_kw=dict(facecolor='lightgrey'),
        legend_kw=legend_kw
        patch_kw=dict(facecolor="lightgrey"),
        legend_kw=legend_kw,
    )

    sizes = [30, 10]
@@ -424,7 +441,7 @@ def plot_h2_map(network, regions):
        ax,
        sizes,
        labels,
        patch_kw=dict(color='lightgrey'),
        patch_kw=dict(color="lightgrey"),
        legend_kw=legend_kw,
    )

@@ -438,23 +455,16 @@ def plot_h2_map(network, regions):
        frameon=False,
    )

    add_legend_patches(
        ax,
        colors,
        labels,
        legend_kw=legend_kw
    )
    add_legend_patches(ax, colors, labels, legend_kw=legend_kw)

    ax.set_facecolor("white")

    fig.savefig(
        snakemake.output.map.replace("-costs-all","-h2_network"),
        bbox_inches="tight"
        snakemake.output.map.replace("-costs-all", "-h2_network"), bbox_inches="tight"
    )


def plot_ch4_map(network):

    n = network.copy()

    if "gas pipeline" not in n.links.carrier.unique():
@@ -471,21 +481,53 @@ def plot_ch4_map(network):
    n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True)

    fossil_gas_i = n.generators[n.generators.carrier == "gas"].index
    fossil_gas = n.generators_t.p.loc[:,fossil_gas_i].mul(n.snapshot_weightings.generators, axis=0).sum().groupby(n.generators.loc[fossil_gas_i,"bus"]).sum() / bus_size_factor
    fossil_gas = (
        n.generators_t.p.loc[:, fossil_gas_i]
        .mul(n.snapshot_weightings.generators, axis=0)
        .sum()
        .groupby(n.generators.loc[fossil_gas_i, "bus"])
        .sum()
        / bus_size_factor
    )
    fossil_gas.rename(index=lambda x: x.replace(" gas", ""), inplace=True)
    fossil_gas = fossil_gas.reindex(n.buses.index).fillna(0)
    # make a fake MultiIndex so that area is correct for legend
    fossil_gas.index = pd.MultiIndex.from_product([fossil_gas.index, ["fossil gas"]])

    methanation_i = n.links[n.links.carrier.isin(["helmeth", "Sabatier"])].index
    methanation = abs(n.links_t.p1.loc[:,methanation_i].mul(n.snapshot_weightings.generators, axis=0)).sum().groupby(n.links.loc[methanation_i,"bus1"]).sum() / bus_size_factor
    methanation = methanation.groupby(methanation.index).sum().rename(index=lambda x: x.replace(" gas", ""))
    methanation = (
        abs(
            n.links_t.p1.loc[:, methanation_i].mul(
                n.snapshot_weightings.generators, axis=0
            )
        )
        .sum()
        .groupby(n.links.loc[methanation_i, "bus1"])
        .sum()
        / bus_size_factor
    )
    methanation = (
        methanation.groupby(methanation.index)
        .sum()
        .rename(index=lambda x: x.replace(" gas", ""))
    )
    # make a fake MultiIndex so that area is correct for legend
    methanation.index = pd.MultiIndex.from_product([methanation.index, ["methanation"]])

    biogas_i = n.stores[n.stores.carrier == "biogas"].index
    biogas = n.stores_t.p.loc[:,biogas_i].mul(n.snapshot_weightings.generators, axis=0).sum().groupby(n.stores.loc[biogas_i,"bus"]).sum() / bus_size_factor
    biogas = biogas.groupby(biogas.index).sum().rename(index=lambda x: x.replace(" biogas", ""))
    biogas = (
        n.stores_t.p.loc[:, biogas_i]
        .mul(n.snapshot_weightings.generators, axis=0)
        .sum()
        .groupby(n.stores.loc[biogas_i, "bus"])
        .sum()
        / bus_size_factor
    )
    biogas = (
        biogas.groupby(biogas.index)
        .sum()
        .rename(index=lambda x: x.replace(" biogas", ""))
    )
    # make a fake MultiIndex so that area is correct for legend
    biogas.index = pd.MultiIndex.from_product([biogas.index, ["biogas"]])

@@ -496,22 +538,22 @@ def plot_ch4_map(network):
    n.links.drop(to_remove, inplace=True)

    link_widths_rem = n.links.p_nom_opt / linewidth_factor
    link_widths_rem[n.links.p_nom_opt < line_lower_threshold] = 0.
    link_widths_rem[n.links.p_nom_opt < line_lower_threshold] = 0.0

    link_widths_orig = n.links.p_nom / linewidth_factor
    link_widths_orig[n.links.p_nom < line_lower_threshold] = 0.
    link_widths_orig[n.links.p_nom < line_lower_threshold] = 0.0

    max_usage = n.links_t.p0.abs().max(axis=0)
    link_widths_used = max_usage / linewidth_factor
    link_widths_used[max_usage < line_lower_threshold] = 0.
    link_widths_used[max_usage < line_lower_threshold] = 0.0

    tech_colors = snakemake.config['plotting']['tech_colors']
    tech_colors = snakemake.config["plotting"]["tech_colors"]

    pipe_colors = {
        "gas pipeline": "#f08080",
        "gas pipeline new": "#c46868",
        "gas pipeline (in 2020)": 'lightgrey',
        "gas pipeline (available)": '#e8d1d1',
        "gas pipeline (in 2020)": "lightgrey",
        "gas pipeline (available)": "#e8d1d1",
    }

    link_color_used = n.links.carrier.map(pipe_colors)
@@ -522,7 +564,7 @@ def plot_ch4_map(network):
    bus_colors = {
        "fossil gas": tech_colors["fossil gas"],
        "methanation": tech_colors["methanation"],
        "biogas": "seagreen"
        "biogas": "seagreen",
    }

    fig, ax = plt.subplots(figsize=(7, 6), subplot_kw={"projection": ccrs.EqualEarth()})
@@ -530,31 +572,31 @@ def plot_ch4_map(network):
    n.plot(
        bus_sizes=bus_sizes,
        bus_colors=bus_colors,
        link_colors=pipe_colors['gas pipeline (in 2020)'],
        link_colors=pipe_colors["gas pipeline (in 2020)"],
        link_widths=link_widths_orig,
        branch_components=["Link"],
        ax=ax,
        **map_opts
        **map_opts,
    )

    n.plot(
        ax=ax,
        bus_sizes=0.,
        link_colors=pipe_colors['gas pipeline (available)'],
        bus_sizes=0.0,
        link_colors=pipe_colors["gas pipeline (available)"],
        link_widths=link_widths_rem,
        branch_components=["Link"],
        color_geomap=False,
        boundaries=map_opts["boundaries"]
        boundaries=map_opts["boundaries"],
    )

    n.plot(
        ax=ax,
        bus_sizes=0.,
        bus_sizes=0.0,
        link_colors=link_color_used,
        link_widths=link_widths_used,
        branch_components=["Link"],
        color_geomap=False,
        boundaries=map_opts["boundaries"]
        boundaries=map_opts["boundaries"],
    )

    sizes = [100, 10]
@@ -567,7 +609,7 @@ def plot_ch4_map(network):
        labelspacing=0.8,
        frameon=False,
        handletextpad=1,
        title='gas sources',
        title="gas sources",
    )

    add_legend_circles(
@@ -575,7 +617,7 @@ def plot_ch4_map(network):
        sizes,
        labels,
        srid=n.srid,
        patch_kw=dict(facecolor='lightgrey'),
        patch_kw=dict(facecolor="lightgrey"),
        legend_kw=legend_kw,
    )

@@ -590,14 +632,14 @@ def plot_ch4_map(network):
        frameon=False,
        labelspacing=0.8,
        handletextpad=1,
        title='gas pipeline'
        title="gas pipeline",
    )

    add_legend_lines(
        ax,
        sizes,
        labels,
        patch_kw=dict(color='lightgrey'),
        patch_kw=dict(color="lightgrey"),
        legend_kw=legend_kw,
    )

@@ -611,7 +653,7 @@ def plot_ch4_map(network):
    # )

    legend_kw = dict(
        loc='upper left',
        loc="upper left",
        bbox_to_anchor=(0, 1.24),
        ncol=2,
        frameon=False,
@@ -625,26 +667,21 @@ def plot_ch4_map(network):
    )

    fig.savefig(
        snakemake.output.map.replace("-costs-all","-ch4_network"),
        bbox_inches="tight"
        snakemake.output.map.replace("-costs-all", "-ch4_network"), bbox_inches="tight"
    )


def plot_map_without(network):

    n = network.copy()
    assign_location(n)

    # Drop non-electric buses so they don't clutter the plot
    n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True)

    fig, ax = plt.subplots(
        figsize=(7, 6),
        subplot_kw={"projection": ccrs.EqualEarth()}
    )
    fig, ax = plt.subplots(figsize=(7, 6), subplot_kw={"projection": ccrs.EqualEarth()})

    # PDF has minimum width, so set these to zero
    line_lower_threshold = 200.
    line_lower_threshold = 200.0
    line_upper_threshold = 1e4
    linewidth_factor = 3e3
    ac_color = "rosybrown"
@@ -652,7 +689,9 @@ def plot_map_without(network):

    # hack because impossible to drop buses...
    if "EU gas" in n.buses.index:
        eu_location = snakemake.config["plotting"].get("eu_node_location", dict(x=-5.5, y=46))
        eu_location = snakemake.config["plotting"].get(
            "eu_node_location", dict(x=-5.5, y=46)
        )
        n.buses.loc["EU gas", "x"] = eu_location["x"]
        n.buses.loc["EU gas", "y"] = eu_location["y"]

@@ -678,32 +717,34 @@ def plot_map_without(network):
        link_colors=dc_color,
        line_widths=line_widths / linewidth_factor,
        link_widths=link_widths / linewidth_factor,
        ax=ax, **map_opts
        ax=ax,
        **map_opts,
    )

    handles = []
    labels = []

    for s in (10, 5):
        handles.append(plt.Line2D([0], [0], color=ac_color,
                                  linewidth=s * 1e3 / linewidth_factor))
        handles.append(
            plt.Line2D([0], [0], color=ac_color, linewidth=s * 1e3 / linewidth_factor)
        )
        labels.append(f"{s} GW")
    l1_1 = ax.legend(handles, labels,
                     loc="upper left", bbox_to_anchor=(0.05, 1.01),
    l1_1 = ax.legend(
        handles,
        labels,
        loc="upper left",
        bbox_to_anchor=(0.05, 1.01),
        frameon=False,
        labelspacing=0.8, handletextpad=1.5,
        title='Today\'s transmission')
        labelspacing=0.8,
        handletextpad=1.5,
        title="Today's transmission",
    )
    ax.add_artist(l1_1)

    fig.savefig(
        snakemake.output.today,
        transparent=True,
        bbox_inches="tight"
    )
    fig.savefig(snakemake.output.today, transparent=True, bbox_inches="tight")


def plot_series(network, carrier="AC", name="test"):
|
||||
|
||||
n = network.copy()
|
||||
assign_location(n)
|
||||
assign_carriers(n)
|
||||
@ -712,28 +753,41 @@ def plot_series(network, carrier="AC", name="test"):
|
||||
|
||||
supply = pd.DataFrame(index=n.snapshots)
|
||||
for c in n.iterate_components(n.branch_components):
|
||||
n_port = 4 if c.name=='Link' else 2
|
||||
n_port = 4 if c.name == "Link" else 2
|
||||
for i in range(n_port):
|
||||
supply = pd.concat((supply,
|
||||
(-1) * c.pnl["p" + str(i)].loc[:,
|
||||
c.df.index[c.df["bus" + str(i)].isin(buses)]].groupby(c.df.carrier,
|
||||
axis=1).sum()),
|
||||
axis=1)
|
||||
supply = pd.concat(
|
||||
(
|
||||
supply,
|
||||
(-1)
|
||||
* c.pnl["p" + str(i)]
|
||||
.loc[:, c.df.index[c.df["bus" + str(i)].isin(buses)]]
|
||||
.groupby(c.df.carrier, axis=1)
|
||||
.sum(),
|
||||
),
|
||||
axis=1,
|
||||
)
|
||||
|
||||
for c in n.iterate_components(n.one_port_components):
|
||||
comps = c.df.index[c.df.bus.isin(buses)]
|
||||
supply = pd.concat((supply, ((c.pnl["p"].loc[:, comps]).multiply(
|
||||
c.df.loc[comps, "sign"])).groupby(c.df.carrier, axis=1).sum()), axis=1)
|
||||
supply = pd.concat(
|
||||
(
|
||||
supply,
|
||||
((c.pnl["p"].loc[:, comps]).multiply(c.df.loc[comps, "sign"]))
|
||||
.groupby(c.df.carrier, axis=1)
|
||||
.sum(),
|
||||
),
|
||||
axis=1,
|
||||
)
|
||||
|
||||
supply = supply.groupby(rename_techs_tyndp, axis=1).sum()
|
||||
|
||||
both = supply.columns[(supply < 0.).any() & (supply > 0.).any()]
|
||||
both = supply.columns[(supply < 0.0).any() & (supply > 0.0).any()]
|
||||
|
||||
positive_supply = supply[both]
|
||||
negative_supply = supply[both]
|
||||
|
||||
positive_supply[positive_supply < 0.] = 0.
|
||||
negative_supply[negative_supply > 0.] = 0.
|
||||
positive_supply[positive_supply < 0.0] = 0.0
|
||||
negative_supply[negative_supply > 0.0] = 0.0
|
||||
|
||||
supply[both] = positive_supply
|
||||
|
||||
@ -761,14 +815,16 @@ def plot_series(network, carrier="AC", name="test"):
|
||||
|
||||
supply = supply / 1e3
|
||||
|
||||
supply.rename(columns={"electricity": "electric demand",
|
||||
"heat": "heat demand"},
|
||||
inplace=True)
|
||||
supply.rename(
|
||||
columns={"electricity": "electric demand", "heat": "heat demand"}, inplace=True
|
||||
)
|
||||
supply.columns = supply.columns.str.replace("residential ", "")
|
||||
supply.columns = supply.columns.str.replace("services ", "")
|
||||
supply.columns = supply.columns.str.replace("urban decentral ", "decentral ")
|
||||
|
||||
preferred_order = pd.Index(["electric demand",
|
||||
preferred_order = pd.Index(
|
||||
[
|
||||
"electric demand",
|
||||
"transmission lines",
|
||||
"hydroelectricity",
|
||||
"hydro reservoir",
|
||||
@ -790,19 +846,30 @@ def plot_series(network, carrier="AC", name="test"):
|
||||
"methanation",
|
||||
"hydrogen storage",
|
||||
"battery storage",
|
||||
"hot water storage"])
|
||||
"hot water storage",
|
||||
]
|
||||
)
|
||||
|
||||
new_columns = (preferred_order.intersection(supply.columns)
|
||||
.append(supply.columns.difference(preferred_order)))
|
||||
new_columns = preferred_order.intersection(supply.columns).append(
|
||||
supply.columns.difference(preferred_order)
|
||||
)
|
||||
|
||||
supply = supply.groupby(supply.columns, axis=1).sum()
|
||||
fig, ax = plt.subplots()
|
||||
fig.set_size_inches((8, 5))
|
||||
|
||||
(supply.loc[start:stop, new_columns]
|
||||
.plot(ax=ax, kind="area", stacked=True, linewidth=0.,
|
||||
color=[snakemake.config['plotting']['tech_colors'][i.replace(suffix, "")]
|
||||
for i in new_columns]))
|
||||
(
|
||||
supply.loc[start:stop, new_columns].plot(
|
||||
ax=ax,
|
||||
kind="area",
|
||||
stacked=True,
|
||||
linewidth=0.0,
|
||||
color=[
|
||||
snakemake.config["plotting"]["tech_colors"][i.replace(suffix, "")]
|
||||
for i in new_columns
|
||||
],
|
||||
)
|
||||
)
|
||||
|
||||
handles, labels = ax.get_legend_handles_labels()
|
||||
|
||||
@ -824,39 +891,48 @@ def plot_series(network, carrier="AC", name="test"):
|
||||
ax.set_ylabel("Power [GW]")
|
||||
fig.tight_layout()
|
||||
|
||||
fig.savefig("{}{}/maps/series-{}-{}-{}-{}-{}.pdf".format(
|
||||
snakemake.config['results_dir'], snakemake.config['run'],
|
||||
fig.savefig(
|
||||
"{}{}/maps/series-{}-{}-{}-{}-{}.pdf".format(
|
||||
snakemake.config["results_dir"],
|
||||
snakemake.config["run"],
|
||||
snakemake.wildcards["lv"],
|
||||
carrier, start, stop, name),
|
||||
transparent=True)
|
||||
carrier,
|
||||
start,
|
||||
stop,
|
||||
name,
|
||||
),
|
||||
transparent=True,
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
if 'snakemake' not in globals():
|
||||
if "snakemake" not in globals():
|
||||
from helper import mock_snakemake
|
||||
|
||||
snakemake = mock_snakemake(
|
||||
'plot_network',
|
||||
simpl='',
|
||||
"plot_network",
|
||||
simpl="",
|
||||
clusters="181",
|
||||
lv='opt',
|
||||
opts='',
|
||||
sector_opts='Co2L0-730H-T-H-B-I-A-solar+p3-linemaxext10',
|
||||
lv="opt",
|
||||
opts="",
|
||||
sector_opts="Co2L0-730H-T-H-B-I-A-solar+p3-linemaxext10",
|
||||
planning_horizons="2050",
|
||||
)
|
||||
|
||||
logging.basicConfig(level=snakemake.config['logging_level'])
|
||||
logging.basicConfig(level=snakemake.config["logging_level"])
|
||||
|
||||
overrides = override_component_attrs(snakemake.input.overrides)
|
||||
n = pypsa.Network(snakemake.input.network, override_component_attrs=overrides)
|
||||
|
||||
regions = gpd.read_file(snakemake.input.regions).set_index("name")
|
||||
|
||||
map_opts = snakemake.config['plotting']['map']
|
||||
map_opts = snakemake.config["plotting"]["map"]
|
||||
|
||||
plot_map(n,
|
||||
plot_map(
|
||||
n,
|
||||
components=["generators", "links", "stores", "storage_units"],
|
||||
bus_size_factor=2e10,
|
||||
transmission=False
|
||||
transmission=False,
|
||||
)
|
||||
|
||||
plot_h2_map(n, regions)
|
||||
|
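Aside: the plot_series hunks above keep re-wrapping one pandas idiom, a column-wise sum of power time series per carrier. A minimal sketch with hypothetical generator data (not part of the commit); newer pandas releases deprecate axis=1 in groupby, and grouping the transpose is the equivalent:

import pandas as pd

p = pd.DataFrame(
    {"gen1": [1.0, 2.0], "gen2": [3.0, 4.0], "gen3": [5.0, 6.0]},
    index=pd.date_range("2013-01-01", periods=2, freq="H"),
)
carrier = pd.Series({"gen1": "solar", "gen2": "solar", "gen3": "onwind"})

# column-wise sum per carrier, as in c.pnl["p0"].groupby(c.df.carrier, axis=1).sum()
by_carrier = p.groupby(carrier, axis=1).sum()
# equivalent without the deprecated axis=1: p.T.groupby(carrier).sum().T
assert list(by_carrier.columns) == ["onwind", "solar"]
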
@@ -1,25 +1,27 @@
# -*- coding: utf-8 -*-
import logging

logger = logging.getLogger(__name__)

import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

import matplotlib.pyplot as plt
plt.style.use('ggplot')
plt.style.use("ggplot")

from prepare_sector_network import co2_emissions_year
from helper import update_config_with_sector_opts


# consolidate and rename
def rename_techs(label):

prefix_to_remove = [
"residential ",
"services ",
"urban ",
"rural ",
"central ",
"decentral "
"decentral ",
]

rename_if_contains = [
@@ -30,7 +32,7 @@ def rename_techs(label):
"air heat pump",
"ground heat pump",
"resistive heater",
"Fischer-Tropsch"
"Fischer-Tropsch",
]

rename_if_contains_dict = {
@@ -58,7 +60,7 @@ def rename_techs(label):
"co2 stored": "CO2 sequestration",
"AC": "transmission lines",
"DC": "transmission lines",
"B2B": "transmission lines"
"B2B": "transmission lines",
}

for ptr in prefix_to_remove:
@@ -79,7 +81,8 @@ def rename_techs(label):
return label


preferred_order = pd.Index([
preferred_order = pd.Index(
[
"transmission lines",
"hydroelectricity",
"hydro reservoir",
@@ -115,16 +118,14 @@ preferred_order = pd.Index([
"power-to-liquid",
"battery storage",
"hot water storage",
"CO2 sequestration"
])
"CO2 sequestration",
]
)


def plot_costs():


cost_df = pd.read_csv(
snakemake.input.costs,
index_col=list(range(3)),
header=list(range(n_header))
snakemake.input.costs, index_col=list(range(3)), header=list(range(n_header))
)

df = cost_df.groupby(cost_df.index.get_level_values(2)).sum()
@@ -134,16 +135,20 @@ def plot_costs():

df = df.groupby(df.index.map(rename_techs)).sum()

to_drop = df.index[df.max(axis=1) < snakemake.config['plotting']['costs_threshold']]
to_drop = df.index[df.max(axis=1) < snakemake.config["plotting"]["costs_threshold"]]

logger.info(f"Dropping technology with costs below {snakemake.config['plotting']['costs_threshold']} EUR billion per year")
logger.info(
f"Dropping technology with costs below {snakemake.config['plotting']['costs_threshold']} EUR billion per year"
)
logger.debug(df.loc[to_drop])

df = df.drop(to_drop)

logger.info(f"Total system cost of {round(df.sum()[0])} EUR billion per year")

new_index = preferred_order.intersection(df.index).append(df.index.difference(preferred_order))
new_index = preferred_order.intersection(df.index).append(
df.index.difference(preferred_order)
)

new_columns = df.sum().sort_values().index

@@ -153,7 +158,7 @@ def plot_costs():
kind="bar",
ax=ax,
stacked=True,
color=[snakemake.config['plotting']['tech_colors'][i] for i in new_index]
color=[snakemake.config["plotting"]["tech_colors"][i] for i in new_index],
)

handles, labels = ax.get_legend_handles_labels()
@@ -161,25 +166,24 @@ def plot_costs():
handles.reverse()
labels.reverse()

ax.set_ylim([0,snakemake.config['plotting']['costs_max']])
ax.set_ylim([0, snakemake.config["plotting"]["costs_max"]])

ax.set_ylabel("System Cost [EUR billion per year]")

ax.set_xlabel("")

ax.grid(axis='x')
ax.grid(axis="x")

ax.legend(handles, labels, ncol=1, loc="upper left", bbox_to_anchor=[1,1], frameon=False)
ax.legend(
handles, labels, ncol=1, loc="upper left", bbox_to_anchor=[1, 1], frameon=False
)

fig.savefig(snakemake.output.costs, bbox_inches='tight')
fig.savefig(snakemake.output.costs, bbox_inches="tight")


def plot_energy():

energy_df = pd.read_csv(
snakemake.input.energy,
index_col=list(range(2)),
header=list(range(n_header))
snakemake.input.energy, index_col=list(range(2)), header=list(range(n_header))
)

df = energy_df.groupby(energy_df.index.get_level_values(1)).sum()
@@ -189,16 +193,22 @@ def plot_energy():

df = df.groupby(df.index.map(rename_techs)).sum()

to_drop = df.index[df.abs().max(axis=1) < snakemake.config['plotting']['energy_threshold']]
to_drop = df.index[
df.abs().max(axis=1) < snakemake.config["plotting"]["energy_threshold"]
]

logger.info(f"Dropping all technology with energy consumption or production below {snakemake.config['plotting']['energy_threshold']} TWh/a")
logger.info(
f"Dropping all technology with energy consumption or production below {snakemake.config['plotting']['energy_threshold']} TWh/a"
)
logger.debug(df.loc[to_drop])

df = df.drop(to_drop)

logger.info(f"Total energy of {round(df.sum()[0])} TWh/a")

new_index = preferred_order.intersection(df.index).append(df.index.difference(preferred_order))
new_index = preferred_order.intersection(df.index).append(
df.index.difference(preferred_order)
)

new_columns = df.columns.sort_values()

@@ -210,7 +220,7 @@ def plot_energy():
kind="bar",
ax=ax,
stacked=True,
color=[snakemake.config['plotting']['tech_colors'][i] for i in new_index]
color=[snakemake.config["plotting"]["tech_colors"][i] for i in new_index],
)

handles, labels = ax.get_legend_handles_labels()
@@ -218,7 +228,12 @@ def plot_energy():
handles.reverse()
labels.reverse()

ax.set_ylim([snakemake.config['plotting']['energy_min'], snakemake.config['plotting']['energy_max']])
ax.set_ylim(
[
snakemake.config["plotting"]["energy_min"],
snakemake.config["plotting"]["energy_max"],
]
)

ax.set_ylabel("Energy [TWh/a]")

@@ -226,29 +241,28 @@ def plot_energy():

ax.grid(axis="x")

ax.legend(handles, labels, ncol=1, loc="upper left", bbox_to_anchor=[1, 1], frameon=False)

fig.savefig(snakemake.output.energy, bbox_inches='tight')
ax.legend(
handles, labels, ncol=1, loc="upper left", bbox_to_anchor=[1, 1], frameon=False
)

fig.savefig(snakemake.output.energy, bbox_inches="tight")


def plot_balances():

co2_carriers = ["co2", "co2 stored", "process emissions"]

balances_df = pd.read_csv(
snakemake.input.balances,
index_col=list(range(3)),
header=list(range(n_header))
snakemake.input.balances, index_col=list(range(3)), header=list(range(n_header))
)

balances = {i.replace(" ", "_"): [i] for i in balances_df.index.levels[0]}
balances["energy"] = [i for i in balances_df.index.levels[0] if i not in co2_carriers]
balances["energy"] = [
i for i in balances_df.index.levels[0] if i not in co2_carriers
]

fig, ax = plt.subplots(figsize=(12, 8))

for k, v in balances.items():

df = balances_df.loc[v]
df = df.groupby(df.index.get_level_values(2)).sum()

@@ -256,18 +270,27 @@ def plot_balances():
df = df / 1e6

# remove trailing link ports
df.index = [i[:-1] if ((i not in ["co2", "NH3"]) and (i[-1:] in ["0","1","2","3"])) else i for i in df.index]
df.index = [
i[:-1]
if ((i not in ["co2", "NH3"]) and (i[-1:] in ["0", "1", "2", "3"]))
else i
for i in df.index
]

df = df.groupby(df.index.map(rename_techs)).sum()

to_drop = df.index[df.abs().max(axis=1) < snakemake.config['plotting']['energy_threshold']/10]
to_drop = df.index[
df.abs().max(axis=1) < snakemake.config["plotting"]["energy_threshold"] / 10
]

if v[0] in co2_carriers:
units = "MtCO2/a"
else:
units = "TWh/a"

logger.debug(f"Dropping technology energy balance smaller than {snakemake.config['plotting']['energy_threshold']/10} {units}")
logger.debug(
f"Dropping technology energy balance smaller than {snakemake.config['plotting']['energy_threshold']/10} {units}"
)
logger.debug(df.loc[to_drop])

df = df.drop(to_drop)
@@ -277,12 +300,18 @@ def plot_balances():
if df.empty:
continue

new_index = preferred_order.intersection(df.index).append(df.index.difference(preferred_order))
new_index = preferred_order.intersection(df.index).append(
df.index.difference(preferred_order)
)

new_columns = df.columns.sort_values()

df.loc[new_index,new_columns].T.plot(kind="bar",ax=ax,stacked=True,color=[snakemake.config['plotting']['tech_colors'][i] for i in new_index])

df.loc[new_index, new_columns].T.plot(
kind="bar",
ax=ax,
stacked=True,
color=[snakemake.config["plotting"]["tech_colors"][i] for i in new_index],
)

handles, labels = ax.get_legend_handles_labels()

@@ -298,17 +327,23 @@ def plot_balances():

ax.grid(axis="x")

ax.legend(handles, labels, ncol=1, loc="upper left", bbox_to_anchor=[1, 1], frameon=False)
ax.legend(
handles,
labels,
ncol=1,
loc="upper left",
bbox_to_anchor=[1, 1],
frameon=False,
)


fig.savefig(snakemake.output.balances[:-10] + k + ".pdf", bbox_inches='tight')
fig.savefig(snakemake.output.balances[:-10] + k + ".pdf", bbox_inches="tight")

plt.cla()

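Aside: plot_costs, plot_energy and plot_balances all reorder their index with the same idiom that black merely re-wraps here, preferred entries first, then whatever remains. A minimal sketch with hypothetical labels:

import pandas as pd

preferred_order = pd.Index(["transmission lines", "hydroelectricity", "solar PV"])
present = pd.Index(["solar PV", "nuclear", "transmission lines"])

# entries from preferred_order come first, unlisted entries are appended after
new_index = preferred_order.intersection(present).append(
    present.difference(preferred_order)
)
# -> ["transmission lines", "solar PV", "nuclear"]
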
def historical_emissions(cts):
"""
read historical emissions to add them to the carbon budget plot
Read historical emissions to add them to the carbon budget plot.
"""
# https://www.eea.europa.eu/data-and-maps/data/national-emissions-reported-to-the-unfccc-and-to-the-eu-greenhouse-gas-monitoring-mechanism-16
# downloaded 201228 (modified by EEA last on 201221)
@@ -316,25 +351,27 @@ def historical_emissions(cts):
df = pd.read_csv(fn, encoding="latin-1")
df.loc[df["Year"] == "1985-1987", "Year"] = 1986
df["Year"] = df["Year"].astype(int)
df = df.set_index(['Year', 'Sector_name', 'Country_code', 'Pollutant_name']).sort_index()
df = df.set_index(
["Year", "Sector_name", "Country_code", "Pollutant_name"]
).sort_index()

e = pd.Series()
e["electricity"] = '1.A.1.a - Public Electricity and Heat Production'
e['residential non-elec'] = '1.A.4.b - Residential'
e['services non-elec'] = '1.A.4.a - Commercial/Institutional'
e['rail non-elec'] = "1.A.3.c - Railways"
e["road non-elec"] = '1.A.3.b - Road Transportation'
e["electricity"] = "1.A.1.a - Public Electricity and Heat Production"
e["residential non-elec"] = "1.A.4.b - Residential"
e["services non-elec"] = "1.A.4.a - Commercial/Institutional"
e["rail non-elec"] = "1.A.3.c - Railways"
e["road non-elec"] = "1.A.3.b - Road Transportation"
e["domestic navigation"] = "1.A.3.d - Domestic Navigation"
e['international navigation'] = '1.D.1.b - International Navigation'
e["domestic aviation"] = '1.A.3.a - Domestic Aviation'
e["international aviation"] = '1.D.1.a - International Aviation'
e['total energy'] = '1 - Energy'
e['industrial processes'] = '2 - Industrial Processes and Product Use'
e['agriculture'] = '3 - Agriculture'
e['LULUCF'] = '4 - Land Use, Land-Use Change and Forestry'
e['waste management'] = '5 - Waste management'
e['other'] = '6 - Other Sector'
e['indirect'] = 'ind_CO2 - Indirect CO2'
e["international navigation"] = "1.D.1.b - International Navigation"
e["domestic aviation"] = "1.A.3.a - Domestic Aviation"
e["international aviation"] = "1.D.1.a - International Aviation"
e["total energy"] = "1 - Energy"
e["industrial processes"] = "2 - Industrial Processes and Product Use"
e["agriculture"] = "3 - Agriculture"
e["LULUCF"] = "4 - Land Use, Land-Use Change and Forestry"
e["waste management"] = "5 - Waste management"
e["other"] = "6 - Other Sector"
e["indirect"] = "ind_CO2 - Indirect CO2"
e["total wL"] = "Total (with LULUCF)"
e["total woL"] = "Total (without LULUCF)"

@@ -347,104 +384,166 @@ def historical_emissions(cts):
year = np.arange(1990, 2018).tolist()

idx = pd.IndexSlice
co2_totals = df.loc[idx[year,e.values,cts,pol],"emissions"].unstack("Year").rename(index=pd.Series(e.index,e.values))
co2_totals = (
df.loc[idx[year, e.values, cts, pol], "emissions"]
.unstack("Year")
.rename(index=pd.Series(e.index, e.values))
)

co2_totals = (1 / 1e6) * co2_totals.groupby(level=0, axis=0).sum() # Gton CO2

co2_totals.loc['industrial non-elec'] = co2_totals.loc['total energy'] - co2_totals.loc[['electricity', 'services non-elec','residential non-elec', 'road non-elec',
'rail non-elec', 'domestic aviation', 'international aviation', 'domestic navigation',
'international navigation']].sum()
co2_totals.loc["industrial non-elec"] = (
co2_totals.loc["total energy"]
- co2_totals.loc[
[
"electricity",
"services non-elec",
"residential non-elec",
"road non-elec",
"rail non-elec",
"domestic aviation",
"international aviation",
"domestic navigation",
"international navigation",
]
].sum()
)

emissions = co2_totals.loc["electricity"]
if "T" in opts:
emissions += co2_totals.loc[[i + " non-elec" for i in ["rail", "road"]]].sum()
if "H" in opts:
emissions += co2_totals.loc[[i+ " non-elec" for i in ["residential","services"]]].sum()
emissions += co2_totals.loc[
[i + " non-elec" for i in ["residential", "services"]]
].sum()
if "I" in opts:
emissions += co2_totals.loc[["industrial non-elec","industrial processes",
"domestic aviation","international aviation",
"domestic navigation","international navigation"]].sum()
emissions += co2_totals.loc[
[
"industrial non-elec",
"industrial processes",
"domestic aviation",
"international aviation",
"domestic navigation",
"international navigation",
]
].sum()
return emissions

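Aside: the rename in historical_emissions relies on pd.Series(e.index, e.values) inverting the short-label-to-CRF-sector mapping, so the unstacked emissions table gets its short labels back. A minimal sketch with one entry:

import pandas as pd

e = pd.Series({"electricity": "1.A.1.a - Public Electricity and Heat Production"})
inverse = pd.Series(e.index, e.values)  # CRF sector name -> short label
assert inverse["1.A.1.a - Public Electricity and Heat Production"] == "electricity"
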
def plot_carbon_budget_distribution(input_eurostat):
"""
Plot historical carbon emissions in the EU and decarbonization path
Plot historical carbon emissions in the EU and decarbonization path.
"""

import matplotlib.gridspec as gridspec
import seaborn as sns; sns.set()
sns.set_style('ticks')
plt.style.use('seaborn-ticks')
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
plt.rcParams['xtick.labelsize'] = 20
plt.rcParams['ytick.labelsize'] = 20
import seaborn as sns

sns.set()
sns.set_style("ticks")
plt.style.use("seaborn-ticks")
plt.rcParams["xtick.direction"] = "in"
plt.rcParams["ytick.direction"] = "in"
plt.rcParams["xtick.labelsize"] = 20
plt.rcParams["ytick.labelsize"] = 20

plt.figure(figsize=(10, 7))
gs1 = gridspec.GridSpec(1, 1)
ax1 = plt.subplot(gs1[0, 0])
ax1.set_ylabel('CO$_2$ emissions (Gt per year)',fontsize=22)
ax1.set_ylabel("CO$_2$ emissions (Gt per year)", fontsize=22)
ax1.set_ylim([0, 5])
ax1.set_xlim([1990,snakemake.config['scenario']['planning_horizons'][-1]+1])
ax1.set_xlim([1990, snakemake.config["scenario"]["planning_horizons"][-1] + 1])

path_cb = snakemake.config['results_dir'] + snakemake.config['run'] + '/csvs/'
path_cb = snakemake.config["results_dir"] + snakemake.config["run"] + "/csvs/"
countries = pd.read_csv(snakemake.input.country_codes, index_col=1)
cts = countries.index.to_list()
e_1990 = co2_emissions_year(cts, input_eurostat, opts, year=1990)
CO2_CAP=pd.read_csv(path_cb + 'carbon_budget_distribution.csv',
index_col=0)
CO2_CAP = pd.read_csv(path_cb + "carbon_budget_distribution.csv", index_col=0)


ax1.plot(e_1990*CO2_CAP[o],linewidth=3,
color='dodgerblue', label=None)
ax1.plot(e_1990 * CO2_CAP[o], linewidth=3, color="dodgerblue", label=None)

emissions = historical_emissions(cts)

ax1.plot(emissions, color='black', linewidth=3, label=None)
ax1.plot(emissions, color="black", linewidth=3, label=None)

# plot committed and under-discussion targets
# (notice that historical emissions include all countries in the
# network, but targets refer to EU)
ax1.plot([2020],[0.8*emissions[1990]],
marker='*', markersize=12, markerfacecolor='black',
markeredgecolor='black')
ax1.plot(
[2020],
[0.8 * emissions[1990]],
marker="*",
markersize=12,
markerfacecolor="black",
markeredgecolor="black",
)

ax1.plot([2030],[0.45*emissions[1990]],
marker='*', markersize=12, markerfacecolor='white',
markeredgecolor='black')
ax1.plot(
[2030],
[0.45 * emissions[1990]],
marker="*",
markersize=12,
markerfacecolor="white",
markeredgecolor="black",
)

ax1.plot([2030],[0.6*emissions[1990]],
marker='*', markersize=12, markerfacecolor='black',
markeredgecolor='black')
ax1.plot(
[2030],
[0.6 * emissions[1990]],
marker="*",
markersize=12,
markerfacecolor="black",
markeredgecolor="black",
)

ax1.plot([2050, 2050],[x*emissions[1990] for x in [0.2, 0.05]],
color='gray', linewidth=2, marker='_', alpha=0.5)
ax1.plot(
[2050, 2050],
[x * emissions[1990] for x in [0.2, 0.05]],
color="gray",
linewidth=2,
marker="_",
alpha=0.5,
)

ax1.plot([2050],[0.01*emissions[1990]],
marker='*', markersize=12, markerfacecolor='white',
linewidth=0, markeredgecolor='black',
label='EU under-discussion target', zorder=10,
clip_on=False)
ax1.plot(
[2050],
[0.01 * emissions[1990]],
marker="*",
markersize=12,
markerfacecolor="white",
linewidth=0,
markeredgecolor="black",
label="EU under-discussion target",
zorder=10,
clip_on=False,
)

ax1.plot([2050],[0.125*emissions[1990]],'ro',
marker='*', markersize=12, markerfacecolor='black',
markeredgecolor='black', label='EU committed target')
ax1.plot(
[2050],
[0.125 * emissions[1990]],
"ro",
marker="*",
markersize=12,
markerfacecolor="black",
markeredgecolor="black",
label="EU committed target",
)

ax1.legend(fancybox=True, fontsize=18, loc=(0.01,0.01),
facecolor='white', frameon=True)
ax1.legend(
fancybox=True, fontsize=18, loc=(0.01, 0.01), facecolor="white", frameon=True
)

path_cb_plot = snakemake.config['results_dir'] + snakemake.config['run'] + '/graphs/'
plt.savefig(path_cb_plot+'carbon_budget_plot.pdf', dpi=300)
path_cb_plot = (
snakemake.config["results_dir"] + snakemake.config["run"] + "/graphs/"
)
plt.savefig(path_cb_plot + "carbon_budget_plot.pdf", dpi=300)


if __name__ == "__main__":
if 'snakemake' not in globals():
if "snakemake" not in globals():
from helper import mock_snakemake
snakemake = mock_snakemake('plot_summary')

logging.basicConfig(level=snakemake.config['logging_level'])
snakemake = mock_snakemake("plot_summary")

logging.basicConfig(level=snakemake.config["logging_level"])

n_header = 4

@@ -454,8 +553,8 @@ if __name__ == "__main__":

plot_balances()

for sector_opts in snakemake.config['scenario']['sector_opts']:
opts=sector_opts.split('-')
for sector_opts in snakemake.config["scenario"]["sector_opts"]:
opts = sector_opts.split("-")
for o in opts:
if "cb" in o:
plot_carbon_budget_distribution(snakemake.input.eurostat)

File diff suppressed because it is too large
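Aside: plot_carbon_budget_distribution still selects the "seaborn-ticks" style; matplotlib 3.6 renamed the bundled seaborn styles, so a version-tolerant selection (an assumption, not part of this commit) would be:

import matplotlib.pyplot as plt

# fall back to the renamed style on matplotlib >= 3.6
style = "seaborn-ticks" if "seaborn-ticks" in plt.style.available else "seaborn-v0_8-ticks"
plt.style.use(style)
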
@@ -1,23 +1,26 @@
# -*- coding: utf-8 -*-
"""
Retrieve gas infrastructure data from https://zenodo.org/record/4767098/files/IGGIELGN.zip
Retrieve gas infrastructure data from
https://zenodo.org/record/4767098/files/IGGIELGN.zip.
"""

import logging
from helper import progress_retrieve

import zipfile
from pathlib import Path

from helper import progress_retrieve

logger = logging.getLogger(__name__)


if __name__ == "__main__":
if 'snakemake' not in globals():
if "snakemake" not in globals():
from helper import mock_snakemake
snakemake = mock_snakemake('retrieve_gas_network_data')
rootpath = '..'

snakemake = mock_snakemake("retrieve_gas_network_data")
rootpath = ".."
else:
rootpath = '.'
rootpath = "."

url = "https://zenodo.org/record/4767098/files/IGGIELGN.zip"

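Aside: this retrieval script boils down to a download-and-unzip step. A minimal standard-library sketch of the same flow (the extraction target here is hypothetical; the script itself uses the progress_retrieve helper for a progress bar):

import zipfile
from pathlib import Path
from urllib.request import urlretrieve

url = "https://zenodo.org/record/4767098/files/IGGIELGN.zip"
zip_fn = Path("IGGIELGN.zip")
urlretrieve(url, zip_fn)  # the script wraps this as progress_retrieve(url, zip_fn)
zipfile.ZipFile(zip_fn).extractall("data/gas_network")  # hypothetical target dir
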
@@ -1,8 +1,10 @@
# -*- coding: utf-8 -*-
"""
Retrieve and extract sector data bundle.
"""

import logging

logger = logging.getLogger(__name__)

import os
@@ -13,8 +15,7 @@ from pathlib import Path
# Add pypsa-eur scripts to path for import of _helpers
sys.path.insert(0, os.getcwd() + "/../pypsa-eur/scripts")

from _helpers import progress_retrieve, configure_logging

from _helpers import configure_logging, progress_retrieve

if __name__ == "__main__":
configure_logging(snakemake)

@@ -1,19 +1,21 @@
"""Solve network."""

import pypsa
import numpy as np

from vresutils.benchmark import memory_logger
from helper import override_component_attrs, update_config_with_sector_opts
# -*- coding: utf-8 -*-
"""
Solve network.
"""

import logging

import numpy as np
import pypsa
from helper import override_component_attrs, update_config_with_sector_opts
from vresutils.benchmark import memory_logger

logger = logging.getLogger(__name__)
pypsa.pf.logger.setLevel(logging.WARNING)


def add_land_use_constraint(n):

if 'm' in snakemake.wildcards.clusters:
if "m" in snakemake.wildcards.clusters:
_add_land_use_constraint_m(n)
else:
_add_land_use_constraint(n)
@@ -22,19 +24,28 @@ def add_land_use_constraint(n):
def _add_land_use_constraint(n):
# warning: this will miss existing offwind which is not classed AC-DC and has carrier 'offwind'

for carrier in ['solar', 'onwind', 'offwind-ac', 'offwind-dc']:
for carrier in ["solar", "onwind", "offwind-ac", "offwind-dc"]:
ext_i = (n.generators.carrier == carrier) & ~n.generators.p_nom_extendable
existing = n.generators.loc[ext_i,"p_nom"].groupby(n.generators.bus.map(n.buses.location)).sum()
existing = (
n.generators.loc[ext_i, "p_nom"]
.groupby(n.generators.bus.map(n.buses.location))
.sum()
)
existing.index += " " + carrier + "-" + snakemake.wildcards.planning_horizons
n.generators.loc[existing.index, "p_nom_max"] -= existing

# check if existing capacities are larger than technical potential
existing_large = n.generators[n.generators["p_nom_min"] > n.generators["p_nom_max"]].index
existing_large = n.generators[
n.generators["p_nom_min"] > n.generators["p_nom_max"]
].index
if len(existing_large):
logger.warning(f"Existing capacities larger than technical potential for {existing_large},\
adjust technical potential to existing capacities")
n.generators.loc[existing_large, "p_nom_max"] = n.generators.loc[existing_large, "p_nom_min"]

logger.warning(
f"Existing capacities larger than technical potential for {existing_large},\
adjust technical potential to existing capacities"
)
n.generators.loc[existing_large, "p_nom_max"] = n.generators.loc[
existing_large, "p_nom_min"
]

n.generators.p_nom_max.clip(lower=0, inplace=True)

@@ -46,80 +57,109 @@ def _add_land_use_constraint_m(n):
grouping_years = snakemake.config["existing_capacities"]["grouping_years"]
current_horizon = snakemake.wildcards.planning_horizons

for carrier in ['solar', 'onwind', 'offwind-ac', 'offwind-dc']:

for carrier in ["solar", "onwind", "offwind-ac", "offwind-dc"]:
existing = n.generators.loc[n.generators.carrier == carrier, "p_nom"]
ind = list(set([i.split(sep=" ")[0] + ' ' + i.split(sep=" ")[1] for i in existing.index]))
ind = list(
set(
[
i.split(sep=" ")[0] + " " + i.split(sep=" ")[1]
for i in existing.index
]
)
)

previous_years = [
str(y) for y in
planning_horizons + grouping_years
str(y)
for y in planning_horizons + grouping_years
if y < int(snakemake.wildcards.planning_horizons)
]

for p_year in previous_years:
ind2 = [i for i in ind if i + " " + carrier + "-" + p_year in existing.index]
ind2 = [
i for i in ind if i + " " + carrier + "-" + p_year in existing.index
]
sel_current = [i + " " + carrier + "-" + current_horizon for i in ind2]
sel_p_year = [i + " " + carrier + "-" + p_year for i in ind2]
n.generators.loc[sel_current, "p_nom_max"] -= existing.loc[sel_p_year].rename(lambda x: x[:-4] + current_horizon)
n.generators.loc[sel_current, "p_nom_max"] -= existing.loc[
sel_p_year
].rename(lambda x: x[:-4] + current_horizon)

n.generators.p_nom_max.clip(lower=0, inplace=True)

def add_co2_sequestration_limit(n, limit=200):
"""Add a global constraint on the amount of Mt CO2 that can be sequestered."""
"""
Add a global constraint on the amount of Mt CO2 that can be sequestered.
"""
n.carriers.loc["co2 stored", "co2_absorptions"] = -1
n.carriers.co2_absorptions = n.carriers.co2_absorptions.fillna(0)

limit = limit * 1e6
for o in opts:
if not "seq" in o: continue
if not "seq" in o:
continue
limit = float(o[o.find("seq") + 3 :]) * 1e6
break

n.add("GlobalConstraint", 'co2_sequestration_limit', sense="<=", constant=limit,
type="primary_energy", carrier_attribute="co2_absorptions")
n.add(
"GlobalConstraint",
"co2_sequestration_limit",
sense="<=",
constant=limit,
type="primary_energy",
carrier_attribute="co2_absorptions",
)

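Aside: the sequestration limit above can be overridden from the sector_opts wildcard; the loop scans for a token like "seq400" and converts MtCO2 to tCO2. A minimal sketch of that parsing, mirroring the lines in the hunk (wildcard value hypothetical):

opts = "Co2L0-730H-seq400".split("-")  # hypothetical sector_opts wildcard

limit = 200 * 1e6  # default, in tCO2
for o in opts:
    if "seq" not in o:
        continue
    limit = float(o[o.find("seq") + 3 :]) * 1e6
    break
assert limit == 400e6
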
def prepare_network(n, solve_opts=None, config=None):
if "clip_p_max_pu" in solve_opts:
for df in (
n.generators_t.p_max_pu,
n.generators_t.p_min_pu,
n.storage_units_t.inflow,
):
df.where(df > solve_opts["clip_p_max_pu"], other=0.0, inplace=True)

if 'clip_p_max_pu' in solve_opts:
for df in (n.generators_t.p_max_pu, n.generators_t.p_min_pu, n.storage_units_t.inflow):
df.where(df>solve_opts['clip_p_max_pu'], other=0., inplace=True)

if solve_opts.get('load_shedding'):
if solve_opts.get("load_shedding"):
# intersect between macroeconomic and survey-based willingness to pay
# http://journal.frontiersin.org/article/10.3389/fenrg.2015.00055/full
n.add("Carrier", "Load")
n.madd("Generator", n.buses.index, " load",
n.madd(
"Generator",
n.buses.index,
" load",
bus=n.buses.index,
carrier='load',
carrier="load",
sign=1e-3,  # Adjust sign to measure p and p_nom in kW instead of MW
marginal_cost=1e2,  # Eur/kWh
p_nom=1e9  # kW
p_nom=1e9,  # kW
)

if solve_opts.get('noisy_costs'):
if solve_opts.get("noisy_costs"):
for t in n.iterate_components():
# if 'capital_cost' in t.df:
#     t.df['capital_cost'] += 1e1 + 2.*(np.random.random(len(t.df)) - 0.5)
if 'marginal_cost' in t.df:
if "marginal_cost" in t.df:
np.random.seed(174)
t.df['marginal_cost'] += 1e-2 + 2e-3 * (np.random.random(len(t.df)) - 0.5)
t.df["marginal_cost"] += 1e-2 + 2e-3 * (
np.random.random(len(t.df)) - 0.5
)

for t in n.iterate_components(['Line', 'Link']):
for t in n.iterate_components(["Line", "Link"]):
np.random.seed(123)
t.df['capital_cost'] += (1e-1 + 2e-2 * (np.random.random(len(t.df)) - 0.5)) * t.df['length']
t.df["capital_cost"] += (
1e-1 + 2e-2 * (np.random.random(len(t.df)) - 0.5)
) * t.df["length"]

if solve_opts.get('nhours'):
nhours = solve_opts['nhours']
if solve_opts.get("nhours"):
nhours = solve_opts["nhours"]
n.set_snapshots(n.snapshots[:nhours])
n.snapshot_weightings[:] = 8760./nhours
n.snapshot_weightings[:] = 8760.0 / nhours

if snakemake.config['foresight'] == 'myopic':
if snakemake.config["foresight"] == "myopic":
add_land_use_constraint(n)

if n.stores.carrier.eq('co2 stored').any():
if n.stores.carrier.eq("co2 stored").any():
limit = config["sector"].get("co2_sequestration_potential", 200)
add_co2_sequestration_limit(n, limit=limit)

@@ -138,19 +178,25 @@ def add_battery_constraints(n):
chargers_ext = n.links[charger_bool].query("p_nom_extendable").index

eff = n.links.efficiency[dischargers_ext].values
lhs = n.model["Link-p_nom"].loc[chargers_ext] - n.model["Link-p_nom"].loc[dischargers_ext] * eff
lhs = (
n.model["Link-p_nom"].loc[chargers_ext]
- n.model["Link-p_nom"].loc[dischargers_ext] * eff
)

n.model.add_constraints(lhs == 0, name="Link-charger_ratio")

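Aside: the "Link-charger_ratio" constraint above ties a battery's charger rating to its discharger rating times efficiency, so the two sides cannot be sized asymmetrically. A toy numeric check with hypothetical values:

eff = 0.96  # discharger efficiency, hypothetical
p_nom_discharger = 100.0  # MW
p_nom_charger = p_nom_discharger * eff  # what the constraint enforces

lhs = p_nom_charger - p_nom_discharger * eff
assert lhs == 0  # Link-p_nom[charger] - Link-p_nom[discharger] * eff == 0
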
def add_chp_constraints(n):

electric = (n.links.index.str.contains("urban central")
electric = (
n.links.index.str.contains("urban central")
& n.links.index.str.contains("CHP")
& n.links.index.str.contains("electric"))
heat = (n.links.index.str.contains("urban central")
& n.links.index.str.contains("electric")
)
heat = (
n.links.index.str.contains("urban central")
& n.links.index.str.contains("CHP")
& n.links.index.str.contains("heat"))
& n.links.index.str.contains("heat")
)

electric_ext = n.links[electric].query("p_nom_extendable").index
heat_ext = n.links[heat].query("p_nom_extendable").index
@@ -164,32 +210,44 @@ def add_chp_constraints(n):
if not electric_ext.empty:
p_nom = n.model["Link-p_nom"]

lhs = (p_nom.loc[electric_ext] * (n.links.p_nom_ratio * n.links.efficiency)[electric_ext].values -
p_nom.loc[heat_ext] * n.links.efficiency[heat_ext].values)
n.model.add_constraints(lhs == 0, name='chplink-fix_p_nom_ratio')
lhs = (
p_nom.loc[electric_ext]
* (n.links.p_nom_ratio * n.links.efficiency)[electric_ext].values
- p_nom.loc[heat_ext] * n.links.efficiency[heat_ext].values
)
n.model.add_constraints(lhs == 0, name="chplink-fix_p_nom_ratio")

rename = {"Link-ext": "Link"}
lhs = p.loc[:, electric_ext] + p.loc[:, heat_ext] - p_nom.rename(rename).loc[electric_ext]
n.model.add_constraints(lhs <= 0, name='chplink-top_iso_fuel_line_ext')

lhs = (
p.loc[:, electric_ext]
+ p.loc[:, heat_ext]
- p_nom.rename(rename).loc[electric_ext]
)
n.model.add_constraints(lhs <= 0, name="chplink-top_iso_fuel_line_ext")

# top_iso_fuel_line for fixed
if not electric_fix.empty:
lhs = p.loc[:, electric_fix] + p.loc[:, heat_fix]
rhs = n.links.p_nom[electric_fix]
n.model.add_constraints(lhs <= rhs, name='chplink-top_iso_fuel_line_fix')
n.model.add_constraints(lhs <= rhs, name="chplink-top_iso_fuel_line_fix")

# back-pressure
if not electric.empty:
lhs = (p.loc[:, heat] * (n.links.efficiency[heat] * n.links.c_b[electric].values) -
p.loc[:, electric] * n.links.efficiency[electric])
n.model.add_constraints(lhs <= rhs, name='chplink-backpressure')
lhs = (
p.loc[:, heat] * (n.links.efficiency[heat] * n.links.c_b[electric].values)
- p.loc[:, electric] * n.links.efficiency[electric]
)
n.model.add_constraints(lhs <= rhs, name="chplink-backpressure")

def add_pipe_retrofit_constraint(n):
"""Add constraint for retrofitting existing CH4 pipelines to H2 pipelines."""
"""
Add constraint for retrofitting existing CH4 pipelines to H2 pipelines.
"""
gas_pipes_i = n.links.query("carrier == 'gas pipeline' and p_nom_extendable").index
h2_retrofitted_i = n.links.query("carrier == 'H2 pipeline retrofitted' and p_nom_extendable").index
h2_retrofitted_i = n.links.query(
"carrier == 'H2 pipeline retrofitted' and p_nom_extendable"
).index

if h2_retrofitted_i.empty or gas_pipes_i.empty:
return
@@ -200,7 +258,7 @@ def add_pipe_retrofit_constraint(n):
lhs = p_nom.loc[gas_pipes_i] + CH4_per_H2 * p_nom.loc[h2_retrofitted_i]
rhs = n.links.p_nom[gas_pipes_i].rename_axis("Link-ext")

n.model.add_constraints(lhs == rhs, name='Link-pipe_retrofit')
n.model.add_constraints(lhs == rhs, name="Link-pipe_retrofit")

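Aside: add_pipe_retrofit_constraint conserves total pipeline capacity; remaining gas capacity plus CH4_per_H2 times retrofitted H2 capacity must equal the original gas rating. A toy numeric check (all values hypothetical):

CH4_per_H2 = 3.0  # MW of CH4 capacity per MW of H2 capacity, hypothetical
p_nom_gas_original = 900.0  # MW, the fixed right-hand side

p_nom_h2_retrofitted = 200.0  # MW chosen by the optimiser
p_nom_gas_remaining = p_nom_gas_original - CH4_per_H2 * p_nom_h2_retrofitted

assert p_nom_gas_remaining + CH4_per_H2 * p_nom_h2_retrofitted == p_nom_gas_original
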
def extra_functionality(n, snapshots):
@@ -209,9 +267,11 @@ def extra_functionality(n, snapshots):


def solve_network(n, config, opts="", **kwargs):
set_of_options = config['solving']['solver']['options']
solver_options = config['solving']["solver_options"][set_of_options] if set_of_options else {}
solver_name = config['solving']['solver']['name']
set_of_options = config["solving"]["solver"]["options"]
solver_options = (
config["solving"]["solver_options"][set_of_options] if set_of_options else {}
)
solver_name = config["solving"]["solver"]["name"]
cf_solving = config["solving"]["options"]
track_iterations = cf_solving.get("track_iterations", False)
min_iterations = cf_solving.get("min_iterations", 4)
@@ -245,46 +305,52 @@ def solve_network(n, config, opts="", **kwargs):
)

if status != "ok":
logger.warning(f"Solving status '{status}' with termination condition '{condition}'")
logger.warning(
f"Solving status '{status}' with termination condition '{condition}'"
)

return n


# %%
if __name__ == "__main__":
if 'snakemake' not in globals():
if "snakemake" not in globals():
from helper import mock_snakemake

snakemake = mock_snakemake(
'solve_network_myopic',
simpl='',
"solve_network_myopic",
simpl="",
opts="",
clusters="45",
lv=1.0,
sector_opts='8760H-T-H-B-I-A-solar+p3-dist1',
sector_opts="8760H-T-H-B-I-A-solar+p3-dist1",
planning_horizons="2020",
)

logging.basicConfig(filename=snakemake.log.python,
level=snakemake.config['logging_level'])
logging.basicConfig(
filename=snakemake.log.python, level=snakemake.config["logging_level"]
)

update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts)

tmpdir = snakemake.config['solving'].get('tmpdir')
tmpdir = snakemake.config["solving"].get("tmpdir")
if tmpdir is not None:
from pathlib import Path

Path(tmpdir).mkdir(parents=True, exist_ok=True)
opts = snakemake.wildcards.sector_opts.split('-')
solve_opts = snakemake.config['solving']['options']

fn = getattr(snakemake.log, 'memory', None)
with memory_logger(filename=fn, interval=30.) as mem:
opts = snakemake.wildcards.sector_opts.split("-")
solve_opts = snakemake.config["solving"]["options"]

fn = getattr(snakemake.log, "memory", None)
with memory_logger(filename=fn, interval=30.0) as mem:
overrides = override_component_attrs(snakemake.input.overrides)
n = pypsa.Network(snakemake.input.network, override_component_attrs=overrides)

n = prepare_network(n, solve_opts, config=snakemake.config)

n = solve_network(n, config=snakemake.config, opts=opts, log_fn=snakemake.log.solver)
n = solve_network(
n, config=snakemake.config, opts=opts, log_fn=snakemake.log.solver
)

if "lv_limit" in n.global_constraints.index:
n.line_volume_limit = n.global_constraints.at["lv_limit", "constant"]

@@ -25,4 +25,3 @@ solving:
name: cbc
options: cbc-default
mem: 4000