From 13769f90af4500948b0376d57df4cceaa13e78b5 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 6 Mar 2023 08:27:45 +0000 Subject: [PATCH] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .github/workflows/ci.yaml | 106 +- .gitignore | 2 +- .syncignore-receive | 2 +- .syncignore-send | 2 +- LICENSE.txt | 2 +- config.default.yaml | 162 +- data/override_component_attrs/buses.csv | 2 +- data/override_component_attrs/loads.csv | 2 +- doc/conf.py | 201 +- doc/data.csv | 1 - doc/index.rst | 8 +- doc/installation.rst | 2 +- doc/limitations.rst | 2 +- doc/myopic.rst | 22 +- doc/spatial_resolution.rst | 6 +- doc/supply_demand.rst | 161 +- matplotlibrc | 2 +- scripts/add_brownfield.py | 108 +- scripts/add_existing_baseyear.py | 496 ++-- scripts/build_ammonia_production.py | 26 +- scripts/build_biomass_potentials.py | 90 +- scripts/build_biomass_transport_costs.py | 30 +- scripts/build_clustered_population_layouts.py | 35 +- scripts/build_cop_profiles.py | 28 +- scripts/build_energy_totals.py | 214 +- scripts/build_gas_input_locations.py | 76 +- scripts/build_gas_network.py | 78 +- scripts/build_heat_demand.py | 38 +- scripts/build_industrial_distribution_key.py | 81 +- ...ustrial_energy_demand_per_country_today.py | 192 +- ...build_industrial_energy_demand_per_node.py | 26 +- ...industrial_energy_demand_per_node_today.py | 69 +- ...build_industrial_production_per_country.py | 290 ++- ...ustrial_production_per_country_tomorrow.py | 47 +- .../build_industrial_production_per_node.py | 70 +- scripts/build_industry_sector_ratios.py | 21 +- scripts/build_population_layouts.py | 58 +- ...build_population_weighted_energy_totals.py | 16 +- scripts/build_retro_cost.py | 869 ++++--- scripts/build_salt_cavern_potentials.py | 39 +- scripts/build_sequestration_potentials.py | 27 +- scripts/build_shipping_demand.py | 44 +- scripts/build_solar_thermal_profiles.py | 47 +- scripts/build_temperature_profiles.py | 46 +- scripts/build_transport_demand.py | 35 +- scripts/cluster_gas_network.py | 80 +- scripts/copy_config.py | 21 +- scripts/helper.py | 64 +- scripts/make_summary.py | 451 ++-- scripts/plot_network.py | 524 ++-- scripts/plot_summary.py | 471 ++-- scripts/prepare_sector_network.py | 2107 ++++++++++------- scripts/retrieve_gas_infrastructure_data.py | 17 +- scripts/retrieve_sector_databundle.py | 7 +- scripts/solve_network.py | 258 +- test/config.myopic.yaml | 13 +- test/config.overnight.yaml | 8 +- 57 files changed, 4679 insertions(+), 3223 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 0b3c2d4f..12314f94 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -16,7 +16,7 @@ on: branches: - master schedule: - - cron: "0 5 * * TUE" + - cron: "0 5 * * TUE" env: CONDA_CACHE_NUMBER: 1 # Change this value to manually reset the environment cache @@ -29,9 +29,9 @@ jobs: matrix: include: # Matrix required to handle caching with Mambaforge - - os: ubuntu-latest - label: ubuntu-latest - prefix: /usr/share/miniconda3/envs/pypsa-eur + - os: ubuntu-latest + label: ubuntu-latest + prefix: /usr/share/miniconda3/envs/pypsa-eur # - os: macos-latest # label: macos-latest @@ -44,63 +44,63 @@ jobs: name: ${{ matrix.label }} runs-on: ${{ matrix.os }} - + defaults: run: shell: bash -l {0} - + steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v2 - - name: Clone pypsa-eur subworkflow - run: | - git clone https://github.com/pypsa/pypsa-eur 
../pypsa-eur - cp ../pypsa-eur/test/config.test1.yaml ../pypsa-eur/config.yaml - - - name: Setup secrets - run: | - echo -ne "url: ${CDSAPI_URL}\nkey: ${CDSAPI_TOKEN}\n" > ~/.cdsapirc + - name: Clone pypsa-eur subworkflow + run: | + git clone https://github.com/pypsa/pypsa-eur ../pypsa-eur + cp ../pypsa-eur/test/config.test1.yaml ../pypsa-eur/config.yaml - - name: Add solver to environment - run: | - echo -e "- coincbc\n- ipopt<3.13.3" >> ../pypsa-eur/envs/environment.yaml + - name: Setup secrets + run: | + echo -ne "url: ${CDSAPI_URL}\nkey: ${CDSAPI_TOKEN}\n" > ~/.cdsapirc - - name: Setup Mambaforge - uses: conda-incubator/setup-miniconda@v2 - with: - miniforge-variant: Mambaforge - miniforge-version: latest - activate-environment: pypsa-eur - use-mamba: true - - - name: Set cache dates - run: | - echo "DATE=$(date +'%Y%m%d')" >> $GITHUB_ENV - echo "WEEK=$(date +'%Y%U')" >> $GITHUB_ENV + - name: Add solver to environment + run: | + echo -e "- coincbc\n- ipopt<3.13.3" >> ../pypsa-eur/envs/environment.yaml - - name: Cache data and cutouts folders - uses: actions/cache@v3 - with: - path: | - data - ../pypsa-eur/cutouts - ../pypsa-eur/data - key: data-cutouts-${{ env.WEEK }}-${{ env.DATA_CACHE_NUMBER }} + - name: Setup Mambaforge + uses: conda-incubator/setup-miniconda@v2 + with: + miniforge-variant: Mambaforge + miniforge-version: latest + activate-environment: pypsa-eur + use-mamba: true - - name: Create environment cache - uses: actions/cache@v2 - id: cache - with: - path: ${{ matrix.prefix }} - key: ${{ matrix.label }}-conda-${{ env.DATE }}-${{ env.CONDA_CACHE_NUMBER }} + - name: Set cache dates + run: | + echo "DATE=$(date +'%Y%m%d')" >> $GITHUB_ENV + echo "WEEK=$(date +'%Y%U')" >> $GITHUB_ENV - - name: Update environment due to outdated or unavailable cache - run: mamba env update -n pypsa-eur -f ../pypsa-eur/envs/environment.yaml - if: steps.cache.outputs.cache-hit != 'true' + - name: Cache data and cutouts folders + uses: actions/cache@v3 + with: + path: | + data + ../pypsa-eur/cutouts + ../pypsa-eur/data + key: data-cutouts-${{ env.WEEK }}-${{ env.DATA_CACHE_NUMBER }} - - name: Test snakemake workflow - run: | - conda activate pypsa-eur - conda list - snakemake -call --configfile test/config.overnight.yaml - snakemake -call --configfile test/config.myopic.yaml + - name: Create environment cache + uses: actions/cache@v2 + id: cache + with: + path: ${{ matrix.prefix }} + key: ${{ matrix.label }}-conda-${{ env.DATE }}-${{ env.CONDA_CACHE_NUMBER }} + + - name: Update environment due to outdated or unavailable cache + run: mamba env update -n pypsa-eur -f ../pypsa-eur/envs/environment.yaml + if: steps.cache.outputs.cache-hit != 'true' + + - name: Test snakemake workflow + run: | + conda activate pypsa-eur + conda list + snakemake -call --configfile test/config.overnight.yaml + snakemake -call --configfile test/config.myopic.yaml diff --git a/.gitignore b/.gitignore index 9d1a417e..44ce82ce 100644 --- a/.gitignore +++ b/.gitignore @@ -58,4 +58,4 @@ doc/_build *.ipynb -data/costs_* \ No newline at end of file +data/costs_* diff --git a/.syncignore-receive b/.syncignore-receive index 3ebcbea8..d436afd9 100644 --- a/.syncignore-receive +++ b/.syncignore-receive @@ -11,4 +11,4 @@ __pycache__ data notebooks benchmarks -*.nc \ No newline at end of file +*.nc diff --git a/.syncignore-send b/.syncignore-send index 38f4b664..09915109 100644 --- a/.syncignore-send +++ b/.syncignore-send @@ -11,4 +11,4 @@ __pycache__ notebooks benchmarks resources -results \ No newline at end of file +results diff 
--git a/LICENSE.txt b/LICENSE.txt index 87f6d959..073e24e9 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -17,4 +17,4 @@ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/config.default.yaml b/config.default.yaml index 5b67552f..bf7a98a3 100644 --- a/config.default.yaml +++ b/config.default.yaml @@ -13,17 +13,17 @@ foresight: overnight # options are overnight, myopic, perfect (perfect is not ye scenario: simpl: # only relevant for PyPSA-Eur - - '' + - '' lv: # allowed transmission line volume expansion, can be any float >= 1.0 (today) or "opt" - - 1.0 - - 1.5 + - 1.0 + - 1.5 clusters: # number of nodes in Europe, any integer between 37 (1 node per country-zone) and several hundred - - 45 - - 50 + - 45 + - 50 opts: # only relevant for PyPSA-Eur - - '' + - '' sector_opts: # this is where the main scenario settings are - - Co2L0-3H-T-H-B-I-A-solar+p3-dist1 + - Co2L0-3H-T-H-B-I-A-solar+p3-dist1 # to really understand the options here, look in scripts/prepare_sector_network.py # Co2Lx specifies the CO2 target in x% of the 1990 values; default will give default (5%); # Co2L0p25 will give 25% CO2 emissions; Co2Lm0p05 will give 5% negative emissions @@ -41,7 +41,7 @@ scenario: # cb40ex0 distributes a carbon budget of 40 GtCO2 following an exponential # decay with initial growth rate 0 planning_horizons: # investment years for myopic and perfect; for overnight, year of cost assumptions can be different and is defined under 'costs' - - 2050 + - 2050 # for example, set to # - 2020 # - 2030 @@ -84,18 +84,18 @@ electricity: # in PyPSA-Eur-Sec pypsa_eur: Bus: - - AC + - AC Link: - - DC + - DC Generator: - - onwind - - offwind-ac - - offwind-dc - - solar - - ror + - onwind + - offwind-ac + - offwind-dc + - solar + - ror StorageUnit: - - PHS - - hydro + - PHS + - hydro Store: [] @@ -110,25 +110,25 @@ biomass: scenario: ENS_Med classes: solid biomass: - - Agricultural waste - - Fuelwood residues - - Secondary Forestry residues - woodchips - - Sawdust - - Residues from landscape care - - Municipal waste + - Agricultural waste + - Fuelwood residues + - Secondary Forestry residues - woodchips + - Sawdust + - Residues from landscape care + - Municipal waste not included: - - Sugar from sugar beet - - Rape seed - - "Sunflower, soya seed " - - Bioethanol barley, wheat, grain maize, oats, other cereals and rye - - Miscanthus, switchgrass, RCG - - Willow - - Poplar - - FuelwoodRW - - C&P_RW + - Sugar from sugar beet + - Rape seed + - "Sunflower, soya seed " + - Bioethanol barley, wheat, grain maize, oats, other cereals and rye + - Miscanthus, switchgrass, RCG + - Willow + - Poplar + - FuelwoodRW + - C&P_RW biogas: - - Manure solid, liquid - - Sludge + - Manure solid, liquid + - Sludge solar_thermal: @@ -143,10 +143,10 @@ existing_capacities: grouping_years_heat: [1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2019] # these should not extend 2020 threshold_capacity: 10 conventional_carriers: - - lignite - - coal - - oil - - uranium + - lignite + - coal + - oil + - uranium sector: @@ -238,7 +238,7 @@ sector: 2040: 0.16 2045: 0.21 2050: 0.29 - retrofitting : # 
co-optimises building renovation to reduce space heat demand + retrofitting: # co-optimises building renovation to reduce space heat demand retro_endogen: false # co-optimise space heat savings cost_factor: 1.0 # weight costs for building renovation interest_rate: 0.04 # for investment in building components @@ -279,7 +279,7 @@ sector: hydrogen_underground_storage: true hydrogen_underground_storage_locations: # - onshore # more than 50 km from sea - - nearshore # within 50 km of sea + - nearshore # within 50 km of sea # - offshore ammonia: false # can be false (no NH3 carrier), true (copperplated NH3), "regional" (regionalised NH3 without network) min_part_load_fischer_tropsch: 0.9 # p_min_pu @@ -401,14 +401,14 @@ solving: min_iterations: 4 max_iterations: 6 keep_shadowprices: - - Bus - - Line - - Link - - Transformer - - GlobalConstraint - - Generator - - Store - - StorageUnit + - Bus + - Line + - Link + - Transformer + - GlobalConstraint + - Generator + - Store + - StorageUnit solver: name: gurobi @@ -486,47 +486,47 @@ plotting: energy_min: -20000 energy_threshold: 50 vre_techs: - - onwind - - offwind-ac - - offwind-dc - - solar - - ror + - onwind + - offwind-ac + - offwind-dc + - solar + - ror renewable_storage_techs: - - PHS - - hydro + - PHS + - hydro conv_techs: - - OCGT - - CCGT - - Nuclear - - Coal + - OCGT + - CCGT + - Nuclear + - Coal storage_techs: - - hydro+PHS - - battery - - H2 + - hydro+PHS + - battery + - H2 load_carriers: - - AC load + - AC load AC_carriers: - - AC line - - AC transformer + - AC line + - AC transformer link_carriers: - - DC line - - Converter AC-DC + - DC line + - Converter AC-DC heat_links: - - heat pump - - resistive heater - - CHP heat - - CHP electric - - gas boiler - - central heat pump - - central resistive heater - - central CHP heat - - central CHP electric - - central gas boiler + - heat pump + - resistive heater + - CHP heat + - CHP electric + - gas boiler + - central heat pump + - central resistive heater + - central CHP heat + - central CHP electric + - central gas boiler heat_generators: - - gas boiler - - central gas boiler - - solar thermal collector - - central solar thermal collector + - gas boiler + - central gas boiler + - solar thermal collector + - central solar thermal collector tech_colors: # wind onwind: "#235ebc" diff --git a/data/override_component_attrs/buses.csv b/data/override_component_attrs/buses.csv index 95e276f9..7581e328 100644 --- a/data/override_component_attrs/buses.csv +++ b/data/override_component_attrs/buses.csv @@ -1,3 +1,3 @@ attribute,type,unit,default,description,status location,string,n/a,n/a,Reference to original electricity bus,Input (optional) -unit,string,n/a,MWh,Unit of the bus (descriptive only), Input (optional) \ No newline at end of file +unit,string,n/a,MWh,Unit of the bus (descriptive only), Input (optional) diff --git a/data/override_component_attrs/loads.csv b/data/override_component_attrs/loads.csv index 16290e7c..10bb5b4f 100644 --- a/data/override_component_attrs/loads.csv +++ b/data/override_component_attrs/loads.csv @@ -1,2 +1,2 @@ attribute,type,unit,default,description,status -carrier,string,n/a,n/a,carrier,Input (optional) \ No newline at end of file +carrier,string,n/a,n/a,carrier,Input (optional) diff --git a/doc/conf.py b/doc/conf.py index 85ceb66e..1e603b78 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -12,19 +12,19 @@ # All configuration values have a default; values that are commented out # serve to show the default. 
-import sys import os import shlex +import sys # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath('../scripts')) +sys.path.insert(0, os.path.abspath("../scripts")) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' +# needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom @@ -32,48 +32,48 @@ sys.path.insert(0, os.path.abspath('../scripts')) extensions = [ #'sphinx.ext.autodoc', #'sphinx.ext.autosummary', - 'sphinx.ext.autosectionlabel', - 'sphinx.ext.intersphinx', - 'sphinx.ext.todo', - 'sphinx.ext.mathjax', - 'sphinx.ext.napoleon', - 'sphinx.ext.graphviz', + "sphinx.ext.autosectionlabel", + "sphinx.ext.intersphinx", + "sphinx.ext.todo", + "sphinx.ext.mathjax", + "sphinx.ext.napoleon", + "sphinx.ext.graphviz", #'sphinx.ext.pngmath', #'sphinxcontrib.tikz', #'rinoh.frontend.sphinx', - 'sphinx.ext.imgconverter', # for SVG conversion + "sphinx.ext.imgconverter", # for SVG conversion ] -autodoc_default_flags = ['members'] +autodoc_default_flags = ["members"] autosummary_generate = True # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] -source_suffix = '.rst' +source_suffix = ".rst" # The encoding of source files. -#source_encoding = 'utf-8-sig' +# source_encoding = 'utf-8-sig' # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. -project = u'PyPSA-Eur-Sec' -copyright = u'2019-2023 Tom Brown (KIT, TUB), Marta Victoria (Aarhus University), Lisa Zeyen (KIT, TUB), Fabian Neumann (TUB)' -author = u'2019-2023 Tom Brown (KIT, TUB), Marta Victoria (Aarhus University), Lisa Zeyen (KIT, TUB), Fabian Neumann (TUB)' +project = "PyPSA-Eur-Sec" +copyright = "2019-2023 Tom Brown (KIT, TUB), Marta Victoria (Aarhus University), Lisa Zeyen (KIT, TUB), Fabian Neumann (TUB)" +author = "2019-2023 Tom Brown (KIT, TUB), Marta Victoria (Aarhus University), Lisa Zeyen (KIT, TUB), Fabian Neumann (TUB)" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. -version = u'0.7' +version = "0.7" # The full version, including alpha/beta/rc tags. -release = u'0.7.0' +release = "0.7.0" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. @@ -84,37 +84,37 @@ language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: -#today = '' +# today = '' # Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' +# today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. -exclude_patterns = ['_build'] +exclude_patterns = ["_build"] # The reST default role (used for this markup: `text`) to use for all # documents. 
-#default_role = None +# default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True +# add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). -#add_module_names = True +# add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. -#show_authors = False +# show_authors = False # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] +# modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. -#keep_warnings = False +# keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True @@ -124,174 +124,177 @@ todo_include_todos = True # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -html_theme = 'sphinx_rtd_theme' +html_theme = "sphinx_rtd_theme" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { - 'display_version': True, - 'sticky_navigation': True, + "display_version": True, + "sticky_navigation": True, } # Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] +# html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +# html_title = None # A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None +# html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. -#html_logo = None +# html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. -#html_favicon = None +# html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] html_context = { - 'css_files': [ - '_static/theme_overrides.css', # override wide tables in RTD theme - ], + "css_files": [ + "_static/theme_overrides.css", # override wide tables in RTD theme + ], } # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. -#html_extra_path = [] +# html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' +# html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. -#html_use_smartypants = True +# html_use_smartypants = True # Custom sidebar templates, maps document names to template names. -#html_sidebars = {} +# html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. 
-#html_additional_pages = {} +# html_additional_pages = {} # If false, no module index is generated. -#html_domain_indices = True +# html_domain_indices = True # If false, no index is generated. -#html_use_index = True +# html_use_index = True # If true, the index is split into individual pages for each letter. -#html_split_index = False +# html_split_index = False # If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True +# html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True +# html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True +# html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. -#html_use_opensearch = '' +# html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None +# html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' -#html_search_language = 'en' +# html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value -#html_search_options = {'type': 'default'} +# html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. -#html_search_scorer = 'scorer.js' +# html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. -htmlhelp_basename = 'PyPSAEurSecdoc' +htmlhelp_basename = "PyPSAEurSecdoc" # -- Options for LaTeX output --------------------------------------------- latex_elements = { -# The paper size ('letterpaper' or 'a4paper'). -#'papersize': 'letterpaper', - -# The font size ('10pt', '11pt' or '12pt'). -#'pointsize': '10pt', - -# Additional stuff for the LaTeX preamble. -#'preamble': '', - -# Latex figure (float) alignment -#'figure_align': 'htbp', + # The paper size ('letterpaper' or 'a4paper'). + #'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + #'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + #'preamble': '', + # Latex figure (float) alignment + #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). 
latex_documents = [ - (master_doc, 'PyPSA-Eur-Sec.tex', u'PyPSA-Eur-Sec Documentation', - u'author', 'manual'), + ( + master_doc, + "PyPSA-Eur-Sec.tex", + "PyPSA-Eur-Sec Documentation", + "author", + "manual", + ), ] -#Added for rinoh http://www.mos6581.org/rinohtype/quickstart.html -rinoh_documents = [(master_doc, # top-level file (index.rst) - 'PyPSA-Eur-Sec', # output (target.pdf) - 'PyPSA-Eur-Sec Documentation', # document title - 'author')] # document author +# Added for rinoh http://www.mos6581.org/rinohtype/quickstart.html +rinoh_documents = [ + ( + master_doc, # top-level file (index.rst) + "PyPSA-Eur-Sec", # output (target.pdf) + "PyPSA-Eur-Sec Documentation", # document title + "author", + ) +] # document author # The name of an image file (relative to this directory) to place at the top of # the title page. -#latex_logo = None +# latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. -#latex_use_parts = False +# latex_use_parts = False # If true, show page references after internal links. -#latex_show_pagerefs = False +# latex_show_pagerefs = False # If true, show URL addresses after external links. -#latex_show_urls = False +# latex_show_urls = False # Documents to append as an appendix to all manuals. -#latex_appendices = [] +# latex_appendices = [] # If false, no module index is generated. -#latex_domain_indices = True +# latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'pypsa-eur-sec', u'PyPSA-Eur-Sec Documentation', - [author], 1) -] +man_pages = [(master_doc, "pypsa-eur-sec", "PyPSA-Eur-Sec Documentation", [author], 1)] # If true, show URL addresses after external links. -#man_show_urls = False +# man_show_urls = False # -- Options for Texinfo output ------------------------------------------- @@ -300,23 +303,29 @@ man_pages = [ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'PyPSA-Eur-Sec', u'PyPSA-Eur-Sec Documentation', - author, 'PyPSA-Eur-Sec', 'One line description of project.', - 'Miscellaneous'), + ( + master_doc, + "PyPSA-Eur-Sec", + "PyPSA-Eur-Sec Documentation", + author, + "PyPSA-Eur-Sec", + "One line description of project.", + "Miscellaneous", + ), ] # Documents to append as an appendix to all manuals. -#texinfo_appendices = [] +# texinfo_appendices = [] # If false, no module index is generated. -#texinfo_domain_indices = True +# texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. -#texinfo_show_urls = 'footnote' +# texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. -#texinfo_no_detailmenu = False +# texinfo_no_detailmenu = False # Example configuration for intersphinx: refer to the Python standard library. 
-intersphinx_mapping = {'https://docs.python.org/': None} +intersphinx_mapping = {"https://docs.python.org/": None} diff --git a/doc/data.csv b/doc/data.csv index 012e5be0..0efed84d 100644 --- a/doc/data.csv +++ b/doc/data.csv @@ -27,4 +27,3 @@ Building topologies and corresponding standard values,tabula-calculator-calcsetb Retrofitting thermal envelope costs for Germany,retro_cost_germany.csv,unknown,https://www.iwu.de/forschung/handlungslogiken/kosten-energierelevanter-bau-und-anlagenteile-bei-modernisierung/ District heating most countries,jrc-idees-2015/,CC BY 4.0,https://ec.europa.eu/jrc/en/potencia/jrc-idees,, District heating missing countries,district_heat_share.csv,unknown,https://www.euroheat.org/knowledge-hub/country-profiles,, - diff --git a/doc/index.rst b/doc/index.rst index e6fdd415..46f8bd3e 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -21,9 +21,9 @@ transmission network level that covers the full ENTSO-E area. PyPSA-Eur-Sec builds on the electricity generation and transmission model `PyPSA-Eur `_ to add demand and supply for the following sectors: transport, space and water -heating, biomass, energy consumption in the agriculture, industry -and industrial feedstocks, carbon management, carbon capture and usage/sequestration. -This completes the energy system and includes all greenhouse gas emitters except waste management, agriculture, +heating, biomass, energy consumption in the agriculture, industry +and industrial feedstocks, carbon management, carbon capture and usage/sequestration. +This completes the energy system and includes all greenhouse gas emitters except waste management, agriculture, forestry and land use. @@ -37,7 +37,7 @@ patchy. We cannot support this model if you choose to use it. .. note:: - You can find showcases of the model's capabilities in the Supplementary Materials of the + You can find showcases of the model's capabilities in the Supplementary Materials of the preprint `Benefits of a Hydrogen Network in Europe `_, the Supplementary Materials of the `paper in Joule with a description of the industry sector diff --git a/doc/installation.rst b/doc/installation.rst index 029f06ee..133020e6 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -82,7 +82,7 @@ The data licences and sources are given in the following table. Set up the default configuration ================================ -First make your own copy of the ``config.yaml`` based on +First make your own copy of the ``config.yaml`` based on ``config.default.yaml``. For example: .. code:: bash diff --git a/doc/limitations.rst b/doc/limitations.rst index f54813e6..00d34002 100644 --- a/doc/limitations.rst +++ b/doc/limitations.rst @@ -36,7 +36,7 @@ See also the `GitHub repository issues `_ in ``config.yaml``. The carbon budget can be split among the ``planning_horizons`` following an exponential or beta decay. +The total carbon budget for the entire transition path can be indicated in the `sector_opts `_ in ``config.yaml``. The carbon budget can be split among the ``planning_horizons`` following an exponential or beta decay. E.g. ``'cb40ex0'`` splits a carbon budget equal to 40 Gt :math:`_{CO_2}` following an exponential decay whose initial linear growth rate r is zero. They can also follow some user-specified path, if defined `here `_. The paper `Speed of technological transformations required in Europe to achieve different climate goals (2022) `__ defines CO_2 budgets corresponding to global temperature increases (1.5C – 2C) as response to the emissions. 
Here, global carbon budgets are converted to European budgets assuming equal-per capita distribution which translates into a 6.43% share for Europe. The carbon budgets are in this paper distributed throughout the transition paths assuming an exponential decay. Emissions e(t) in every year t are limited by @@ -99,7 +99,7 @@ General myopic code structure The myopic code solves the network for the time steps included in ``planning_horizons`` in a recursive loop, so that: -1. The existing capacities (those installed before the base year are added as fixed capacities with p_nom=value, p_nom_extendable=False). E.g. for baseyear=2020, capacities installed before 2020 are added. In addition, the network comprises additional generator, storage, and link capacities with p_nom_extendable=True. The non-solved network is saved in ``results/run_name/networks/prenetworks-brownfield``. +1. The existing capacities (those installed before the base year are added as fixed capacities with p_nom=value, p_nom_extendable=False). E.g. for baseyear=2020, capacities installed before 2020 are added. In addition, the network comprises additional generator, storage, and link capacities with p_nom_extendable=True. The non-solved network is saved in ``results/run_name/networks/prenetworks-brownfield``. The base year is the first element in ``planning_horizons``. Step 1 is implemented with the rule add_baseyear for the base year and with the rule add_brownfield for the remaining planning_horizons. 2. The 2020 network is optimized. The solved network is saved in ``results/run_name/networks/postnetworks`` diff --git a/doc/spatial_resolution.rst b/doc/spatial_resolution.rst index 31cc8378..6b29b928 100644 --- a/doc/spatial_resolution.rst +++ b/doc/spatial_resolution.rst @@ -10,7 +10,7 @@ The total number of nodes for Europe is set in the ``config.yaml`` file under `` Exemplary unsolved network clustered to 512 nodes: -.. image:: ../graphics/elec_s_512.png +.. image:: ../graphics/elec_s_512.png Exemplary unsolved network clustered to 37 nodes: @@ -38,12 +38,12 @@ Here are some examples of how spatial resolution is set for different sectors in • Solid biomass: It can be modeled as a single node for Europe or it can be nodally resolved if activated in the `config `_. Nodal modeling includes modeling biomass potential per country (given per country, then distributed by population density within) and the transport of solid biomass between countries. -• CO2: It can be modeled as a single node for Europe or it can be nodally resolved with CO2 transport pipelines if activated in the `config `_. It should mentioned that in single node mode a transport and storage cost is added for sequestered CO2, the cost of which can be adjusted in the `config `_. +• CO2: It can be modeled as a single node for Europe or it can be nodally resolved with CO2 transport pipelines if activated in the `config `_. It should mentioned that in single node mode a transport and storage cost is added for sequestered CO2, the cost of which can be adjusted in the `config `_. • Liquid hydrocarbons: Modeled as a single node for Europe, since transport costs for liquids are low and no bottlenecks are expected. **Electricity distribution network** -Contrary to the transmission grid, the grid topology at the distribution level (at and below 110 kV) is not included due to the very high computational burden. 
However, a link per node can be used (if activated in the `Config `_ file) to represent energy transferred between distribution and transmission levels at every node. In essence, the total energy capacity connecting the transmission grid and the low-voltage level is optimized. The cost assumptions for this link can be adjusted in Config file `options `_ , and is currently assumed to be 500 Eur/kW. +Contrary to the transmission grid, the grid topology at the distribution level (at and below 110 kV) is not included due to the very high computational burden. However, a link per node can be used (if activated in the `Config `_ file) to represent energy transferred between distribution and transmission levels at every node. In essence, the total energy capacity connecting the transmission grid and the low-voltage level is optimized. The cost assumptions for this link can be adjusted in Config file `options `_ , and is currently assumed to be 500 Eur/kW. Rooftop PV, heat pumps, resistive heater, home batteries chargers for passenger EVs, as well as individual heating technologies (heat pumps and resistive heaters) are connected to low-voltage level. All the remaining generation and storage technologies are connected to the transmission grid. In practice, this means that the distribution grid capacity is only extended if it is necessary to balance the mismatch between local generation and demand. diff --git a/doc/supply_demand.rst b/doc/supply_demand.rst index 76a201a3..7bec6bf3 100644 --- a/doc/supply_demand.rst +++ b/doc/supply_demand.rst @@ -40,7 +40,7 @@ Heat demand =========== Building heating in residential and services sectors is resolved regionally, both for individual buildings and district heating systems, which include different supply options (see :ref:`heat-supply`.) -Annual heat demands per country are retrieved from `JRC-IDEES `_ and split into space and water heating. For space heating, the annual demands are converted to daily values based on the population-weighted Heating Degree Day (HDD) using the `atlite tool `_, where space heat demand is proportional to the difference between the daily average ambient temperature (read from `ERA5 `_) and a threshold temperature above which space heat demand is zero. A threshold temperature of 15 °C is assumed by default. The daily space heat demand is distributed to the hours of the day following heat demand profiles from `BDEW `_. These differ for weekdays and weekends/holidays and between residential and services demand. +Annual heat demands per country are retrieved from `JRC-IDEES `_ and split into space and water heating. For space heating, the annual demands are converted to daily values based on the population-weighted Heating Degree Day (HDD) using the `atlite tool `_, where space heat demand is proportional to the difference between the daily average ambient temperature (read from `ERA5 `_) and a threshold temperature above which space heat demand is zero. A threshold temperature of 15 °C is assumed by default. The daily space heat demand is distributed to the hours of the day following heat demand profiles from `BDEW `_. These differ for weekdays and weekends/holidays and between residential and services demand. *Space heating* @@ -62,7 +62,7 @@ Hot water demand is assumed to be constant throughout the year. *Urban and rural heating* -For every country, heat demand is split between low and high population density areas. 
These country-level totals are then distributed to each region in proportion to their rural and urban populations respectively. Urban areas with dense heat demand can be supplied with large-scale district heating systems. The percentage of urban heat demand that can be supplied by district heating networks as well as lump-sum losses in district heating systems is exogenously determined in the `config file `_. +For every country, heat demand is split between low and high population density areas. These country-level totals are then distributed to each region in proportion to their rural and urban populations respectively. Urban areas with dense heat demand can be supplied with large-scale district heating systems. The percentage of urban heat demand that can be supplied by district heating networks as well as lump-sum losses in district heating systems is exogenously determined in the `config file `_. *Cooling demand* @@ -74,21 +74,21 @@ As below figure shows, the current total heat demand in Europe is similar to the .. image:: ../graphics/Heat_and_el_demand_timeseries.png -In practice, in PyPSA-Eur-Sec, there are heat demand buses to which the corresponding heat demands are added. +In practice, in PyPSA-Eur-Sec, there are heat demand buses to which the corresponding heat demands are added. 1) Urban central heat: large-scale district heating networks in urban areas with dense heat population. Residential and services demand in these areas are added as demands to this bus 2) Residential urban decentral heat: heating for residential buildings in urban areas not using district heating 3) Services urban decentral heat: heating for services buildings in urban areas not using district heating 4) Residential rural heat: heating for residential buildings in rural areas with low population density. -5) Services rural heat: heating for residential services buildings in rural areas with low population density. Heat demand from agriculture sector is also included here. +5) Services rural heat: heating for residential services buildings in rural areas with low population density. Heat demand from agriculture sector is also included here. .. _heat-supply: Heat supply ======================= -Different supply options are available depending on whether demand is met centrally through district heating systems, or decentrally through appliances in individual buildings. +Different supply options are available depending on whether demand is met centrally through district heating systems, or decentrally through appliances in individual buildings. **Urban central heat** @@ -106,7 +106,7 @@ Below are more detailed explanations for each heating supply component, all of w **Large-scale CHP** Large Combined Heat and Power plants are included in the model if it is specified in the `config file `_. - + CHPs are based on back pressure plants operating with a fixed ratio of electricity to heat output. The efficiencies of each are given on the back pressure line, where the back pressure coefficient cb is the electricity output divided by the heat output. (For a more complete explanation of the operation of CHPs refer to the study by Dahl et al. : `Cost sensitivity of optimal sector-coupled district heating production systems `_. PyPSA-Eur-Sec includes CHP plants fueled by methane and solid biomass from waste and residues. Hydrogen fuel cells also produce both electricity and heat. 
@@ -129,37 +129,37 @@ The coefficient of performance (COP) of air- and ground-sourced heat pumps depen For the sink water temperature Tsink we assume 55 °C [`Config `_ file]. For the time- and location-dependent source temperatures Tsource, we rely on the `ERA5 `_ reanalysis weather data. The temperature differences are converted into COP time series using results from a regression analysis performed in the study by `Stafell et al. `_. For air-sourced heat pumps (ASHP), we use the function: .. math:: - COP (\Delta T) = 6.81 + 0.121\Delta T + 0.000630\Delta T^2 + COP (\Delta T) = 6.81 + 0.121\Delta T + 0.000630\Delta T^2 for ground-sourced heat pumps (GSHP), we use the function: .. math:: - COP(\Delta T) = 8.77 + 0.150\Delta T + 0.000734\Delta T^2 + COP(\Delta T) = 8.77 + 0.150\Delta T + 0.000734\Delta T^2 **Resistive heaters** Can be activated in Config from the `boilers `_ option. -Resistive heaters produce heat with a fixed conversion efficiency (refer to `Technology-data repository `_ ). +Resistive heaters produce heat with a fixed conversion efficiency (refer to `Technology-data repository `_ ). **Gas, oil, and biomass boilers** Can be activated in Config from the `boilers `_ , `oil boilers `_ , and `biomass boiler `_ option. Similar to resistive heaters, boilers have a fixed efficiency and produce heat using gas, oil or biomass. -**Solar thermal collectors** +**Solar thermal collectors** Can be activated in the config file from the `solar_thermal `_ option. Solar thermal profiles are built based on weather data and also have the `options `_ for setting the sky model and the orientation of the panel in the config file, which are then used by the atlite tool to calculate the solar resource time series. **Waste heat from Fuel Cells, Methanation and Fischer-Tropsch plants** -Waste heat from `fuel cells `_ in addition to processes like `Fischer-Tropsch `_, methanation, and Direct Air Capture (DAC) is dumped into district heating networks. +Waste heat from `fuel cells `_ in addition to processes like `Fischer-Tropsch `_, methanation, and Direct Air Capture (DAC) is dumped into district heating networks. **Existing heating capacities and decommissioning** For the myopic transition paths, capacities already existing for technologies supplying heat are retrieved from `“Mapping and analyses of the current and future (2020 - 2030)” `_ . For the sake of simplicity, coal, oil and gas boiler capacities are assimilated to gas boilers. Besides that, existing capacities for heat resistors, air-sourced and ground-sourced heat pumps are included in the model. For heating capacities, 25% of existing capacities in 2015 are assumed to be decommissioned in every 5-year time step after 2020. -**Thermal Energy Storage** +**Thermal Energy Storage** Activated in Config from the `tes `_ option. @@ -195,7 +195,7 @@ Further information are given in the study by Zeyen et al. : `Mitigating heat de Hydrogen demand ============================= -Hydrogen is consumed in the industry sector (see :ref:`Industry demand`) to produce ammonia (see :ref:`Chemicals Industry`) and direct reduced iron (DRI) (see :ref:`Iron and Steel`). Hydrogen is also consumed to produce synthetic methane (see :ref:`Methane supply`) and liquid hydrocarbons (see :ref:`Oil-based products supply`) which have multiple uses in industry and other sectors. +Hydrogen is consumed in the industry sector (see :ref:`Industry demand`) to produce ammonia (see :ref:`Chemicals Industry`) and direct reduced iron (DRI) (see :ref:`Iron and Steel`). 
Hydrogen is also consumed to produce synthetic methane (see :ref:`Methane supply`) and liquid hydrocarbons (see :ref:`Oil-based products supply`) which have multiple uses in industry and other sectors. Hydrogen is also used for transport applications (see :ref:`Transportation`), where it is exogenously fixed. It is used in `heavy-duty land transport `_ and as liquified hydrogen in the shipping sector (see :ref:`Shipping`). Furthermore, stationary fuel cells may re-electrify hydrogen (with waste heat as a byproduct) to balance renewable fluctuations (see :ref:`Electricity supply and demand`). The waste heat from the stationary fuel cells can be used in `district-heating systems `_. .. _Hydrogen supply: @@ -205,7 +205,7 @@ Hydrogen supply Today, most of the :math:`H_2` consumed globally is produced from natural gas by steam methane reforming (SMR) -.. math:: +.. math:: CH_4 + H_2O \xrightarrow{} CO + 3H_2 @@ -216,12 +216,12 @@ combined with a water-gas shift reaction CO + H_2O \xrightarrow{} CO_2 + H_2 -SMR is included `here `_. +SMR is included `here `_. PyPSA-Eur-Sec allows this route of :math:`H_2` production with and without [carbon capture (CC)] (see :ref:`Carbon dioxide capture, usage and sequestration (CCU/S)`). These routes are often referred to as blue and grey hydrogen. Here, methane input can be both of fossil or synthetic origin. Green hydrogen can be produced by electrolysis to split water into hydrogen and oxygen -.. math:: +.. math:: 2H_2O \xrightarrow{} 2H_2 + O_2 @@ -239,32 +239,32 @@ Hydrogen can be stored in overground steel tanks or `underground salt caverns `_. If gas infrastructure is regionally resolved, fossil gas can enter the system only at existing and planned LNG terminals, pipeline entry-points, and intra- European gas extraction sites, which are retrieved from the SciGRID Gas IGGIELGN dataset and the GEM Wiki. -Biogas can be upgraded to methane. -Synthetic methane can be produced by processing hydrogen and captures :math:`CO_2` in the Sabatier reaction +In addition to methane from fossil origins, the model also considers biogenic and synthetic sources. `The gas network can either be modelled, or it can be assumed that gas transport is not limited `_. If gas infrastructure is regionally resolved, fossil gas can enter the system only at existing and planned LNG terminals, pipeline entry-points, and intra- European gas extraction sites, which are retrieved from the SciGRID Gas IGGIELGN dataset and the GEM Wiki. +Biogas can be upgraded to methane. +Synthetic methane can be produced by processing hydrogen and captures :math:`CO_2` in the Sabatier reaction -.. math:: +.. math:: CO_2 + 4H_2 \xrightarrow{} CH_4 + 2H_2O Direct power-to-methane conversion with efficient heat integration developed in the HELMETH project is also an option. The share of synthetic, biogenic and fossil methane is an optimisation result depending on the techno-economic assumptions. - + *Methane transport* - + The existing European gas transmission network is represented based on the SciGRID Gas IGGIELGN dataset. This dataset is based on compiled and merged data from the ENTSOG maps and other publicly available data sources. It includes data on the capacity, diameter, pressure, length, and directionality of pipelines. Missing capacity data is conservatively inferred from the pipe diameter following conversion factors derived from an EHB report. The gas network is clustered to the selected number of model regions. Gas pipelines can be endogenously expanded or repurposed for hydrogen transport. 
Gas flows are represented by a lossless transport model. Methane is assumed to be transmitted without cost or capacity constraints because future demand is predicted to be low compared to available transport capacities. -The following figure shows the unclustered European gas transmission network based on the SciGRID Gas IGGIELGN dataset. Pipelines are color-coded by estimated capacities. Markers indicate entry-points, sites of fossil resource extraction, and LNG terminals. - +The following figure shows the unclustered European gas transmission network based on the SciGRID Gas IGGIELGN dataset. Pipelines are color-coded by estimated capacities. Markers indicate entry-points, sites of fossil resource extraction, and LNG terminals. + .. image:: ../graphics/gas_pipeline_figure.png .. _Biomass supply: @@ -319,14 +319,14 @@ The model can only use biogas by first upgrading it to natural gas quality [see Oil-based products demand ======================== -Naphtha is used as a feedstock in the chemicals industry (see :ref:`Chemicals Industry`). Furthermore, kerosene is used as transport fuel in the aviation sector (see :ref:`Aviation`). Non-electrified agriculture machinery also consumes gasoline. -Land transport [(see :ref:`Land transport`) that is not electrified or converted into using :math:`H_2`-fuel cells also consumes oil-based products. While there is regional distribution of demand, the carrier is copperplated in the model, which means that transport costs and constraints are neglected. +Naphtha is used as a feedstock in the chemicals industry (see :ref:`Chemicals Industry`). Furthermore, kerosene is used as transport fuel in the aviation sector (see :ref:`Aviation`). Non-electrified agriculture machinery also consumes gasoline. +Land transport [(see :ref:`Land transport`) that is not electrified or converted into using :math:`H_2`-fuel cells also consumes oil-based products. While there is regional distribution of demand, the carrier is copperplated in the model, which means that transport costs and constraints are neglected. .. _Oil-based products supply: -Oil-based products supply -======================== -Oil-based products can be either of fossil origin or synthetically produced by combining :math:`H_2` (see :ref:`Hydrogen supply`) and captured :math:`CO_2` (see :ref:`Carbon dioxide capture, usage and sequestration (CCU/S)`) in Fischer-Tropsch plants +Oil-based products supply +======================== +Oil-based products can be either of fossil origin or synthetically produced by combining :math:`H_2` (see :ref:`Hydrogen supply`) and captured :math:`CO_2` (see :ref:`Carbon dioxide capture, usage and sequestration (CCU/S)`) in Fischer-Tropsch plants .. math:: 𝑛CO+(2𝑛+1)H_2 → C_{n}H_{2n + 2} +𝑛H_2O @@ -336,8 +336,8 @@ with costs as included from the `technology-data repository `_ and the fuel and process switching described in the subsequent sections. Second, the 2050 energy demands and process emissions are calculated using the per-unit-of-material ratios based on the industry transformations and the `country-level material production in 2015 `_, assuming constant material demand. -Missing or too coarsely aggregated data in the JRC-IDEES database is supplemented with additional datasets: `Eurostat energy balances `_, `United States `_, `Geological Survey `_ for ammonia production, `DECHEMA `_ for methanol and chlorine, and `national statistics from Switzerland `_. 
+Missing or too coarsely aggregated data in the JRC-IDEES database is supplemented with additional datasets: `Eurostat energy balances `_, `United States `_, `Geological Survey `_ for ammonia production, `DECHEMA `_ for methanol and chlorine, and `national statistics from Switzerland `_. -Where there are fossil and electrified alternatives for the same process (e.g. in glass manufacture or drying), we assume that the process is completely electrified. Current electricity demands (lighting, air compressors, motor drives, fans, pumps) will remain electric. Processes that require temperatures below 500 °C are supplied with solid biomass, since we assume that residues and wastes are not suitable for high-temperature applications. We see solid biomass use primarily in the pulp and paper industry, where it is already widespread, and in food, beverages and tobacco, where it replaces natural gas. Industries which require high temperatures (above 500 °C), such as metals, chemicals and non-metallic minerals are either electrified where suitable processes already exist, or the heat is provided with synthetic methane. +Where there are fossil and electrified alternatives for the same process (e.g. in glass manufacture or drying), we assume that the process is completely electrified. Current electricity demands (lighting, air compressors, motor drives, fans, pumps) will remain electric. Processes that require temperatures below 500 °C are supplied with solid biomass, since we assume that residues and wastes are not suitable for high-temperature applications. We see solid biomass use primarily in the pulp and paper industry, where it is already widespread, and in food, beverages and tobacco, where it replaces natural gas. Industries which require high temperatures (above 500 °C), such as metals, chemicals and non-metallic minerals are either electrified where suitable processes already exist, or the heat is provided with synthetic methane. -Hydrogen for high-temperature process heat is not part of the model currently. +Hydrogen for high-temperature process heat is not part of the model currently. -Where process heat is required, our approach depends on the necessary temperature. For example, due to the high share of high-temperature process heat demand (see `Naegler et al. `_ and `Rehfeldt el al. `_), we disregard geothermal and solar thermal energy as sources for process heat since they cannot attain high-temperature heat. +Where process heat is required, our approach depends on the necessary temperature. For example, due to the high share of high-temperature process heat demand (see `Naegler et al. `_ and `Rehfeldt el al. `_), we disregard geothermal and solar thermal energy as sources for process heat since they cannot attain high-temperature heat. -The following figure shows the final consumption of energy and non-energy feedstocks in industry today in comparison to the scenario in 2050 assumed in `Neumann et al `_. +The following figure shows the final consumption of energy and non-energy feedstocks in industry today in comparison to the scenario in 2050 assumed in `Neumann et al `_. .. image:: ../graphics/fec_industry_today_tomorrow.png The following figure shows the process emissions in industry today (top bar) and in 2050 without -carbon capture (bottom bar) assumed in `Neumann et al `_. +carbon capture (bottom bar) assumed in `Neumann et al `_. 
@@ -390,9 +390,9 @@ Inside each country the industrial demand is then distributed using the `Hotmaps **Iron and Steel** Two alternative routes are used today to manufacture steel in Europe. The primary route (integrated steelworks) represents 60% of steel production, while the secondary route (electric arc furnaces, EAF), represents the other 40% `(Lechtenböhmer et. al) `_. - + The primary route uses blast furnaces in which coke is used to reduce iron ore into molten iron, which is then converted into steel: - + .. math:: CO_2 + C \xrightarrow{} 2 CO @@ -408,13 +408,13 @@ The primary route uses blast furnaces in which coke is used to reduce iron ore i .. math:: FeO + CO \xrightarrow{} Fe + CO_2 - -The primary route of steelmaking implies large process emissions of 0.22 t :math:`_{CO_2}` /t of steel, amounting to 7% of global greenhouse gas emissions `(Vogl et. al) `_. - + +The primary route of steelmaking implies large process emissions of 0.22 t :math:`_{CO_2}` /t of steel, amounting to 7% of global greenhouse gas emissions `(Vogl et. al) `_. + In the secondary route, electric arc furnaces are used to melt scrap metal. This limits the :math:`CO_2` emissions to the burning of graphite electrodes `(Friedrichsen et. al) `_, and reduces process emissions to 0.03 t :math:`_{CO_2}` /t of steel. - + We assume that the primary route can be replaced by a third route in 2050, using direct reduced iron (DRI) and subsequent processing in an EAF. - + .. math:: 3 Fe_2O_3 + H_2 \xrightarrow{} 2 Fe_3O_4 + H_2O @@ -427,25 +427,25 @@ We assume that the primary route can be replaced by a third route in 2050, using FeO + H_2 \xrightarrow{} Fe + H_2O -This circumvents the process emissions associated with the use of coke. For hydrogen- based DRI, we assume energy requirements of 1.7 MWh :math:`_{H_2}` /t steel `(Vogl et. al) `_ and 0.322 MWh :math:`_{el}`/t steel `(HYBRIT 2016) `_. - - +This circumvents the process emissions associated with the use of coke. For hydrogen- based DRI, we assume energy requirements of 1.7 MWh :math:`_{H_2}` /t steel `(Vogl et. al) `_ and 0.322 MWh :math:`_{el}`/t steel `(HYBRIT 2016) `_. + + The share of steel produced via the primary route is exogenously set in the `config file `_. The share of steel obtained via hydrogen-based DRI plus EAF is also set exogenously in the `config file `_. The remaining share is manufactured through the secondary route using scrap metal in EAF. Bioenergy as alternative to coke in blast furnaces is not considered in the model (`Mandova et.al `_, `Suopajärvi et.al `_). - -For the remaining subprocesses in this sector, the following transformations are assumed. Methane is used as energy source for the smelting process. Activities associated with furnaces, refining and rolling, and product finishing are electrified assuming the current efficiency values for these cases. These transformations result in changes in process emissions as outlined in the process emissions figure presented in the industry overview section (see :ref:`Overview`). + +For the remaining subprocesses in this sector, the following transformations are assumed. Methane is used as energy source for the smelting process. Activities associated with furnaces, refining and rolling, and product finishing are electrified assuming the current efficiency values for these cases. These transformations result in changes in process emissions as outlined in the process emissions figure presented in the industry overview section (see :ref:`Overview`). .. 
.. _Chemicals Industry:

**Chemicals Industry**

The chemicals industry includes a wide range of diverse industries, including the production of basic organic compounds (olefins, alcohols, aromatics), basic inorganic compounds (ammonia, chlorine), polymers (plastics), and end-user products (cosmetics, pharmaceutics).
-
+
The chemicals industry consumes large amounts of fossil-fuel-based feedstocks (see `Levi et al. `_), which can also be produced from renewables as outlined for hydrogen (see :ref:`Hydrogen supply`), for methane (see :ref:`Methane supply`), and for oil-based products (see :ref:`Oil-based products supply`). The ratio between synthetic and fossil-based fuels used in the industry is an endogenous result of the optimisation.
-
+
The basic chemicals consumption data from the `JRC IDEES `_ database comprises high-value chemicals (ethylene, propylene and BTX), chlorine, methanol and ammonia. However, it is necessary to separate out these chemicals because their current and future production routes are different.
-
+
Statistics for the production of ammonia, which is commonly used as a fertilizer, are taken from the `USGS `_ for every country. Ammonia can be made from hydrogen and nitrogen using the Haber-Bosch process:
-
+
.. math::

    N_2 + 3H_2 \xrightarrow{} 2NH_3
@@ -454,32 +454,32 @@ Statistics for the production of ammonia, which is commonly used as a fertilizer

The Haber-Bosch process is not explicitly represented in the model, such that demand for ammonia enters the model as a demand for hydrogen (6.5 MWh :math:`_{H_2}`/t :math:`_{NH_3}`) and electricity (1.17 MWh :math:`_{el}`/t :math:`_{NH_3}`) (see `Wang et al. `_). Today, natural gas dominates in Europe as the source for the hydrogen used in the Haber-Bosch process, but the model can choose among the various hydrogen supply options described in the hydrogen section (see :ref:`Hydrogen supply`).

The total production and specific energy consumption of chlorine and methanol are taken from a `DECHEMA report `_. According to this source, the production of chlorine amounts to 9.58 MtCl/a, which is assumed to require electricity at 3.6 MWh :math:`_{el}`/t of chlorine and yield hydrogen at 0.937 MWh :math:`_{H_2}`/t of chlorine in the chloralkali process. The production of methanol adds up to 1.5 MtMeOH/a, requiring electricity at 0.167 MWh :math:`_{el}`/t of methanol and methane at 10.25 MWh :math:`_{CH_4}`/t of methanol.
-
-
+
+
The production of ammonia, methanol, and chlorine is deducted from the JRC IDEES basic chemicals, leaving the production totals of high-value chemicals (HVC). For this, we assume that the liquid hydrocarbon feedstock comes from synthetic or fossil-origin naphtha (14 MWh :math:`_{naphtha}`/t of HVC, similar to `Lechtenböhmer et al. `_), ignoring the methanol-to-olefin route. Furthermore, we assume the following transformations of the energy-consuming processes in the production of plastics: the final energy consumption in steam processing is converted to methane, since it requires temperatures above 500 °C (4.1 MWh :math:`_{CH_4}`/t of HVC, see `Rehfeldt et al. `_); and the remaining processes are electrified using the current efficiency of microwave for high-enthalpy heat processing, electric furnaces, electric process cooling and electric generic processes (2.85 MWh :math:`_{el}`/t of HVC).
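Taken together, these specific consumptions can be applied to annual production volumes to obtain carrier demands. In the sketch below, the ammonia volume is a hypothetical placeholder (the model takes per-country values from the USGS statistics), while the chlorine and methanol totals are the DECHEMA figures quoted above:

.. code-block:: python

    # Sketch: specific energy consumption per tonne of basic chemical,
    # by carrier; a negative value denotes a by-product (chloralkali hydrogen).
    production = {"ammonia": 17.0e6, "chlorine": 9.58e6, "methanol": 1.5e6}  # t/a

    specific = {  # MWh per tonne of product
        "ammonia": {"hydrogen": 6.5, "electricity": 1.17},
        "chlorine": {"electricity": 3.6, "hydrogen": -0.937},
        "methanol": {"electricity": 0.167, "methane": 10.25},
    }

    demand = {}
    for product, carriers in specific.items():
        for carrier, mwh_per_t in carriers.items():
            demand[carrier] = demand.get(carrier, 0.0) + mwh_per_t * production[product]

    for carrier, mwh in sorted(demand.items()):
        print(f"{carrier}: {mwh / 1e6:.1f} TWh/a")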
-
-The process emissions from feedstock in the chemical industry are as high as 0.369 t :math:`_{CO_2}`/t of ethylene equivalent. We consider process emissions for all the material output, which is a conservative approach since it assumes that all plastic-embedded :math:`CO_2` will eventually be released into the atmosphere. However, plastic disposal in landfilling will avoid, or at least delay, associated :math:`CO_2` emissions.
-
-Circular economy practices drastically reduce the amount of primary feedstock needed for the production of plastics in the model (see `Kullmann et al. `_, `Meys et al. (2021) `_, `Meys et al. (2020) `_, `Gu et al. `_) and consequently, also the energy demands and level of process emission. The percentage of plastics that are assumed to be mechanically recycled can be selected in the `config file `_, as well as
-the percentage that is chemically recycled, see `config file `_ The energy consumption for those recycling processes are respectively 0.547 MWh :math:`_{el}`/t of HVC (as indicated in the `config file `_) (`Meys et al. (2020) `_), and 6.9 MWh :math:`_{el}`/t of HVC (as indicated in the `config file `_) based on pyrolysis and electric steam cracking (see `Materials Economics `_ report).
+
+The process emissions from feedstock in the chemical industry are as high as 0.369 t :math:`_{CO_2}`/t of ethylene equivalent. We consider process emissions for all the material output, which is a conservative approach since it assumes that all plastic-embedded :math:`CO_2` will eventually be released into the atmosphere. However, plastic disposal in landfills will avoid, or at least delay, the associated :math:`CO_2` emissions.
+
+Circular economy practices drastically reduce the amount of primary feedstock needed for the production of plastics in the model (see `Kullmann et al. `_, `Meys et al. (2021) `_, `Meys et al. (2020) `_, `Gu et al. `_) and, consequently, also the energy demands and the level of process emissions. The percentages of plastics that are assumed to be mechanically and chemically recycled can both be selected in the `config file `_. The energy consumption of these recycling processes is 0.547 MWh :math:`_{el}`/t of HVC for mechanical recycling (`Meys et al. (2020) `_) and 6.9 MWh :math:`_{el}`/t of HVC for chemical recycling, based on pyrolysis and electric steam cracking (see the `Materials Economics `_ report).
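How these recycling shares propagate to feedstock and electricity demand can be sketched as follows; the HVC production volume and the two shares are hypothetical placeholders for the config values:

.. code-block:: python

    # Sketch: primary naphtha feedstock and recycling electricity for HVC.
    hvc_production = 50e6  # t/a, hypothetical

    mechanical_share = 0.12  # placeholder for the config value
    chemical_share = 0.15    # placeholder for the config value
    primary_share = 1 - mechanical_share - chemical_share

    naphtha_demand = primary_share * hvc_production * 14  # MWh_naphtha/a
    recycling_el = hvc_production * (
        mechanical_share * 0.547 + chemical_share * 6.9
    )  # MWh_el/a

    print(f"naphtha: {naphtha_demand / 1e6:.0f} TWh/a")
    print(f"recycling electricity: {recycling_el / 1e6:.1f} TWh/a")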
**Non-metallic Mineral Products**

-This subsector includes the manufacturing of cement, ceramics, and glass.
+This subsector includes the manufacturing of cement, ceramics, and glass.

*Cement*

-Cement is used in construction to make concrete. The production of cement involves high energy consumption and large process emissions. The calcination of limestone to chemically reactive calcium oxide, also known as lime, involves process emissions of 0.54 t :math:`_{CO_2}` /t cement (see `Akhtar et al. `_.
+Cement is used in construction to make concrete. The production of cement involves high energy consumption and large process emissions. The calcination of limestone to chemically reactive calcium oxide, also known as lime, involves process emissions of 0.54 t :math:`_{CO_2}`/t cement (see `Akhtar et al. `_):

.. math::
-    CaCO_3 \xrightarrow{} CaO + CO_2
+    CaCO_3 \xrightarrow{} CaO + CO_2

Additionally, :math:`CO_2` is emitted from the combustion of fossil fuels to provide process heat. Thereby, cement constitutes the biggest source of industry process emissions in Europe.

-Cement process emissions can be captured assuming a capture rate of 90%. Whether emissions are captured is decided by the model taking into account the capital costs of carbon capture modules. The electricity and heat demand of process emission carbon capture is currently ignored. For net-zero emission scenarios, the remaining process emissions need to be compensated by negative emissions.
+Cement process emissions can be captured assuming a capture rate of 90%. Whether emissions are captured is decided by the model, taking into account the capital costs of carbon capture modules. The electricity and heat demand of process emission carbon capture is currently ignored. For net-zero emission scenarios, the remaining process emissions need to be compensated by negative emissions.

With the exception of electricity demand and biomass demand for low-temperature heat (0.06 MWh/t and 0.2 MWh/t), the final energy consumption of this subsector is assumed to be supplied by methane (0.52 MWh/t), which is capable of delivering the required high-temperature heat. This implies a switch from burning solid fuels to burning gas, which will require adjustments of the `kilns <https://doi.org/10.1109/CITCON.2013.6525276>`_. The share of fossil vs. synthetic methane consumed is a result of the optimisation.

@@ -495,21 +495,21 @@ The production of glass is assumed to be fully electrified based on the current

**Non-ferrous Metals**

-The non-ferrous metal subsector includes the manufacturing of base metals (aluminium, copper, lead, zinc), precious metals (gold, silver), and technology metals (molybdenum, cobalt, silicon).
+The non-ferrous metal subsector includes the manufacturing of base metals (aluminium, copper, lead, zinc), precious metals (gold, silver), and technology metals (molybdenum, cobalt, silicon).

-The manufacturing of aluminium accounts for more than half of the final energy consumption of this subsector. Two alternative processing routes are used today to manufacture aluminium in Europe. The primary route represents 40% of the aluminium pro- duction, while the secondary route represents the remaining 60%.
+The manufacturing of aluminium accounts for more than half of the final energy consumption of this subsector. Two alternative processing routes are used today to manufacture aluminium in Europe. The primary route represents 40% of the aluminium production, while the secondary route represents the remaining 60%.

The primary route involves two energy-intensive processes: the production of alumina from bauxite (aluminium ore) and the electrolysis to transform alumina into aluminium via the Hall-Héroult process:

.. math::
-    2Al_2O_3 +3C \xrightarrow{} 4Al+3CO_2
+    2 Al_2O_3 + 3 C \xrightarrow{} 4 Al + 3 CO_2

The primary route requires high-enthalpy heat (2.3 MWh/t) to produce alumina, which is supplied by methane and causes process emissions of 1.5 t :math:`_{CO_2}`/t aluminium. According to `Friedrichsen et al. `_, inert anodes might become commercially available by 2030, which would eliminate the process emissions, but they are not included in the model. Assuming all subprocesses are electrified, the primary route requires 15.4 MWh :math:`_{el}`/t of aluminium.
-
+
In the secondary route, scrap aluminium is remelted. The energy demand for this process is only 10% of the primary route and there are no associated process emissions. Assuming all subprocesses are electrified, the secondary route requires 1.7 MWh/t of aluminium.
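A small sketch of how the route split translates into carrier demands and process emissions; the production volume is hypothetical and the primary share is a placeholder for the config value:

.. code-block:: python

    # Sketch: aluminium demand by route, using the figures quoted above.
    al_production = 8e6  # t/a, hypothetical

    primary_share = 0.4  # placeholder for the config value
    secondary_share = 1 - primary_share

    el_demand = al_production * (primary_share * 15.4 + secondary_share * 1.7)
    ch4_demand = al_production * primary_share * 2.3   # high-enthalpy heat
    process_co2 = al_production * primary_share * 1.5  # t CO2/a

    print(f"electricity: {el_demand / 1e6:.1f} TWh/a")
    print(f"methane: {ch4_demand / 1e6:.1f} TWh/a")
    print(f"process emissions: {process_co2 / 1e6:.1f} Mt CO2/a")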
The share of aluminium manufactured by the primary and secondary route can be selected in the `config file `_.
-
-For the other non-ferrous metals, we assume the electrification of the entire manufacturing process with an average electricity demand of 3.2 MWh :math:`_{el}`/t lead equivalent.
+
+For the other non-ferrous metals, we assume the electrification of the entire manufacturing process with an average electricity demand of 3.2 MWh :math:`_{el}`/t lead equivalent.

**Other Industry Subsectors**

@@ -552,7 +552,7 @@ The share of all land transport that is specified to be be FCEV will be converte

FCEVs are typically used to simulate demand for transport that is hard to electrify directly, e.g. heavy construction machinery. But they may also be used to investigate a more widespread adoption of the technology.

-*Internal combustion engine vehicles (ICE)*
+*Internal combustion engine vehicles (ICE)*

All land transport that is not specified to be either BEV or FCEV will be treated as conventional ICEs. The transport demand is converted to a demand for oil products (see :ref:`Oil-based products supply`) using the `ICE efficiency `_.

@@ -584,13 +584,13 @@ PyPSA-Eur-Sec includes carbon capture from air (i.e., direct air capture (DAC)),

**Carbon dioxide capture**

-For the following point source emissions, carbon capture is applicable:
+For the following point source emissions, carbon capture is applicable:

• Industry process emissions, e.g., from limestone in cement production

• Methane or biomass used for process heat in the industry

-• Hydrogen production by SMR
+• Hydrogen production by SMR

• CHP plants using biomass or methane

@@ -599,12 +599,12 @@ For the following point source emissions, carbon capture is applicable:

Point source emissions are captured assuming a capture rate, e.g. 90%, which can be specified in the `config file `_. The electricity and heat demand of process emission carbon capture is currently ignored.

-DAC (if `included `_) includes the adsorption phase where electricity and heat consumptionsare required to assist the adsorption process and regenerate the adsorbent. It also includes the drying and compression of :math:`CO_2` prior to storage which consumes electricity and rejects heat.
+DAC (if `included `_) includes the adsorption phase, where electricity and heat consumptions are required to assist the adsorption process and regenerate the adsorbent. It also includes the drying and compression of :math:`CO_2` prior to storage, which consumes electricity and rejects heat.

*Carbon dioxide usage*

Captured :math:`CO_2` can be used to produce synthetic methane and synthetic oil products (e.g.
-naphtha). If captured carbon is used, the :math:`CO_2` emissions of the synthetic fuels are net-neutral.
+naphtha). If captured carbon is used, the :math:`CO_2` emissions of the synthetic fuels are net-neutral.

*Carbon dioxide sequestration*

@@ -612,7 +612,4 @@ Captured :math:`CO_2` can also be sequestered underground up to an annual seques

*Carbon dioxide transport*

-Carbon dioxide can be modelled as a single node for Europe (in this case, :math:`CO_2` transport constraints are neglected). A network for modelling the transport of :math:`CO_2` among the different nodes can also be created if selected in the `config file `_.
-
-
-
+Carbon dioxide can be modelled as a single node for Europe (in this case, :math:`CO_2` transport constraints are neglected). A network for modelling the transport of :math:`CO_2` among the different nodes can also be created if selected in the `config file `_.
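As a rough illustration of how a capture rate splits point-source emissions between the atmosphere and the captured-:math:`CO_2` store, consider the following sketch; the emission volumes are invented, and only the 90% capture rate mirrors the example value above:

.. code-block:: python

    # Sketch: apply a uniform capture rate to hypothetical point sources.
    point_sources = {  # Mt CO2/a, invented values
        "cement process emissions": 90.0,
        "SMR": 30.0,
        "biomass and gas CHPs": 40.0,
    }

    capture_rate = 0.9
    captured = sum(point_sources.values()) * capture_rate
    residual = sum(point_sources.values()) * (1 - capture_rate)

    print(f"captured: {captured:.0f} Mt/a (available for usage or sequestration)")
    print(f"released to atmosphere: {residual:.0f} Mt/a")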
diff --git a/matplotlibrc b/matplotlibrc index 57754c44..aa996c7f 100644 --- a/matplotlibrc +++ b/matplotlibrc @@ -1,3 +1,3 @@ font.family: sans-serif font.sans-serif: Ubuntu, DejaVu Sans -image.cmap: viridis \ No newline at end of file +image.cmap: viridis diff --git a/scripts/add_brownfield.py b/scripts/add_brownfield.py index c25e13bd..4bd13810 100644 --- a/scripts/add_brownfield.py +++ b/scripts/add_brownfield.py @@ -1,70 +1,70 @@ -# coding: utf-8 +# -*- coding: utf-8 -*- import logging + logger = logging.getLogger(__name__) import pandas as pd + idx = pd.IndexSlice +import numpy as np import pypsa import yaml -import numpy as np - from add_existing_baseyear import add_build_year_to_new_assets from helper import override_component_attrs, update_config_with_sector_opts def add_brownfield(n, n_p, year): - logger.info(f"Preparing brownfield for the year {year}") # electric transmission grid set optimised capacities of previous as minimum n.lines.s_nom_min = n_p.lines.s_nom_opt - dc_i = n.links[n.links.carrier=="DC"].index + dc_i = n.links[n.links.carrier == "DC"].index n.links.loc[dc_i, "p_nom_min"] = n_p.links.loc[dc_i, "p_nom_opt"] for c in n_p.iterate_components(["Link", "Generator", "Store"]): - attr = "e" if c.name == "Store" else "p" # first, remove generators, links and stores that track # CO2 or global EU values since these are already in n - n_p.mremove( - c.name, - c.df.index[c.df.lifetime==np.inf] - ) + n_p.mremove(c.name, c.df.index[c.df.lifetime == np.inf]) # remove assets whose build_year + lifetime < year - n_p.mremove( - c.name, - c.df.index[c.df.build_year + c.df.lifetime < year] - ) + n_p.mremove(c.name, c.df.index[c.df.build_year + c.df.lifetime < year]) # remove assets if their optimized nominal capacity is lower than a threshold # since CHP heat Link is proportional to CHP electric Link, make sure threshold is compatible - chp_heat = c.df.index[( - c.df[attr + "_nom_extendable"] - & c.df.index.str.contains("urban central") - & c.df.index.str.contains("CHP") - & c.df.index.str.contains("heat") - )] + chp_heat = c.df.index[ + ( + c.df[attr + "_nom_extendable"] + & c.df.index.str.contains("urban central") + & c.df.index.str.contains("CHP") + & c.df.index.str.contains("heat") + ) + ] - threshold = snakemake.config['existing_capacities']['threshold_capacity'] + threshold = snakemake.config["existing_capacities"]["threshold_capacity"] if not chp_heat.empty: - threshold_chp_heat = (threshold + threshold_chp_heat = ( + threshold * c.df.efficiency[chp_heat.str.replace("heat", "electric")].values * c.df.p_nom_ratio[chp_heat.str.replace("heat", "electric")].values / c.df.efficiency[chp_heat].values ) n_p.mremove( c.name, - chp_heat[c.df.loc[chp_heat, attr + "_nom_opt"] < threshold_chp_heat] + chp_heat[c.df.loc[chp_heat, attr + "_nom_opt"] < threshold_chp_heat], ) n_p.mremove( c.name, - c.df.index[c.df[attr + "_nom_extendable"] & ~c.df.index.isin(chp_heat) & (c.df[attr + "_nom_opt"] < threshold)] + c.df.index[ + c.df[attr + "_nom_extendable"] + & ~c.df.index.isin(chp_heat) + & (c.df[attr + "_nom_opt"] < threshold) + ], ) # copy over assets but fix their capacity @@ -74,56 +74,68 @@ def add_brownfield(n, n_p, year): n.import_components_from_dataframe(c.df, c.name) # copy time-dependent - selection = ( - n.component_attrs[c.name].type.str.contains("series") - & n.component_attrs[c.name].status.str.contains("Input") - ) + selection = n.component_attrs[c.name].type.str.contains( + "series" + ) & n.component_attrs[c.name].status.str.contains("Input") for tattr in 
n.component_attrs[c.name].index[selection]: n.import_series_from_dataframe(c.pnl[tattr], c.name, tattr) # deal with gas network - pipe_carrier = ['gas pipeline'] - if snakemake.config["sector"]['H2_retrofit']: + pipe_carrier = ["gas pipeline"] + if snakemake.config["sector"]["H2_retrofit"]: # drop capacities of previous year to avoid duplicating - to_drop = n.links.carrier.isin(pipe_carrier) & (n.links.build_year!=year) + to_drop = n.links.carrier.isin(pipe_carrier) & (n.links.build_year != year) n.mremove("Link", n.links.loc[to_drop].index) # subtract the already retrofitted from today's gas grid capacity - h2_retrofitted_fixed_i = n.links[(n.links.carrier=='H2 pipeline retrofitted') & (n.links.build_year!=year)].index - gas_pipes_i = n.links[n.links.carrier.isin(pipe_carrier)].index + h2_retrofitted_fixed_i = n.links[ + (n.links.carrier == "H2 pipeline retrofitted") + & (n.links.build_year != year) + ].index + gas_pipes_i = n.links[n.links.carrier.isin(pipe_carrier)].index CH4_per_H2 = 1 / snakemake.config["sector"]["H2_retrofit_capacity_per_CH4"] fr = "H2 pipeline retrofitted" to = "gas pipeline" # today's pipe capacity - pipe_capacity = n.links.loc[gas_pipes_i, 'p_nom'] + pipe_capacity = n.links.loc[gas_pipes_i, "p_nom"] # already retrofitted capacity from gas -> H2 - already_retrofitted = (n.links.loc[h2_retrofitted_fixed_i, 'p_nom'] - .rename(lambda x: x.split("-2")[0].replace(fr, to)).groupby(level=0).sum()) - remaining_capacity = pipe_capacity - CH4_per_H2 * already_retrofitted.reindex(index=pipe_capacity.index).fillna(0) + already_retrofitted = ( + n.links.loc[h2_retrofitted_fixed_i, "p_nom"] + .rename(lambda x: x.split("-2")[0].replace(fr, to)) + .groupby(level=0) + .sum() + ) + remaining_capacity = ( + pipe_capacity + - CH4_per_H2 + * already_retrofitted.reindex(index=pipe_capacity.index).fillna(0) + ) n.links.loc[gas_pipes_i, "p_nom"] = remaining_capacity else: - new_pipes = n.links.carrier.isin(pipe_carrier) & (n.links.build_year==year) - n.links.loc[new_pipes, "p_nom"] = 0. - n.links.loc[new_pipes, "p_nom_min"] = 0. 
+ new_pipes = n.links.carrier.isin(pipe_carrier) & ( + n.links.build_year == year + ) + n.links.loc[new_pipes, "p_nom"] = 0.0 + n.links.loc[new_pipes, "p_nom_min"] = 0.0 - -#%% +# %% if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from helper import mock_snakemake + snakemake = mock_snakemake( - 'add_brownfield', - simpl='', + "add_brownfield", + simpl="", clusters="37", opts="", lv=1.0, - sector_opts='168H-T-H-B-I-solar+p3-dist1', + sector_opts="168H-T-H-B-I-solar+p3-dist1", planning_horizons=2030, ) - logging.basicConfig(level=snakemake.config['logging_level']) - + logging.basicConfig(level=snakemake.config["logging_level"]) + update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts) logger.info(f"Preparing brownfield from the file {snakemake.input.network_p}") diff --git a/scripts/add_existing_baseyear.py b/scripts/add_existing_baseyear.py index 96a74144..41c0fada 100644 --- a/scripts/add_existing_baseyear.py +++ b/scripts/add_existing_baseyear.py @@ -1,23 +1,25 @@ -# coding: utf-8 +# -*- coding: utf-8 -*- import logging + logger = logging.getLogger(__name__) import pandas as pd + idx = pd.IndexSlice -import numpy as np -import xarray as xr - -import pypsa -import yaml - -from prepare_sector_network import prepare_costs, define_spatial, cluster_heat_buses -from helper import override_component_attrs, update_config_with_sector_opts - from types import SimpleNamespace + +import numpy as np +import pypsa +import xarray as xr +import yaml +from helper import override_component_attrs, update_config_with_sector_opts +from prepare_sector_network import cluster_heat_buses, define_spatial, prepare_costs + spatial = SimpleNamespace() + def add_build_year_to_new_assets(n, baseyear): """ Parameters @@ -29,8 +31,7 @@ def add_build_year_to_new_assets(n, baseyear): # Give assets with lifetimes and no build year the build year baseyear for c in n.iterate_components(["Link", "Generator", "Store"]): - - assets = c.df.index[(c.df.lifetime!=np.inf) & (c.df.build_year==0)] + assets = c.df.index[(c.df.lifetime != np.inf) & (c.df.build_year == 0)] c.df.loc[assets, "build_year"] = baseyear # add -baseyear to name @@ -39,40 +40,34 @@ def add_build_year_to_new_assets(n, baseyear): c.df.rename(index=rename, inplace=True) # rename time-dependent - selection = ( - n.component_attrs[c.name].type.str.contains("series") - & n.component_attrs[c.name].status.str.contains("Input") - ) + selection = n.component_attrs[c.name].type.str.contains( + "series" + ) & n.component_attrs[c.name].status.str.contains("Input") for attr in n.component_attrs[c.name].index[selection]: c.pnl[attr].rename(columns=rename, inplace=True) def add_existing_renewables(df_agg): """ - Append existing renewables to the df_agg pd.DataFrame - with the conventional power plants. + Append existing renewables to the df_agg pd.DataFrame with the conventional + power plants. """ cc = pd.read_csv(snakemake.input.country_codes, index_col=0) - carriers = { - "solar": "solar", - "onwind": "onwind", - "offwind": "offwind-ac" - } - - for tech in ['solar', 'onwind', 'offwind']: + carriers = {"solar": "solar", "onwind": "onwind", "offwind": "offwind-ac"} + for tech in ["solar", "onwind", "offwind"]: carrier = carriers[tech] - df = pd.read_csv(snakemake.input[f"existing_{tech}"], index_col=0).fillna(0.) 
+ df = pd.read_csv(snakemake.input[f"existing_{tech}"], index_col=0).fillna(0.0) df.columns = df.columns.astype(int) rename_countries = { - 'Czechia': 'Czech Republic', - 'UK': 'United Kingdom', - 'Bosnia Herzg': 'Bosnia Herzegovina', - 'North Macedonia': 'Macedonia' + "Czechia": "Czech Republic", + "UK": "United Kingdom", + "Bosnia Herzg": "Bosnia Herzegovina", + "North Macedonia": "Macedonia", } df.rename(index=rename_countries, inplace=True) @@ -80,16 +75,21 @@ def add_existing_renewables(df_agg): df.rename(index=cc["2 letter code (ISO-3166-2)"], inplace=True) # calculate yearly differences - df.insert(loc=0, value=.0, column='1999') - df = df.diff(axis=1).drop('1999', axis=1).clip(lower=0) + df.insert(loc=0, value=0.0, column="1999") + df = df.diff(axis=1).drop("1999", axis=1).clip(lower=0) # distribute capacities among nodes according to capacity factor # weighting with nodal_fraction - elec_buses = n.buses.index[n.buses.carrier == "AC"].union(n.buses.index[n.buses.carrier == "DC"]) - nodal_fraction = pd.Series(0., elec_buses) + elec_buses = n.buses.index[n.buses.carrier == "AC"].union( + n.buses.index[n.buses.carrier == "DC"] + ) + nodal_fraction = pd.Series(0.0, elec_buses) for country in n.buses.loc[elec_buses, "country"].unique(): - gens = n.generators.index[(n.generators.index.str[:2] == country) & (n.generators.carrier == carrier)] + gens = n.generators.index[ + (n.generators.index.str[:2] == country) + & (n.generators.carrier == carrier) + ] cfs = n.generators_t.p_max_pu[gens].mean() cfs_key = cfs / cfs.sum() nodal_fraction.loc[n.generators.loc[gens, "bus"]] = cfs_key.values @@ -102,7 +102,7 @@ def add_existing_renewables(df_agg): for node in nodal_df.index: name = f"{node}-{tech}-{year}" capacity = nodal_df.loc[node, year] - if capacity > 0.: + if capacity > 0.0: df_agg.at[name, "Fueltype"] = tech df_agg.at[name, "Capacity"] = capacity df_agg.at[name, "DateIn"] = year @@ -120,35 +120,34 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas to read lifetime to estimate YearDecomissioning baseyear : int """ - logger.debug(f"Adding power capacities installed before {baseyear} from powerplants.csv") + logger.debug( + f"Adding power capacities installed before {baseyear} from powerplants.csv" + ) df_agg = pd.read_csv(snakemake.input.powerplants, index_col=0) rename_fuel = { - 'Hard Coal': 'coal', - 'Lignite': 'lignite', - 'Nuclear': 'nuclear', - 'Oil': 'oil', - 'OCGT': 'OCGT', - 'CCGT': 'CCGT', - 'Natural Gas': 'gas', - 'Bioenergy': 'urban central solid biomass CHP', + "Hard Coal": "coal", + "Lignite": "lignite", + "Nuclear": "nuclear", + "Oil": "oil", + "OCGT": "OCGT", + "CCGT": "CCGT", + "Natural Gas": "gas", + "Bioenergy": "urban central solid biomass CHP", } fueltype_to_drop = [ - 'Hydro', - 'Wind', - 'Solar', - 'Geothermal', - 'Waste', - 'Other', - 'CCGT, Thermal' + "Hydro", + "Wind", + "Solar", + "Geothermal", + "Waste", + "Other", + "CCGT, Thermal", ] - technology_to_drop = [ - 'Pv', - 'Storage Technologies' - ] + technology_to_drop = ["Pv", "Storage Technologies"] # drop unused fueltyps and technologies df_agg.drop(df_agg.index[df_agg.Fueltype.isin(fueltype_to_drop)], inplace=True) @@ -157,16 +156,15 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas # Intermediate fix for DateIn & DateOut # Fill missing DateIn - biomass_i = df_agg.loc[df_agg.Fueltype=='urban central solid biomass CHP'].index - mean = df_agg.loc[biomass_i, 'DateIn'].mean() - df_agg.loc[biomass_i, 'DateIn'] = df_agg.loc[biomass_i, 
'DateIn'].fillna(int(mean)) + biomass_i = df_agg.loc[df_agg.Fueltype == "urban central solid biomass CHP"].index + mean = df_agg.loc[biomass_i, "DateIn"].mean() + df_agg.loc[biomass_i, "DateIn"] = df_agg.loc[biomass_i, "DateIn"].fillna(int(mean)) # Fill missing DateOut - dateout = df_agg.loc[biomass_i, 'DateIn'] + snakemake.config['costs']['lifetime'] - df_agg.loc[biomass_i, 'DateOut'] = df_agg.loc[biomass_i, 'DateOut'].fillna(dateout) - + dateout = df_agg.loc[biomass_i, "DateIn"] + snakemake.config["costs"]["lifetime"] + df_agg.loc[biomass_i, "DateOut"] = df_agg.loc[biomass_i, "DateOut"].fillna(dateout) # drop assets which are already phased out / decommissioned - phased_out = df_agg[df_agg["DateOut"] snakemake.config['existing_capacities']['threshold_capacity']] - suffix = '-ac' if generator == 'offwind' else '' - name_suffix = f' {generator}{suffix}-{grouping_year}' + capacity = capacity[ + capacity > snakemake.config["existing_capacities"]["threshold_capacity"] + ] + suffix = "-ac" if generator == "offwind" else "" + name_suffix = f" {generator}{suffix}-{grouping_year}" asset_i = capacity.index + name_suffix - if generator in ['solar', 'onwind', 'offwind']: - + if generator in ["solar", "onwind", "offwind"]: # to consider electricity grid connection costs or a split between # solar utility and rooftop as well, rather take cost assumptions # from existing network than from the cost database - capital_cost = n.generators.loc[n.generators.carrier==generator+suffix, "capital_cost"].mean() - marginal_cost = n.generators.loc[n.generators.carrier==generator+suffix, "marginal_cost"].mean() + capital_cost = n.generators.loc[ + n.generators.carrier == generator + suffix, "capital_cost" + ].mean() + marginal_cost = n.generators.loc[ + n.generators.carrier == generator + suffix, "marginal_cost" + ].mean() # check if assets are already in network (e.g. 
for 2020) already_build = n.generators.index.intersection(asset_i) new_build = asset_i.difference(n.generators.index) # this is for the year 2020 if not already_build.empty: - n.generators.loc[already_build, "p_nom_min"] = capacity.loc[already_build.str.replace(name_suffix, "")].values + n.generators.loc[already_build, "p_nom_min"] = capacity.loc[ + already_build.str.replace(name_suffix, "") + ].values new_capacity = capacity.loc[new_build.str.replace(name_suffix, "")] - if 'm' in snakemake.wildcards.clusters: - + if "m" in snakemake.wildcards.clusters: for ind in new_capacity.index: - # existing capacities are split evenly among regions in every country inv_ind = [i for i in inv_busmap[ind]] # for offshore the splitting only includes coastal regions - inv_ind = [i for i in inv_ind if (i + name_suffix) in n.generators.index] + inv_ind = [ + i for i in inv_ind if (i + name_suffix) in n.generators.index + ] - p_max_pu = n.generators_t.p_max_pu[[i + name_suffix for i in inv_ind]] - p_max_pu.columns=[i + name_suffix for i in inv_ind ] + p_max_pu = n.generators_t.p_max_pu[ + [i + name_suffix for i in inv_ind] + ] + p_max_pu.columns = [i + name_suffix for i in inv_ind] - n.madd("Generator", + n.madd( + "Generator", [i + name_suffix for i in inv_ind], bus=ind, carrier=generator, - p_nom=new_capacity[ind] / len(inv_ind), # split among regions in a country + p_nom=new_capacity[ind] + / len(inv_ind), # split among regions in a country marginal_cost=marginal_cost, capital_cost=capital_cost, - efficiency=costs.at[generator, 'efficiency'], + efficiency=costs.at[generator, "efficiency"], p_max_pu=p_max_pu, build_year=grouping_year, - lifetime=costs.at[generator,'lifetime'] + lifetime=costs.at[generator, "lifetime"], ) else: - - p_max_pu = n.generators_t.p_max_pu[capacity.index + f' {generator}{suffix}-{baseyear}'] + p_max_pu = n.generators_t.p_max_pu[ + capacity.index + f" {generator}{suffix}-{baseyear}" + ] if not new_build.empty: - n.madd("Generator", + n.madd( + "Generator", new_capacity.index, - suffix=' ' + name_suffix, + suffix=" " + name_suffix, bus=new_capacity.index, carrier=generator, p_nom=new_capacity, marginal_cost=marginal_cost, capital_cost=capital_cost, - efficiency=costs.at[generator, 'efficiency'], + efficiency=costs.at[generator, "efficiency"], p_max_pu=p_max_pu.rename(columns=n.generators.bus), build_year=grouping_year, - lifetime=costs.at[generator, 'lifetime'] + lifetime=costs.at[generator, "lifetime"], ) else: @@ -296,56 +304,79 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas already_build = n.links.index.intersection(asset_i) new_build = asset_i.difference(n.links.index) - lifetime_assets = lifetime.loc[grouping_year,generator].dropna() + lifetime_assets = lifetime.loc[grouping_year, generator].dropna() # this is for the year 2020 if not already_build.empty: - n.links.loc[already_build, "p_nom_min"] = capacity.loc[already_build.str.replace(name_suffix, "")].values + n.links.loc[already_build, "p_nom_min"] = capacity.loc[ + already_build.str.replace(name_suffix, "") + ].values if not new_build.empty: new_capacity = capacity.loc[new_build.str.replace(name_suffix, "")] - if generator!="urban central solid biomass CHP": - n.madd("Link", + if generator != "urban central solid biomass CHP": + n.madd( + "Link", new_capacity.index, - suffix= name_suffix, + suffix=name_suffix, bus0=bus0, bus1=new_capacity.index, bus2="co2 atmosphere", carrier=generator, - marginal_cost=costs.at[generator, 'efficiency'] * costs.at[generator, 'VOM'], #NB: VOM is per MWel 
- capital_cost=costs.at[generator, 'efficiency'] * costs.at[generator, 'fixed'], #NB: fixed cost is per MWel - p_nom=new_capacity / costs.at[generator, 'efficiency'], - efficiency=costs.at[generator, 'efficiency'], - efficiency2=costs.at[carrier[generator], 'CO2 intensity'], + marginal_cost=costs.at[generator, "efficiency"] + * costs.at[generator, "VOM"], # NB: VOM is per MWel + capital_cost=costs.at[generator, "efficiency"] + * costs.at[generator, "fixed"], # NB: fixed cost is per MWel + p_nom=new_capacity / costs.at[generator, "efficiency"], + efficiency=costs.at[generator, "efficiency"], + efficiency2=costs.at[carrier[generator], "CO2 intensity"], build_year=grouping_year, lifetime=lifetime_assets.loc[new_capacity.index], ) else: - key = 'central solid biomass CHP' - n.madd("Link", + key = "central solid biomass CHP" + n.madd( + "Link", new_capacity.index, - suffix= name_suffix, + suffix=name_suffix, bus0=spatial.biomass.df.loc[new_capacity.index]["nodes"].values, bus1=new_capacity.index, bus2=new_capacity.index + " urban central heat", carrier=generator, - p_nom=new_capacity / costs.at[key, 'efficiency'], - capital_cost=costs.at[key, 'fixed'] * costs.at[key, 'efficiency'], - marginal_cost=costs.at[key, 'VOM'], - efficiency=costs.at[key, 'efficiency'], + p_nom=new_capacity / costs.at[key, "efficiency"], + capital_cost=costs.at[key, "fixed"] + * costs.at[key, "efficiency"], + marginal_cost=costs.at[key, "VOM"], + efficiency=costs.at[key, "efficiency"], build_year=grouping_year, - efficiency2=costs.at[key, 'efficiency-heat'], - lifetime=lifetime_assets.loc[new_capacity.index] + efficiency2=costs.at[key, "efficiency-heat"], + lifetime=lifetime_assets.loc[new_capacity.index], ) # check if existing capacities are larger than technical potential - existing_large = n.generators[n.generators["p_nom_min"] > n.generators["p_nom_max"]].index + existing_large = n.generators[ + n.generators["p_nom_min"] > n.generators["p_nom_max"] + ].index if len(existing_large): - logger.warning(f"Existing capacities larger than technical potential for {existing_large},\ - adjust technical potential to existing capacities") - n.generators.loc[existing_large, "p_nom_max"] = n.generators.loc[existing_large, "p_nom_min"] + logger.warning( + f"Existing capacities larger than technical potential for {existing_large},\ + adjust technical potential to existing capacities" + ) + n.generators.loc[existing_large, "p_nom_max"] = n.generators.loc[ + existing_large, "p_nom_min" + ] -def add_heating_capacities_installed_before_baseyear(n, baseyear, grouping_years, ashp_cop, gshp_cop, time_dep_hp_cop, costs, default_lifetime): + +def add_heating_capacities_installed_before_baseyear( + n, + baseyear, + grouping_years, + ashp_cop, + gshp_cop, + time_dep_hp_cop, + costs, + default_lifetime, +): """ Parameters ---------- @@ -368,20 +399,20 @@ def add_heating_capacities_installed_before_baseyear(n, baseyear, grouping_years # retrieve existing heating capacities techs = [ - 'gas boiler', - 'oil boiler', - 'resistive heater', - 'air heat pump', - 'ground heat pump' + "gas boiler", + "oil boiler", + "resistive heater", + "air heat pump", + "ground heat pump", ] df = pd.read_csv(snakemake.input.existing_heating, index_col=0, header=0) # data for Albania, Montenegro and Macedonia not included in database - df.loc['Albania'] = np.nan - df.loc['Montenegro'] = np.nan - df.loc['Macedonia'] = np.nan + df.loc["Albania"] = np.nan + df.loc["Montenegro"] = np.nan + df.loc["Macedonia"] = np.nan - df.fillna(0., inplace=True) + df.fillna(0.0, 
inplace=True) # convert GW to MW df *= 1e3 @@ -391,8 +422,8 @@ def add_heating_capacities_installed_before_baseyear(n, baseyear, grouping_years df.rename(index=cc["2 letter code (ISO-3166-2)"], inplace=True) # coal and oil boilers are assimilated to oil boilers - df['oil boiler'] = df['oil boiler'] + df['coal boiler'] - df.drop(['coal boiler'], axis=1, inplace=True) + df["oil boiler"] = df["oil boiler"] + df["coal boiler"] + df.drop(["coal boiler"], axis=1, inplace=True) # distribute technologies to nodes by population pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) @@ -403,36 +434,54 @@ def add_heating_capacities_installed_before_baseyear(n, baseyear, grouping_years # split existing capacities between residential and services # proportional to energy demand - ratio_residential=pd.Series([(n.loads_t.p_set.sum()['{} residential rural heat'.format(node)] / - (n.loads_t.p_set.sum()['{} residential rural heat'.format(node)] + - n.loads_t.p_set.sum()['{} services rural heat'.format(node)] )) - for node in nodal_df.index], index=nodal_df.index) + ratio_residential = pd.Series( + [ + ( + n.loads_t.p_set.sum()["{} residential rural heat".format(node)] + / ( + n.loads_t.p_set.sum()["{} residential rural heat".format(node)] + + n.loads_t.p_set.sum()["{} services rural heat".format(node)] + ) + ) + for node in nodal_df.index + ], + index=nodal_df.index, + ) for tech in techs: - nodal_df['residential ' + tech] = nodal_df[tech] * ratio_residential - nodal_df['services ' + tech] = nodal_df[tech] * (1 - ratio_residential) + nodal_df["residential " + tech] = nodal_df[tech] * ratio_residential + nodal_df["services " + tech] = nodal_df[tech] * (1 - ratio_residential) names = [ "residential rural", "services rural", "residential urban decentral", "services urban decentral", - "urban central" + "urban central", ] nodes = {} p_nom = {} for name in names: - name_type = "central" if name == "urban central" else "decentral" - nodes[name] = pd.Index([n.buses.at[index, "location"] for index in n.buses.index[n.buses.index.str.contains(name) & n.buses.index.str.contains('heat')]]) + nodes[name] = pd.Index( + [ + n.buses.at[index, "location"] + for index in n.buses.index[ + n.buses.index.str.contains(name) + & n.buses.index.str.contains("heat") + ] + ] + ) heat_pump_type = "air" if "urban" in name else "ground" - heat_type= "residential" if "residential" in name else "services" + heat_type = "residential" if "residential" in name else "services" if name == "urban central": - p_nom[name] = nodal_df['air heat pump'][nodes[name]] + p_nom[name] = nodal_df["air heat pump"][nodes[name]] else: - p_nom[name] = nodal_df[f'{heat_type} {heat_pump_type} heat pump'][nodes[name]] + p_nom[name] = nodal_df[f"{heat_type} {heat_pump_type} heat pump"][ + nodes[name] + ] # Add heat pumps costs_name = f"decentral {heat_pump_type}-sourced heat pump" @@ -442,131 +491,182 @@ def add_heating_capacities_installed_before_baseyear(n, baseyear, grouping_years if time_dep_hp_cop: efficiency = cop[heat_pump_type][nodes[name]] else: - efficiency = costs.at[costs_name, 'efficiency'] + efficiency = costs.at[costs_name, "efficiency"] for i, grouping_year in enumerate(grouping_years): - if int(grouping_year) + default_lifetime <= int(baseyear): continue # installation is assumed to be linear for the past 25 years (default lifetime) - ratio = (int(grouping_year) - int(grouping_years[i-1])) / default_lifetime + ratio = (int(grouping_year) - int(grouping_years[i - 1])) / default_lifetime - n.madd("Link", + n.madd( + "Link", 
nodes[name], suffix=f" {name} {heat_pump_type} heat pump-{grouping_year}", bus0=nodes[name], bus1=nodes[name] + " " + name + " heat", carrier=f"{name} {heat_pump_type} heat pump", efficiency=efficiency, - capital_cost=costs.at[costs_name, 'efficiency'] * costs.at[costs_name, 'fixed'], - p_nom=p_nom[name] * ratio / costs.at[costs_name, 'efficiency'], + capital_cost=costs.at[costs_name, "efficiency"] + * costs.at[costs_name, "fixed"], + p_nom=p_nom[name] * ratio / costs.at[costs_name, "efficiency"], build_year=int(grouping_year), - lifetime=costs.at[costs_name, 'lifetime'] + lifetime=costs.at[costs_name, "lifetime"], ) # add resistive heater, gas boilers and oil boilers # (50% capacities to rural buses, 50% to urban buses) - n.madd("Link", + n.madd( + "Link", nodes[name], suffix=f" {name} resistive heater-{grouping_year}", bus0=nodes[name], bus1=nodes[name] + " " + name + " heat", carrier=name + " resistive heater", - efficiency=costs.at[name_type + ' resistive heater', 'efficiency'], - capital_cost=costs.at[name_type + ' resistive heater', 'efficiency'] * costs.at[name_type + ' resistive heater', 'fixed'], - p_nom=0.5 * nodal_df[f'{heat_type} resistive heater'][nodes[name]] * ratio / costs.at[name_type + ' resistive heater', 'efficiency'], + efficiency=costs.at[name_type + " resistive heater", "efficiency"], + capital_cost=costs.at[name_type + " resistive heater", "efficiency"] + * costs.at[name_type + " resistive heater", "fixed"], + p_nom=0.5 + * nodal_df[f"{heat_type} resistive heater"][nodes[name]] + * ratio + / costs.at[name_type + " resistive heater", "efficiency"], build_year=int(grouping_year), - lifetime=costs.at[costs_name, 'lifetime'] + lifetime=costs.at[costs_name, "lifetime"], ) - - n.madd("Link", + n.madd( + "Link", nodes[name], - suffix= f" {name} gas boiler-{grouping_year}", + suffix=f" {name} gas boiler-{grouping_year}", bus0=spatial.gas.nodes, bus1=nodes[name] + " " + name + " heat", bus2="co2 atmosphere", carrier=name + " gas boiler", - efficiency=costs.at[name_type + ' gas boiler', 'efficiency'], - efficiency2=costs.at['gas', 'CO2 intensity'], - capital_cost=costs.at[name_type + ' gas boiler', 'efficiency'] * costs.at[name_type + ' gas boiler', 'fixed'], - p_nom=0.5*nodal_df[f'{heat_type} gas boiler'][nodes[name]] * ratio / costs.at[name_type + ' gas boiler', 'efficiency'], + efficiency=costs.at[name_type + " gas boiler", "efficiency"], + efficiency2=costs.at["gas", "CO2 intensity"], + capital_cost=costs.at[name_type + " gas boiler", "efficiency"] + * costs.at[name_type + " gas boiler", "fixed"], + p_nom=0.5 + * nodal_df[f"{heat_type} gas boiler"][nodes[name]] + * ratio + / costs.at[name_type + " gas boiler", "efficiency"], build_year=int(grouping_year), - lifetime=costs.at[name_type + ' gas boiler', 'lifetime'] + lifetime=costs.at[name_type + " gas boiler", "lifetime"], ) - n.madd("Link", + n.madd( + "Link", nodes[name], suffix=f" {name} oil boiler-{grouping_year}", bus0=spatial.oil.nodes, bus1=nodes[name] + " " + name + " heat", bus2="co2 atmosphere", carrier=name + " oil boiler", - efficiency=costs.at['decentral oil boiler', 'efficiency'], - efficiency2=costs.at['oil', 'CO2 intensity'], - capital_cost=costs.at['decentral oil boiler', 'efficiency'] * costs.at['decentral oil boiler', 'fixed'], - p_nom=0.5 * nodal_df[f'{heat_type} oil boiler'][nodes[name]] * ratio / costs.at['decentral oil boiler', 'efficiency'], + efficiency=costs.at["decentral oil boiler", "efficiency"], + efficiency2=costs.at["oil", "CO2 intensity"], + capital_cost=costs.at["decentral oil boiler", 
"efficiency"] + * costs.at["decentral oil boiler", "fixed"], + p_nom=0.5 + * nodal_df[f"{heat_type} oil boiler"][nodes[name]] + * ratio + / costs.at["decentral oil boiler", "efficiency"], build_year=int(grouping_year), - lifetime=costs.at[name_type + ' gas boiler', 'lifetime'] + lifetime=costs.at[name_type + " gas boiler", "lifetime"], ) # delete links with p_nom=nan corresponding to extra nodes in country - n.mremove("Link", [index for index in n.links.index.to_list() if str(grouping_year) in index and np.isnan(n.links.p_nom[index])]) + n.mremove( + "Link", + [ + index + for index in n.links.index.to_list() + if str(grouping_year) in index and np.isnan(n.links.p_nom[index]) + ], + ) # delete links with capacities below threshold - threshold = snakemake.config['existing_capacities']['threshold_capacity'] - n.mremove("Link", [index for index in n.links.index.to_list() if str(grouping_year) in index and n.links.p_nom[index] < threshold]) + threshold = snakemake.config["existing_capacities"]["threshold_capacity"] + n.mremove( + "Link", + [ + index + for index in n.links.index.to_list() + if str(grouping_year) in index and n.links.p_nom[index] < threshold + ], + ) -#%% + +# %% if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from helper import mock_snakemake + snakemake = mock_snakemake( - 'add_existing_baseyear', - simpl='', + "add_existing_baseyear", + simpl="", clusters="45", lv=1.0, - opts='', - sector_opts='8760H-T-H-B-I-A-solar+p3-dist1', + opts="", + sector_opts="8760H-T-H-B-I-A-solar+p3-dist1", planning_horizons=2020, ) - logging.basicConfig(level=snakemake.config['logging_level']) + logging.basicConfig(level=snakemake.config["logging_level"]) update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts) options = snakemake.config["sector"] - opts = snakemake.wildcards.sector_opts.split('-') + opts = snakemake.wildcards.sector_opts.split("-") - baseyear = snakemake.config['scenario']["planning_horizons"][0] + baseyear = snakemake.config["scenario"]["planning_horizons"][0] overrides = override_component_attrs(snakemake.input.overrides) n = pypsa.Network(snakemake.input.network, override_component_attrs=overrides) # define spatial resolution of carriers - spatial = define_spatial(n.buses[n.buses.carrier=="AC"].index, options) + spatial = define_spatial(n.buses[n.buses.carrier == "AC"].index, options) add_build_year_to_new_assets(n, baseyear) - Nyears = n.snapshot_weightings.generators.sum() / 8760. 
+ Nyears = n.snapshot_weightings.generators.sum() / 8760.0 costs = prepare_costs( snakemake.input.costs, - snakemake.config['costs']['USD2013_to_EUR2013'], - snakemake.config['costs']['discountrate'], + snakemake.config["costs"]["USD2013_to_EUR2013"], + snakemake.config["costs"]["discountrate"], Nyears, - snakemake.config['costs']['lifetime'] + snakemake.config["costs"]["lifetime"], ) - grouping_years_power = snakemake.config['existing_capacities']['grouping_years_power'] - grouping_years_heat = snakemake.config['existing_capacities']['grouping_years_heat'] - add_power_capacities_installed_before_baseyear(n, grouping_years_power, costs, baseyear) + grouping_years_power = snakemake.config["existing_capacities"][ + "grouping_years_power" + ] + grouping_years_heat = snakemake.config["existing_capacities"]["grouping_years_heat"] + add_power_capacities_installed_before_baseyear( + n, grouping_years_power, costs, baseyear + ) if "H" in opts: time_dep_hp_cop = options["time_dep_hp_cop"] - ashp_cop = xr.open_dataarray(snakemake.input.cop_air_total).to_pandas().reindex(index=n.snapshots) - gshp_cop = xr.open_dataarray(snakemake.input.cop_soil_total).to_pandas().reindex(index=n.snapshots) - default_lifetime = snakemake.config['costs']['lifetime'] - add_heating_capacities_installed_before_baseyear(n, baseyear, grouping_years_heat, - ashp_cop, gshp_cop, time_dep_hp_cop, costs, default_lifetime) + ashp_cop = ( + xr.open_dataarray(snakemake.input.cop_air_total) + .to_pandas() + .reindex(index=n.snapshots) + ) + gshp_cop = ( + xr.open_dataarray(snakemake.input.cop_soil_total) + .to_pandas() + .reindex(index=n.snapshots) + ) + default_lifetime = snakemake.config["costs"]["lifetime"] + add_heating_capacities_installed_before_baseyear( + n, + baseyear, + grouping_years_heat, + ashp_cop, + gshp_cop, + time_dep_hp_cop, + costs, + default_lifetime, + ) if options.get("cluster_heat_buses", False): cluster_heat_buses(n) diff --git a/scripts/build_ammonia_production.py b/scripts/build_ammonia_production.py index 7fac74ec..25e08a3e 100644 --- a/scripts/build_ammonia_production.py +++ b/scripts/build_ammonia_production.py @@ -1,4 +1,7 @@ -"""Build ammonia production.""" +# -*- coding: utf-8 -*- +""" +Build ammonia production. 
+""" import pandas as pd @@ -27,17 +30,20 @@ country_to_alpha2 = { "United Kingdom": "GB", } -if __name__ == '__main__': - if 'snakemake' not in globals(): +if __name__ == "__main__": + if "snakemake" not in globals(): from helper import mock_snakemake - snakemake = mock_snakemake('build_ammonia_production') - ammonia = pd.read_excel(snakemake.input.usgs, - sheet_name="T12", - skiprows=5, - header=0, - index_col=0, - skipfooter=19) + snakemake = mock_snakemake("build_ammonia_production") + + ammonia = pd.read_excel( + snakemake.input.usgs, + sheet_name="T12", + skiprows=5, + header=0, + index_col=0, + skipfooter=19, + ) ammonia.rename(country_to_alpha2, inplace=True) diff --git a/scripts/build_biomass_potentials.py b/scripts/build_biomass_potentials.py index d5e8139a..b22adbda 100644 --- a/scripts/build_biomass_potentials.py +++ b/scripts/build_biomass_potentials.py @@ -1,27 +1,29 @@ -import pandas as pd +# -*- coding: utf-8 -*- import geopandas as gpd +import pandas as pd def build_nuts_population_data(year=2013): - pop = pd.read_csv( snakemake.input.nuts3_population, - sep=r'\,| \t|\t', - engine='python', + sep=r"\,| \t|\t", + engine="python", na_values=[":"], - index_col=1 + index_col=1, )[str(year)] - + # only countries pop.drop("EU28", inplace=True) # mapping from Cantons to NUTS3 cantons = pd.read_csv(snakemake.input.swiss_cantons) cantons = cantons.set_index(cantons.HASC.str[3:]).NUTS - cantons = cantons.str.pad(5, side='right', fillchar='0') + cantons = cantons.str.pad(5, side="right", fillchar="0") # get population by NUTS3 - swiss = pd.read_excel(snakemake.input.swiss_population, skiprows=3, index_col=0).loc["Residents in 1000"] + swiss = pd.read_excel( + snakemake.input.swiss_population, skiprows=3, index_col=0 + ).loc["Residents in 1000"] swiss = swiss.rename(cantons).filter(like="CH") # aggregate also to higher order NUTS levels @@ -29,21 +31,21 @@ def build_nuts_population_data(year=2013): # merge Europe + Switzerland pop = pd.concat([pop, pd.concat(swiss)]).to_frame("total") - + # add missing manually pop["AL"] = 2893 pop["BA"] = 3871 pop["RS"] = 7210 - + pop["ct"] = pop.index.str[:2] - + return pop def enspreso_biomass_potentials(year=2020, scenario="ENS_Low"): """ Loads the JRC ENSPRESO biomass potentials. - + Parameters ---------- year : int @@ -51,7 +53,7 @@ def enspreso_biomass_potentials(year=2020, scenario="ENS_Low"): Can be {2010, 2020, 2030, 2040, 2050}. scenario : str The scenario. Can be {"ENS_Low", "ENS_Med", "ENS_High"}. 
- + Returns ------- pd.DataFrame @@ -64,13 +66,13 @@ def enspreso_biomass_potentials(year=2020, scenario="ENS_Low"): sheet_name="Glossary", usecols="B:D", skiprows=1, - index_col=0 + index_col=0, ) - + df = pd.read_excel( str(snakemake.input.enspreso_biomass), sheet_name="ENER - NUTS2 BioCom E", - usecols="A:H" + usecols="A:H", ) df["group"] = df["E-Comm"].map(glossary.group) @@ -81,9 +83,9 @@ def enspreso_biomass_potentials(year=2020, scenario="ENS_Low"): "NUST2": "NUTS2", } df.rename(columns=to_rename, inplace=True) - + # fill up with NUTS0 if NUTS2 is not given - df.NUTS2 = df.apply(lambda x: x.NUTS0 if x.NUTS2 == '-' else x.NUTS2, axis=1) + df.NUTS2 = df.apply(lambda x: x.NUTS0 if x.NUTS2 == "-" else x.NUTS2, axis=1) # convert PJ to TWh df.potential /= 3.6 @@ -92,32 +94,31 @@ def enspreso_biomass_potentials(year=2020, scenario="ENS_Low"): dff = df.query("Year == @year and Scenario == @scenario") bio = dff.groupby(["NUTS2", "commodity"]).potential.sum().unstack() - + # currently Serbia and Kosovo not split, so aggregate bio.loc["RS"] += bio.loc["XK"] bio.drop("XK", inplace=True) - + return bio -def disaggregate_nuts0(bio): +def disaggregate_nuts0(bio): """ - Some commodities are only given on NUTS0 level. - These are disaggregated here using the NUTS2 - population as distribution key. - + Some commodities are only given on NUTS0 level. These are disaggregated + here using the NUTS2 population as distribution key. + Parameters ---------- bio : pd.DataFrame from enspreso_biomass_potentials() - + Returns ------- pd.DataFrame """ - + pop = build_nuts_population_data() - + # get population in nuts2 pop_nuts2 = pop.loc[pop.index.str.len() == 4] by_country = pop_nuts2.total.groupby(pop_nuts2.ct).sum() @@ -130,7 +131,7 @@ def disaggregate_nuts0(bio): # update inplace bio.update(bio_nodal) - + return bio @@ -141,9 +142,11 @@ def build_nuts2_shapes(): - consistently name ME, MK """ - nuts2 = gpd.GeoDataFrame(gpd.read_file(snakemake.input.nuts2).set_index('id').geometry) + nuts2 = gpd.GeoDataFrame( + gpd.read_file(snakemake.input.nuts2).set_index("id").geometry + ) - countries = gpd.read_file(snakemake.input.country_shapes).set_index('name') + countries = gpd.read_file(snakemake.input.country_shapes).set_index("name") missing_iso2 = countries.index.intersection(["AL", "RS", "BA"]) missing = countries.loc[missing_iso2] @@ -153,14 +156,16 @@ def build_nuts2_shapes(): def area(gdf): - """Returns area of GeoDataFrame geometries in square kilometers.""" + """ + Returns area of GeoDataFrame geometries in square kilometers. + """ return gdf.to_crs(epsg=3035).area.div(1e6) def convert_nuts2_to_regions(bio_nuts2, regions): """ - Converts biomass potentials given in NUTS2 to PyPSA-Eur regions based on the - overlay of both GeoDataFrames in proportion to the area. + Converts biomass potentials given in NUTS2 to PyPSA-Eur regions based on + the overlay of both GeoDataFrames in proportion to the area. 
Parameters ---------- @@ -173,7 +178,7 @@ def convert_nuts2_to_regions(bio_nuts2, regions): ------- gpd.GeoDataFrame """ - + # calculate area of nuts2 regions bio_nuts2["area_nuts2"] = area(bio_nuts2) @@ -183,22 +188,25 @@ def convert_nuts2_to_regions(bio_nuts2, regions): overlay["share"] = area(overlay) / overlay["area_nuts2"] # multiply all nuts2-level values with share of nuts2 inside region - adjust_cols = overlay.columns.difference({"name", "area_nuts2", "geometry", "share"}) + adjust_cols = overlay.columns.difference( + {"name", "area_nuts2", "geometry", "share"} + ) overlay[adjust_cols] = overlay[adjust_cols].multiply(overlay["share"], axis=0) bio_regions = overlay.groupby("name").sum() bio_regions.drop(["area_nuts2", "share"], axis=1, inplace=True) - + return bio_regions if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from helper import mock_snakemake - snakemake = mock_snakemake('build_biomass_potentials', simpl='', clusters='5') - config = snakemake.config['biomass'] + snakemake = mock_snakemake("build_biomass_potentials", simpl="", clusters="5") + + config = snakemake.config["biomass"] year = config["year"] scenario = config["scenario"] @@ -219,7 +227,7 @@ if __name__ == "__main__": grouper = {v: k for k, vv in config["classes"].items() for v in vv} df = df.groupby(grouper, axis=1).sum() - df *= 1e6 # TWh/a to MWh/a + df *= 1e6 # TWh/a to MWh/a df.index.name = "MWh/a" df.to_csv(snakemake.output.biomass_potentials) diff --git a/scripts/build_biomass_transport_costs.py b/scripts/build_biomass_transport_costs.py index aaec215b..4820985e 100644 --- a/scripts/build_biomass_transport_costs.py +++ b/scripts/build_biomass_transport_costs.py @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- """ -Reads biomass transport costs for different countries of the JRC report +Reads biomass transport costs for different countries of the JRC report. "The JRC-EU-TIMES model. 
Bioenergy potentials @@ -18,29 +19,24 @@ import tabula as tbl ENERGY_CONTENT = 4.8 # unit MWh/t (wood pellets) -def get_countries(): - pandas_options = dict( - skiprows=range(6), - header=None, - index_col=0 - ) +def get_countries(): + pandas_options = dict(skiprows=range(6), header=None, index_col=0) return tbl.read_pdf( str(snakemake.input.transport_cost_data), pages="145", multiple_tables=False, - pandas_options=pandas_options + pandas_options=pandas_options, )[0].index def get_cost_per_tkm(page, countries): - pandas_options = dict( skiprows=range(6), header=0, - sep=' |,', - engine='python', + sep=" |,", + engine="python", index_col=False, ) @@ -48,16 +44,15 @@ def get_cost_per_tkm(page, countries): str(snakemake.input.transport_cost_data), pages=page, multiple_tables=False, - pandas_options=pandas_options + pandas_options=pandas_options, )[0] sc.index = countries sc.columns = sc.columns.str.replace("€", "EUR") - + return sc def build_biomass_transport_costs(): - countries = get_countries() sc1 = get_cost_per_tkm(146, countries) @@ -72,11 +67,7 @@ def build_biomass_transport_costs(): transport_costs.name = "EUR/km/MWh" # rename country names - to_rename = { - "UK": "GB", - "XK": "KO", - "EL": "GR" - } + to_rename = {"UK": "GB", "XK": "KO", "EL": "GR"} transport_costs.rename(to_rename, inplace=True) # add missing Norway with data from Sweden @@ -86,5 +77,4 @@ def build_biomass_transport_costs(): if __name__ == "__main__": - build_biomass_transport_costs() diff --git a/scripts/build_clustered_population_layouts.py b/scripts/build_clustered_population_layouts.py index 0ad1af99..d3ab7cfa 100644 --- a/scripts/build_clustered_population_layouts.py +++ b/scripts/build_clustered_population_layouts.py @@ -1,31 +1,38 @@ -"""Build clustered population layouts.""" +# -*- coding: utf-8 -*- +""" +Build clustered population layouts. +""" -import geopandas as gpd -import xarray as xr -import pandas as pd import atlite +import geopandas as gpd +import pandas as pd +import xarray as xr - -if __name__ == '__main__': - if 'snakemake' not in globals(): +if __name__ == "__main__": + if "snakemake" not in globals(): from helper import mock_snakemake + snakemake = mock_snakemake( - 'build_clustered_population_layouts', - simpl='', + "build_clustered_population_layouts", + simpl="", clusters=48, ) - cutout = atlite.Cutout(snakemake.config['atlite']['cutout']) + cutout = atlite.Cutout(snakemake.config["atlite"]["cutout"]) - clustered_regions = gpd.read_file( - snakemake.input.regions_onshore).set_index('name').buffer(0).squeeze() + clustered_regions = ( + gpd.read_file(snakemake.input.regions_onshore) + .set_index("name") + .buffer(0) + .squeeze() + ) I = cutout.indicatormatrix(clustered_regions) pop = {} for item in ["total", "urban", "rural"]: - pop_layout = xr.open_dataarray(snakemake.input[f'pop_layout_{item}']) - pop[item] = I.dot(pop_layout.stack(spatial=('y', 'x'))) + pop_layout = xr.open_dataarray(snakemake.input[f"pop_layout_{item}"]) + pop[item] = I.dot(pop_layout.stack(spatial=("y", "x"))) pop = pd.DataFrame(pop, index=clustered_regions.index) diff --git a/scripts/build_cop_profiles.py b/scripts/build_cop_profiles.py index 8b502fc4..f26bfca9 100644 --- a/scripts/build_cop_profiles.py +++ b/scripts/build_cop_profiles.py @@ -1,39 +1,41 @@ -"""Build COP time series for air- or ground-sourced heat pumps.""" +# -*- coding: utf-8 -*- +""" +Build COP time series for air- or ground-sourced heat pumps. 
+""" import xarray as xr -def coefficient_of_performance(delta_T, source='air'): +def coefficient_of_performance(delta_T, source="air"): """ COP is function of temp difference source to sink. + The quadratic regression is based on Staffell et al. (2012) https://doi.org/10.1039/C2EE22653G. """ - if source == 'air': + if source == "air": return 6.81 - 0.121 * delta_T + 0.000630 * delta_T**2 - elif source == 'soil': + elif source == "soil": return 8.77 - 0.150 * delta_T + 0.000734 * delta_T**2 else: raise NotImplementedError("'source' must be one of ['air', 'soil']") -if __name__ == '__main__': - if 'snakemake' not in globals(): +if __name__ == "__main__": + if "snakemake" not in globals(): from helper import mock_snakemake + snakemake = mock_snakemake( - 'build_cop_profiles', - simpl='', + "build_cop_profiles", + simpl="", clusters=48, ) for area in ["total", "urban", "rural"]: - for source in ["air", "soil"]: + source_T = xr.open_dataarray(snakemake.input[f"temp_{source}_{area}"]) - source_T = xr.open_dataarray( - snakemake.input[f"temp_{source}_{area}"]) - - delta_T = snakemake.config['sector']['heat_pump_sink_T'] - source_T + delta_T = snakemake.config["sector"]["heat_pump_sink_T"] - source_T cop = coefficient_of_performance(delta_T, source) diff --git a/scripts/build_energy_totals.py b/scripts/build_energy_totals.py index 48852728..338cc232 100644 --- a/scripts/build_energy_totals.py +++ b/scripts/build_energy_totals.py @@ -1,25 +1,31 @@ +# -*- coding: utf-8 -*- import logging + logger = logging.getLogger(__name__) -from functools import partial -from tqdm import tqdm -from helper import mute_print - import multiprocessing as mp -import pandas as pd +from functools import partial + import geopandas as gpd import numpy as np +import pandas as pd +from helper import mute_print +from tqdm import tqdm idx = pd.IndexSlice def cartesian(s1, s2): - """Cartesian product of two pd.Series""" + """ + Cartesian product of two pd.Series. + """ return pd.DataFrame(np.outer(s1, s2), index=s1.index, columns=s2.index) def reverse(dictionary): - """reverses a keys and values of a dictionary""" + """ + Reverses a keys and values of a dictionary. + """ return {v: k for k, v in dictionary.items()} @@ -122,7 +128,7 @@ to_ipcc = { "total energy": "1 - Energy", "industrial processes": "2 - Industrial Processes and Product Use", "agriculture": "3 - Agriculture", - "agriculture, forestry and fishing": '1.A.4.c - Agriculture/Forestry/Fishing', + "agriculture, forestry and fishing": "1.A.4.c - Agriculture/Forestry/Fishing", "LULUCF": "4 - Land Use, Land-Use Change and Forestry", "waste management": "5 - Waste management", "other": "6 - Other Sector", @@ -131,12 +137,15 @@ to_ipcc = { "total woL": "Total (without LULUCF)", } -def build_eurostat(input_eurostat, countries, report_year, year): - """Return multi-index for all countries' energy data in TWh/a.""" + +def build_eurostat(input_eurostat, countries, report_year, year): + """ + Return multi-index for all countries' energy data in TWh/a. 
+ """ filenames = { 2016: f"/{year}-Energy-Balances-June2016edition.xlsx", - 2017: f"/{year}-ENERGY-BALANCES-June2017edition.xlsx" + 2017: f"/{year}-ENERGY-BALANCES-June2017edition.xlsx", } with mute_print(): @@ -149,9 +158,11 @@ def build_eurostat(input_eurostat, countries, report_year, year): # sorted_index necessary for slicing lookup = eurostat_country_to_alpha2 - labelled_dfs = {lookup[df.columns[0]]: df - for df in dfs.values() - if lookup[df.columns[0]] in countries} + labelled_dfs = { + lookup[df.columns[0]]: df + for df in dfs.values() + if lookup[df.columns[0]] in countries + } df = pd.concat(labelled_dfs, sort=True).sort_index() # drop non-numeric and country columns @@ -167,11 +178,13 @@ def build_eurostat(input_eurostat, countries, report_year, year): def build_swiss(year): - """Return a pd.Series of Swiss energy data in TWh/a""" + """ + Return a pd.Series of Swiss energy data in TWh/a. + """ fn = snakemake.input.swiss - df = pd.read_csv(fn, index_col=[0,1]).loc["CH", str(year)] + df = pd.read_csv(fn, index_col=[0, 1]).loc["CH", str(year)] # convert PJ/a to TWh/a df /= 3.6 @@ -180,7 +193,6 @@ def build_swiss(year): def idees_per_country(ct, year): - base_dir = snakemake.input.idees ct_totals = {} @@ -220,7 +232,7 @@ def idees_per_country(ct, year): assert df.index[46] == "Derived heat" ct_totals["derived heat residential"] = df[46] - assert df.index[50] == 'Thermal uses' + assert df.index[50] == "Thermal uses" ct_totals["thermal uses residential"] = df[50] # services @@ -253,10 +265,9 @@ def idees_per_country(ct, year): assert df.index[49] == "Derived heat" ct_totals["derived heat services"] = df[49] - assert df.index[53] == 'Thermal uses' + assert df.index[53] == "Thermal uses" ct_totals["thermal uses services"] = df[53] - # agriculture, forestry and fishing start = "Detailed split of energy consumption (ktoe)" @@ -268,7 +279,7 @@ def idees_per_country(ct, year): "Lighting", "Ventilation", "Specific electricity uses", - "Pumping devices (electric)" + "Pumping devices (electric)", ] ct_totals["total agriculture electricity"] = df[rows].sum() @@ -352,7 +363,7 @@ def idees_per_country(ct, year): assert df.index[8] == "International - Intra-EU" assert df.index[9] == "International - Extra-EU" - ct_totals["total international aviation passenger"] = df[[8,9]].sum() + ct_totals["total international aviation passenger"] = df[[8, 9]].sum() assert df.index[11] == "Domestic and International - Intra-EU" ct_totals["total domestic aviation freight"] = df[11] @@ -360,11 +371,15 @@ def idees_per_country(ct, year): assert df.index[12] == "International - Extra-EU" ct_totals["total international aviation freight"] = df[12] - ct_totals["total domestic aviation"] = ct_totals["total domestic aviation freight"] \ - + ct_totals["total domestic aviation passenger"] + ct_totals["total domestic aviation"] = ( + ct_totals["total domestic aviation freight"] + + ct_totals["total domestic aviation passenger"] + ) - ct_totals["total international aviation"] = ct_totals["total international aviation freight"] \ - + ct_totals["total international aviation passenger"] + ct_totals["total international aviation"] = ( + ct_totals["total international aviation freight"] + + ct_totals["total international aviation passenger"] + ) df = pd.read_excel(fn_transport, "TrNavi_ene", index_col=0)[year] @@ -380,17 +395,19 @@ def idees_per_country(ct, year): def build_idees(countries, year): - nprocesses = snakemake.threads func = partial(idees_per_country, year=year) - tqdm_kwargs = dict(ascii=False, unit=' country', 
total=len(countries), - desc='Build from IDEES database') + tqdm_kwargs = dict( + ascii=False, + unit=" country", + total=len(countries), + desc="Build from IDEES database", + ) with mute_print(): with mp.Pool(processes=nprocesses) as pool: totals_list = list(tqdm(pool.imap(func, countries), **tqdm_kwargs)) - totals = pd.concat(totals_list, axis=1) # convert ktoe to TWh @@ -401,19 +418,17 @@ def build_idees(countries, year): totals.loc["passenger car efficiency"] *= 10 # district heating share - district_heat = totals.loc[["derived heat residential", - "derived heat services"]].sum() - total_heat = totals.loc[["thermal uses residential", - "thermal uses services"]].sum() + district_heat = totals.loc[ + ["derived heat residential", "derived heat services"] + ].sum() + total_heat = totals.loc[["thermal uses residential", "thermal uses services"]].sum() totals.loc["district heat share"] = district_heat.div(total_heat) return totals.T def build_energy_totals(countries, eurostat, swiss, idees): - - eurostat_fuels = {"electricity": "Electricity", - "total": "Total all products"} + eurostat_fuels = {"electricity": "Electricity", "total": "Total all products"} to_drop = ["passenger cars", "passenger car efficiency"] df = idees.reindex(countries).drop(to_drop, axis=1) @@ -439,36 +454,47 @@ def build_energy_totals(countries, eurostat, swiss, idees): uses = ["space", "cooking", "water"] for sector in ["residential", "services", "road", "rail"]: - eurostat_sector = sector.capitalize() # fuel use for fuel in ["electricity", "total"]: slicer = idx[to_fill, :, :, eurostat_sector] - fill_values = eurostat.loc[slicer, eurostat_fuels[fuel]].groupby(level=0).sum() + fill_values = ( + eurostat.loc[slicer, eurostat_fuels[fuel]].groupby(level=0).sum() + ) df.loc[to_fill, f"{fuel} {sector}"] = fill_values for sector in ["residential", "services"]: - # electric use for use in uses: fuel_use = df[f"electricity {sector} {use}"] fuel = df[f"electricity {sector}"] avg = fuel_use.div(fuel).mean() - logger.debug(f"{sector}: average fraction of electricity for {use} is {avg:.3f}") - df.loc[to_fill, f"electricity {sector} {use}"] = avg * df.loc[to_fill, f"electricity {sector}"] + logger.debug( + f"{sector}: average fraction of electricity for {use} is {avg:.3f}" + ) + df.loc[to_fill, f"electricity {sector} {use}"] = ( + avg * df.loc[to_fill, f"electricity {sector}"] + ) # non-electric use for use in uses: - nonelectric_use = df[f"total {sector} {use}"] - df[f"electricity {sector} {use}"] + nonelectric_use = ( + df[f"total {sector} {use}"] - df[f"electricity {sector} {use}"] + ) nonelectric = df[f"total {sector}"] - df[f"electricity {sector}"] avg = nonelectric_use.div(nonelectric).mean() - logger.debug(f"{sector}: average fraction of non-electric for {use} is {avg:.3f}") + logger.debug( + f"{sector}: average fraction of non-electric for {use} is {avg:.3f}" + ) electric_use = df.loc[to_fill, f"electricity {sector} {use}"] - nonelectric = df.loc[to_fill, f"total {sector}"] - df.loc[to_fill, f"electricity {sector}"] + nonelectric = ( + df.loc[to_fill, f"total {sector}"] + - df.loc[to_fill, f"electricity {sector}"] + ) df.loc[to_fill, f"total {sector} {use}"] = electric_use + avg * nonelectric # Fix Norway space and water heating fractions @@ -480,17 +506,25 @@ def build_energy_totals(countries, eurostat, swiss, idees): no_norway = df.drop("NO") for sector in ["residential", "services"]: - # assume non-electric is heating - nonelectric = df.loc["NO", f"total {sector}"] - df.loc["NO", f"electricity {sector}"] + nonelectric = 
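# Sketch (illustrative): the Pool/imap/tqdm pattern used by build_idees maps a
# per-country worker over all countries in parallel with a progress bar.
# Minimal toy version (worker and inputs are hypothetical):

import multiprocessing as mp
from tqdm import tqdm

def per_country(ct):
    return ct.lower()  # stand-in for the expensive per-country build

if __name__ == "__main__":
    countries = ["DE", "FR", "PL"]
    with mp.Pool(processes=2) as pool:
        totals = list(tqdm(pool.imap(per_country, countries),
                           total=len(countries), unit=" country"))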
( + df.loc["NO", f"total {sector}"] - df.loc["NO", f"electricity {sector}"] + ) total_heating = nonelectric / (1 - elec_fraction) for use in uses: - nonelectric_use = no_norway[f"total {sector} {use}"] - no_norway[f"electricity {sector} {use}"] - nonelectric = no_norway[f"total {sector}"] - no_norway[f"electricity {sector}"] + nonelectric_use = ( + no_norway[f"total {sector} {use}"] + - no_norway[f"electricity {sector} {use}"] + ) + nonelectric = ( + no_norway[f"total {sector}"] - no_norway[f"electricity {sector}"] + ) fraction = nonelectric_use.div(nonelectric).mean() df.loc["NO", f"total {sector} {use}"] = total_heating * fraction - df.loc["NO", f"electricity {sector} {use}"] = total_heating * fraction * elec_fraction + df.loc["NO", f"electricity {sector} {use}"] = ( + total_heating * fraction * elec_fraction + ) # Missing aviation @@ -517,10 +551,7 @@ def build_energy_totals(countries, eurostat, swiss, idees): f"{fuel} light duty road freight", ] if fuel == "total": - selection.extend([ - f"{fuel} two-wheel", - f"{fuel} heavy duty road freight" - ]) + selection.extend([f"{fuel} two-wheel", f"{fuel} heavy duty road freight"]) road = df[selection].sum() road_fraction = road / road.sum() fill_values = cartesian(df.loc[missing, f"{fuel} road"], road_fraction) @@ -544,33 +575,40 @@ def build_energy_totals(countries, eurostat, swiss, idees): ] aviation = df[selection].sum() aviation_fraction = aviation / aviation.sum() - fill_values = cartesian(df.loc[missing, f"total {destination} aviation"], aviation_fraction) + fill_values = cartesian( + df.loc[missing, f"total {destination} aviation"], aviation_fraction + ) df.loc[missing, aviation_fraction.index] = fill_values for purpose in ["passenger", "freight"]: - attrs = [f"total domestic aviation {purpose}", f"total international aviation {purpose}"] - df.loc[missing, f"total aviation {purpose}"] = df.loc[missing, attrs].sum(axis=1) + attrs = [ + f"total domestic aviation {purpose}", + f"total international aviation {purpose}", + ] + df.loc[missing, f"total aviation {purpose}"] = df.loc[missing, attrs].sum( + axis=1 + ) if "BA" in df.index: # fill missing data for BA (services and road energy data) # proportional to RS with ratio of total residential demand missing = df.loc["BA"] == 0.0 ratio = df.at["BA", "total residential"] / df.at["RS", "total residential"] - df.loc['BA', missing] = ratio * df.loc["RS", missing] + df.loc["BA", missing] = ratio * df.loc["RS", missing] # Missing district heating share - dh_share = pd.read_csv(snakemake.input.district_heat_share, - index_col=0, usecols=[0, 1]) + dh_share = pd.read_csv( + snakemake.input.district_heat_share, index_col=0, usecols=[0, 1] + ) # make conservative assumption and take minimum from both data sets - df["district heat share"] = (pd.concat([df["district heat share"], - dh_share.reindex(index=df.index)/100], - axis=1).min(axis=1)) + df["district heat share"] = pd.concat( + [df["district heat share"], dh_share.reindex(index=df.index) / 100], axis=1 + ).min(axis=1) return df def build_eea_co2(input_co2, year=1990, emissions_scope="CO2"): - # https://www.eea.europa.eu/data-and-maps/data/national-emissions-reported-to-the-unfccc-and-to-the-eu-greenhouse-gas-monitoring-mechanism-16 # downloaded 201228 (modified by EEA last on 201221) df = pd.read_csv(input_co2, encoding="latin-1", low_memory=False) @@ -589,7 +627,7 @@ def build_eea_co2(input_co2, year=1990, emissions_scope="CO2"): df.loc[slicer, "emissions"] .unstack("Sector_name") .rename(columns=reverse(to_ipcc)) - .droplevel([1,2]) + 
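# Sketch (illustrative): cartesian(), defined near the top of this script, is
# an outer product of two Series; above it splits a missing country's road or
# aviation total across sub-categories with a fixed fraction vector. Toy
# example with made-up numbers:

import numpy as np
import pandas as pd

totals = pd.Series({"BA": 10.0, "RS": 20.0})                   # TWh/a road
fractions = pd.Series({"light duty": 0.6, "heavy duty": 0.4})  # sums to 1
print(pd.DataFrame(np.outer(totals, fractions),
                   index=totals.index, columns=fractions.index))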
.droplevel([1, 2]) ) emissions.rename(index={"EUA": "EU28", "UK": "GB"}, inplace=True) @@ -604,13 +642,20 @@ def build_eea_co2(input_co2, year=1990, emissions_scope="CO2"): "international aviation", "domestic navigation", "international navigation", - "agriculture, forestry and fishing" + "agriculture, forestry and fishing", ] - emissions["industrial non-elec"] = emissions["total energy"] - emissions[to_subtract].sum(axis=1) + emissions["industrial non-elec"] = emissions["total energy"] - emissions[ + to_subtract + ].sum(axis=1) emissions["agriculture"] += emissions["agriculture, forestry and fishing"] - to_drop = ["total energy", "total wL", "total woL", "agriculture, forestry and fishing"] + to_drop = [ + "total energy", + "total wL", + "total woL", + "agriculture, forestry and fishing", + ] emissions.drop(columns=to_drop, inplace=True) # convert from Gg to Mt @@ -618,7 +663,6 @@ def build_eea_co2(input_co2, year=1990, emissions_scope="CO2"): def build_eurostat_co2(input_eurostat, countries, report_year, year=1990): - eurostat = build_eurostat(input_eurostat, countries, report_year, year) specific_emissions = pd.Series(index=eurostat.columns, dtype=float) @@ -637,13 +681,16 @@ def build_eurostat_co2(input_eurostat, countries, report_year, year=1990): def build_co2_totals(countries, eea_co2, eurostat_co2): - co2 = eea_co2.reindex(countries) for ct in countries.intersection(["BA", "RS", "AL", "ME", "MK"]): - mappings = { - "electricity": (ct, "+", "Conventional Thermal Power Stations", "of which From Coal"), + "electricity": ( + ct, + "+", + "Conventional Thermal Power Stations", + "of which From Coal", + ), "residential non-elec": (ct, "+", "+", "Residential"), "services non-elec": (ct, "+", "+", "Services"), "road non-elec": (ct, "+", "+", "Road"), @@ -655,7 +702,8 @@ def build_co2_totals(countries, eea_co2, eurostat_co2): # does not include industrial process emissions or fuel processing/refining "industrial non-elec": (ct, "+", "Industry"), # does not include non-energy emissions - "agriculture": (eurostat_co2.index.get_level_values(0) == ct) & eurostat_co2.index.isin(["Agriculture / Forestry", "Fishing"], level=3), + "agriculture": (eurostat_co2.index.get_level_values(0) == ct) + & eurostat_co2.index.isin(["Agriculture / Forestry", "Fishing"], level=3), } for i, mi in mappings.items(): @@ -665,7 +713,6 @@ def build_co2_totals(countries, eea_co2, eurostat_co2): def build_transport_data(countries, population, idees): - transport_data = pd.DataFrame(index=countries) # collect number of cars @@ -676,7 +723,9 @@ def build_transport_data(countries, population, idees): transport_data.at["CH", "number cars"] = 4.136e6 missing = transport_data.index[transport_data["number cars"].isna()] - logger.info(f"Missing data on cars from:\n{list(missing)}\nFilling gaps with averaged data.") + logger.info( + f"Missing data on cars from:\n{list(missing)}\nFilling gaps with averaged data." + ) cars_pp = transport_data["number cars"] / population transport_data.loc[missing, "number cars"] = cars_pp.mean() * population @@ -686,7 +735,9 @@ def build_transport_data(countries, population, idees): transport_data["average fuel efficiency"] = idees["passenger car efficiency"] missing = transport_data.index[transport_data["average fuel efficiency"].isna()] - logger.info(f"Missing data on fuel efficiency from:\n{list(missing)}\nFilling gapswith averaged data.") + logger.info( + f"Missing data on fuel efficiency from:\n{list(missing)}\nFilling gapswith averaged data." 
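# Sketch (illustrative): build_transport_data above fills missing national car
# fleets with the mean cars-per-person ratio scaled by population. Toy
# numbers ("XX" is a placeholder country):

import pandas as pd

population = pd.Series({"DE": 83e6, "XX": 2e6})
cars = pd.Series({"DE": 47e6, "XX": float("nan")})
cars_pp = cars / population
print(cars.fillna(cars_pp.mean() * population))  # XX -> ~1.13e6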
+ ) fill_values = transport_data["average fuel efficiency"].mean() transport_data.loc[missing, "average fuel efficiency"] = fill_values @@ -695,11 +746,12 @@ def build_transport_data(countries, population, idees): if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from helper import mock_snakemake - snakemake = mock_snakemake('build_energy_totals') - logging.basicConfig(level=snakemake.config['logging_level']) + snakemake = mock_snakemake("build_energy_totals") + + logging.basicConfig(level=snakemake.config["logging_level"]) config = snakemake.config["energy"] @@ -722,7 +774,9 @@ if __name__ == "__main__": base_year_emissions = config["base_emissions_year"] emissions_scope = snakemake.config["energy"]["emissions"] eea_co2 = build_eea_co2(snakemake.input.co2, base_year_emissions, emissions_scope) - eurostat_co2 = build_eurostat_co2(input_eurostat, countries, report_year, base_year_emissions) + eurostat_co2 = build_eurostat_co2( + input_eurostat, countries, report_year, base_year_emissions + ) co2 = build_co2_totals(countries, eea_co2, eurostat_co2) co2.to_csv(snakemake.output.co2_name) diff --git a/scripts/build_gas_input_locations.py b/scripts/build_gas_input_locations.py index 1e957fed..9b1328e4 100644 --- a/scripts/build_gas_input_locations.py +++ b/scripts/build_gas_input_locations.py @@ -1,15 +1,17 @@ +# -*- coding: utf-8 -*- """ -Build import locations for fossil gas from entry-points, LNG terminals and production sites. +Build import locations for fossil gas from entry-points, LNG terminals and +production sites. """ import logging + logger = logging.getLogger(__name__) -import pandas as pd import geopandas as gpd -from shapely import wkt - +import pandas as pd from cluster_gas_network import load_bus_regions +from shapely import wkt def read_scigrid_gas(fn): @@ -20,24 +22,25 @@ def read_scigrid_gas(fn): def build_gem_lng_data(lng_fn): - df = pd.read_excel(lng_fn[0], sheet_name='LNG terminals - data') + df = pd.read_excel(lng_fn[0], sheet_name="LNG terminals - data") df = df.set_index("ComboID") - remove_status = ['Cancelled'] - remove_country = ['Cyprus','Turkey'] - remove_terminal = ['Puerto de la Luz LNG Terminal', 'Gran Canaria LNG Terminal'] + remove_status = ["Cancelled"] + remove_country = ["Cyprus", "Turkey"] + remove_terminal = ["Puerto de la Luz LNG Terminal", "Gran Canaria LNG Terminal"] - df = df.query("Status != 'Cancelled' \ + df = df.query( + "Status != 'Cancelled' \ & Country != @remove_country \ & TerminalName != @remove_terminal \ - & CapacityInMtpa != '--'") + & CapacityInMtpa != '--'" + ) - geometry = gpd.points_from_xy(df['Longitude'], df['Latitude']) + geometry = gpd.points_from_xy(df["Longitude"], df["Latitude"]) return gpd.GeoDataFrame(df, geometry=geometry, crs="EPSG:4326") def build_gas_input_locations(lng_fn, entry_fn, prod_fn, countries): - # LNG terminals lng = build_gem_lng_data(lng_fn) @@ -45,21 +48,19 @@ def build_gas_input_locations(lng_fn, entry_fn, prod_fn, countries): entry = read_scigrid_gas(entry_fn) entry["from_country"] = entry.from_country.str.rstrip() entry = entry.loc[ - ~(entry.from_country.isin(countries) & entry.to_country.isin(countries)) & # only take non-EU entries - ~entry.name.str.contains("Tegelen") | # malformed datapoint - (entry.from_country == "NO") # entries from NO to GB + ~(entry.from_country.isin(countries) & entry.to_country.isin(countries)) + & ~entry.name.str.contains("Tegelen") # only take non-EU entries + | (entry.from_country == "NO") # malformed datapoint # entries from NO 
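# Sketch (illustrative; read_scigrid_gas is unchanged context in this hunk, so
# its body is assumed here): parsing WKT geometry strings into a GeoDataFrame
# with shapely/geopandas typically looks like this:

import geopandas as gpd
import pandas as pd
from shapely import wkt

df = pd.DataFrame({"name": ["pipe"], "geometry": ["POINT (4.9 52.4)"]})
df["geometry"] = df["geometry"].apply(wkt.loads)
gdf = gpd.GeoDataFrame(df, crs="EPSG:4326")
print(gdf.geometry.iloc[0].x)  # 4.9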
to GB ] # production sites inside the model scope prod = read_scigrid_gas(prod_fn) prod = prod.loc[ - (prod.geometry.y > 35) & - (prod.geometry.x < 30) & - (prod.country_code != "DE") + (prod.geometry.y > 35) & (prod.geometry.x < 30) & (prod.country_code != "DE") ] - mcm_per_day_to_mw = 437.5 # MCM/day to MWh/h - mtpa_to_mw = 1649.224 # mtpa to MWh/h + mcm_per_day_to_mw = 437.5 # MCM/day to MWh/h + mtpa_to_mw = 1649.224 # mtpa to MWh/h lng["p_nom"] = lng["CapacityInMtpa"] * mtpa_to_mw entry["p_nom"] = entry["max_cap_from_to_M_m3_per_d"] * mcm_per_day_to_mw prod["p_nom"] = prod["max_supply_M_m3_per_d"] * mcm_per_day_to_mw @@ -74,28 +75,29 @@ def build_gas_input_locations(lng_fn, entry_fn, prod_fn, countries): if __name__ == "__main__": - - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from helper import mock_snakemake + snakemake = mock_snakemake( - 'build_gas_input_locations', - simpl='', - clusters='37', + "build_gas_input_locations", + simpl="", + clusters="37", ) - logging.basicConfig(level=snakemake.config['logging_level']) + logging.basicConfig(level=snakemake.config["logging_level"]) regions = load_bus_regions( - snakemake.input.regions_onshore, - snakemake.input.regions_offshore + snakemake.input.regions_onshore, snakemake.input.regions_offshore ) # add a buffer to eastern countries because some # entry points are still in Russian or Ukrainian territory. - buffer = 9000 # meters - eastern_countries = ['FI', 'EE', 'LT', 'LV', 'PL', 'SK', 'HU', 'RO'] + buffer = 9000 # meters + eastern_countries = ["FI", "EE", "LT", "LV", "PL", "SK", "HU", "RO"] add_buffer_b = regions.index.str[:2].isin(eastern_countries) - regions.loc[add_buffer_b] = regions[add_buffer_b].to_crs(3035).buffer(buffer).to_crs(4326) + regions.loc[add_buffer_b] = ( + regions[add_buffer_b].to_crs(3035).buffer(buffer).to_crs(4326) + ) countries = regions.index.str[:2].unique().str.replace("GB", "UK") @@ -103,16 +105,18 @@ if __name__ == "__main__": snakemake.input.lng, snakemake.input.entry, snakemake.input.production, - countries + countries, ) - gas_input_nodes = gpd.sjoin(gas_input_locations, regions, how='left') + gas_input_nodes = gpd.sjoin(gas_input_locations, regions, how="left") gas_input_nodes.rename(columns={"index_right": "bus"}, inplace=True) - gas_input_nodes.to_file(snakemake.output.gas_input_nodes, driver='GeoJSON') + gas_input_nodes.to_file(snakemake.output.gas_input_nodes, driver="GeoJSON") - gas_input_nodes_s = gas_input_nodes.groupby(["bus", "type"])["p_nom"].sum().unstack() + gas_input_nodes_s = ( + gas_input_nodes.groupby(["bus", "type"])["p_nom"].sum().unstack() + ) gas_input_nodes_s.columns.name = "p_nom" - gas_input_nodes_s.to_csv(snakemake.output.gas_input_nodes_simplified) \ No newline at end of file + gas_input_nodes_s.to_csv(snakemake.output.gas_input_nodes_simplified) diff --git a/scripts/build_gas_network.py b/scripts/build_gas_network.py index 786b7f8d..4a052174 100644 --- a/scripts/build_gas_network.py +++ b/scripts/build_gas_network.py @@ -1,16 +1,22 @@ -"""Preprocess gas network based on data from bthe SciGRID Gas project (https://www.gas.scigrid.de/).""" +# -*- coding: utf-8 -*- +""" +Preprocess gas network based on data from bthe SciGRID Gas project +(https://www.gas.scigrid.de/). 
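# Worked check (not from the patch): the factor 437.5 MW per MCM/day above
# corresponds to a gas energy content of 10.5 kWh/m3, since
# 1e6 m3/day * 10.5 kWh/m3 / 24 h / 1000 = 437.5 MW.

print(1e6 * 10.5 / 24 / 1000)  # -> 437.5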
+""" import logging + logger = logging.getLogger(__name__) -import pandas as pd import geopandas as gpd -from shapely.geometry import Point +import pandas as pd from pypsa.geo import haversine_pts +from shapely.geometry import Point def diameter_to_capacity(pipe_diameter_mm): - """Calculate pipe capacity in MW based on diameter in mm. + """ + Calculate pipe capacity in MW based on diameter in mm. 20 inch (500 mm) 50 bar -> 1.5 GW CH4 pipe capacity (LHV) 24 inch (600 mm) 50 bar -> 5 GW CH4 pipe capacity (LHV) @@ -59,22 +65,31 @@ def prepare_dataset( length_factor=1.5, correction_threshold_length=4, correction_threshold_p_nom=8, - bidirectional_below=10 + bidirectional_below=10, ): - # extract start and end from LineString df["point0"] = df.geometry.apply(lambda x: Point(x.coords[0])) df["point1"] = df.geometry.apply(lambda x: Point(x.coords[-1])) - conversion_factor = 437.5 # MCM/day to MWh/h + conversion_factor = 437.5 # MCM/day to MWh/h df["p_nom"] = df.max_cap_M_m3_per_d * conversion_factor # for inferred diameters, assume 500 mm rather than 900 mm (more conservative) - df.loc[df.diameter_mm_method != 'raw', "diameter_mm"] = 500. + df.loc[df.diameter_mm_method != "raw", "diameter_mm"] = 500.0 - keep = ["name", "diameter_mm", "is_H_gas", "is_bothDirection", - "length_km", "p_nom", "max_pressure_bar", - "start_year", "point0", "point1", "geometry"] + keep = [ + "name", + "diameter_mm", + "is_H_gas", + "is_bothDirection", + "length_km", + "p_nom", + "max_pressure_bar", + "start_year", + "point0", + "point1", + "geometry", + ] to_rename = { "is_bothDirection": "bidirectional", "is_H_gas": "H_gas", @@ -96,40 +111,43 @@ def prepare_dataset( df["p_nom_diameter"] = df.diameter_mm.apply(diameter_to_capacity) ratio = df.p_nom / df.p_nom_diameter not_nordstream = df.max_pressure_bar < 220 - df.p_nom.update(df.p_nom_diameter.where( - (df.p_nom <= 500) | - ((ratio > correction_threshold_p_nom) & not_nordstream) | - ((ratio < 1 / correction_threshold_p_nom) & not_nordstream) - )) + df.p_nom.update( + df.p_nom_diameter.where( + (df.p_nom <= 500) + | ((ratio > correction_threshold_p_nom) & not_nordstream) + | ((ratio < 1 / correction_threshold_p_nom) & not_nordstream) + ) + ) # lines which have way too discrepant line lengths # get assigned haversine length * length factor df["length_haversine"] = df.apply( - lambda p: length_factor * haversine_pts( - [p.point0.x, p.point0.y], - [p.point1.x, p.point1.y] - ), axis=1 + lambda p: length_factor + * haversine_pts([p.point0.x, p.point0.y], [p.point1.x, p.point1.y]), + axis=1, ) ratio = df.eval("length / length_haversine") - df["length"].update(df.length_haversine.where( - (df["length"] < 20) | - (ratio > correction_threshold_length) | - (ratio < 1 / correction_threshold_length) - )) + df["length"].update( + df.length_haversine.where( + (df["length"] < 20) + | (ratio > correction_threshold_length) + | (ratio < 1 / correction_threshold_length) + ) + ) return df if __name__ == "__main__": - - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from helper import mock_snakemake - snakemake = mock_snakemake('build_gas_network') - logging.basicConfig(level=snakemake.config['logging_level']) + snakemake = mock_snakemake("build_gas_network") + + logging.basicConfig(level=snakemake.config["logging_level"]) gas_network = load_dataset(snakemake.input.gas_network) gas_network = prepare_dataset(gas_network) - gas_network.to_csv(snakemake.output.cleaned_gas_network) \ No newline at end of file + gas_network.to_csv(snakemake.output.cleaned_gas_network) diff 
--git a/scripts/build_heat_demand.py b/scripts/build_heat_demand.py index 1c49f80d..9f0ee609 100644 --- a/scripts/build_heat_demand.py +++ b/scripts/build_heat_demand.py @@ -1,18 +1,22 @@ -"""Build heat demand time series.""" +# -*- coding: utf-8 -*- +""" +Build heat demand time series. +""" -import geopandas as gpd import atlite +import geopandas as gpd +import numpy as np import pandas as pd import xarray as xr -import numpy as np from dask.distributed import Client, LocalCluster -if __name__ == '__main__': - if 'snakemake' not in globals(): +if __name__ == "__main__": + if "snakemake" not in globals(): from helper import mock_snakemake + snakemake = mock_snakemake( - 'build_heat_demands', - simpl='', + "build_heat_demands", + simpl="", clusters=48, ) @@ -20,23 +24,29 @@ if __name__ == '__main__': cluster = LocalCluster(n_workers=nprocesses, threads_per_worker=1) client = Client(cluster, asynchronous=True) - time = pd.date_range(freq='h', **snakemake.config['snapshots']) - cutout_config = snakemake.config['atlite']['cutout'] + time = pd.date_range(freq="h", **snakemake.config["snapshots"]) + cutout_config = snakemake.config["atlite"]["cutout"] cutout = atlite.Cutout(cutout_config).sel(time=time) - clustered_regions = gpd.read_file( - snakemake.input.regions_onshore).set_index('name').buffer(0).squeeze() + clustered_regions = ( + gpd.read_file(snakemake.input.regions_onshore) + .set_index("name") + .buffer(0) + .squeeze() + ) I = cutout.indicatormatrix(clustered_regions) pop_layout = xr.open_dataarray(snakemake.input.pop_layout) - stacked_pop = pop_layout.stack(spatial=('y', 'x')) + stacked_pop = pop_layout.stack(spatial=("y", "x")) M = I.T.dot(np.diag(I.dot(stacked_pop))) heat_demand = cutout.heat_demand( - matrix=M.T, index=clustered_regions.index, + matrix=M.T, + index=clustered_regions.index, dask_kwargs=dict(scheduler=client), - show_progress=False) + show_progress=False, + ) heat_demand.to_netcdf(snakemake.output.heat_demand) diff --git a/scripts/build_industrial_distribution_key.py b/scripts/build_industrial_distribution_key.py index b888e24f..12cffe43 100644 --- a/scripts/build_industrial_distribution_key.py +++ b/scripts/build_industrial_distribution_key.py @@ -1,40 +1,47 @@ -"""Build industrial distribution keys from hotmaps database.""" +# -*- coding: utf-8 -*- +""" +Build industrial distribution keys from hotmaps database. +""" import logging + logger = logging.getLogger(__name__) import uuid -import pandas as pd -import geopandas as gpd - from itertools import product + +import geopandas as gpd +import pandas as pd from packaging.version import Version, parse def locate_missing_industrial_sites(df): """ - Locate industrial sites without valid locations based on - city and countries. Should only be used if the model's - spatial resolution is coarser than individual cities. + Locate industrial sites without valid locations based on city and + countries. + + Should only be used if the model's spatial resolution is coarser + than individual cities. """ try: - from geopy.geocoders import Nominatim from geopy.extra.rate_limiter import RateLimiter + from geopy.geocoders import Nominatim except: - raise ModuleNotFoundError("Optional dependency 'geopy' not found." - "Install via 'conda install -c conda-forge geopy'" - "or set 'industry: hotmaps_locate_missing: false'.") + raise ModuleNotFoundError( + "Optional dependency 'geopy' not found." + "Install via 'conda install -c conda-forge geopy'" + "or set 'industry: hotmaps_locate_missing: false'." 
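# Sketch (illustrative): dimensional logic of the heat-demand aggregation
# above. With I mapping grid cells to regions and pop the per-cell population,
# M.T scales each region's indicator row by that region's total population
# before being passed to cutout.heat_demand as `matrix`. Toy shapes:

import numpy as np

I_mat = np.array([[1.0, 1.0, 0.0],   # 2 regions x 3 cells
                  [0.0, 0.0, 1.0]])
pop = np.array([10.0, 20.0, 5.0])    # population per cell
M = I_mat.T @ np.diag(I_mat @ pop)   # 3 cells x 2 regions
print(M.T)                           # region rows scaled by their population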
+ ) locator = Nominatim(user_agent=str(uuid.uuid4())) geocode = RateLimiter(locator.geocode, min_delay_seconds=2) def locate_missing(s): - if pd.isna(s.City) or s.City == "CONFIDENTIAL": return None - loc = geocode([s.City, s.Country], geometry='wkt') + loc = geocode([s.City, s.Country], geometry="wkt") if loc is not None: logger.debug(f"Found:\t{loc}\nFor:\t{s['City']}, {s['Country']}\n") return f"POINT({loc.longitude} {loc.latitude})" @@ -42,14 +49,16 @@ def locate_missing_industrial_sites(df): return None missing = df.index[df.geom.isna()] - df.loc[missing, 'coordinates'] = df.loc[missing].apply(locate_missing, axis=1) + df.loc[missing, "coordinates"] = df.loc[missing].apply(locate_missing, axis=1) # report stats num_still_missing = df.coordinates.isna().sum() num_found = len(missing) - num_still_missing share_missing = len(missing) / len(df) * 100 share_still_missing = num_still_missing / len(df) * 100 - logger.warning(f"Found {num_found} missing locations. \nShare of missing locations reduced from {share_missing:.2f}% to {share_still_missing:.2f}%.") + logger.warning( + f"Found {num_found} missing locations. \nShare of missing locations reduced from {share_missing:.2f}% to {share_still_missing:.2f}%." + ) return df @@ -61,19 +70,23 @@ def prepare_hotmaps_database(regions): df = pd.read_csv(snakemake.input.hotmaps_industrial_database, sep=";", index_col=0) - df[["srid", "coordinates"]] = df.geom.str.split(';', expand=True) + df[["srid", "coordinates"]] = df.geom.str.split(";", expand=True) - if snakemake.config['industry'].get('hotmaps_locate_missing', False): + if snakemake.config["industry"].get("hotmaps_locate_missing", False): df = locate_missing_industrial_sites(df) # remove those sites without valid locations df.drop(df.index[df.coordinates.isna()], inplace=True) - df['coordinates'] = gpd.GeoSeries.from_wkt(df['coordinates']) + df["coordinates"] = gpd.GeoSeries.from_wkt(df["coordinates"]) - gdf = gpd.GeoDataFrame(df, geometry='coordinates', crs="EPSG:4326") + gdf = gpd.GeoDataFrame(df, geometry="coordinates", crs="EPSG:4326") - kws = dict(op="within") if parse(gpd.__version__) < Version('0.10') else dict(predicate="within") + kws = ( + dict(op="within") + if parse(gpd.__version__) < Version("0.10") + else dict(predicate="within") + ) gdf = gpd.sjoin(gdf, regions, how="inner", **kws) gdf.rename(columns={"index_right": "bus"}, inplace=True) @@ -83,7 +96,9 @@ def prepare_hotmaps_database(regions): def build_nodal_distribution_key(hotmaps, regions): - """Build nodal distribution keys for each sector.""" + """ + Build nodal distribution keys for each sector. 
+ """ sectors = hotmaps.Subsector.unique() countries = regions.index.str[:2].unique() @@ -91,12 +106,11 @@ def build_nodal_distribution_key(hotmaps, regions): keys = pd.DataFrame(index=regions.index, columns=sectors, dtype=float) pop = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) - pop['country'] = pop.index.str[:2] - ct_total = pop.total.groupby(pop['country']).sum() - keys['population'] = pop.total / pop.country.map(ct_total) + pop["country"] = pop.index.str[:2] + ct_total = pop.total.groupby(pop["country"]).sum() + keys["population"] = pop.total / pop.country.map(ct_total) for sector, country in product(sectors, countries): - regions_ct = regions.index[regions.index.str.contains(country)] facilities = hotmaps.query("country == @country and Subsector == @sector") @@ -106,12 +120,12 @@ def build_nodal_distribution_key(hotmaps, regions): if emissions.sum() == 0: key = pd.Series(1 / len(facilities), facilities.index) else: - #BEWARE: this is a strong assumption + # BEWARE: this is a strong assumption emissions = emissions.fillna(emissions.mean()) key = emissions / emissions.sum() - key = key.groupby(facilities.bus).sum().reindex(regions_ct, fill_value=0.) + key = key.groupby(facilities.bus).sum().reindex(regions_ct, fill_value=0.0) else: - key = keys.loc[regions_ct, 'population'] + key = keys.loc[regions_ct, "population"] keys.loc[regions_ct, sector] = key @@ -119,17 +133,18 @@ def build_nodal_distribution_key(hotmaps, regions): if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from helper import mock_snakemake + snakemake = mock_snakemake( - 'build_industrial_distribution_key', - simpl='', + "build_industrial_distribution_key", + simpl="", clusters=48, ) - logging.basicConfig(level=snakemake.config['logging_level']) + logging.basicConfig(level=snakemake.config["logging_level"]) - regions = gpd.read_file(snakemake.input.regions_onshore).set_index('name') + regions = gpd.read_file(snakemake.input.regions_onshore).set_index("name") hotmaps = prepare_hotmaps_database(regions) diff --git a/scripts/build_industrial_energy_demand_per_country_today.py b/scripts/build_industrial_energy_demand_per_country_today.py index a3fbf466..3bf6ceee 100644 --- a/scripts/build_industrial_energy_demand_per_country_today.py +++ b/scripts/build_industrial_energy_demand_per_country_today.py @@ -1,84 +1,116 @@ -"""Build industrial energy demand per country.""" +# -*- coding: utf-8 -*- +""" +Build industrial energy demand per country. 
+""" + +import multiprocessing as mp import pandas as pd -import multiprocessing as mp from tqdm import tqdm ktoe_to_twh = 0.011630 # name in JRC-IDEES Energy Balances -sector_sheets = {'Integrated steelworks': 'cisb', - 'Electric arc': 'cise', - 'Alumina production': 'cnfa', - 'Aluminium - primary production': 'cnfp', - 'Aluminium - secondary production': 'cnfs', - 'Other non-ferrous metals': 'cnfo', - 'Basic chemicals': 'cbch', - 'Other chemicals': 'coch', - 'Pharmaceutical products etc.': 'cpha', - 'Basic chemicals feedstock': 'cpch', - 'Cement': 'ccem', - 'Ceramics & other NMM': 'ccer', - 'Glass production': 'cgla', - 'Pulp production': 'cpul', - 'Paper production': 'cpap', - 'Printing and media reproduction': 'cprp', - 'Food, beverages and tobacco': 'cfbt', - 'Transport Equipment': 'ctre', - 'Machinery Equipment': 'cmae', - 'Textiles and leather': 'ctel', - 'Wood and wood products': 'cwwp', - 'Mining and quarrying': 'cmiq', - 'Construction': 'ccon', - 'Non-specified': 'cnsi', - } +sector_sheets = { + "Integrated steelworks": "cisb", + "Electric arc": "cise", + "Alumina production": "cnfa", + "Aluminium - primary production": "cnfp", + "Aluminium - secondary production": "cnfs", + "Other non-ferrous metals": "cnfo", + "Basic chemicals": "cbch", + "Other chemicals": "coch", + "Pharmaceutical products etc.": "cpha", + "Basic chemicals feedstock": "cpch", + "Cement": "ccem", + "Ceramics & other NMM": "ccer", + "Glass production": "cgla", + "Pulp production": "cpul", + "Paper production": "cpap", + "Printing and media reproduction": "cprp", + "Food, beverages and tobacco": "cfbt", + "Transport Equipment": "ctre", + "Machinery Equipment": "cmae", + "Textiles and leather": "ctel", + "Wood and wood products": "cwwp", + "Mining and quarrying": "cmiq", + "Construction": "ccon", + "Non-specified": "cnsi", +} -fuels = {'All Products': 'all', - 'Solid Fuels': 'solid', - 'Total petroleum products (without biofuels)': 'liquid', - 'Gases': 'gas', - 'Nuclear heat': 'heat', - 'Derived heat': 'heat', - 'Biomass and Renewable wastes': 'biomass', - 'Wastes (non-renewable)': 'waste', - 'Electricity': 'electricity' - } +fuels = { + "All Products": "all", + "Solid Fuels": "solid", + "Total petroleum products (without biofuels)": "liquid", + "Gases": "gas", + "Nuclear heat": "heat", + "Derived heat": "heat", + "Biomass and Renewable wastes": "biomass", + "Wastes (non-renewable)": "waste", + "Electricity": "electricity", +} -eu28 = ['FR', 'DE', 'GB', 'IT', 'ES', 'PL', 'SE', 'NL', 'BE', 'FI', - 'DK', 'PT', 'RO', 'AT', 'BG', 'EE', 'GR', 'LV', 'CZ', - 'HU', 'IE', 'SK', 'LT', 'HR', 'LU', 'SI', 'CY', 'MT'] +eu28 = [ + "FR", + "DE", + "GB", + "IT", + "ES", + "PL", + "SE", + "NL", + "BE", + "FI", + "DK", + "PT", + "RO", + "AT", + "BG", + "EE", + "GR", + "LV", + "CZ", + "HU", + "IE", + "SK", + "LT", + "HR", + "LU", + "SI", + "CY", + "MT", +] jrc_names = {"GR": "EL", "GB": "UK"} def industrial_energy_demand_per_country(country): - jrc_dir = snakemake.input.jrc jrc_country = jrc_names.get(country, country) - fn = f'{jrc_dir}/JRC-IDEES-2015_EnergyBalance_{jrc_country}.xlsx' + fn = f"{jrc_dir}/JRC-IDEES-2015_EnergyBalance_{jrc_country}.xlsx" sheets = list(sector_sheets.values()) df_dict = pd.read_excel(fn, sheet_name=sheets, index_col=0) def get_subsector_data(sheet): - df = df_dict[sheet][year].groupby(fuels).sum() - df["ammonia"] = 0. 
+ df["ammonia"] = 0.0 - df['other'] = df['all'] - df.loc[df.index != 'all'].sum() + df["other"] = df["all"] - df.loc[df.index != "all"].sum() return df - df = pd.concat({sub: get_subsector_data(sheet) - for sub, sheet in sector_sheets.items()}, axis=1) + df = pd.concat( + {sub: get_subsector_data(sheet) for sub, sheet in sector_sheets.items()}, axis=1 + ) - sel = ['Mining and quarrying', 'Construction', 'Non-specified'] - df['Other Industrial Sectors'] = df[sel].sum(axis=1) - df['Basic chemicals'] += df['Basic chemicals feedstock'] + sel = ["Mining and quarrying", "Construction", "Non-specified"] + df["Other Industrial Sectors"] = df[sel].sum(axis=1) + df["Basic chemicals"] += df["Basic chemicals feedstock"] - df.drop(columns=sel+['Basic chemicals feedstock'], index='all', inplace=True) + df.drop(columns=sel + ["Basic chemicals feedstock"], index="all", inplace=True) df *= ktoe_to_twh @@ -86,41 +118,44 @@ def industrial_energy_demand_per_country(country): def add_ammonia_energy_demand(demand): - # MtNH3/a fn = snakemake.input.ammonia_production ammonia = pd.read_csv(fn, index_col=0)[str(year)] / 1e3 def get_ammonia_by_fuel(x): + fuels = { + "gas": config["MWh_CH4_per_tNH3_SMR"], + "electricity": config["MWh_elec_per_tNH3_SMR"], + } - fuels = {'gas': config['MWh_CH4_per_tNH3_SMR'], - 'electricity': config['MWh_elec_per_tNH3_SMR']} - - return pd.Series({k: x*v for k,v in fuels.items()}) + return pd.Series({k: x * v for k, v in fuels.items()}) ammonia_by_fuel = ammonia.apply(get_ammonia_by_fuel).T - ammonia_by_fuel = ammonia_by_fuel.unstack().reindex(index=demand.index, fill_value=0.) + ammonia_by_fuel = ammonia_by_fuel.unstack().reindex( + index=demand.index, fill_value=0.0 + ) - ammonia = pd.DataFrame({"ammonia": ammonia * config['MWh_NH3_per_tNH3']}).T + ammonia = pd.DataFrame({"ammonia": ammonia * config["MWh_NH3_per_tNH3"]}).T - demand['Ammonia'] = ammonia.unstack().reindex(index=demand.index, fill_value=0.) 
+ demand["Ammonia"] = ammonia.unstack().reindex(index=demand.index, fill_value=0.0) - demand['Basic chemicals (without ammonia)'] = demand["Basic chemicals"] - ammonia_by_fuel + demand["Basic chemicals (without ammonia)"] = ( + demand["Basic chemicals"] - ammonia_by_fuel + ) - demand['Basic chemicals (without ammonia)'].clip(lower=0, inplace=True) + demand["Basic chemicals (without ammonia)"].clip(lower=0, inplace=True) - demand.drop(columns='Basic chemicals', inplace=True) + demand.drop(columns="Basic chemicals", inplace=True) return demand def add_non_eu28_industrial_energy_demand(demand): - # output in MtMaterial/a fn = snakemake.input.industrial_production_per_country production = pd.read_csv(fn, index_col=0) / 1e3 - #recombine HVC, Chlorine and Methanol to Basic chemicals (without ammonia) + # recombine HVC, Chlorine and Methanol to Basic chemicals (without ammonia) chemicals = ["HVC", "Chlorine", "Methanol"] production["Basic chemicals (without ammonia)"] = production[chemicals].sum(axis=1) production.drop(columns=chemicals, inplace=True) @@ -131,18 +166,22 @@ def add_non_eu28_industrial_energy_demand(demand): non_eu28 = production.index.symmetric_difference(eu28) - demand_non_eu28 = pd.concat({k: v * eu28_averages - for k, v in production.loc[non_eu28].iterrows()}) + demand_non_eu28 = pd.concat( + {k: v * eu28_averages for k, v in production.loc[non_eu28].iterrows()} + ) return pd.concat([demand, demand_non_eu28]) def industrial_energy_demand(countries): - nprocesses = snakemake.threads func = industrial_energy_demand_per_country - tqdm_kwargs = dict(ascii=False, unit=' country', total=len(countries), - desc="Build industrial energy demand") + tqdm_kwargs = dict( + ascii=False, + unit=" country", + total=len(countries), + desc="Build industrial energy demand", + ) with mp.Pool(processes=nprocesses) as pool: demand_l = list(tqdm(pool.imap(func, countries), **tqdm_kwargs)) @@ -151,13 +190,14 @@ def industrial_energy_demand(countries): return demand -if __name__ == '__main__': - if 'snakemake' not in globals(): +if __name__ == "__main__": + if "snakemake" not in globals(): from helper import mock_snakemake - snakemake = mock_snakemake('build_industrial_energy_demand_per_country_today') - config = snakemake.config['industry'] - year = config.get('reference_year', 2015) + snakemake = mock_snakemake("build_industrial_energy_demand_per_country_today") + + config = snakemake.config["industry"] + year = config.get("reference_year", 2015) demand = industrial_energy_demand(eu28) @@ -166,10 +206,10 @@ if __name__ == '__main__': demand = add_non_eu28_industrial_energy_demand(demand) # for format compatibility - demand = demand.stack(dropna=False).unstack(level=[0,2]) + demand = demand.stack(dropna=False).unstack(level=[0, 2]) # style and annotation - demand.index.name = 'TWh/a' + demand.index.name = "TWh/a" demand.sort_index(axis=1, inplace=True) fn = snakemake.output.industrial_energy_demand_per_country_today diff --git a/scripts/build_industrial_energy_demand_per_node.py b/scripts/build_industrial_energy_demand_per_node.py index d665f18e..10e10ab8 100644 --- a/scripts/build_industrial_energy_demand_per_node.py +++ b/scripts/build_industrial_energy_demand_per_node.py @@ -1,17 +1,21 @@ -"""Build industrial energy demand per node.""" +# -*- coding: utf-8 -*- +""" +Build industrial energy demand per node. 
+""" import pandas as pd -if __name__ == '__main__': - if 'snakemake' not in globals(): +if __name__ == "__main__": + if "snakemake" not in globals(): from helper import mock_snakemake + snakemake = mock_snakemake( - 'build_industrial_energy_demand_per_node', - simpl='', + "build_industrial_energy_demand_per_node", + simpl="", clusters=48, planning_horizons=2030, ) - + # import EU ratios df as csv fn = snakemake.input.industry_sector_ratios industry_sector_ratios = pd.read_csv(fn, index_col=0) @@ -26,14 +30,14 @@ if __name__ == '__main__': # final energy consumption per node and industry (TWh/a) nodal_df = nodal_production.dot(industry_sector_ratios.T) - + # convert GWh to TWh and ktCO2 to MtCO2 nodal_df *= 0.001 rename_sectors = { - 'elec': 'electricity', - 'biomass': 'solid biomass', - 'heat': 'low-temperature heat' + "elec": "electricity", + "biomass": "solid biomass", + "heat": "low-temperature heat", } nodal_df.rename(columns=rename_sectors, inplace=True) @@ -42,4 +46,4 @@ if __name__ == '__main__': nodal_df.index.name = "TWh/a (MtCO2/a)" fn = snakemake.output.industrial_energy_demand_per_node - nodal_df.to_csv(fn, float_format='%.2f') + nodal_df.to_csv(fn, float_format="%.2f") diff --git a/scripts/build_industrial_energy_demand_per_node_today.py b/scripts/build_industrial_energy_demand_per_node_today.py index 366e3a95..85b4b99a 100644 --- a/scripts/build_industrial_energy_demand_per_node_today.py +++ b/scripts/build_industrial_energy_demand_per_node_today.py @@ -1,33 +1,36 @@ -"""Build industrial energy demand per node.""" +# -*- coding: utf-8 -*- +""" +Build industrial energy demand per node. +""" -import pandas as pd -import numpy as np from itertools import product +import numpy as np +import pandas as pd + # map JRC/our sectors to hotmaps sector, where mapping exist sector_mapping = { - 'Electric arc': 'Iron and steel', - 'Integrated steelworks': 'Iron and steel', - 'DRI + Electric arc': 'Iron and steel', - 'Ammonia': 'Chemical industry', - 'Basic chemicals (without ammonia)': 'Chemical industry', - 'Other chemicals': 'Chemical industry', - 'Pharmaceutical products etc.': 'Chemical industry', - 'Cement': 'Cement', - 'Ceramics & other NMM': 'Non-metallic mineral products', - 'Glass production': 'Glass', - 'Pulp production': 'Paper and printing', - 'Paper production': 'Paper and printing', - 'Printing and media reproduction': 'Paper and printing', - 'Alumina production': 'Non-ferrous metals', - 'Aluminium - primary production': 'Non-ferrous metals', - 'Aluminium - secondary production': 'Non-ferrous metals', - 'Other non-ferrous metals': 'Non-ferrous metals', + "Electric arc": "Iron and steel", + "Integrated steelworks": "Iron and steel", + "DRI + Electric arc": "Iron and steel", + "Ammonia": "Chemical industry", + "Basic chemicals (without ammonia)": "Chemical industry", + "Other chemicals": "Chemical industry", + "Pharmaceutical products etc.": "Chemical industry", + "Cement": "Cement", + "Ceramics & other NMM": "Non-metallic mineral products", + "Glass production": "Glass", + "Pulp production": "Paper and printing", + "Paper production": "Paper and printing", + "Printing and media reproduction": "Paper and printing", + "Alumina production": "Non-ferrous metals", + "Aluminium - primary production": "Non-ferrous metals", + "Aluminium - secondary production": "Non-ferrous metals", + "Other non-ferrous metals": "Non-ferrous metals", } def build_nodal_industrial_energy_demand(): - fn = snakemake.input.industrial_energy_demand_per_country_today industrial_demand = pd.read_csv(fn, 
header=[0, 1], index_col=0) @@ -35,24 +38,23 @@ def build_nodal_industrial_energy_demand(): keys = pd.read_csv(fn, index_col=0) keys["country"] = keys.index.str[:2] - nodal_demand = pd.DataFrame(0., dtype=float, - index=keys.index, - columns=industrial_demand.index) - + nodal_demand = pd.DataFrame( + 0.0, dtype=float, index=keys.index, columns=industrial_demand.index + ) + countries = keys.country.unique() sectors = industrial_demand.columns.levels[1] for country, sector in product(countries, sectors): - buses = keys.index[keys.country == country] - mapping = sector_mapping.get(sector, 'population') + mapping = sector_mapping.get(sector, "population") key = keys.loc[buses, mapping] demand = industrial_demand[country, sector] - outer = pd.DataFrame(np.outer(key, demand), - index=key.index, - columns=demand.index) + outer = pd.DataFrame( + np.outer(key, demand), index=key.index, columns=demand.index + ) nodal_demand.loc[buses] += outer @@ -62,11 +64,12 @@ def build_nodal_industrial_energy_demand(): if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from helper import mock_snakemake + snakemake = mock_snakemake( - 'build_industrial_energy_demand_per_node_today', - simpl='', + "build_industrial_energy_demand_per_node_today", + simpl="", clusters=48, ) diff --git a/scripts/build_industrial_production_per_country.py b/scripts/build_industrial_production_per_country.py index 120d4373..80189321 100644 --- a/scripts/build_industrial_production_per_country.py +++ b/scripts/build_industrial_production_per_country.py @@ -1,132 +1,204 @@ -"""Build industrial production per country.""" +# -*- coding: utf-8 -*- +""" +Build industrial production per country. +""" import logging + logger = logging.getLogger(__name__) -import pandas as pd -import numpy as np import multiprocessing as mp -from tqdm import tqdm + +import numpy as np +import pandas as pd from helper import mute_print +from tqdm import tqdm tj_to_ktoe = 0.0238845 ktoe_to_twh = 0.01163 -sub_sheet_name_dict = {'Iron and steel': 'ISI', - 'Chemicals Industry': 'CHI', - 'Non-metallic mineral products': 'NMM', - 'Pulp, paper and printing': 'PPA', - 'Food, beverages and tobacco': 'FBT', - 'Non Ferrous Metals': 'NFM', - 'Transport Equipment': 'TRE', - 'Machinery Equipment': 'MAE', - 'Textiles and leather': 'TEL', - 'Wood and wood products': 'WWP', - 'Other Industrial Sectors': 'OIS'} +sub_sheet_name_dict = { + "Iron and steel": "ISI", + "Chemicals Industry": "CHI", + "Non-metallic mineral products": "NMM", + "Pulp, paper and printing": "PPA", + "Food, beverages and tobacco": "FBT", + "Non Ferrous Metals": "NFM", + "Transport Equipment": "TRE", + "Machinery Equipment": "MAE", + "Textiles and leather": "TEL", + "Wood and wood products": "WWP", + "Other Industrial Sectors": "OIS", +} -non_EU = ['NO', 'CH', 'ME', 'MK', 'RS', 'BA', 'AL'] +non_EU = ["NO", "CH", "ME", "MK", "RS", "BA", "AL"] jrc_names = {"GR": "EL", "GB": "UK"} -eu28 = ['FR', 'DE', 'GB', 'IT', 'ES', 'PL', 'SE', 'NL', 'BE', 'FI', - 'DK', 'PT', 'RO', 'AT', 'BG', 'EE', 'GR', 'LV', 'CZ', - 'HU', 'IE', 'SK', 'LT', 'HR', 'LU', 'SI', 'CY', 'MT'] +eu28 = [ + "FR", + "DE", + "GB", + "IT", + "ES", + "PL", + "SE", + "NL", + "BE", + "FI", + "DK", + "PT", + "RO", + "AT", + "BG", + "EE", + "GR", + "LV", + "CZ", + "HU", + "IE", + "SK", + "LT", + "HR", + "LU", + "SI", + "CY", + "MT", +] -sect2sub = {'Iron and steel': ['Electric arc', 'Integrated steelworks'], - 'Chemicals Industry': ['Basic chemicals', 'Other chemicals', 'Pharmaceutical products etc.'], - 
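# Sketch (illustrative): np.outer(key, demand) above spreads one country's
# per-carrier sector demand over its buses according to the distribution key.
# Toy example with made-up names:

import numpy as np
import pandas as pd

key = pd.Series({"DE0": 0.25, "DE1": 0.75})    # bus shares, sum to 1
demand = pd.Series({"elec": 8.0, "gas": 4.0})  # TWh/a for one sector
print(pd.DataFrame(np.outer(key, demand),
                   index=key.index, columns=demand.index))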
'Non-metallic mineral products': ['Cement', 'Ceramics & other NMM', 'Glass production'], - 'Pulp, paper and printing': ['Pulp production', 'Paper production', 'Printing and media reproduction'], - 'Food, beverages and tobacco': ['Food, beverages and tobacco'], - 'Non Ferrous Metals': ['Alumina production', 'Aluminium - primary production', 'Aluminium - secondary production', 'Other non-ferrous metals'], - 'Transport Equipment': ['Transport Equipment'], - 'Machinery Equipment': ['Machinery Equipment'], - 'Textiles and leather': ['Textiles and leather'], - 'Wood and wood products': ['Wood and wood products'], - 'Other Industrial Sectors': ['Other Industrial Sectors']} +sect2sub = { + "Iron and steel": ["Electric arc", "Integrated steelworks"], + "Chemicals Industry": [ + "Basic chemicals", + "Other chemicals", + "Pharmaceutical products etc.", + ], + "Non-metallic mineral products": [ + "Cement", + "Ceramics & other NMM", + "Glass production", + ], + "Pulp, paper and printing": [ + "Pulp production", + "Paper production", + "Printing and media reproduction", + ], + "Food, beverages and tobacco": ["Food, beverages and tobacco"], + "Non Ferrous Metals": [ + "Alumina production", + "Aluminium - primary production", + "Aluminium - secondary production", + "Other non-ferrous metals", + ], + "Transport Equipment": ["Transport Equipment"], + "Machinery Equipment": ["Machinery Equipment"], + "Textiles and leather": ["Textiles and leather"], + "Wood and wood products": ["Wood and wood products"], + "Other Industrial Sectors": ["Other Industrial Sectors"], +} sub2sect = {v: k for k, vv in sect2sub.items() for v in vv} -fields = {'Electric arc': 'Electric arc', - 'Integrated steelworks': 'Integrated steelworks', - 'Basic chemicals': 'Basic chemicals (kt ethylene eq.)', - 'Other chemicals': 'Other chemicals (kt ethylene eq.)', - 'Pharmaceutical products etc.': 'Pharmaceutical products etc. (kt ethylene eq.)', - 'Cement': 'Cement (kt)', - 'Ceramics & other NMM': 'Ceramics & other NMM (kt bricks eq.)', - 'Glass production': 'Glass production (kt)', - 'Pulp production': 'Pulp production (kt)', - 'Paper production': 'Paper production (kt)', - 'Printing and media reproduction': 'Printing and media reproduction (kt paper eq.)', - 'Food, beverages and tobacco': 'Physical output (index)', - 'Alumina production': 'Alumina production (kt)', - 'Aluminium - primary production': 'Aluminium - primary production', - 'Aluminium - secondary production': 'Aluminium - secondary production', - 'Other non-ferrous metals': 'Other non-ferrous metals (kt lead eq.)', - 'Transport Equipment': 'Physical output (index)', - 'Machinery Equipment': 'Physical output (index)', - 'Textiles and leather': 'Physical output (index)', - 'Wood and wood products': 'Physical output (index)', - 'Other Industrial Sectors': 'Physical output (index)'} +fields = { + "Electric arc": "Electric arc", + "Integrated steelworks": "Integrated steelworks", + "Basic chemicals": "Basic chemicals (kt ethylene eq.)", + "Other chemicals": "Other chemicals (kt ethylene eq.)", + "Pharmaceutical products etc.": "Pharmaceutical products etc. 
(kt ethylene eq.)", + "Cement": "Cement (kt)", + "Ceramics & other NMM": "Ceramics & other NMM (kt bricks eq.)", + "Glass production": "Glass production (kt)", + "Pulp production": "Pulp production (kt)", + "Paper production": "Paper production (kt)", + "Printing and media reproduction": "Printing and media reproduction (kt paper eq.)", + "Food, beverages and tobacco": "Physical output (index)", + "Alumina production": "Alumina production (kt)", + "Aluminium - primary production": "Aluminium - primary production", + "Aluminium - secondary production": "Aluminium - secondary production", + "Other non-ferrous metals": "Other non-ferrous metals (kt lead eq.)", + "Transport Equipment": "Physical output (index)", + "Machinery Equipment": "Physical output (index)", + "Textiles and leather": "Physical output (index)", + "Wood and wood products": "Physical output (index)", + "Other Industrial Sectors": "Physical output (index)", +} -eb_names = {'NO': 'Norway', 'AL': 'Albania', 'BA': 'Bosnia and Herzegovina', - 'MK': 'FYR of Macedonia', 'GE': 'Georgia', 'IS': 'Iceland', - 'KO': 'Kosovo', 'MD': 'Moldova', 'ME': 'Montenegro', 'RS': 'Serbia', - 'UA': 'Ukraine', 'TR': 'Turkey', } +eb_names = { + "NO": "Norway", + "AL": "Albania", + "BA": "Bosnia and Herzegovina", + "MK": "FYR of Macedonia", + "GE": "Georgia", + "IS": "Iceland", + "KO": "Kosovo", + "MD": "Moldova", + "ME": "Montenegro", + "RS": "Serbia", + "UA": "Ukraine", + "TR": "Turkey", +} -eb_sectors = {'Iron & steel industry': 'Iron and steel', - 'Chemical and Petrochemical industry': 'Chemicals Industry', - 'Non-ferrous metal industry': 'Non-metallic mineral products', - 'Paper, Pulp and Print': 'Pulp, paper and printing', - 'Food and Tabacco': 'Food, beverages and tobacco', - 'Non-metallic Minerals (Glass, pottery & building mat. Industry)': 'Non Ferrous Metals', - 'Transport Equipment': 'Transport Equipment', - 'Machinery': 'Machinery Equipment', - 'Textile and Leather': 'Textiles and leather', - 'Wood and Wood Products': 'Wood and wood products', - 'Non-specified (Industry)': 'Other Industrial Sectors'} +eb_sectors = { + "Iron & steel industry": "Iron and steel", + "Chemical and Petrochemical industry": "Chemicals Industry", + "Non-ferrous metal industry": "Non-metallic mineral products", + "Paper, Pulp and Print": "Pulp, paper and printing", + "Food and Tabacco": "Food, beverages and tobacco", + "Non-metallic Minerals (Glass, pottery & building mat. 
Industry)": "Non Ferrous Metals", + "Transport Equipment": "Transport Equipment", + "Machinery": "Machinery Equipment", + "Textile and Leather": "Textiles and leather", + "Wood and Wood Products": "Wood and wood products", + "Non-specified (Industry)": "Other Industrial Sectors", +} # TODO: this should go in a csv in `data` # Annual energy consumption in Switzerland by sector in 2015 (in TJ) # From: Energieverbrauch in der Industrie und im Dienstleistungssektor, Der Bundesrat # http://www.bfe.admin.ch/themen/00526/00541/00543/index.html?lang=de&dossier_id=00775 -e_switzerland = pd.Series({'Iron and steel': 7889., - 'Chemicals Industry': 26871., - 'Non-metallic mineral products': 15513.+3820., - 'Pulp, paper and printing': 12004., - 'Food, beverages and tobacco': 17728., - 'Non Ferrous Metals': 3037., - 'Transport Equipment': 14993., - 'Machinery Equipment': 4724., - 'Textiles and leather': 1742., - 'Wood and wood products': 0., - 'Other Industrial Sectors': 10825., - 'current electricity': 53760.}) +e_switzerland = pd.Series( + { + "Iron and steel": 7889.0, + "Chemicals Industry": 26871.0, + "Non-metallic mineral products": 15513.0 + 3820.0, + "Pulp, paper and printing": 12004.0, + "Food, beverages and tobacco": 17728.0, + "Non Ferrous Metals": 3037.0, + "Transport Equipment": 14993.0, + "Machinery Equipment": 4724.0, + "Textiles and leather": 1742.0, + "Wood and wood products": 0.0, + "Other Industrial Sectors": 10825.0, + "current electricity": 53760.0, + } +) + def find_physical_output(df): - start = np.where(df.index.str.contains('Physical output', na=''))[0][0] + start = np.where(df.index.str.contains("Physical output", na=""))[0][0] empty_row = np.where(df.index.isnull())[0] end = empty_row[np.argmax(empty_row > start)] return slice(start, end) def get_energy_ratio(country): - - if country == 'CH': + if country == "CH": e_country = e_switzerland * tj_to_ktoe else: # estimate physical output, energy consumption in the sector and country fn = f"{eurostat_dir}/{eb_names[country]}.XLSX" with mute_print(): - df = pd.read_excel(fn, sheet_name='2016', index_col=2, - header=0, skiprows=1).squeeze('columns') - e_country = df.loc[eb_sectors.keys( - ), 'Total all products'].rename(eb_sectors) + df = pd.read_excel( + fn, sheet_name="2016", index_col=2, header=0, skiprows=1 + ).squeeze("columns") + e_country = df.loc[eb_sectors.keys(), "Total all products"].rename(eb_sectors) - fn = f'{jrc_dir}/JRC-IDEES-2015_Industry_EU28.xlsx' + fn = f"{jrc_dir}/JRC-IDEES-2015_Industry_EU28.xlsx" with mute_print(): - df = pd.read_excel(fn, sheet_name='Ind_Summary', - index_col=0, header=0).squeeze('columns') + df = pd.read_excel(fn, sheet_name="Ind_Summary", index_col=0, header=0).squeeze( + "columns" + ) assert df.index[48] == "by sector" year_i = df.columns.get_loc(year) @@ -139,15 +211,14 @@ def get_energy_ratio(country): def industry_production_per_country(country): - def get_sector_data(sector, country): - jrc_country = jrc_names.get(country, country) - fn = f'{jrc_dir}/JRC-IDEES-2015_Industry_{jrc_country}.xlsx' + fn = f"{jrc_dir}/JRC-IDEES-2015_Industry_{jrc_country}.xlsx" sheet = sub_sheet_name_dict[sector] with mute_print(): - df = pd.read_excel(fn, sheet_name=sheet, - index_col=0, header=0).squeeze('columns') + df = pd.read_excel(fn, sheet_name=sheet, index_col=0, header=0).squeeze( + "columns" + ) year_i = df.columns.get_loc(year) df = df.iloc[find_physical_output(df), year_i] @@ -169,11 +240,14 @@ def industry_production_per_country(country): def industry_production(countries): - nprocesses = 
snakemake.threads func = industry_production_per_country - tqdm_kwargs = dict(ascii=False, unit=' country', total=len(countries), - desc="Build industry production") + tqdm_kwargs = dict( + ascii=False, + unit=" country", + total=len(countries), + desc="Build industry production", + ) with mp.Pool(processes=nprocesses) as pool: demand_l = list(tqdm(pool.imap(func, countries), **tqdm_kwargs)) @@ -185,7 +259,9 @@ def industry_production(countries): def separate_basic_chemicals(demand): - """Separate basic chemicals into ammonia, chlorine, methanol and HVC.""" + """ + Separate basic chemicals into ammonia, chlorine, methanol and HVC. + """ ammonia = pd.read_csv(snakemake.input.ammonia_production, index_col=0) @@ -194,14 +270,14 @@ def separate_basic_chemicals(demand): logger.info(f"Following countries have no ammonia demand: {missing.tolist()}") - demand["Ammonia"] = 0. + demand["Ammonia"] = 0.0 demand.loc[there, "Ammonia"] = ammonia.loc[there, str(year)] demand["Basic chemicals"] -= demand["Ammonia"] # EE, HR and LT got negative demand through subtraction - poor data - demand['Basic chemicals'].clip(lower=0., inplace=True) + demand["Basic chemicals"].clip(lower=0.0, inplace=True) # assume HVC, methanol, chlorine production proportional to non-ammonia basic chemicals distribution_key = demand["Basic chemicals"] / demand["Basic chemicals"].sum() @@ -211,16 +287,18 @@ def separate_basic_chemicals(demand): demand.drop(columns=["Basic chemicals"], inplace=True) -if __name__ == '__main__': - if 'snakemake' not in globals(): - from helper import mock_snakemake - snakemake = mock_snakemake('build_industrial_production_per_country') - logging.basicConfig(level=snakemake.config['logging_level']) +if __name__ == "__main__": + if "snakemake" not in globals(): + from helper import mock_snakemake + + snakemake = mock_snakemake("build_industrial_production_per_country") + + logging.basicConfig(level=snakemake.config["logging_level"]) countries = non_EU + eu28 - year = snakemake.config['industry']['reference_year'] + year = snakemake.config["industry"]["reference_year"] config = snakemake.config["industry"] @@ -232,4 +310,4 @@ if __name__ == '__main__': separate_basic_chemicals(demand) fn = snakemake.output.industrial_production_per_country - demand.to_csv(fn, float_format='%.2f') + demand.to_csv(fn, float_format="%.2f") diff --git a/scripts/build_industrial_production_per_country_tomorrow.py b/scripts/build_industrial_production_per_country_tomorrow.py index ccf31839..05845f06 100644 --- a/scripts/build_industrial_production_per_country_tomorrow.py +++ b/scripts/build_industrial_production_per_country_tomorrow.py @@ -1,13 +1,16 @@ -"""Build future industrial production per country.""" +# -*- coding: utf-8 -*- +""" +Build future industrial production per country. 
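Today's per-country production is scaled sector by sector for the chosen investment year: steel is split between integrated steelworks, DRI + electric arc and electric arc via the st_primary_fraction and dri_fraction options, aluminium between primary and secondary production via Al_primary_fraction, and HVC between the primary route and mechanical/chemical recycling via the HVC_*_fraction config options.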
+""" import pandas as pd - from prepare_sector_network import get -if __name__ == '__main__': - if 'snakemake' not in globals(): +if __name__ == "__main__": + if "snakemake" not in globals(): from helper import mock_snakemake - snakemake = mock_snakemake('build_industrial_production_per_country_tomorrow') + + snakemake = mock_snakemake("build_industrial_production_per_country_tomorrow") config = snakemake.config["industry"] @@ -24,12 +27,20 @@ if __name__ == '__main__': int_steel = production["Integrated steelworks"].sum() fraction_persistent_primary = st_primary_fraction * total_steel.sum() / int_steel - dri = dri_fraction * fraction_persistent_primary * production["Integrated steelworks"] + dri = ( + dri_fraction * fraction_persistent_primary * production["Integrated steelworks"] + ) production.insert(2, "DRI + Electric arc", dri) - not_dri = (1 - dri_fraction) - production["Integrated steelworks"] = not_dri * fraction_persistent_primary * production["Integrated steelworks"] - production["Electric arc"] = total_steel - production["DRI + Electric arc"] - production["Integrated steelworks"] + not_dri = 1 - dri_fraction + production["Integrated steelworks"] = ( + not_dri * fraction_persistent_primary * production["Integrated steelworks"] + ) + production["Electric arc"] = ( + total_steel + - production["DRI + Electric arc"] + - production["Integrated steelworks"] + ) keys = ["Aluminium - primary production", "Aluminium - secondary production"] total_aluminium = production[keys].sum(axis=1) @@ -38,15 +49,23 @@ if __name__ == '__main__': key_sec = "Aluminium - secondary production" al_primary_fraction = get(config["Al_primary_fraction"], investment_year) - fraction_persistent_primary = al_primary_fraction * total_aluminium.sum() / production[key_pri].sum() + fraction_persistent_primary = ( + al_primary_fraction * total_aluminium.sum() / production[key_pri].sum() + ) production[key_pri] = fraction_persistent_primary * production[key_pri] production[key_sec] = total_aluminium - production[key_pri] - production["HVC (mechanical recycling)"] = get(config["HVC_mechanical_recycling_fraction"], investment_year) * production["HVC"] - production["HVC (chemical recycling)"] = get(config["HVC_chemical_recycling_fraction"], investment_year) * production["HVC"] + production["HVC (mechanical recycling)"] = ( + get(config["HVC_mechanical_recycling_fraction"], investment_year) + * production["HVC"] + ) + production["HVC (chemical recycling)"] = ( + get(config["HVC_chemical_recycling_fraction"], investment_year) + * production["HVC"] + ) - production["HVC"] *= get(config['HVC_primary_fraction'], investment_year) + production["HVC"] *= get(config["HVC_primary_fraction"], investment_year) fn = snakemake.output.industrial_production_per_country_tomorrow - production.to_csv(fn, float_format='%.2f') + production.to_csv(fn, float_format="%.2f") diff --git a/scripts/build_industrial_production_per_node.py b/scripts/build_industrial_production_per_node.py index 4ceffee9..7ddd21be 100644 --- a/scripts/build_industrial_production_per_node.py +++ b/scripts/build_industrial_production_per_node.py @@ -1,36 +1,39 @@ -"""Build industrial production per node.""" +# -*- coding: utf-8 -*- +""" +Build industrial production per node. 
+""" + +from itertools import product import pandas as pd -from itertools import product # map JRC/our sectors to hotmaps sector, where mapping exist sector_mapping = { - 'Electric arc': 'Iron and steel', - 'Integrated steelworks': 'Iron and steel', - 'DRI + Electric arc': 'Iron and steel', - 'Ammonia': 'Chemical industry', - 'HVC': 'Chemical industry', - 'HVC (mechanical recycling)': 'Chemical industry', - 'HVC (chemical recycling)': 'Chemical industry', - 'Methanol': 'Chemical industry', - 'Chlorine': 'Chemical industry', - 'Other chemicals': 'Chemical industry', - 'Pharmaceutical products etc.': 'Chemical industry', - 'Cement': 'Cement', - 'Ceramics & other NMM': 'Non-metallic mineral products', - 'Glass production': 'Glass', - 'Pulp production': 'Paper and printing', - 'Paper production': 'Paper and printing', - 'Printing and media reproduction': 'Paper and printing', - 'Alumina production': 'Non-ferrous metals', - 'Aluminium - primary production': 'Non-ferrous metals', - 'Aluminium - secondary production': 'Non-ferrous metals', - 'Other non-ferrous metals': 'Non-ferrous metals', + "Electric arc": "Iron and steel", + "Integrated steelworks": "Iron and steel", + "DRI + Electric arc": "Iron and steel", + "Ammonia": "Chemical industry", + "HVC": "Chemical industry", + "HVC (mechanical recycling)": "Chemical industry", + "HVC (chemical recycling)": "Chemical industry", + "Methanol": "Chemical industry", + "Chlorine": "Chemical industry", + "Other chemicals": "Chemical industry", + "Pharmaceutical products etc.": "Chemical industry", + "Cement": "Cement", + "Ceramics & other NMM": "Non-metallic mineral products", + "Glass production": "Glass", + "Pulp production": "Paper and printing", + "Paper production": "Paper and printing", + "Printing and media reproduction": "Paper and printing", + "Alumina production": "Non-ferrous metals", + "Aluminium - primary production": "Non-ferrous metals", + "Aluminium - secondary production": "Non-ferrous metals", + "Other non-ferrous metals": "Non-ferrous metals", } def build_nodal_industrial_production(): - fn = snakemake.input.industrial_production_per_country_tomorrow industrial_production = pd.read_csv(fn, index_col=0) @@ -38,29 +41,32 @@ def build_nodal_industrial_production(): keys = pd.read_csv(fn, index_col=0) keys["country"] = keys.index.str[:2] - nodal_production = pd.DataFrame(index=keys.index, - columns=industrial_production.columns, - dtype=float) + nodal_production = pd.DataFrame( + index=keys.index, columns=industrial_production.columns, dtype=float + ) countries = keys.country.unique() sectors = industrial_production.columns for country, sector in product(countries, sectors): - buses = keys.index[keys.country == country] mapping = sector_mapping.get(sector, "population") key = keys.loc[buses, mapping] - nodal_production.loc[buses, sector] = industrial_production.at[country, sector] * key + nodal_production.loc[buses, sector] = ( + industrial_production.at[country, sector] * key + ) nodal_production.to_csv(snakemake.output.industrial_production_per_node) if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from helper import mock_snakemake - snakemake = mock_snakemake('build_industrial_production_per_node', - simpl='', + + snakemake = mock_snakemake( + "build_industrial_production_per_node", + simpl="", clusters=48, ) diff --git a/scripts/build_industry_sector_ratios.py b/scripts/build_industry_sector_ratios.py index 32a2634e..c62ee9dd 100644 --- a/scripts/build_industry_sector_ratios.py +++ 
b/scripts/build_industry_sector_ratios.py @@ -1,4 +1,7 @@ -"""Build industry sector ratios.""" +# -*- coding: utf-8 -*- +""" +Build industry sector ratios. +""" import pandas as pd from helper import mute_print @@ -68,7 +71,6 @@ index = [ def load_idees_data(sector, country="EU28"): - suffixes = {"out": "", "fec": "_fec", "ued": "_ued", "emi": "_emi"} sheets = {k: sheet_names[sector] + v for k, v in suffixes.items()} @@ -91,7 +93,6 @@ def load_idees_data(sector, country="EU28"): def iron_and_steel(): - # There are two different approaches to produce iron and steel: # i.e., integrated steelworks and electric arc. # Electric arc approach has higher efficiency and relies more on electricity. @@ -602,7 +603,6 @@ def chemicals_industry(): def nonmetalic_mineral_products(): - # This includes cement, ceramic and glass production. # This includes process emissions related to the fabrication of clinker. @@ -789,7 +789,6 @@ def nonmetalic_mineral_products(): def pulp_paper_printing(): - # Pulp, paper and printing can be completely electrified. # There are no process emissions associated to this sector. @@ -942,7 +941,6 @@ def pulp_paper_printing(): def food_beverages_tobacco(): - # Food, beverages and tobaco can be completely electrified. # There are no process emissions associated to this sector. @@ -1002,7 +1000,6 @@ def food_beverages_tobacco(): def non_ferrous_metals(): - sector = "Non Ferrous Metals" idees = load_idees_data(sector) @@ -1205,7 +1202,6 @@ def non_ferrous_metals(): def transport_equipment(): - sector = "Transport Equipment" idees = load_idees_data(sector) @@ -1256,7 +1252,6 @@ def transport_equipment(): def machinery_equipment(): - sector = "Machinery Equipment" idees = load_idees_data(sector) @@ -1309,7 +1304,6 @@ def machinery_equipment(): def textiles_and_leather(): - sector = "Textiles and leather" idees = load_idees_data(sector) @@ -1358,7 +1352,6 @@ def textiles_and_leather(): def wood_and_wood_products(): - sector = "Wood and wood products" idees = load_idees_data(sector) @@ -1404,7 +1397,6 @@ def wood_and_wood_products(): def other_industrial_sectors(): - sector = "Other Industrial Sectors" idees = load_idees_data(sector) @@ -1465,9 +1457,10 @@ def other_industrial_sectors(): if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from helper import mock_snakemake - snakemake = mock_snakemake('build_industry_sector_ratios') + + snakemake = mock_snakemake("build_industry_sector_ratios") # TODO make config option year = 2015 diff --git a/scripts/build_population_layouts.py b/scripts/build_population_layouts.py index 96a32b08..d624349f 100644 --- a/scripts/build_population_layouts.py +++ b/scripts/build_population_layouts.py @@ -1,29 +1,35 @@ -"""Build mapping between grid cells and population (total, urban, rural)""" +# -*- coding: utf-8 -*- +""" +Build mapping between grid cells and population (total, urban, rural) +""" import logging + logger = logging.getLogger(__name__) import multiprocessing as mp + import atlite +import geopandas as gpd import numpy as np import pandas as pd import xarray as xr -import geopandas as gpd -if __name__ == '__main__': - if 'snakemake' not in globals(): +if __name__ == "__main__": + if "snakemake" not in globals(): from helper import mock_snakemake - snakemake = mock_snakemake('build_population_layouts') - logging.basicConfig(level=snakemake.config['logging_level']) + snakemake = mock_snakemake("build_population_layouts") - cutout = atlite.Cutout(snakemake.config['atlite']['cutout']) + 
logging.basicConfig(level=snakemake.config["logging_level"]) + + cutout = atlite.Cutout(snakemake.config["atlite"]["cutout"]) grid_cells = cutout.grid.geometry # nuts3 has columns country, gdp, pop, geometry # population is given in dimensions of 1e3=k - nuts3 = gpd.read_file(snakemake.input.nuts3_shapes).set_index('index') + nuts3 = gpd.read_file(snakemake.input.nuts3_shapes).set_index("index") # Indicator matrix NUTS3 -> grid cells I = atlite.cutout.compute_indicatormatrix(nuts3.geometry, grid_cells) @@ -34,9 +40,12 @@ if __name__ == '__main__': countries = np.sort(nuts3.country.unique()) - urban_fraction = pd.read_csv(snakemake.input.urban_percent, - header=None, index_col=0, - names=['fraction']).squeeze() / 100. + urban_fraction = ( + pd.read_csv( + snakemake.input.urban_percent, header=None, index_col=0, names=["fraction"] + ).squeeze() + / 100.0 + ) # fill missing Balkans values missing = ["AL", "ME", "MK"] @@ -46,7 +55,7 @@ if __name__ == '__main__': urban_fraction = pd.concat([urban_fraction, fill_values]) # population in each grid cell - pop_cells = pd.Series(I.dot(nuts3['pop'])) + pop_cells = pd.Series(I.dot(nuts3["pop"])) # in km^2 cell_areas = grid_cells.to_crs(3035).area / 1e6 @@ -55,13 +64,15 @@ if __name__ == '__main__': density_cells = pop_cells / cell_areas # rural or urban population in grid cell - pop_rural = pd.Series(0., density_cells.index) - pop_urban = pd.Series(0., density_cells.index) + pop_rural = pd.Series(0.0, density_cells.index) + pop_urban = pd.Series(0.0, density_cells.index) for ct in countries: - logger.debug(f"The urbanization rate for {ct} is {round(urban_fraction[ct]*100)}%") + logger.debug( + f"The urbanization rate for {ct} is {round(urban_fraction[ct]*100)}%" + ) - indicator_nuts3_ct = nuts3.country.apply(lambda x: 1. if x == ct else 0.) + indicator_nuts3_ct = nuts3.country.apply(lambda x: 1.0 if x == ct else 0.0) indicator_cells_ct = pd.Series(Iinv.T.dot(indicator_nuts3_ct)) @@ -70,7 +81,7 @@ if __name__ == '__main__': pop_cells_ct = indicator_cells_ct * pop_cells # correct for imprecision of Iinv*I - pop_ct = nuts3.loc[nuts3.country==ct,'pop'].sum() + pop_ct = nuts3.loc[nuts3.country == ct, "pop"].sum() pop_cells_ct *= pop_ct / pop_cells_ct.sum() # The first low density grid cells to reach rural fraction are rural @@ -80,20 +91,19 @@ if __name__ == '__main__': pop_ct_rural_b = asc_density_cumsum < rural_fraction_ct pop_ct_urban_b = ~pop_ct_rural_b - pop_ct_rural_b[indicator_cells_ct == 0.] = False - pop_ct_urban_b[indicator_cells_ct == 0.] = False + pop_ct_rural_b[indicator_cells_ct == 0.0] = False + pop_ct_urban_b[indicator_cells_ct == 0.0] = False - pop_rural += pop_cells_ct.where(pop_ct_rural_b, 0.) - pop_urban += pop_cells_ct.where(pop_ct_urban_b, 0.) 
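For illustration, a minimal self-contained sketch of the rural/urban split performed in the loop above, using made-up cell populations and densities (the real script operates on the atlite cutout grid; all numbers here are hypothetical):

    import pandas as pd

    # hypothetical grid cells of one country: population and density
    pop_cells_ct = pd.Series([10.0, 40.0, 50.0], index=["a", "b", "c"])
    density_cells_ct = pd.Series([5.0, 200.0, 1000.0], index=["a", "b", "c"])
    rural_fraction_ct = 1 - 0.7  # assumed urbanization rate of 70%

    # sort cells by density; the least dense cells count as rural until the
    # cumulative population share reaches the rural fraction
    asc_density_i = density_cells_ct.sort_values().index
    asc_density_cumsum = pop_cells_ct[asc_density_i].cumsum() / pop_cells_ct.sum()
    pop_ct_rural_b = asc_density_cumsum < rural_fraction_ct
    pop_ct_urban_b = ~pop_ct_rural_b

    pop_rural = pop_cells_ct.where(pop_ct_rural_b, 0.0)
    pop_urban = pop_cells_ct.where(pop_ct_urban_b, 0.0)
    # -> cell "a" (10% of the population, lowest density) is rural, "b" and "c" urban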
+ pop_rural += pop_cells_ct.where(pop_ct_rural_b, 0.0) + pop_urban += pop_cells_ct.where(pop_ct_urban_b, 0.0) pop_cells = {"total": pop_cells} pop_cells["rural"] = pop_rural pop_cells["urban"] = pop_urban for key, pop in pop_cells.items(): - - ycoords = ('y', cutout.coords['y'].data) - xcoords = ('x', cutout.coords['x'].data) + ycoords = ("y", cutout.coords["y"].data) + xcoords = ("x", cutout.coords["x"].data) values = pop.values.reshape(cutout.shape) layout = xr.DataArray(values, [ycoords, xcoords]) diff --git a/scripts/build_population_weighted_energy_totals.py b/scripts/build_population_weighted_energy_totals.py index 938983d5..9b4fb2b1 100644 --- a/scripts/build_population_weighted_energy_totals.py +++ b/scripts/build_population_weighted_energy_totals.py @@ -1,13 +1,17 @@ -"""Build population-weighted energy totals.""" +# -*- coding: utf-8 -*- +""" +Build population-weighted energy totals. +""" import pandas as pd -if __name__ == '__main__': - if 'snakemake' not in globals(): +if __name__ == "__main__": + if "snakemake" not in globals(): from helper import mock_snakemake + snakemake = mock_snakemake( - 'build_population_weighted_energy_totals', - simpl='', + "build_population_weighted_energy_totals", + simpl="", clusters=48, ) @@ -15,7 +19,7 @@ if __name__ == '__main__': energy_totals = pd.read_csv(snakemake.input.energy_totals, index_col=0) - nodal_energy_totals = energy_totals.loc[pop_layout.ct].fillna(0.) + nodal_energy_totals = energy_totals.loc[pop_layout.ct].fillna(0.0) nodal_energy_totals.index = pop_layout.index nodal_energy_totals = nodal_energy_totals.multiply(pop_layout.fraction, axis=0) diff --git a/scripts/build_retro_cost.py b/scripts/build_retro_cost.py index 7262e839..1d6e954a 100644 --- a/scripts/build_retro_cost.py +++ b/scripts/build_retro_cost.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ -Created on Fri Jan 22 10:36:39 2021 +Created on Fri Jan 22 10:36:39 2021. 
This script should calculate the space heating savings through better insulation of the thermal envelope of a building and corresponding costs for
@@ -73,8 +73,7 @@ import xarray as xr k = 0.035 # strength of relative retrofitting depending on the component # determined by historical data of insulation thickness for retrofitting -l_weight = pd.DataFrame({"weight": [1.95, 1.48, 1.]}, - index=["Roof", "Wall", "Floor"]) +l_weight = pd.DataFrame({"weight": [1.95, 1.48, 1.0]}, index=["Roof", "Wall", "Floor"]) # standard room height [m], used to calculate heat transfer by ventilation h_room = 2.5
@@ -91,34 +90,40 @@ alpha_H_0 = 0.8 # parameter for solar heat load during heating season ------------------------- # tabular standard values table p.8 in documentation -external_shading = 0.6 # vertical orientation: fraction of window area shaded [-] +external_shading = 0.6  # vertical orientation: fraction of window area shaded [-] frame_area_fraction = 0.3 # fraction of frame area of window [-] -non_perpendicular = 0.9 # reduction factor, considering radiation non perpendicular to the glazing[-] -solar_energy_transmittance = 0.5 # solar energy transmiitance for radiation perpecidular to the glazing [-] +non_perpendicular = ( + 0.9  # reduction factor, considering radiation non-perpendicular to the glazing [-] +) +solar_energy_transmittance = ( + 0.5  # solar energy transmittance for radiation perpendicular to the glazing [-] +) # solar global radiation [kWh/(m^2a)] -solar_global_radiation = pd.Series([246, 401, 246, 148], - index=["east", "south", "west", "north"], - name="solar_global_radiation [kWh/(m^2a)]") +solar_global_radiation = pd.Series( + [246, 401, 246, 148], + index=["east", "south", "west", "north"], + name="solar_global_radiation [kWh/(m^2a)]", +) # threshold temperature for heating [Celsius] -------------------------------- t_threshold = 15 # rename sectors # rename residential sub sectors -rename_sectors = {'Single family- Terraced houses': "SFH", - 'Multifamily houses': "MFH", - 'Appartment blocks': "AB"} +rename_sectors = { + "Single family- Terraced houses": "SFH", + "Multifamily houses": "MFH", + "Appartment blocks": "AB", +} # additional insulation thickness, determines maximum possible savings [m] -l_strength = [ - "0.07","0.075", "0.08", "0.1", "0.15", - "0.22", "0.24", "0.26" - ] +l_strength = ["0.07", "0.075", "0.08", "0.1", "0.15", "0.22", "0.24", "0.26"] # (ii) --- FUNCTIONS ---------------------------------------------------------- + def get_average_temperature_during_heating_season(temperature, t_threshold=15): """ returns average temperature during heating season
@@ -143,74 +148,99 @@ def prepare_building_stock_data(): """ - building_data = pd.read_csv(snakemake.input.building_stock, - usecols=list(range(13))) + building_data = pd.read_csv(snakemake.input.building_stock, usecols=list(range(13))) # standardize data building_data["type"].replace( - {'Covered area: heated [Mm²]': 'Heated area [Mm²]', - 'Windows ': 'Window', - 'Windows': 'Window', - 'Walls ': 'Wall', - 'Walls': 'Wall', - 'Roof ': 'Roof', - 'Floor ': 'Floor', - }, inplace=True) + { + "Covered area: heated [Mm²]": "Heated area [Mm²]", + "Windows ": "Window", + "Windows": "Window", + "Walls ": "Wall", + "Walls": "Wall", + "Roof ": "Roof", + "Floor ": "Floor", + }, + inplace=True, + ) building_data.country_code = building_data.country_code.str.upper() - building_data["subsector"].replace({'Hotels and Restaurants': - 'Hotels and restaurants'}, inplace=True) - building_data["sector"].replace({'Residential sector': 
'residential', - 'Service sector': 'services'}, - inplace=True) + building_data["subsector"].replace( + {"Hotels and Restaurants": "Hotels and restaurants"}, inplace=True + ) + building_data["sector"].replace( + {"Residential sector": "residential", "Service sector": "services"}, + inplace=True, + ) # extract u-values - u_values = building_data[(building_data.feature.str.contains("U-values")) - & (building_data.subsector != "Total")] + u_values = building_data[ + (building_data.feature.str.contains("U-values")) + & (building_data.subsector != "Total") + ] components = list(u_values.type.unique()) country_iso_dic = building_data.set_index("country")["country_code"].to_dict() # add missing /rename countries - country_iso_dic.update({'Norway': 'NO', - 'Iceland': 'IS', - 'Montenegro': 'ME', - 'Serbia': 'RS', - 'Albania': 'AL', - 'United Kingdom': 'GB', - 'Bosnia and Herzegovina': 'BA', - 'Switzerland': 'CH'}) + country_iso_dic.update( + { + "Norway": "NO", + "Iceland": "IS", + "Montenegro": "ME", + "Serbia": "RS", + "Albania": "AL", + "United Kingdom": "GB", + "Bosnia and Herzegovina": "BA", + "Switzerland": "CH", + } + ) # heated floor area ---------------------------------------------------------- - area = building_data[(building_data.type == 'Heated area [Mm²]') & - (building_data.subsector != "Total")] + area = building_data[ + (building_data.type == "Heated area [Mm²]") + & (building_data.subsector != "Total") + ] area_tot = area.groupby(["country", "sector"]).sum() - area = pd.concat([area, area.apply(lambda x: x.value / - area_tot.value.loc[(x.country, x.sector)], - axis=1).rename("weight")],axis=1) - area = area.groupby(['country', 'sector', 'subsector', 'bage']).sum() + area = pd.concat( + [ + area, + area.apply( + lambda x: x.value / area_tot.value.loc[(x.country, x.sector)], axis=1 + ).rename("weight"), + ], + axis=1, + ) + area = area.groupby(["country", "sector", "subsector", "bage"]).sum() area_tot.rename(index=country_iso_dic, inplace=True) # add for some missing countries floor area from other data sources - area_missing = pd.read_csv(snakemake.input.floor_area_missing, - index_col=[0, 1], usecols=[0, 1, 2, 3], - encoding='ISO-8859-1') + area_missing = pd.read_csv( + snakemake.input.floor_area_missing, + index_col=[0, 1], + usecols=[0, 1, 2, 3], + encoding="ISO-8859-1", + ) area_tot = area_tot.append(area_missing.unstack(level=-1).dropna().stack()) - area_tot = area_tot.loc[~area_tot.index.duplicated(keep='last')] + area_tot = area_tot.loc[~area_tot.index.duplicated(keep="last")] # for still missing countries calculate floor area by population size pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) pop_layout["ct"] = pop_layout.index.str[:2] ct_total = pop_layout.total.groupby(pop_layout["ct"]).sum() - area_per_pop = area_tot.unstack().reindex(index=ct_total.index).apply(lambda x: x / ct_total[x.index]) + area_per_pop = ( + area_tot.unstack() + .reindex(index=ct_total.index) + .apply(lambda x: x / ct_total[x.index]) + ) missing_area_ct = ct_total.index.difference(area_tot.index.levels[0]) for ct in missing_area_ct.intersection(ct_total.index): averaged_data = pd.DataFrame( - area_per_pop.value.reindex(map_for_missings[ct]).mean() - * ct_total[ct], - columns=["value"]) + area_per_pop.value.reindex(map_for_missings[ct]).mean() * ct_total[ct], + columns=["value"], + ) index = pd.MultiIndex.from_product([[ct], averaged_data.index.to_list()]) averaged_data.index = index averaged_data["estimated"] = 1 @@ -221,8 +251,7 @@ def prepare_building_stock_data(): # 
u_values for Poland are missing -> take them from eurostat ----------- u_values_PL = pd.read_csv(snakemake.input.u_values_PL) - u_values_PL.component.replace({"Walls":"Wall", "Windows": "Window"}, - inplace=True) + u_values_PL.component.replace({"Walls": "Wall", "Windows": "Window"}, inplace=True) area_PL = area.loc["Poland"].reset_index() data_PL = pd.DataFrame(columns=u_values.columns, index=area_PL.index) data_PL["country"] = "Poland" @@ -235,38 +264,44 @@ def prepare_building_stock_data(): data_PL_final = pd.DataFrame() for component in components: data_PL["type"] = component - data_PL["value"] = data_PL.apply(lambda x: u_values_PL[(u_values_PL.component==component) - & (u_values_PL.sector==x["sector"])] - [x["bage"]].iloc[0], axis=1) + data_PL["value"] = data_PL.apply( + lambda x: u_values_PL[ + (u_values_PL.component == component) + & (u_values_PL.sector == x["sector"]) + ][x["bage"]].iloc[0], + axis=1, + ) data_PL_final = data_PL_final.append(data_PL) - u_values = pd.concat([u_values, - data_PL_final]).reset_index(drop=True) + u_values = pd.concat([u_values, data_PL_final]).reset_index(drop=True) # clean data --------------------------------------------------------------- # smallest possible today u values for windows 0.8 (passive house standard) # maybe the u values for the glass and not the whole window including frame # for those types assumed in the dataset - u_values.loc[(u_values.type=="Window") & (u_values.value<0.8), "value"] = 0.8 + u_values.loc[(u_values.type == "Window") & (u_values.value < 0.8), "value"] = 0.8 # drop unnecessary columns - u_values.drop(['topic', 'feature','detail', 'estimated','unit'], - axis=1, inplace=True, errors="ignore") - + u_values.drop( + ["topic", "feature", "detail", "estimated", "unit"], + axis=1, + inplace=True, + errors="ignore", + ) u_values.subsector.replace(rename_sectors, inplace=True) u_values.btype.replace(rename_sectors, inplace=True) # for missing weighting of surfaces of building types assume MFH u_values["assumed_subsector"] = u_values.subsector - u_values.loc[~u_values.subsector.isin(rename_sectors.values()), - "assumed_subsector"] = 'MFH' + u_values.loc[ + ~u_values.subsector.isin(rename_sectors.values()), "assumed_subsector" + ] = "MFH" - u_values.country_code.replace({"UK":"GB"}, inplace=True) - u_values.bage.replace({'Berfore 1945':'Before 1945'}, inplace=True) + u_values.country_code.replace({"UK": "GB"}, inplace=True) + u_values.bage.replace({"Berfore 1945": "Before 1945"}, inplace=True) u_values = u_values[~u_values.bage.isna()] - u_values.set_index(["country_code", "subsector", "bage", "type"], - inplace=True) + u_values.set_index(["country_code", "subsector", "bage", "type"], inplace=True) # only take in config.yaml specified countries into account countries = ct_total.index @@ -275,35 +310,45 @@ def prepare_building_stock_data(): return u_values, country_iso_dic, countries, area_tot, area - def prepare_building_topology(u_values, same_building_topology=True): """ - reads in typical building topologies (e.g. average surface of building elements) - and typical losses through thermal bridging and air ventilation + Reads in typical building topologies (e.g. average surface of building + elements) and typical losses through thermal bridging and air ventilation. 
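The columns read below (component areas, reference floor area A_C_Ref, air change rates and the thermal bridging allowance) later enter the specific heat transfer per floor area, in W/(m^2K), as

    H = sum_e U_e * A_e / A_C_Ref                            (transmission, H_tr_e)
      + delta_U_ThermalBridging * A_envelope / A_C_Ref       (thermal bridges, H_tb)
      + c_p_air * (n_air_use + n_air_infiltration) * h_room  (ventilation, H_ve)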
""" - data_tabula = pd.read_csv(snakemake.input.data_tabula, - skiprows=lambda x: x in range(1,11), - low_memory=False).iloc[:2974] + data_tabula = pd.read_csv( + snakemake.input.data_tabula, + skiprows=lambda x: x in range(1, 11), + low_memory=False, + ).iloc[:2974] - parameters = ["Code_Country", - # building type (SFH/MFH/AB) - "Code_BuildingSizeClass", - # time period of build year - "Year1_Building", "Year2_Building", - # areas [m^2] - "A_C_Ref", # conditioned area, internal - "A_Roof_1", "A_Roof_2", "A_Wall_1", "A_Wall_2", - "A_Floor_1", "A_Floor_2", "A_Window_1", "A_Window_2", - # for air ventilation loses [1/h] - "n_air_use", "n_air_infiltration", - # for losses due to thermal bridges, standard values [W/(m^2K)] - "delta_U_ThermalBridging", - # floor area related heat transfer coefficient by transmission [-] - "F_red_temp", - # refurbishment state [1: not refurbished, 2: moderate ,3: strong refurbishment] - 'Number_BuildingVariant', - ] + parameters = [ + "Code_Country", + # building type (SFH/MFH/AB) + "Code_BuildingSizeClass", + # time period of build year + "Year1_Building", + "Year2_Building", + # areas [m^2] + "A_C_Ref", # conditioned area, internal + "A_Roof_1", + "A_Roof_2", + "A_Wall_1", + "A_Wall_2", + "A_Floor_1", + "A_Floor_2", + "A_Window_1", + "A_Window_2", + # for air ventilation loses [1/h] + "n_air_use", + "n_air_infiltration", + # for losses due to thermal bridges, standard values [W/(m^2K)] + "delta_U_ThermalBridging", + # floor area related heat transfer coefficient by transmission [-] + "F_red_temp", + # refurbishment state [1: not refurbished, 2: moderate ,3: strong refurbishment] + "Number_BuildingVariant", + ] data_tabula = data_tabula[parameters] @@ -311,120 +356,158 @@ def prepare_building_topology(u_values, same_building_topology=True): # get total area of building components for element in building_elements: - elements = ["A_{}_1".format(element), - "A_{}_2".format(element)] - data_tabula = pd.concat([data_tabula.drop(elements, axis=1), - data_tabula[elements].sum(axis=1).rename("A_{}".format(element))], - axis=1) + elements = ["A_{}_1".format(element), "A_{}_2".format(element)] + data_tabula = pd.concat( + [ + data_tabula.drop(elements, axis=1), + data_tabula[elements].sum(axis=1).rename("A_{}".format(element)), + ], + axis=1, + ) # clean data - data_tabula = data_tabula.loc[pd.concat([data_tabula[col]!=0 for col in - ["A_Wall", "A_Floor", "A_Window", "A_Roof", "A_C_Ref"]], - axis=1).all(axis=1)] - data_tabula = data_tabula[data_tabula.Number_BuildingVariant.isin([1,2,3])] - data_tabula = data_tabula[data_tabula.Code_BuildingSizeClass.isin(["AB", "SFH", "MFH", "TH"])] - - + data_tabula = data_tabula.loc[ + pd.concat( + [ + data_tabula[col] != 0 + for col in ["A_Wall", "A_Floor", "A_Window", "A_Roof", "A_C_Ref"] + ], + axis=1, + ).all(axis=1) + ] + data_tabula = data_tabula[data_tabula.Number_BuildingVariant.isin([1, 2, 3])] + data_tabula = data_tabula[ + data_tabula.Code_BuildingSizeClass.isin(["AB", "SFH", "MFH", "TH"]) + ] # map tabula building periods to hotmaps building periods def map_periods(build_year1, build_year2): - periods = {(0, 1945): 'Before 1945', - (1945,1969) : '1945 - 1969', - (1970, 1979) :'1970 - 1979', - (1980, 1989) : '1980 - 1989', - (1990, 1999) :'1990 - 1999', - (2000, 2010) : '2000 - 2010', - (2010, 10000) : 'Post 2010'} + periods = { + (0, 1945): "Before 1945", + (1945, 1969): "1945 - 1969", + (1970, 1979): "1970 - 1979", + (1980, 1989): "1980 - 1989", + (1990, 1999): "1990 - 1999", + (2000, 2010): "2000 - 2010", + (2010, 
10000): "Post 2010", + } minimum = 1e5 for key in periods: - diff = abs(build_year1-key[0]) + abs(build_year2-key[1]) + diff = abs(build_year1 - key[0]) + abs(build_year2 - key[1]) if diff < minimum: minimum = diff searched_period = periods[key] return searched_period - data_tabula["bage"] = data_tabula.apply(lambda x: map_periods(x.Year1_Building, x.Year2_Building), - axis=1) + data_tabula["bage"] = data_tabula.apply( + lambda x: map_periods(x.Year1_Building, x.Year2_Building), axis=1 + ) # set new index - data_tabula = data_tabula.set_index(['Code_Country', 'Code_BuildingSizeClass', - 'bage', 'Number_BuildingVariant']) + data_tabula = data_tabula.set_index( + ["Code_Country", "Code_BuildingSizeClass", "bage", "Number_BuildingVariant"] + ) # get typical building topology - area_cols = ['A_C_Ref', 'A_Floor', 'A_Roof', 'A_Wall', 'A_Window'] - typical_building = (data_tabula.groupby(level=[1,2]).mean() - .rename(index={"TH": "SFH"}).groupby(level=[0,1]).mean()) + area_cols = ["A_C_Ref", "A_Floor", "A_Roof", "A_Wall", "A_Window"] + typical_building = ( + data_tabula.groupby(level=[1, 2]) + .mean() + .rename(index={"TH": "SFH"}) + .groupby(level=[0, 1]) + .mean() + ) # drop duplicates data_tabula = data_tabula[~data_tabula.index.duplicated(keep="first")] # fill missing values - hotmaps_data_i = u_values.reset_index().set_index(["country_code", "assumed_subsector", - "bage"]).index + hotmaps_data_i = ( + u_values.reset_index() + .set_index(["country_code", "assumed_subsector", "bage"]) + .index + ) # missing countries in tabular missing_ct = data_tabula.unstack().reindex(hotmaps_data_i.unique()) # areas should stay constant for different retrofitting measures - cols_constant = ['Year1_Building', 'Year2_Building', 'A_C_Ref','A_Roof', - 'A_Wall', 'A_Floor', 'A_Window'] + cols_constant = [ + "Year1_Building", + "Year2_Building", + "A_C_Ref", + "A_Roof", + "A_Wall", + "A_Floor", + "A_Window", + ] for col in cols_constant: - missing_ct[col] = missing_ct[col].combine_first(missing_ct[col] - .groupby(level=[0,1,2]).mean()) - missing_ct = missing_ct.unstack().unstack().fillna(missing_ct.unstack() - .unstack().mean()) - data_tabula = missing_ct.stack(level=[-1,-2, -3],dropna=False) + missing_ct[col] = missing_ct[col].combine_first( + missing_ct[col].groupby(level=[0, 1, 2]).mean() + ) + missing_ct = ( + missing_ct.unstack().unstack().fillna(missing_ct.unstack().unstack().mean()) + ) + data_tabula = missing_ct.stack(level=[-1, -2, -3], dropna=False) # sets for different countries same building topology which only depends on # build year and subsector (MFH, SFH, AB) if same_building_topology: - typical_building = ((typical_building.reindex(data_tabula.droplevel(0).index)) - .set_index(data_tabula.index)) + typical_building = ( + typical_building.reindex(data_tabula.droplevel(0).index) + ).set_index(data_tabula.index) data_tabula.update(typical_building[area_cols]) # total buildings envelope surface [m^2] - data_tabula["A_envelope"] = data_tabula[["A_{}".format(element) for - element in building_elements]].sum(axis=1) + data_tabula["A_envelope"] = data_tabula[ + ["A_{}".format(element) for element in building_elements] + ].sum(axis=1) return data_tabula def prepare_cost_retro(country_iso_dic): """ - read and prepare retro costs, annualises them if annualise_cost=True + Read and prepare retro costs, annualises them if annualise_cost=True. 
""" - cost_retro = pd.read_csv(snakemake.input.cost_germany, - nrows=4, index_col=0, usecols=[0, 1, 2, 3]) + cost_retro = pd.read_csv( + snakemake.input.cost_germany, nrows=4, index_col=0, usecols=[0, 1, 2, 3] + ) cost_retro.rename(lambda x: x.capitalize(), inplace=True) - window_assumptions = pd.read_csv(snakemake.input.window_assumptions, - skiprows=[1], usecols=[0,1,2,3], nrows=2) + window_assumptions = pd.read_csv( + snakemake.input.window_assumptions, skiprows=[1], usecols=[0, 1, 2, 3], nrows=2 + ) if annualise_cost: - cost_retro[["cost_fix", "cost_var"]] = (cost_retro[["cost_fix", "cost_var"]] - .apply(lambda x: x * interest_rate / - (1 - (1 + interest_rate) - ** -cost_retro.loc[x.index, - "life_time"]))) + cost_retro[["cost_fix", "cost_var"]] = cost_retro[ + ["cost_fix", "cost_var"] + ].apply( + lambda x: x + * interest_rate + / (1 - (1 + interest_rate) ** -cost_retro.loc[x.index, "life_time"]) + ) # weightings of costs --------------------------------------------- if construction_index: - cost_w = pd.read_csv(snakemake.input.construction_index, - skiprows=3, nrows=32, index_col=0) + cost_w = pd.read_csv( + snakemake.input.construction_index, skiprows=3, nrows=32, index_col=0 + ) # since German retrofitting costs are assumed - cost_w = ((cost_w["2018"] / cost_w.loc["Germany", "2018"]) - .rename(index=country_iso_dic)) + cost_w = (cost_w["2018"] / cost_w.loc["Germany", "2018"]).rename( + index=country_iso_dic + ) else: cost_w = None if tax_weighting: - tax_w = pd.read_csv(snakemake.input.tax_w, - header=12, nrows=39, index_col=0, usecols=[0, 4]) + tax_w = pd.read_csv( + snakemake.input.tax_w, header=12, nrows=39, index_col=0, usecols=[0, 4] + ) tax_w.rename(index=country_iso_dic, inplace=True) - tax_w = tax_w.apply(pd.to_numeric, errors='coerce').iloc[:, 0] + tax_w = tax_w.apply(pd.to_numeric, errors="coerce").iloc[:, 0] tax_w.dropna(inplace=True) else: tax_w = None - return cost_retro, window_assumptions, cost_w, tax_w @@ -432,81 +515,115 @@ def prepare_temperature_data(): """ returns the temperature dependent data for each country: - d_heat : length of heating season pd.Series(index=countries) [days/year] - on those days, daily average temperature is below - threshold temperature t_threshold - temperature_factor : accumulated difference between internal and - external temperature pd.Series(index=countries) ([K]) * [days/year] - - temperature_factor = (t_threshold - temperature_average_d_heat) * d_heat * 1/365 + d_heat : length of heating season pd.Series(index=countries) [days/year] + on those days, daily average temperature is below + threshold temperature t_threshold + temperature_factor : accumulated difference between internal and + external temperature pd.Series(index=countries) ([K]) * [days/year] + temperature_factor = (t_threshold - temperature_average_d_heat) * d_heat * 1/365 """ temperature = xr.open_dataarray(snakemake.input.air_temperature).to_pandas() - d_heat = (temperature.groupby(temperature.columns.str[:2], axis=1).mean() - .resample("1D").mean()window_limit(float(l), window_assumptions) else 0), - axis=1) + return u_values.apply( + lambda x: ( + cost_retro.loc[x.name[3], "cost_var"] + * 100 + * float(l) + * l_weight.loc[x.name[3]][0] + + cost_retro.loc[x.name[3], "cost_fix"] + ) + * x.A_element + / x.A_C_Ref + if x.name[3] != "Window" + else ( + window_cost(x["new_U_{}".format(l)], cost_retro, window_assumptions) + * x.A_element + / x.A_C_Ref + if x.value > window_limit(float(l), window_assumptions) + else 0 + ), + axis=1, + ) def calculate_new_u(u_values, l, 
l_weight, window_assumptions, k=0.035): """ calculate U-values after building retrofitting, depending on the old - U-values (u_values). This is for simple insulation measuers, adding - an additional layer of insulation. + U-values (u_values). This is for simple insulation measures, adding an + additional layer of insulation. They depend for the components Roof, Wall, Floor on the additional insulation thickness (l), and the weighting for the corresponding
@@ -522,20 +639,22 @@ def calculate_new_u(u_values, l, l_weight, window_assumptions, k=0.035): l: string l_weight: pd.DataFrame (component, weight) k: thermal conductivity - """ - return u_values.apply(lambda x: - k / ((k / x.value) + - (float(l) * l_weight.loc[x.name[3]])) - if x.name[3]!="Window" - else (min(x.value, u_retro_window(float(l), window_assumptions)) - if x.value>window_limit(float(l), window_assumptions) else x.value), - axis=1) + return u_values.apply( + lambda x: k / ((k / x.value) + (float(l) * l_weight.loc[x.name[3]])) + if x.name[3] != "Window" + else ( + min(x.value, u_retro_window(float(l), window_assumptions)) + if x.value > window_limit(float(l), window_assumptions) + else x.value + ), + axis=1, + ) def map_tabula_to_hotmaps(df_tabula, df_hotmaps, column_prefix): """ - maps tabula data to hotmaps data with wished column name prefix + Maps tabula data to hotmaps data with the desired column name prefix. Parameters ----------
@@ -550,12 +669,12 @@ def map_tabula_to_hotmaps(df_tabula, df_hotmaps, column_prefix): ------- pd.DataFrame (index=df_hotmaps.index) returns df_tabula with hotmaps index - """ - values = (df_tabula.unstack() - .reindex(df_hotmaps.rename(index = - lambda x: "MFH" if x not in rename_sectors.values() - else x, level=1).index)) + values = df_tabula.unstack().reindex( + df_hotmaps.rename( + index=lambda x: "MFH" if x not in rename_sectors.values() else x, level=1 + ).index + ) values.columns = pd.MultiIndex.from_product([[column_prefix], values.columns]) values.index = df_hotmaps.index return values
@@ -563,12 +682,18 @@ def map_tabula_to_hotmaps(df_tabula, df_hotmaps, column_prefix): def get_solar_gains_per_year(window_area): """ - returns solar heat gains during heating season in [kWh/a] depending on - the window area [m^2] of the building, assuming a equal distributed window + returns solar heat gains during heating season in [kWh/a] depending on the + window area [m^2] of the building, assuming an equally distributed window orientation (east, south, north, west) """ - return sum(external_shading * frame_area_fraction * non_perpendicular - * 0.25 * window_area * solar_global_radiation) + return sum( + external_shading + * frame_area_fraction + * non_perpendicular + * 0.25 + * window_area + * solar_global_radiation + ) def map_to_lstrength(l_strength, df):
@@ -577,18 +702,26 @@ def map_to_lstrength(l_strength, df): strengths [2 = moderate, 3 = ambitious] to l_strength """ middle = len(l_strength) // 2 - map_to_l = pd.MultiIndex.from_arrays([middle*[2] + len(l_strength[middle:])*[3],l_strength]) - l_strength_df = (df.stack(-2).reindex(map_to_l, axis=1, level=0) - .droplevel(0, axis=1).unstack().swaplevel(axis=1).dropna(axis=1)) - return pd.concat([df.drop([2,3], axis=1, level=1), l_strength_df], axis=1) + map_to_l = pd.MultiIndex.from_arrays( + [middle * [2] + len(l_strength[middle:]) * [3], l_strength] + ) + l_strength_df = ( + df.stack(-2) + .reindex(map_to_l, axis=1, level=0) + .droplevel(0, axis=1) + .unstack() + .swaplevel(axis=1) + .dropna(axis=1) + ) + return pd.concat([df.drop([2, 3], axis=1, 
level=1), l_strength_df], axis=1) def calculate_heat_losses(u_values, data_tabula, l_strength, temperature_factor): """ - calculates total annual heat losses Q_ht for different insulation thicknesses - (l_strength), depending on current insulation state (u_values), standard - building topologies and air ventilation from TABULA (data_tabula) and - the accumulated difference between internal and external temperature + calculates total annual heat losses Q_ht for different insulation + thicknesses (l_strength), depending on current insulation state (u_values), + standard building topologies and air ventilation from TABULA (data_tabula) + and the accumulated difference between internal and external temperature during the heating season (temperature_factor). Total annual heat losses Q_ht constitute from losses by: @@ -602,76 +735,105 @@ def calculate_heat_losses(u_values, data_tabula, l_strength, temperature_factor) returns Q_ht as pd.DataFrame(index=['country_code', 'subsector', 'bage'], columns=[current (1.) + retrofitted (l_strength)]) - """ # (1) by transmission # calculate new U values of building elements due to additional insulation for l in l_strength: - u_values["new_U_{}".format(l)] = calculate_new_u(u_values, - l, l_weight, window_assumptions) + u_values["new_U_{}".format(l)] = calculate_new_u( + u_values, l, l_weight, window_assumptions + ) # surface area of building components [m^2] - area_element = (data_tabula[["A_{}".format(e) for e in u_values.index.levels[3]]] - .rename(columns=lambda x: x[2:]).stack().unstack(-2).stack()) - u_values["A_element"] = map_tabula_to_hotmaps(area_element, - u_values, "A_element").xs(1, level=1, axis=1) + area_element = ( + data_tabula[["A_{}".format(e) for e in u_values.index.levels[3]]] + .rename(columns=lambda x: x[2:]) + .stack() + .unstack(-2) + .stack() + ) + u_values["A_element"] = map_tabula_to_hotmaps( + area_element, u_values, "A_element" + ).xs(1, level=1, axis=1) # heat transfer H_tr_e [W/m^2K] through building element # U_e * A_e / A_C_Ref columns = ["value"] + ["new_U_{}".format(l) for l in l_strength] - heat_transfer = pd.concat([u_values[columns].mul(u_values.A_element, axis=0), - u_values.A_element], axis=1) + heat_transfer = pd.concat( + [u_values[columns].mul(u_values.A_element, axis=0), u_values.A_element], axis=1 + ) # get real subsector back in index heat_transfer.index = u_values.index - heat_transfer = heat_transfer.groupby(level=[0,1,2]).sum() + heat_transfer = heat_transfer.groupby(level=[0, 1, 2]).sum() # rename columns of heat transfer H_tr_e [W/K] and envelope surface A_envelope [m^2] - heat_transfer.rename(columns={"A_element":"A_envelope", - },inplace=True) + heat_transfer.rename( + columns={ + "A_element": "A_envelope", + }, + inplace=True, + ) # map reference area - heat_transfer["A_C_Ref"] = map_tabula_to_hotmaps(data_tabula.A_C_Ref, - heat_transfer, - "A_C_Ref").xs(1.,level=1,axis=1) - u_values["A_C_Ref"] = map_tabula_to_hotmaps(data_tabula.A_C_Ref, - u_values, - "A_C_Ref").xs(1.,level=1,axis=1) + heat_transfer["A_C_Ref"] = map_tabula_to_hotmaps( + data_tabula.A_C_Ref, heat_transfer, "A_C_Ref" + ).xs(1.0, level=1, axis=1) + u_values["A_C_Ref"] = map_tabula_to_hotmaps( + data_tabula.A_C_Ref, u_values, "A_C_Ref" + ).xs(1.0, level=1, axis=1) # get heat transfer by transmission through building element [W/(m^2K)] heat_transfer_perm2 = heat_transfer[columns].div(heat_transfer.A_C_Ref, axis=0) - heat_transfer_perm2.columns = pd.MultiIndex.from_product([["H_tr_e"], [1.] 
+ l_strength]) + heat_transfer_perm2.columns = pd.MultiIndex.from_product( + [["H_tr_e"], [1.0] + l_strength] + ) # (2) heat transfer by thermal bridges H_tb [W/(m^2K)] # H_tb = delta_U [W/(m^2K)]* A_envelope [m^2] / A_C_Ref [m^2] - H_tb_tabula = data_tabula.delta_U_ThermalBridging * data_tabula.A_envelope / data_tabula.A_C_Ref - heat_transfer_perm2 = pd.concat([heat_transfer_perm2, - map_tabula_to_hotmaps(H_tb_tabula, heat_transfer_perm2, "H_tb")], axis=1) - + H_tb_tabula = ( + data_tabula.delta_U_ThermalBridging + * data_tabula.A_envelope + / data_tabula.A_C_Ref + ) + heat_transfer_perm2 = pd.concat( + [ + heat_transfer_perm2, + map_tabula_to_hotmaps(H_tb_tabula, heat_transfer_perm2, "H_tb"), + ], + axis=1, + ) # (3) by ventilation H_ve [W/(m²K)] # = c_p_air [Wh/(m^3K)] * (n_air_use + n_air_infilitraion) [1/h] * h_room [m] - H_ve_tabula = (data_tabula.n_air_infiltration + data_tabula.n_air_use) * c_p_air * h_room - heat_transfer_perm2 = pd.concat([heat_transfer_perm2, - map_tabula_to_hotmaps(H_ve_tabula, heat_transfer_perm2, "H_ve")], - axis=1) - + H_ve_tabula = ( + (data_tabula.n_air_infiltration + data_tabula.n_air_use) * c_p_air * h_room + ) + heat_transfer_perm2 = pd.concat( + [ + heat_transfer_perm2, + map_tabula_to_hotmaps(H_ve_tabula, heat_transfer_perm2, "H_ve"), + ], + axis=1, + ) # F_red_temp factor which is taken account for non-uniform heating e.g. # lower heating/switch point during night times/weekends # effect is significant for buildings with poor insulation # for well insulated buildings/passive houses it has nearly no effect # based on tabula values depending on the building type - F_red_temp = map_tabula_to_hotmaps(data_tabula.F_red_temp, - heat_transfer_perm2, - "F_red_temp") + F_red_temp = map_tabula_to_hotmaps( + data_tabula.F_red_temp, heat_transfer_perm2, "F_red_temp" + ) # total heat transfer Q_ht [W/m^2] = # (H_tr_e + H_tb + H_ve) [W/m^2K] * F_red_temp * temperature_factor [K] # temperature_factor = (t_threshold - temperature_average_d_heat) * d_heat * 1/365 heat_transfer_perm2 = map_to_lstrength(l_strength, heat_transfer_perm2) F_red_temp = map_to_lstrength(l_strength, F_red_temp) - Q_ht = (heat_transfer_perm2.groupby(level=1,axis=1).sum() - .mul(F_red_temp.droplevel(0, axis=1)) - .mul(temperature_factor.reindex(heat_transfer_perm2.index,level=0), axis=0)) + Q_ht = ( + heat_transfer_perm2.groupby(level=1, axis=1) + .sum() + .mul(F_red_temp.droplevel(0, axis=1)) + .mul(temperature_factor.reindex(heat_transfer_perm2.index, level=0), axis=0) + ) return Q_ht, heat_transfer_perm2 @@ -679,48 +841,58 @@ def calculate_heat_losses(u_values, data_tabula, l_strength, temperature_factor) def calculate_heat_gains(data_tabula, heat_transfer_perm2, d_heat): """ calculates heat gains Q_gain [W/m^2], which consititure from gains by: - (1) solar radiation - (2) internal heat gains + (1) solar radiation (2) internal heat gains """ - # (1) by solar radiation H_solar [W/m^2] + # (1) by solar radiation H_solar [W/m^2] # solar radiation [kWhm^2/a] / A_C_Ref [m^2] *1e3[1/k] / 8760 [a/h] - H_solar = (data_tabula.A_Window.apply(lambda x: get_solar_gains_per_year(x)) - / data_tabula.A_C_Ref * 1e3 / 8760) + H_solar = ( + data_tabula.A_Window.apply(lambda x: get_solar_gains_per_year(x)) + / data_tabula.A_C_Ref + * 1e3 + / 8760 + ) - Q_gain = map_tabula_to_hotmaps(H_solar, heat_transfer_perm2, "H_solar").xs(1.,level=1, axis=1) + Q_gain = map_tabula_to_hotmaps(H_solar, heat_transfer_perm2, "H_solar").xs( + 1.0, level=1, axis=1 + ) # (2) by internal H_int # phi [W/m^2] * d_heat [d/a] * 1/365 
[a/d] -> W/m^2 - Q_gain["H_int"] = (phi_int * d_heat * 1/365).reindex(index=heat_transfer_perm2.index, level=0) + Q_gain["H_int"] = (phi_int * d_heat * 1 / 365).reindex( + index=heat_transfer_perm2.index, level=0 + ) return Q_gain + def calculate_gain_utilisation_factor(heat_transfer_perm2, Q_ht, Q_gain): """ - calculates gain utilisation factor nu + Calculates gain utilisation factor nu. """ # time constant of the building tau [h] = c_m [Wh/(m^2K)] * 1 /(H_tr_e+H_tb*H_ve) [m^2 K /W] - tau = c_m / heat_transfer_perm2.groupby(level=1,axis=1).sum() - alpha = alpha_H_0 + (tau/tau_H_0) + tau = c_m / heat_transfer_perm2.groupby(level=1, axis=1).sum() + alpha = alpha_H_0 + (tau / tau_H_0) # heat balance ratio gamma = (1 / Q_ht).mul(Q_gain.sum(axis=1), axis=0) # gain utilisation factor - nu = (1 - gamma**alpha) / (1 - gamma**(alpha+1)) + nu = (1 - gamma**alpha) / (1 - gamma ** (alpha + 1)) return nu -def calculate_space_heat_savings(u_values, data_tabula, l_strength, - temperature_factor, d_heat): +def calculate_space_heat_savings( + u_values, data_tabula, l_strength, temperature_factor, d_heat +): """ calculates space heat savings (dE_space [per unit of unrefurbished state]) through retrofitting of the thermal envelope by additional insulation material (l_strength[m]) """ # heat losses Q_ht [W/m^2] - Q_ht, heat_transfer_perm2 = calculate_heat_losses(u_values, data_tabula, - l_strength, temperature_factor) + Q_ht, heat_transfer_perm2 = calculate_heat_losses( + u_values, data_tabula, l_strength, temperature_factor + ) # heat gains Q_gain [W/m^2] Q_gain = calculate_heat_gains(data_tabula, heat_transfer_perm2, d_heat) @@ -729,7 +901,7 @@ def calculate_space_heat_savings(u_values, data_tabula, l_strength, # total space heating demand E_space E_space = Q_ht - nu.mul(Q_gain.sum(axis=1), axis=0) - dE_space = E_space.div(E_space[1.], axis=0).iloc[:, 1:] + dE_space = E_space.div(E_space[1.0], axis=0).iloc[:, 1:] dE_space.columns = pd.MultiIndex.from_product([["dE"], l_strength]) return dE_space @@ -737,45 +909,65 @@ def calculate_space_heat_savings(u_values, data_tabula, l_strength, def calculate_retro_costs(u_values, l_strength, cost_retro): """ - returns costs of different retrofitting measures + Returns costs of different retrofitting measures. """ - costs = pd.concat([calculate_costs(u_values, l, cost_retro, window_assumptions).rename(l) - for l in l_strength], axis=1) + costs = pd.concat( + [ + calculate_costs(u_values, l, cost_retro, window_assumptions).rename(l) + for l in l_strength + ], + axis=1, + ) # energy and costs per country, sector, subsector and year - cost_tot = costs.groupby(level=['country_code', 'subsector', 'bage']).sum() + cost_tot = costs.groupby(level=["country_code", "subsector", "bage"]).sum() cost_tot.columns = pd.MultiIndex.from_product([["cost"], cost_tot.columns]) return cost_tot -def sample_dE_costs_area(area, area_tot, costs, dE_space, countries, - construction_index, tax_weighting): +def sample_dE_costs_area( + area, area_tot, costs, dE_space, countries, construction_index, tax_weighting +): """ - bring costs and energy savings together, fill area and costs per energy - savings for missing countries, weight costs, - determine "moderate" and "ambitious" retrofitting + Bring costs and energy savings together, fill area and costs per energy + savings for missing countries, weight costs, determine "moderate" and + "ambitious" retrofitting. 
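The "moderate" strength is chosen per country and sector as the l_strength minimising the cost per unit of energy saved, cost / (1 - dE); the "ambitious" strength is fixed to the thickest considered insulation ("0.26" m).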
""" - sub_to_sector_dict = (area.reset_index().replace(rename_sectors) - .set_index("subsector")["sector"].to_dict()) + sub_to_sector_dict = ( + area.reset_index() + .replace(rename_sectors) + .set_index("subsector")["sector"] + .to_dict() + ) - area_reordered = ((area.rename(index=country_iso_dic, level=0) - .rename(index=rename_sectors, level=2) - .reset_index()).rename(columns={"country":"country_code"}) - .set_index(["country_code", "subsector", "bage"])) + area_reordered = ( + ( + area.rename(index=country_iso_dic, level=0) + .rename(index=rename_sectors, level=2) + .reset_index() + ) + .rename(columns={"country": "country_code"}) + .set_index(["country_code", "subsector", "bage"]) + ) - cost_dE =(pd.concat([costs, dE_space], axis=1) - .mul(area_reordered.weight, axis=0) - .rename(sub_to_sector_dict,level=1).groupby(level=[0,1]).sum()) + cost_dE = ( + pd.concat([costs, dE_space], axis=1) + .mul(area_reordered.weight, axis=0) + .rename(sub_to_sector_dict, level=1) + .groupby(level=[0, 1]) + .sum() + ) # map missing countries for ct in countries.difference(cost_dE.index.levels[0]): - averaged_data = (cost_dE.reindex(index=map_for_missings[ct], level=0).mean(level=1) - .set_index(pd.MultiIndex - .from_product([[ct], cost_dE.index.levels[1]]))) + averaged_data = ( + cost_dE.reindex(index=map_for_missings[ct], level=0) + .mean(level=1) + .set_index(pd.MultiIndex.from_product([[ct], cost_dE.index.levels[1]])) + ) cost_dE = cost_dE.append(averaged_data) - # weights costs after construction index if construction_index: for ct in list(map_for_missings.keys() - cost_w.index): @@ -789,59 +981,74 @@ def sample_dE_costs_area(area, area_tot, costs, dE_space, countries, cost_dE.cost = cost_dE.cost.mul(tax_w, level=0, axis=0) # drop not considered countries - cost_dE = cost_dE.reindex(countries,level=0) + cost_dE = cost_dE.reindex(countries, level=0) # get share of residential and service floor area sec_w = area_tot.value / area_tot.value.groupby(level=0).sum() # get the total cost-energy-savings weight by sector area - tot = (cost_dE.mul(sec_w, axis=0).groupby(level="country_code").sum() - .set_index(pd.MultiIndex - .from_product([cost_dE.index.unique(level="country_code"), ["tot"]]))) + tot = ( + cost_dE.mul(sec_w, axis=0) + .groupby(level="country_code") + .sum() + .set_index( + pd.MultiIndex.from_product( + [cost_dE.index.unique(level="country_code"), ["tot"]] + ) + ) + ) cost_dE = cost_dE.append(tot).unstack().stack() - summed_area = (pd.DataFrame(area_tot.groupby("country").sum()) - .set_index(pd.MultiIndex.from_product( - [area_tot.index.unique(level="country"), ["tot"]]))) + summed_area = pd.DataFrame(area_tot.groupby("country").sum()).set_index( + pd.MultiIndex.from_product([area_tot.index.unique(level="country"), ["tot"]]) + ) area_tot = area_tot.append(summed_area).unstack().stack() - - - cost_per_saving = (cost_dE["cost"] / (1-cost_dE["dE"])) #.diff(axis=1).dropna(axis=1) - + cost_per_saving = cost_dE["cost"] / ( + 1 - cost_dE["dE"] + ) # .diff(axis=1).dropna(axis=1) moderate_min = cost_per_saving.idxmin(axis=1) - moderate_dE_cost = pd.concat([cost_dE.loc[i].xs(moderate_min.loc[i], level=1) - for i in moderate_min.index], axis=1).T - moderate_dE_cost.columns = pd.MultiIndex.from_product([moderate_dE_cost.columns, - ["moderate"]]) + moderate_dE_cost = pd.concat( + [cost_dE.loc[i].xs(moderate_min.loc[i], level=1) for i in moderate_min.index], + axis=1, + ).T + moderate_dE_cost.columns = pd.MultiIndex.from_product( + [moderate_dE_cost.columns, ["moderate"]] + ) - ambitious_dE_cost = 
cost_dE.xs("0.26", level=1,axis=1) - ambitious_dE_cost.columns = pd.MultiIndex.from_product([ambitious_dE_cost.columns, - ["ambitious"]]) + ambitious_dE_cost = cost_dE.xs("0.26", level=1, axis=1) + ambitious_dE_cost.columns = pd.MultiIndex.from_product( + [ambitious_dE_cost.columns, ["ambitious"]] + ) cost_dE_new = pd.concat([moderate_dE_cost, ambitious_dE_cost], axis=1) return cost_dE_new, area_tot -#%% --- MAIN -------------------------------------------------------------- +# %% --- MAIN -------------------------------------------------------------- if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from helper import mock_snakemake + snakemake = mock_snakemake( - 'build_retro_cost', - simpl='', + "build_retro_cost", + simpl="", clusters=48, lv=1.0, - sector_opts='Co2L0-168H-T-H-B-I-solar3-dist1' + sector_opts="Co2L0-168H-T-H-B-I-solar3-dist1", ) -# ******** config ********************************************************* + # ******** config ********************************************************* - retro_opts = snakemake.config["sector"]["retrofitting"] + retro_opts = snakemake.config["sector"]["retrofitting"] interest_rate = retro_opts["interest_rate"] annualise_cost = retro_opts["annualise_cost"] # annualise the investment costs - tax_weighting = retro_opts["tax_weighting"] # weight costs depending on taxes in countries - construction_index = retro_opts["construction_index"] # weight costs depending on labour/material costs per ct + tax_weighting = retro_opts[ + "tax_weighting" + ] # weight costs depending on taxes in countries + construction_index = retro_opts[ + "construction_index" + ] # weight costs depending on labour/material costs per ct # mapping missing countries by neighbours map_for_missings = { @@ -852,9 +1059,9 @@ if __name__ == "__main__": "ME": ["BA", "AL", "RS", "HR"], "CH": ["SE", "DE"], "NO": ["SE"], - } + } -# (1) prepare data ********************************************************** + # (1) prepare data ********************************************************** # building stock data ----------------------------------------------------- # hotmaps u_values, heated floor areas per sector @@ -866,19 +1073,19 @@ if __name__ == "__main__": # temperature dependent parameters d_heat, temperature_factor = prepare_temperature_data() + # (2) space heat savings **************************************************** + dE_space = calculate_space_heat_savings( + u_values, data_tabula, l_strength, temperature_factor, d_heat + ) -# (2) space heat savings **************************************************** - dE_space = calculate_space_heat_savings(u_values, data_tabula, l_strength, - temperature_factor, d_heat) - -# (3) costs ***************************************************************** + # (3) costs ***************************************************************** costs = calculate_retro_costs(u_values, l_strength, cost_retro) -# (4) cost-dE and area per sector ******************************************* - cost_dE, area_tot = sample_dE_costs_area(area, area_tot, costs, dE_space, countries, - construction_index, tax_weighting) + # (4) cost-dE and area per sector ******************************************* + cost_dE, area_tot = sample_dE_costs_area( + area, area_tot, costs, dE_space, countries, construction_index, tax_weighting + ) -# save ********************************************************************* + # save ********************************************************************* cost_dE.to_csv(snakemake.output.retro_cost) 
area_tot.to_csv(snakemake.output.floor_area) - diff --git a/scripts/build_salt_cavern_potentials.py b/scripts/build_salt_cavern_potentials.py index 4b45a65d..e1703ef3 100644 --- a/scripts/build_salt_cavern_potentials.py +++ b/scripts/build_salt_cavern_potentials.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- """ Build salt cavern potentials for hydrogen storage. @@ -22,29 +23,35 @@ import geopandas as gpd import pandas as pd -def concat_gdf(gdf_list, crs='EPSG:4326'): - """Concatenate multiple geopandas dataframes with common coordinate reference system (crs).""" +def concat_gdf(gdf_list, crs="EPSG:4326"): + """ + Concatenate multiple geopandas dataframes with common coordinate reference + system (crs). + """ return gpd.GeoDataFrame(pd.concat(gdf_list), crs=crs) def load_bus_regions(onshore_path, offshore_path): - """Load pypsa-eur on- and offshore regions and concat.""" + """ + Load pypsa-eur on- and offshore regions and concat. + """ bus_regions_offshore = gpd.read_file(offshore_path) bus_regions_onshore = gpd.read_file(onshore_path) bus_regions = concat_gdf([bus_regions_offshore, bus_regions_onshore]) - bus_regions = bus_regions.dissolve(by='name', aggfunc='sum') + bus_regions = bus_regions.dissolve(by="name", aggfunc="sum") return bus_regions def area(gdf): - """Returns area of GeoDataFrame geometries in square kilometers.""" + """ + Returns area of GeoDataFrame geometries in square kilometers. + """ return gdf.to_crs(epsg=3035).area.div(1e6) def salt_cavern_potential_by_region(caverns, regions): - # calculate area of caverns shapes caverns["area_caverns"] = area(caverns) @@ -53,18 +60,24 @@ def salt_cavern_potential_by_region(caverns, regions): # calculate share of cavern area inside region overlay["share"] = area(overlay) / overlay["area_caverns"] - overlay["e_nom"] = overlay.eval("capacity_per_area * share * area_caverns / 1000") # TWh + overlay["e_nom"] = overlay.eval( + "capacity_per_area * share * area_caverns / 1000" + ) # TWh + + caverns_regions = ( + overlay.groupby(["name", "storage_type"]).e_nom.sum().unstack("storage_type") + ) - caverns_regions = overlay.groupby(['name', "storage_type"]).e_nom.sum().unstack("storage_type") - return caverns_regions -if __name__ == '__main__': - if 'snakemake' not in globals(): +if __name__ == "__main__": + if "snakemake" not in globals(): from helper import mock_snakemake - snakemake = mock_snakemake('build_salt_cavern_potentials', simpl='', clusters='37') + snakemake = mock_snakemake( + "build_salt_cavern_potentials", simpl="", clusters="37" + ) fn_onshore = snakemake.input.regions_onshore fn_offshore = snakemake.input.regions_offshore @@ -75,4 +88,4 @@ if __name__ == '__main__': caverns_regions = salt_cavern_potential_by_region(caverns, regions) - caverns_regions.to_csv(snakemake.output.h2_cavern_potential) \ No newline at end of file + caverns_regions.to_csv(snakemake.output.h2_cavern_potential) diff --git a/scripts/build_sequestration_potentials.py b/scripts/build_sequestration_potentials.py index 4983640b..f3eec9ea 100644 --- a/scripts/build_sequestration_potentials.py +++ b/scripts/build_sequestration_potentials.py @@ -1,12 +1,18 @@ -import pandas as pd +# -*- coding: utf-8 -*- import geopandas as gpd +import pandas as pd + def area(gdf): - """Returns area of GeoDataFrame geometries in square kilometers.""" + """ + Returns area of GeoDataFrame geometries in square kilometers. 
+ """ return gdf.to_crs(epsg=3035).area.div(1e6) -def allocate_sequestration_potential(gdf, regions, attr='conservative estimate Mt', threshold=3): +def allocate_sequestration_potential( + gdf, regions, attr="conservative estimate Mt", threshold=3 +): gdf = gdf.loc[gdf[attr] > threshold, [attr, "geometry"]] gdf["area_sqkm"] = area(gdf) overlay = gpd.overlay(regions, gdf, keep_geom_type=True) @@ -19,12 +25,11 @@ def allocate_sequestration_potential(gdf, regions, attr='conservative estimate M if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from helper import mock_snakemake + snakemake = mock_snakemake( - 'build_sequestration_potentials', - simpl='', - clusters="181" + "build_sequestration_potentials", simpl="", clusters="181" ) cf = snakemake.config["sector"]["regional_co2_sequestration_potential"] @@ -34,10 +39,12 @@ if __name__ == "__main__": regions = gpd.read_file(snakemake.input.regions_offshore) if cf["include_onshore"]: onregions = gpd.read_file(snakemake.input.regions_onshore) - regions = pd.concat([regions, onregions]).dissolve(by='name').reset_index() + regions = pd.concat([regions, onregions]).dissolve(by="name").reset_index() - s = allocate_sequestration_potential(gdf, regions, attr=cf["attribute"], threshold=cf["min_size"]) + s = allocate_sequestration_potential( + gdf, regions, attr=cf["attribute"], threshold=cf["min_size"] + ) - s = s.where(s>cf["min_size"]).dropna() + s = s.where(s > cf["min_size"]).dropna() s.to_csv(snakemake.output.sequestration_potential) diff --git a/scripts/build_shipping_demand.py b/scripts/build_shipping_demand.py index 18335c9f..0b28c6e2 100644 --- a/scripts/build_shipping_demand.py +++ b/scripts/build_shipping_demand.py @@ -1,45 +1,55 @@ -"""Build regional demand for international navigation based on outflow volume of ports.""" +# -*- coding: utf-8 -*- +""" +Build regional demand for international navigation based on outflow volume of +ports. 
+""" -import pandas as pd -import geopandas as gpd import json -if __name__ == '__main__': - if 'snakemake' not in globals(): +import geopandas as gpd +import pandas as pd + +if __name__ == "__main__": + if "snakemake" not in globals(): from helper import mock_snakemake + snakemake = mock_snakemake( - 'build_shipping_demand_per_node', - simpl='', + "build_shipping_demand_per_node", + simpl="", clusters=48, ) scope = gpd.read_file(snakemake.input.scope).geometry[0] - regions = gpd.read_file(snakemake.input.regions).set_index('name') - demand = pd.read_csv(snakemake.input.demand, index_col=0)["total international navigation"] + regions = gpd.read_file(snakemake.input.regions).set_index("name") + demand = pd.read_csv(snakemake.input.demand, index_col=0)[ + "total international navigation" + ] # read port data into GeoDataFrame - with open(snakemake.input.ports, 'r', encoding='latin_1') as f: - ports = json.load(f) + with open(snakemake.input.ports, "r", encoding="latin_1") as f: + ports = json.load(f) ports = pd.json_normalize(ports, "features", sep="_") coordinates = ports.geometry_coordinates geometry = gpd.points_from_xy(coordinates.str[0], coordinates.str[1]) ports = gpd.GeoDataFrame(ports, geometry=geometry, crs=4326) - + # filter global port data by European ports european_ports = ports[ports.within(scope)] - + # assign ports to nearest region p = european_ports.to_crs(3857) r = regions.to_crs(3857) - outflows = p.sjoin_nearest(r).groupby("index_right").properties_outflows.sum().div(1e3) - + outflows = ( + p.sjoin_nearest(r).groupby("index_right").properties_outflows.sum().div(1e3) + ) + # calculate fraction of each country's port outflows countries = outflows.index.str[:2] outflows_per_country = outflows.groupby(countries).sum() fraction = outflows / countries.map(outflows_per_country) - + # distribute per-country demands to nodes based on these fractions - nodal_demand = demand.loc[countries].fillna(0.) + nodal_demand = demand.loc[countries].fillna(0.0) nodal_demand.index = fraction.index nodal_demand = nodal_demand.multiply(fraction, axis=0) nodal_demand = nodal_demand.reindex(regions.index, fill_value=0) diff --git a/scripts/build_solar_thermal_profiles.py b/scripts/build_solar_thermal_profiles.py index 0fb8b6e6..a819c94b 100644 --- a/scripts/build_solar_thermal_profiles.py +++ b/scripts/build_solar_thermal_profiles.py @@ -1,18 +1,22 @@ -"""Build solar thermal collector time series.""" +# -*- coding: utf-8 -*- +""" +Build solar thermal collector time series. 
+""" -import geopandas as gpd import atlite +import geopandas as gpd +import numpy as np import pandas as pd import xarray as xr -import numpy as np from dask.distributed import Client, LocalCluster -if __name__ == '__main__': - if 'snakemake' not in globals(): +if __name__ == "__main__": + if "snakemake" not in globals(): from helper import mock_snakemake + snakemake = mock_snakemake( - 'build_solar_thermal_profiles', - simpl='', + "build_solar_thermal_profiles", + simpl="", clusters=48, ) @@ -20,29 +24,36 @@ if __name__ == '__main__': cluster = LocalCluster(n_workers=nprocesses, threads_per_worker=1) client = Client(cluster, asynchronous=True) - config = snakemake.config['solar_thermal'] + config = snakemake.config["solar_thermal"] - time = pd.date_range(freq='h', **snakemake.config['snapshots']) - cutout_config = snakemake.config['atlite']['cutout'] + time = pd.date_range(freq="h", **snakemake.config["snapshots"]) + cutout_config = snakemake.config["atlite"]["cutout"] cutout = atlite.Cutout(cutout_config).sel(time=time) - clustered_regions = gpd.read_file( - snakemake.input.regions_onshore).set_index('name').buffer(0).squeeze() + clustered_regions = ( + gpd.read_file(snakemake.input.regions_onshore) + .set_index("name") + .buffer(0) + .squeeze() + ) I = cutout.indicatormatrix(clustered_regions) pop_layout = xr.open_dataarray(snakemake.input.pop_layout) - stacked_pop = pop_layout.stack(spatial=('y', 'x')) + stacked_pop = pop_layout.stack(spatial=("y", "x")) M = I.T.dot(np.diag(I.dot(stacked_pop))) nonzero_sum = M.sum(axis=0, keepdims=True) - nonzero_sum[nonzero_sum == 0.] = 1. + nonzero_sum[nonzero_sum == 0.0] = 1.0 M_tilde = M / nonzero_sum - solar_thermal = cutout.solar_thermal(**config, matrix=M_tilde.T, - index=clustered_regions.index, - dask_kwargs=dict(scheduler=client), - show_progress=False) + solar_thermal = cutout.solar_thermal( + **config, + matrix=M_tilde.T, + index=clustered_regions.index, + dask_kwargs=dict(scheduler=client), + show_progress=False + ) solar_thermal.to_netcdf(snakemake.output.solar_thermal) diff --git a/scripts/build_temperature_profiles.py b/scripts/build_temperature_profiles.py index a056fca0..c709d692 100644 --- a/scripts/build_temperature_profiles.py +++ b/scripts/build_temperature_profiles.py @@ -1,18 +1,22 @@ -"""Build temperature profiles.""" +# -*- coding: utf-8 -*- +""" +Build temperature profiles. 
+""" -import geopandas as gpd import atlite +import geopandas as gpd +import numpy as np import pandas as pd import xarray as xr -import numpy as np from dask.distributed import Client, LocalCluster -if __name__ == '__main__': - if 'snakemake' not in globals(): +if __name__ == "__main__": + if "snakemake" not in globals(): from helper import mock_snakemake + snakemake = mock_snakemake( - 'build_temperature_profiles', - simpl='', + "build_temperature_profiles", + simpl="", clusters=48, ) @@ -20,34 +24,42 @@ if __name__ == '__main__': cluster = LocalCluster(n_workers=nprocesses, threads_per_worker=1) client = Client(cluster, asynchronous=True) - time = pd.date_range(freq='h', **snakemake.config['snapshots']) - cutout_config = snakemake.config['atlite']['cutout'] + time = pd.date_range(freq="h", **snakemake.config["snapshots"]) + cutout_config = snakemake.config["atlite"]["cutout"] cutout = atlite.Cutout(cutout_config).sel(time=time) - clustered_regions = gpd.read_file( - snakemake.input.regions_onshore).set_index('name').buffer(0).squeeze() + clustered_regions = ( + gpd.read_file(snakemake.input.regions_onshore) + .set_index("name") + .buffer(0) + .squeeze() + ) I = cutout.indicatormatrix(clustered_regions) pop_layout = xr.open_dataarray(snakemake.input.pop_layout) - stacked_pop = pop_layout.stack(spatial=('y', 'x')) + stacked_pop = pop_layout.stack(spatial=("y", "x")) M = I.T.dot(np.diag(I.dot(stacked_pop))) nonzero_sum = M.sum(axis=0, keepdims=True) - nonzero_sum[nonzero_sum == 0.] = 1. + nonzero_sum[nonzero_sum == 0.0] = 1.0 M_tilde = M / nonzero_sum temp_air = cutout.temperature( - matrix=M_tilde.T, index=clustered_regions.index, + matrix=M_tilde.T, + index=clustered_regions.index, dask_kwargs=dict(scheduler=client), - show_progress=False) + show_progress=False, + ) temp_air.to_netcdf(snakemake.output.temp_air) temp_soil = cutout.soil_temperature( - matrix=M_tilde.T, index=clustered_regions.index, + matrix=M_tilde.T, + index=clustered_regions.index, dask_kwargs=dict(scheduler=client), - show_progress=False) + show_progress=False, + ) temp_soil.to_netcdf(snakemake.output.temp_soil) diff --git a/scripts/build_transport_demand.py b/scripts/build_transport_demand.py index 0045b882..9753cb10 100644 --- a/scripts/build_transport_demand.py +++ b/scripts/build_transport_demand.py @@ -1,13 +1,15 @@ -"""Build transport demand.""" +# -*- coding: utf-8 -*- +""" +Build transport demand. +""" -import pandas as pd import numpy as np +import pandas as pd import xarray as xr from helper import generate_periodic_profiles def build_nodal_transport_data(fn, pop_layout): - transport_data = pd.read_csv(fn, index_col=0) nodal_transport_data = transport_data.loc[pop_layout.ct].fillna(0.0) @@ -24,12 +26,9 @@ def build_nodal_transport_data(fn, pop_layout): def build_transport_demand(traffic_fn, airtemp_fn, nodes, nodal_transport_data): - ## Get overall demand curve for all vehicles - traffic = pd.read_csv( - traffic_fn, skiprows=2, usecols=["count"] - ).squeeze("columns") + traffic = pd.read_csv(traffic_fn, skiprows=2, usecols=["count"]).squeeze("columns") transport_shape = generate_periodic_profiles( dt_index=snapshots, @@ -94,9 +93,11 @@ def transport_degree_factor( upper_degree_factor=1.6, ): """ - Work out how much energy demand in vehicles increases due to heating and cooling. - There is a deadband where there is no increase. - Degree factors are % increase in demand compared to no heating/cooling fuel consumption. + Work out how much energy demand in vehicles increases due to heating and + cooling. 
+ + There is a deadband where there is no increase. Degree factors are % + increase in demand compared to no heating/cooling fuel consumption. Returns per unit increase in demand for each place and time """ @@ -137,7 +138,6 @@ def bev_availability_profile(fn, snapshots, nodes, options): def bev_dsm_profile(snapshots, nodes, options): - dsm_week = np.zeros((24 * 7,)) dsm_week[(np.arange(0, 7, 1) * 24 + options["bev_dsm_restriction_time"])] = options[ @@ -173,24 +173,23 @@ if __name__ == "__main__": options = snakemake.config["sector"] - snapshots = pd.date_range(freq='h', **snakemake.config["snapshots"], tz="UTC") + snapshots = pd.date_range(freq="h", **snakemake.config["snapshots"], tz="UTC") Nyears = 1 nodal_transport_data = build_nodal_transport_data( - snakemake.input.transport_data, - pop_layout + snakemake.input.transport_data, pop_layout ) transport_demand = build_transport_demand( snakemake.input.traffic_data_KFZ, snakemake.input.temp_air_total, - nodes, nodal_transport_data + nodes, + nodal_transport_data, ) avail_profile = bev_availability_profile( - snakemake.input.traffic_data_Pkw, - snapshots, nodes, options + snakemake.input.traffic_data_Pkw, snapshots, nodes, options ) dsm_profile = bev_dsm_profile(snapshots, nodes, options) @@ -198,4 +197,4 @@ if __name__ == "__main__": nodal_transport_data.to_csv(snakemake.output.transport_data) transport_demand.to_csv(snakemake.output.transport_demand) avail_profile.to_csv(snakemake.output.avail_profile) - dsm_profile.to_csv(snakemake.output.dsm_profile) \ No newline at end of file + dsm_profile.to_csv(snakemake.output.dsm_profile) diff --git a/scripts/cluster_gas_network.py b/scripts/cluster_gas_network.py index 0169181e..95c10eac 100755 --- a/scripts/cluster_gas_network.py +++ b/scripts/cluster_gas_network.py @@ -1,45 +1,57 @@ -"""Cluster gas network.""" +# -*- coding: utf-8 -*- +""" +Cluster gas network. +""" import logging + logger = logging.getLogger(__name__) -import pandas as pd import geopandas as gpd - -from shapely import wkt -from pypsa.geo import haversine_pts +import pandas as pd from packaging.version import Version, parse +from pypsa.geo import haversine_pts +from shapely import wkt -def concat_gdf(gdf_list, crs='EPSG:4326'): - """Concatenate multiple geopandas dataframes with common coordinate reference system (crs).""" +def concat_gdf(gdf_list, crs="EPSG:4326"): + """ + Concatenate multiple geopandas dataframes with common coordinate reference + system (crs). + """ return gpd.GeoDataFrame(pd.concat(gdf_list), crs=crs) def load_bus_regions(onshore_path, offshore_path): - """Load pypsa-eur on- and offshore regions and concat.""" + """ + Load pypsa-eur on- and offshore regions and concat. 
+ """ bus_regions_offshore = gpd.read_file(offshore_path) bus_regions_onshore = gpd.read_file(onshore_path) bus_regions = concat_gdf([bus_regions_offshore, bus_regions_onshore]) - bus_regions = bus_regions.dissolve(by='name', aggfunc='sum') + bus_regions = bus_regions.dissolve(by="name", aggfunc="sum") return bus_regions def build_clustered_gas_network(df, bus_regions, length_factor=1.25): - - for i in [0,1]: - + for i in [0, 1]: gdf = gpd.GeoDataFrame(geometry=df[f"point{i}"], crs="EPSG:4326") - kws = dict(op="within") if parse(gpd.__version__) < Version('0.10') else dict(predicate="within") + kws = ( + dict(op="within") + if parse(gpd.__version__) < Version("0.10") + else dict(predicate="within") + ) bus_mapping = gpd.sjoin(gdf, bus_regions, how="left", **kws).index_right bus_mapping = bus_mapping.groupby(bus_mapping.index).first() df[f"bus{i}"] = bus_mapping - df[f"point{i}"] = df[f"bus{i}"].map(bus_regions.to_crs(3035).centroid.to_crs(4326)) + df[f"point{i}"] = df[f"bus{i}"].map( + bus_regions.to_crs(3035).centroid.to_crs(4326) + ) # drop pipes where not both buses are inside regions df = df.loc[~df.bus0.isna() & ~df.bus1.isna()] @@ -49,10 +61,9 @@ def build_clustered_gas_network(df, bus_regions, length_factor=1.25): # recalculate lengths as center to center * length factor df["length"] = df.apply( - lambda p: length_factor * haversine_pts( - [p.point0.x, p.point0.y], - [p.point1.x, p.point1.y] - ), axis=1 + lambda p: length_factor + * haversine_pts([p.point0.x, p.point0.y], [p.point1.x, p.point1.y]), + axis=1, ) # tidy and create new numbered index @@ -63,7 +74,6 @@ def build_clustered_gas_network(df, bus_regions, length_factor=1.25): def reindex_pipes(df): - def make_index(x): connector = " <-> " if x.bidirectional else " -> " return "gas pipeline " + x.bus0 + connector + x.bus1 @@ -77,33 +87,28 @@ def reindex_pipes(df): def aggregate_parallel_pipes(df): - strategies = { - 'bus0': 'first', - 'bus1': 'first', - "p_nom": 'sum', - "p_nom_diameter": 'sum', + "bus0": "first", + "bus1": "first", + "p_nom": "sum", + "p_nom_diameter": "sum", "max_pressure_bar": "mean", "build_year": "mean", "diameter_mm": "mean", - "length": 'mean', - 'name': ' '.join, - "p_min_pu": 'min', + "length": "mean", + "name": " ".join, + "p_min_pu": "min", } return df.groupby(df.index).agg(strategies) if __name__ == "__main__": - - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from helper import mock_snakemake - snakemake = mock_snakemake( - 'cluster_gas_network', - simpl='', - clusters='37' - ) - logging.basicConfig(level=snakemake.config['logging_level']) + snakemake = mock_snakemake("cluster_gas_network", simpl="", clusters="37") + + logging.basicConfig(level=snakemake.config["logging_level"]) fn = snakemake.input.cleaned_gas_network df = pd.read_csv(fn, index_col=0) @@ -111,8 +116,7 @@ if __name__ == "__main__": df[col] = df[col].apply(wkt.loads) bus_regions = load_bus_regions( - snakemake.input.regions_onshore, - snakemake.input.regions_offshore + snakemake.input.regions_onshore, snakemake.input.regions_offshore ) gas_network = build_clustered_gas_network(df, bus_regions) @@ -120,4 +124,4 @@ if __name__ == "__main__": reindex_pipes(gas_network) gas_network = aggregate_parallel_pipes(gas_network) - gas_network.to_csv(snakemake.output.clustered_gas_network) \ No newline at end of file + gas_network.to_csv(snakemake.output.clustered_gas_network) diff --git a/scripts/copy_config.py b/scripts/copy_config.py index 6eaf6e66..0e071b77 100644 --- a/scripts/copy_config.py +++ 
b/scripts/copy_config.py @@ -1,5 +1,7 @@ +# -*- coding: utf-8 -*- from shutil import copy + import yaml files = { @@ -7,24 +9,27 @@ files = { "Snakefile": "Snakefile", "scripts/solve_network.py": "solve_network.py", "scripts/prepare_sector_network.py": "prepare_sector_network.py", - "../pypsa-eur/config.yaml": "config.pypsaeur.yaml" + "../pypsa-eur/config.yaml": "config.pypsaeur.yaml", } -if __name__ == '__main__': - if 'snakemake' not in globals(): +if __name__ == "__main__": + if "snakemake" not in globals(): from helper import mock_snakemake - snakemake = mock_snakemake('copy_config') - basepath = snakemake.config['summary_dir'] + '/' + snakemake.config['run'] + '/configs/' + snakemake = mock_snakemake("copy_config") + + basepath = ( + snakemake.config["summary_dir"] + "/" + snakemake.config["run"] + "/configs/" + ) for f, name in files.items(): copy(f, basepath + name) - with open(basepath + 'config.snakemake.yaml', 'w') as yaml_file: + with open(basepath + "config.snakemake.yaml", "w") as yaml_file: yaml.dump( snakemake.config, yaml_file, default_flow_style=False, allow_unicode=True, - sort_keys=False - ) \ No newline at end of file + sort_keys=False, + ) diff --git a/scripts/helper.py b/scripts/helper.py index ef3dd828..f5e78f96 100644 --- a/scripts/helper.py +++ b/scripts/helper.py @@ -1,26 +1,28 @@ +# -*- coding: utf-8 -*- +import contextlib +import logging import os import sys -import contextlib -import yaml -import pytz -import pandas as pd from pathlib import Path -from snakemake.utils import update_config -from pypsa.descriptors import Dict -from pypsa.components import components, component_attrs -import logging +import pandas as pd +import pytz +import yaml +from pypsa.components import component_attrs, components +from pypsa.descriptors import Dict +from snakemake.utils import update_config + logger = logging.getLogger(__name__) # Define a context manager to temporarily mute print statements @contextlib.contextmanager def mute_print(): - with open(os.devnull, 'w') as devnull: + with open(os.devnull, "w") as devnull: with contextlib.redirect_stdout(devnull): yield - - + + def override_component_attrs(directory): """Tell PyPSA that links can have multiple outputs by overriding the component_attrs. This can be done for @@ -30,7 +32,7 @@ def override_component_attrs(directory): Parameters ---------- directory : string - Folder where component attributes to override are stored + Folder where component attributes to override are stored analogous to ``pypsa/component_attrs``, e.g. `links.csv`. Returns @@ -38,7 +40,7 @@ def override_component_attrs(directory): Dictionary of overridden component attributes. """ - attrs = Dict({k : v.copy() for k,v in component_attrs.items()}) + attrs = Dict({k: v.copy() for k, v in component_attrs.items()}) for component, list_name in components.list_name.items(): fn = f"{directory}/{list_name}.csv" @@ -66,15 +68,17 @@ def mock_snakemake(rulename, **wildcards): keyword arguments fixing the wildcards. Only necessary if wildcards are needed. 
""" - import snakemake as sm import os + + import snakemake as sm + from packaging.version import Version, parse from pypsa.descriptors import Dict from snakemake.script import Snakemake - from packaging.version import Version, parse script_dir = Path(__file__).parent.resolve() - assert Path.cwd().resolve() == script_dir, \ - f'mock_snakemake has to be run from the repository scripts directory {script_dir}' + assert ( + Path.cwd().resolve() == script_dir + ), f"mock_snakemake has to be run from the repository scripts directory {script_dir}" os.chdir(script_dir.parent) for p in sm.SNAKEFILE_CHOICES: if os.path.exists(p): @@ -95,9 +99,18 @@ def mock_snakemake(rulename, **wildcards): io[i] = os.path.abspath(io[i]) make_accessable(job.input, job.output, job.log) - snakemake = Snakemake(job.input, job.output, job.params, job.wildcards, - job.threads, job.resources, job.log, - job.dag.workflow.config, job.rule.name, None,) + snakemake = Snakemake( + job.input, + job.output, + job.params, + job.wildcards, + job.threads, + job.resources, + job.log, + job.dag.workflow.config, + job.rule.name, + None, + ) # create log and output dir if not existent for path in list(snakemake.log) + list(snakemake.output): Path(path).parent.mkdir(parents=True, exist_ok=True) @@ -105,15 +118,17 @@ def mock_snakemake(rulename, **wildcards): os.chdir(script_dir) return snakemake + # from pypsa-eur/_helpers.py def progress_retrieve(url, file): import urllib + from progressbar import ProgressBar pbar = ProgressBar(0, 100) def dlProgress(count, blockSize, totalSize): - pbar.update( int(count * blockSize * 100 / totalSize) ) + pbar.update(int(count * blockSize * 100 / totalSize)) urllib.request.urlretrieve(url, file, reporthook=dlProgress) @@ -121,10 +136,11 @@ def progress_retrieve(url, file): def generate_periodic_profiles(dt_index, nodes, weekly_profile, localize=None): """ Give a 24*7 long list of weekly hourly profiles, generate this for each - country for the period dt_index, taking account of time zones and summer time. + country for the period dt_index, taking account of time zones and summer + time. 
""" - weekly_profile = pd.Series(weekly_profile, range(24*7)) + weekly_profile = pd.Series(weekly_profile, range(24 * 7)) week_df = pd.DataFrame(index=dt_index, columns=nodes) @@ -150,4 +166,4 @@ def update_config_with_sector_opts(config, sector_opts): for o in sector_opts.split("-"): if o.startswith("CF+"): l = o.split("+")[1:] - update_config(config, parse(l)) \ No newline at end of file + update_config(config, parse(l)) diff --git a/scripts/make_summary.py b/scripts/make_summary.py index dcf5b801..1d7fe068 100644 --- a/scripts/make_summary.py +++ b/scripts/make_summary.py @@ -1,23 +1,20 @@ +# -*- coding: utf-8 -*- import logging + logger = logging.getLogger(__name__) import sys -import yaml -import pypsa import numpy as np import pandas as pd - -from prepare_sector_network import prepare_costs +import pypsa +import yaml from helper import override_component_attrs +from prepare_sector_network import prepare_costs idx = pd.IndexSlice -opt_name = { - "Store": "e", - "Line": "s", - "Transformer": "s" -} +opt_name = {"Store": "e", "Line": "s", "Transformer": "s"} def assign_carriers(n): @@ -26,20 +23,25 @@ def assign_carriers(n): def assign_locations(n): - for c in n.iterate_components(n.one_port_components|n.branch_components): - ifind = pd.Series(c.df.index.str.find(" ",start=4),c.df.index) + for c in n.iterate_components(n.one_port_components | n.branch_components): + ifind = pd.Series(c.df.index.str.find(" ", start=4), c.df.index) for i in ifind.unique(): names = ifind.index[ifind == i] if i == -1: - c.df.loc[names, 'location'] = "" + c.df.loc[names, "location"] = "" else: - c.df.loc[names, 'location'] = names.str[:i] + c.df.loc[names, "location"] = names.str[:i] def calculate_nodal_cfs(n, label, nodal_cfs): - #Beware this also has extraneous locations for country (e.g. biomass) or continent-wide (e.g. fossil gas/oil) stuff - for c in n.iterate_components((n.branch_components^{"Line","Transformer"})|n.controllable_one_port_components^{"Load","StorageUnit"}): - capacities_c = c.df.groupby(["location","carrier"])[opt_name.get(c.name,"p") + "_nom_opt"].sum() + # Beware this also has extraneous locations for country (e.g. biomass) or continent-wide (e.g. 
fossil gas/oil) stuff + for c in n.iterate_components( + (n.branch_components ^ {"Line", "Transformer"}) + | n.controllable_one_port_components ^ {"Load", "StorageUnit"} + ): + capacities_c = c.df.groupby(["location", "carrier"])[ + opt_name.get(c.name, "p") + "_nom_opt" + ].sum() if c.name == "Link": p = c.pnl.p0.abs().mean() @@ -53,21 +55,27 @@ def calculate_nodal_cfs(n, label, nodal_cfs): c.df["p"] = p p_c = c.df.groupby(["location", "carrier"])["p"].sum() - cf_c = p_c/capacities_c + cf_c = p_c / capacities_c - index = pd.MultiIndex.from_tuples([(c.list_name,) + t for t in cf_c.index.to_list()]) + index = pd.MultiIndex.from_tuples( + [(c.list_name,) + t for t in cf_c.index.to_list()] + ) nodal_cfs = nodal_cfs.reindex(index.union(nodal_cfs.index)) - nodal_cfs.loc[index,label] = cf_c.values + nodal_cfs.loc[index, label] = cf_c.values return nodal_cfs def calculate_cfs(n, label, cfs): + for c in n.iterate_components( + n.branch_components + | n.controllable_one_port_components ^ {"Load", "StorageUnit"} + ): + capacities_c = ( + c.df[opt_name.get(c.name, "p") + "_nom_opt"].groupby(c.df.carrier).sum() + ) - for c in n.iterate_components(n.branch_components|n.controllable_one_port_components^{"Load","StorageUnit"}): - capacities_c = c.df[opt_name.get(c.name,"p") + "_nom_opt"].groupby(c.df.carrier).sum() - - if c.name in ["Link","Line","Transformer"]: + if c.name in ["Link", "Line", "Transformer"]: p = c.pnl.p0.abs().mean() elif c.name == "Store": p = c.pnl.e.abs().mean() @@ -76,25 +84,31 @@ def calculate_cfs(n, label, cfs): p_c = p.groupby(c.df.carrier).sum() - cf_c = p_c/capacities_c + cf_c = p_c / capacities_c cf_c = pd.concat([cf_c], keys=[c.list_name]) cfs = cfs.reindex(cf_c.index.union(cfs.index)) - cfs.loc[cf_c.index,label] = cf_c + cfs.loc[cf_c.index, label] = cf_c return cfs def calculate_nodal_costs(n, label, nodal_costs): - #Beware this also has extraneous locations for country (e.g. biomass) or continent-wide (e.g. fossil gas/oil) stuff - for c in n.iterate_components(n.branch_components|n.controllable_one_port_components^{"Load"}): - c.df["capital_costs"] = c.df.capital_cost * c.df[opt_name.get(c.name, "p") + "_nom_opt"] + # Beware this also has extraneous locations for country (e.g. biomass) or continent-wide (e.g. fossil gas/oil) stuff + for c in n.iterate_components( + n.branch_components | n.controllable_one_port_components ^ {"Load"} + ): + c.df["capital_costs"] = ( + c.df.capital_cost * c.df[opt_name.get(c.name, "p") + "_nom_opt"] + ) capital_costs = c.df.groupby(["location", "carrier"])["capital_costs"].sum() - index = pd.MultiIndex.from_tuples([(c.list_name, "capital") + t for t in capital_costs.index.to_list()]) + index = pd.MultiIndex.from_tuples( + [(c.list_name, "capital") + t for t in capital_costs.index.to_list()] + ) nodal_costs = nodal_costs.reindex(index.union(nodal_costs.index)) - nodal_costs.loc[index,label] = capital_costs.values + nodal_costs.loc[index, label] = capital_costs.values if c.name == "Link": p = c.pnl.p0.multiply(n.snapshot_weightings.generators, axis=0).sum() @@ -102,19 +116,23 @@ def calculate_nodal_costs(n, label, nodal_costs): continue elif c.name == "StorageUnit": p_all = c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0) - p_all[p_all < 0.] = 0. 
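
The clipping in this hunk restricts storage-unit marginal costs to discharge: negative dispatch (charging) is zeroed before the energy is priced. In miniature, with assumed toy numbers:

import pandas as pd

p_all = pd.DataFrame({"PHS": [5.0, -3.0, 2.0]})  # + discharge, - charge (assumed)
p_all[p_all < 0.0] = 0.0
p = p_all.sum()  # 7.0: only the discharged energy enters p * marginal_cost
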
+ p_all[p_all < 0.0] = 0.0 p = p_all.sum() else: p = c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0).sum() - #correct sequestration cost + # correct sequestration cost if c.name == "Store": - items = c.df.index[(c.df.carrier == "co2 stored") & (c.df.marginal_cost <= -100.)] - c.df.loc[items, "marginal_cost"] = -20. + items = c.df.index[ + (c.df.carrier == "co2 stored") & (c.df.marginal_cost <= -100.0) + ] + c.df.loc[items, "marginal_cost"] = -20.0 - c.df["marginal_costs"] = p*c.df.marginal_cost + c.df["marginal_costs"] = p * c.df.marginal_cost marginal_costs = c.df.groupby(["location", "carrier"])["marginal_costs"].sum() - index = pd.MultiIndex.from_tuples([(c.list_name, "marginal") + t for t in marginal_costs.index.to_list()]) + index = pd.MultiIndex.from_tuples( + [(c.list_name, "marginal") + t for t in marginal_costs.index.to_list()] + ) nodal_costs = nodal_costs.reindex(index.union(nodal_costs.index)) nodal_costs.loc[index, label] = marginal_costs.values @@ -122,9 +140,10 @@ def calculate_nodal_costs(n, label, nodal_costs): def calculate_costs(n, label, costs): - - for c in n.iterate_components(n.branch_components|n.controllable_one_port_components^{"Load"}): - capital_costs = c.df.capital_cost*c.df[opt_name.get(c.name,"p") + "_nom_opt"] + for c in n.iterate_components( + n.branch_components | n.controllable_one_port_components ^ {"Load"} + ): + capital_costs = c.df.capital_cost * c.df[opt_name.get(c.name, "p") + "_nom_opt"] capital_costs_grouped = capital_costs.groupby(c.df.carrier).sum() capital_costs_grouped = pd.concat([capital_costs_grouped], keys=["capital"]) @@ -140,17 +159,19 @@ def calculate_costs(n, label, costs): continue elif c.name == "StorageUnit": p_all = c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0) - p_all[p_all < 0.] = 0. + p_all[p_all < 0.0] = 0.0 p = p_all.sum() else: p = c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0).sum() - #correct sequestration cost + # correct sequestration cost if c.name == "Store": - items = c.df.index[(c.df.carrier == "co2 stored") & (c.df.marginal_cost <= -100.)] - c.df.loc[items, "marginal_cost"] = -20. 
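
Context for the correction restated in this hunk: in the solved networks the "co2 stored" stores carry a strongly negative marginal cost (100 EUR/tCO2 or more below zero), which the summary resets to -20 so that the reported marginal costs are not dominated by that artificial sequestration incentive. The same clipping on a hypothetical standalone table:

import pandas as pd

# Hypothetical store table; only "co2 stored" with cost <= -100 is reset.
stores = pd.DataFrame(
    {"carrier": ["co2 stored", "H2 Store"], "marginal_cost": [-150.0, 0.0]}
)
items = stores.index[(stores.carrier == "co2 stored") & (stores.marginal_cost <= -100.0)]
stores.loc[items, "marginal_cost"] = -20.0  # assumed effective value
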
+ items = c.df.index[ + (c.df.carrier == "co2 stored") & (c.df.marginal_cost <= -100.0) + ] + c.df.loc[items, "marginal_cost"] = -20.0 - marginal_costs = p*c.df.marginal_cost + marginal_costs = p * c.df.marginal_cost marginal_costs_grouped = marginal_costs.groupby(c.df.carrier).sum() @@ -159,54 +180,79 @@ def calculate_costs(n, label, costs): costs = costs.reindex(marginal_costs_grouped.index.union(costs.index)) - costs.loc[marginal_costs_grouped.index,label] = marginal_costs_grouped + costs.loc[marginal_costs_grouped.index, label] = marginal_costs_grouped # add back in all hydro - #costs.loc[("storage_units", "capital", "hydro"),label] = (0.01)*2e6*n.storage_units.loc[n.storage_units.group=="hydro", "p_nom"].sum() - #costs.loc[("storage_units", "capital", "PHS"),label] = (0.01)*2e6*n.storage_units.loc[n.storage_units.group=="PHS", "p_nom"].sum() - #costs.loc[("generators", "capital", "ror"),label] = (0.02)*3e6*n.generators.loc[n.generators.group=="ror", "p_nom"].sum() + # costs.loc[("storage_units", "capital", "hydro"),label] = (0.01)*2e6*n.storage_units.loc[n.storage_units.group=="hydro", "p_nom"].sum() + # costs.loc[("storage_units", "capital", "PHS"),label] = (0.01)*2e6*n.storage_units.loc[n.storage_units.group=="PHS", "p_nom"].sum() + # costs.loc[("generators", "capital", "ror"),label] = (0.02)*3e6*n.generators.loc[n.generators.group=="ror", "p_nom"].sum() return costs def calculate_cumulative_cost(): - planning_horizons = snakemake.config['scenario']['planning_horizons'] + planning_horizons = snakemake.config["scenario"]["planning_horizons"] - cumulative_cost = pd.DataFrame(index = df["costs"].sum().index, - columns=pd.Series(data=np.arange(0,0.1, 0.01), name='social discount rate')) + cumulative_cost = pd.DataFrame( + index=df["costs"].sum().index, + columns=pd.Series(data=np.arange(0, 0.1, 0.01), name="social discount rate"), + ) - #discount cost and express them in money value of planning_horizons[0] + # discount cost and express them in money value of planning_horizons[0] for r in cumulative_cost.columns: - cumulative_cost[r]=[df["costs"].sum()[index]/((1+r)**(index[-1]-planning_horizons[0])) for index in cumulative_cost.index] + cumulative_cost[r] = [ + df["costs"].sum()[index] / ((1 + r) ** (index[-1] - planning_horizons[0])) + for index in cumulative_cost.index + ] - #integrate cost throughout the transition path + # integrate cost throughout the transition path for r in cumulative_cost.columns: for cluster in cumulative_cost.index.get_level_values(level=0).unique(): for lv in cumulative_cost.index.get_level_values(level=1).unique(): - for sector_opts in cumulative_cost.index.get_level_values(level=2).unique(): - cumulative_cost.loc[(cluster, lv, sector_opts, 'cumulative cost'),r] = np.trapz(cumulative_cost.loc[idx[cluster, lv, sector_opts,planning_horizons],r].values, x=planning_horizons) + for sector_opts in cumulative_cost.index.get_level_values( + level=2 + ).unique(): + cumulative_cost.loc[ + (cluster, lv, sector_opts, "cumulative cost"), r + ] = np.trapz( + cumulative_cost.loc[ + idx[cluster, lv, sector_opts, planning_horizons], r + ].values, + x=planning_horizons, + ) return cumulative_cost def calculate_nodal_capacities(n, label, nodal_capacities): - #Beware this also has extraneous locations for country (e.g. biomass) or continent-wide (e.g. 
fossil gas/oil) stuff - for c in n.iterate_components(n.branch_components|n.controllable_one_port_components^{"Load"}): - nodal_capacities_c = c.df.groupby(["location","carrier"])[opt_name.get(c.name,"p") + "_nom_opt"].sum() - index = pd.MultiIndex.from_tuples([(c.list_name,) + t for t in nodal_capacities_c.index.to_list()]) + # Beware this also has extraneous locations for country (e.g. biomass) or continent-wide (e.g. fossil gas/oil) stuff + for c in n.iterate_components( + n.branch_components | n.controllable_one_port_components ^ {"Load"} + ): + nodal_capacities_c = c.df.groupby(["location", "carrier"])[ + opt_name.get(c.name, "p") + "_nom_opt" + ].sum() + index = pd.MultiIndex.from_tuples( + [(c.list_name,) + t for t in nodal_capacities_c.index.to_list()] + ) nodal_capacities = nodal_capacities.reindex(index.union(nodal_capacities.index)) - nodal_capacities.loc[index,label] = nodal_capacities_c.values + nodal_capacities.loc[index, label] = nodal_capacities_c.values return nodal_capacities def calculate_capacities(n, label, capacities): - - for c in n.iterate_components(n.branch_components|n.controllable_one_port_components^{"Load"}): - capacities_grouped = c.df[opt_name.get(c.name,"p") + "_nom_opt"].groupby(c.df.carrier).sum() + for c in n.iterate_components( + n.branch_components | n.controllable_one_port_components ^ {"Load"} + ): + capacities_grouped = ( + c.df[opt_name.get(c.name, "p") + "_nom_opt"].groupby(c.df.carrier).sum() + ) capacities_grouped = pd.concat([capacities_grouped], keys=[c.list_name]) - capacities = capacities.reindex(capacities_grouped.index.union(capacities.index)) + capacities = capacities.reindex( + capacities_grouped.index.union(capacities.index) + ) capacities.loc[capacities_grouped.index, label] = capacities_grouped @@ -214,28 +260,42 @@ def calculate_capacities(n, label, capacities): def calculate_curtailment(n, label, curtailment): - - avail = n.generators_t.p_max_pu.multiply(n.generators.p_nom_opt).sum().groupby(n.generators.carrier).sum() + avail = ( + n.generators_t.p_max_pu.multiply(n.generators.p_nom_opt) + .sum() + .groupby(n.generators.carrier) + .sum() + ) used = n.generators_t.p.sum().groupby(n.generators.carrier).sum() - curtailment[label] = (((avail - used)/avail)*100).round(3) + curtailment[label] = (((avail - used) / avail) * 100).round(3) return curtailment def calculate_energy(n, label, energy): - - for c in n.iterate_components(n.one_port_components|n.branch_components): - + for c in n.iterate_components(n.one_port_components | n.branch_components): if c.name in n.one_port_components: - c_energies = c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0).sum().multiply(c.df.sign).groupby(c.df.carrier).sum() + c_energies = ( + c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0) + .sum() + .multiply(c.df.sign) + .groupby(c.df.carrier) + .sum() + ) else: - c_energies = pd.Series(0., c.df.carrier.unique()) + c_energies = pd.Series(0.0, c.df.carrier.unique()) for port in [col[3:] for col in c.df.columns if col[:3] == "bus"]: - totals = c.pnl["p" + port].multiply(n.snapshot_weightings.generators, axis=0).sum() - #remove values where bus is missing (bug in nomopyomo) + totals = ( + c.pnl["p" + port] + .multiply(n.snapshot_weightings.generators, axis=0) + .sum() + ) + # remove values where bus is missing (bug in nomopyomo) no_bus = c.df.index[c.df["bus" + port] == ""] - totals.loc[no_bus] = n.component_attrs[c.name].loc["p" + port, "default"] + totals.loc[no_bus] = n.component_attrs[c.name].loc[ + "p" + port, "default" + ] c_energies -= 
totals.groupby(c.df.carrier).sum() c_energies = pd.concat([c_energies], keys=[c.list_name]) @@ -248,40 +308,47 @@ def calculate_energy(n, label, energy): def calculate_supply(n, label, supply): - """calculate the max dispatch of each component at the buses aggregated by carrier""" + """ + Calculate the max dispatch of each component at the buses aggregated by + carrier. + """ bus_carriers = n.buses.carrier.unique() for i in bus_carriers: - bus_map = (n.buses.carrier == i) + bus_map = n.buses.carrier == i bus_map.at[""] = False for c in n.iterate_components(n.one_port_components): - items = c.df.index[c.df.bus.map(bus_map).fillna(False)] if len(items) == 0: continue - s = c.pnl.p[items].max().multiply(c.df.loc[items, 'sign']).groupby(c.df.loc[items, 'carrier']).sum() + s = ( + c.pnl.p[items] + .max() + .multiply(c.df.loc[items, "sign"]) + .groupby(c.df.loc[items, "carrier"]) + .sum() + ) s = pd.concat([s], keys=[c.list_name]) s = pd.concat([s], keys=[i]) supply = supply.reindex(s.index.union(supply.index)) - supply.loc[s.index,label] = s - + supply.loc[s.index, label] = s for c in n.iterate_components(n.branch_components): - for end in [col[3:] for col in c.df.columns if col[:3] == "bus"]: - items = c.df.index[c.df["bus" + end].map(bus_map).fillna(False)] if len(items) == 0: continue - #lots of sign compensation for direction and to do maximums - s = (-1)**(1-int(end))*((-1)**int(end)*c.pnl["p"+end][items]).max().groupby(c.df.loc[items, 'carrier']).sum() + # lots of sign compensation for direction and to do maximums + s = (-1) ** (1 - int(end)) * ( + (-1) ** int(end) * c.pnl["p" + end][items] + ).max().groupby(c.df.loc[items, "carrier"]).sum() s.index = s.index + end s = pd.concat([s], keys=[c.list_name]) s = pd.concat([s], keys=[i]) @@ -291,46 +358,56 @@ def calculate_supply(n, label, supply): return supply -def calculate_supply_energy(n, label, supply_energy): - """calculate the total energy supply/consuption of each component at the buses aggregated by carrier""" +def calculate_supply_energy(n, label, supply_energy): + """ + Calculate the total energy supply/consuption of each component at the buses + aggregated by carrier. 
+ """ bus_carriers = n.buses.carrier.unique() for i in bus_carriers: - bus_map = (n.buses.carrier == i) + bus_map = n.buses.carrier == i bus_map.at[""] = False for c in n.iterate_components(n.one_port_components): - items = c.df.index[c.df.bus.map(bus_map).fillna(False)] if len(items) == 0: continue - s = c.pnl.p[items].multiply(n.snapshot_weightings.generators,axis=0).sum().multiply(c.df.loc[items, 'sign']).groupby(c.df.loc[items, 'carrier']).sum() + s = ( + c.pnl.p[items] + .multiply(n.snapshot_weightings.generators, axis=0) + .sum() + .multiply(c.df.loc[items, "sign"]) + .groupby(c.df.loc[items, "carrier"]) + .sum() + ) s = pd.concat([s], keys=[c.list_name]) s = pd.concat([s], keys=[i]) supply_energy = supply_energy.reindex(s.index.union(supply_energy.index)) supply_energy.loc[s.index, label] = s - for c in n.iterate_components(n.branch_components): - for end in [col[3:] for col in c.df.columns if col[:3] == "bus"]: - items = c.df.index[c.df["bus" + str(end)].map(bus_map).fillna(False)] if len(items) == 0: continue - s = (-1)*c.pnl["p"+end][items].multiply(n.snapshot_weightings.generators,axis=0).sum().groupby(c.df.loc[items, 'carrier']).sum() + s = (-1) * c.pnl["p" + end][items].multiply( + n.snapshot_weightings.generators, axis=0 + ).sum().groupby(c.df.loc[items, "carrier"]).sum() s.index = s.index + end s = pd.concat([s], keys=[c.list_name]) s = pd.concat([s], keys=[i]) - supply_energy = supply_energy.reindex(s.index.union(supply_energy.index)) + supply_energy = supply_energy.reindex( + s.index.union(supply_energy.index) + ) supply_energy.loc[s.index, label] = s @@ -338,21 +415,24 @@ def calculate_supply_energy(n, label, supply_energy): def calculate_metrics(n, label, metrics): - metrics_list = [ "line_volume", "line_volume_limit", "line_volume_AC", "line_volume_DC", "line_volume_shadow", - "co2_shadow" + "co2_shadow", ] metrics = metrics.reindex(pd.Index(metrics_list).union(metrics.index)) - metrics.at["line_volume_DC",label] = (n.links.length * n.links.p_nom_opt)[n.links.carrier == "DC"].sum() - metrics.at["line_volume_AC",label] = (n.lines.length * n.lines.s_nom_opt).sum() - metrics.at["line_volume",label] = metrics.loc[["line_volume_AC", "line_volume_DC"], label].sum() + metrics.at["line_volume_DC", label] = (n.links.length * n.links.p_nom_opt)[ + n.links.carrier == "DC" + ].sum() + metrics.at["line_volume_AC", label] = (n.lines.length * n.lines.s_nom_opt).sum() + metrics.at["line_volume", label] = metrics.loc[ + ["line_volume_AC", "line_volume_DC"], label + ].sum() if hasattr(n, "line_volume_limit"): metrics.at["line_volume_limit", label] = n.line_volume_limit @@ -365,10 +445,9 @@ def calculate_metrics(n, label, metrics): def calculate_prices(n, label, prices): - prices = prices.reindex(prices.index.union(n.buses.carrier.unique())) - #WARNING: this is time-averaged, see weighted_prices for load-weighted average + # WARNING: this is time-averaged, see weighted_prices for load-weighted average prices[label] = n.buses_t.marginal_price.mean().groupby(n.buses.carrier).mean() return prices @@ -377,32 +456,42 @@ def calculate_prices(n, label, prices): def calculate_weighted_prices(n, label, weighted_prices): # Warning: doesn't include storage units as loads - weighted_prices = weighted_prices.reindex(pd.Index([ - "electricity", - "heat", - "space heat", - "urban heat", - "space urban heat", - "gas", - "H2" - ])) + weighted_prices = weighted_prices.reindex( + pd.Index( + [ + "electricity", + "heat", + "space heat", + "urban heat", + "space urban heat", + "gas", + "H2", + ] + ) + ) - 
link_loads = {"electricity": ["heat pump", "resistive heater", "battery charger", "H2 Electrolysis"], - "heat": ["water tanks charger"], - "urban heat": ["water tanks charger"], - "space heat": [], - "space urban heat": [], - "gas": ["OCGT", "gas boiler", "CHP electric", "CHP heat"], - "H2": ["Sabatier", "H2 Fuel Cell"]} + link_loads = { + "electricity": [ + "heat pump", + "resistive heater", + "battery charger", + "H2 Electrolysis", + ], + "heat": ["water tanks charger"], + "urban heat": ["water tanks charger"], + "space heat": [], + "space urban heat": [], + "gas": ["OCGT", "gas boiler", "CHP electric", "CHP heat"], + "H2": ["Sabatier", "H2 Fuel Cell"], + } for carrier in link_loads: - if carrier == "electricity": suffix = "" elif carrier[:5] == "space": suffix = carrier[5:] else: - suffix = " " + carrier + suffix = " " + carrier buses = n.buses.index[n.buses.index.str[2:] == suffix] @@ -410,28 +499,33 @@ def calculate_weighted_prices(n, label, weighted_prices): continue if carrier in ["H2", "gas"]: - load = pd.DataFrame(index=n.snapshots, columns=buses, data=0.) + load = pd.DataFrame(index=n.snapshots, columns=buses, data=0.0) elif carrier[:5] == "space": - load = heat_demand_df[buses.str[:2]].rename(columns=lambda i: str(i)+suffix) + load = heat_demand_df[buses.str[:2]].rename( + columns=lambda i: str(i) + suffix + ) else: load = n.loads_t.p_set[buses] for tech in link_loads[carrier]: - - names = n.links.index[n.links.index.to_series().str[-len(tech):] == tech] + names = n.links.index[n.links.index.to_series().str[-len(tech) :] == tech] if names.empty: continue - load += n.links_t.p0[names].groupby(n.links.loc[names, "bus0"],axis=1).sum() + load += ( + n.links_t.p0[names].groupby(n.links.loc[names, "bus0"], axis=1).sum() + ) # Add H2 Store when charging - #if carrier == "H2": + # if carrier == "H2": # stores = n.stores_t.p[buses+ " Store"].groupby(n.stores.loc[buses+ " Store", "bus"],axis=1).sum(axis=1) # stores[stores > 0.] = 0. # load += -stores - weighted_prices.loc[carrier,label] = (load * n.buses_t.marginal_price[buses]).sum().sum() / load.sum().sum() + weighted_prices.loc[carrier, label] = ( + load * n.buses_t.marginal_price[buses] + ).sum().sum() / load.sum().sum() # still have no idea what this is for, only for debug reasons. if carrier[:5] == "space": @@ -455,21 +549,24 @@ def calculate_market_values(n, label, market_values): market_values = market_values.reindex(market_values.index.union(techs)) - for tech in techs: gens = generators[n.generators.loc[generators, "carrier"] == tech] - dispatch = n.generators_t.p[gens].groupby(n.generators.loc[gens, "bus"], axis=1).sum().reindex(columns=buses, fill_value=0.) 
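
The quantity assembled in this hunk is a market value: for each technology, revenue at the local bus price divided by dispatched energy. A toy computation with assumed numbers:

import pandas as pd

dispatch = pd.Series([10.0, 0.0, 8.0])  # MWh in three sample hours (assumed)
price = pd.Series([30.0, 0.0, 50.0])    # EUR/MWh at the same bus (assumed)
market_value = (dispatch * price).sum() / dispatch.sum()  # 700 / 18 ~ 38.9 EUR/MWh
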
+ dispatch = ( + n.generators_t.p[gens] + .groupby(n.generators.loc[gens, "bus"], axis=1) + .sum() + .reindex(columns=buses, fill_value=0.0) + ) revenue = dispatch * n.buses_t.marginal_price[buses] - market_values.at[tech,label] = revenue.sum().sum() / dispatch.sum().sum() - + market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum() ## Now do market value of links ## for i in ["0", "1"]: - all_links = n.links.index[n.buses.loc[n.links["bus"+i], "carrier"] == carrier] + all_links = n.links.index[n.buses.loc[n.links["bus" + i], "carrier"] == carrier] techs = n.links.loc[all_links, "carrier"].value_counts().index @@ -478,39 +575,51 @@ def calculate_market_values(n, label, market_values): for tech in techs: links = all_links[n.links.loc[all_links, "carrier"] == tech] - dispatch = n.links_t["p"+i][links].groupby(n.links.loc[links, "bus"+i], axis=1).sum().reindex(columns=buses, fill_value=0.) + dispatch = ( + n.links_t["p" + i][links] + .groupby(n.links.loc[links, "bus" + i], axis=1) + .sum() + .reindex(columns=buses, fill_value=0.0) + ) revenue = dispatch * n.buses_t.marginal_price[buses] - market_values.at[tech,label] = revenue.sum().sum() / dispatch.sum().sum() + market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum() return market_values def calculate_price_statistics(n, label, price_statistics): - - - price_statistics = price_statistics.reindex(price_statistics.index.union(pd.Index(["zero_hours", "mean", "standard_deviation"]))) + price_statistics = price_statistics.reindex( + price_statistics.index.union( + pd.Index(["zero_hours", "mean", "standard_deviation"]) + ) + ) buses = n.buses.index[n.buses.carrier == "AC"] - threshold = 0.1 # higher than phoney marginal_cost of wind/solar + threshold = 0.1 # higher than phoney marginal_cost of wind/solar - df = pd.DataFrame(data=0., columns=buses, index=n.snapshots) + df = pd.DataFrame(data=0.0, columns=buses, index=n.snapshots) - df[n.buses_t.marginal_price[buses] < threshold] = 1. 
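
For the statistic this hunk reformats: "zero_hours" is the share of (bus, snapshot) pairs whose electricity price falls below the 0.1 EUR/MWh threshold, i.e. hours in which near-zero-marginal-cost generation sets the price. A toy version with assumed data:

import pandas as pd

prices = pd.DataFrame({"bus1": [0.0, 35.0, 0.05], "bus2": [42.0, 0.0, 28.0]})
threshold = 0.1                            # EUR/MWh, as in the script
flag = (prices < threshold).astype(float)  # 1.0 where the price is near zero
zero_hours = flag.sum().sum() / flag.size  # here 3 / 6 = 0.5
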
+ df[n.buses_t.marginal_price[buses] < threshold] = 1.0 - price_statistics.at["zero_hours", label] = df.sum().sum() / (df.shape[0] * df.shape[1]) + price_statistics.at["zero_hours", label] = df.sum().sum() / ( + df.shape[0] * df.shape[1] + ) - price_statistics.at["mean", label] = n.buses_t.marginal_price[buses].unstack().mean() + price_statistics.at["mean", label] = ( + n.buses_t.marginal_price[buses].unstack().mean() + ) - price_statistics.at["standard_deviation", label] = n.buses_t.marginal_price[buses].unstack().std() + price_statistics.at["standard_deviation", label] = ( + n.buses_t.marginal_price[buses].unstack().std() + ) return price_statistics def make_summaries(networks_dict): - outputs = [ "nodal_costs", "nodal_capacities", @@ -530,8 +639,7 @@ def make_summaries(networks_dict): ] columns = pd.MultiIndex.from_tuples( - networks_dict.keys(), - names=["cluster", "lv", "opt", "planning_horizon"] + networks_dict.keys(), names=["cluster", "lv", "opt", "planning_horizon"] ) df = {} @@ -560,41 +668,48 @@ def to_csv(df): if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from helper import mock_snakemake - snakemake = mock_snakemake('make_summary') - - logging.basicConfig(level=snakemake.config['logging_level']) + + snakemake = mock_snakemake("make_summary") + + logging.basicConfig(level=snakemake.config["logging_level"]) networks_dict = { - (cluster, lv, opt+sector_opt, planning_horizon) : - snakemake.config['results_dir'] + snakemake.config['run'] + f'/postnetworks/elec_s{simpl}_{cluster}_lv{lv}_{opt}_{sector_opt}_{planning_horizon}.nc' \ - for simpl in snakemake.config['scenario']['simpl'] \ - for cluster in snakemake.config['scenario']['clusters'] \ - for opt in snakemake.config['scenario']['opts'] \ - for sector_opt in snakemake.config['scenario']['sector_opts'] \ - for lv in snakemake.config['scenario']['lv'] \ - for planning_horizon in snakemake.config['scenario']['planning_horizons'] + (cluster, lv, opt + sector_opt, planning_horizon): snakemake.config[ + "results_dir" + ] + + snakemake.config["run"] + + f"/postnetworks/elec_s{simpl}_{cluster}_lv{lv}_{opt}_{sector_opt}_{planning_horizon}.nc" + for simpl in snakemake.config["scenario"]["simpl"] + for cluster in snakemake.config["scenario"]["clusters"] + for opt in snakemake.config["scenario"]["opts"] + for sector_opt in snakemake.config["scenario"]["sector_opts"] + for lv in snakemake.config["scenario"]["lv"] + for planning_horizon in snakemake.config["scenario"]["planning_horizons"] } Nyears = 1 costs_db = prepare_costs( snakemake.input.costs, - snakemake.config['costs']['USD2013_to_EUR2013'], - snakemake.config['costs']['discountrate'], + snakemake.config["costs"]["USD2013_to_EUR2013"], + snakemake.config["costs"]["discountrate"], Nyears, - snakemake.config['costs']['lifetime'] + snakemake.config["costs"]["lifetime"], ) df = make_summaries(networks_dict) - df["metrics"].loc["total costs"] = df["costs"].sum() + df["metrics"].loc["total costs"] = df["costs"].sum() to_csv(df) - if snakemake.config["foresight"]=='myopic': - cumulative_cost=calculate_cumulative_cost() - cumulative_cost.to_csv(snakemake.config['summary_dir'] + '/' + snakemake.config['run'] + '/csvs/cumulative_cost.csv') - - + if snakemake.config["foresight"] == "myopic": + cumulative_cost = calculate_cumulative_cost() + cumulative_cost.to_csv( + snakemake.config["summary_dir"] + + "/" + + snakemake.config["run"] + + "/csvs/cumulative_cost.csv" + ) diff --git a/scripts/plot_network.py b/scripts/plot_network.py index 
b95fca27..1941ab84 100644 --- a/scripts/plot_network.py +++ b/scripts/plot_network.py @@ -1,20 +1,19 @@ +# -*- coding: utf-8 -*- import logging + logger = logging.getLogger(__name__) -import pypsa - -import pandas as pd +import cartopy.crs as ccrs import geopandas as gpd import matplotlib.pyplot as plt -import cartopy.crs as ccrs - -from pypsa.plot import add_legend_circles, add_legend_patches, add_legend_lines - -from make_summary import assign_carriers -from plot_summary import rename_techs, preferred_order +import pandas as pd +import pypsa from helper import override_component_attrs +from make_summary import assign_carriers +from plot_summary import preferred_order, rename_techs +from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches -plt.style.use(['ggplot', "matplotlibrc"]) +plt.style.use(["ggplot", "matplotlibrc"]) def rename_techs_tyndp(tech): @@ -46,15 +45,20 @@ def assign_location(n): ifind = pd.Series(c.df.index.str.find(" ", start=4), c.df.index) for i in ifind.value_counts().index: # these have already been assigned defaults - if i == -1: continue + if i == -1: + continue names = ifind.index[ifind == i] - c.df.loc[names, 'location'] = names.str[:i] + c.df.loc[names, "location"] = names.str[:i] -def plot_map(network, components=["links", "stores", "storage_units", "generators"], - bus_size_factor=1.7e10, transmission=False, with_legend=True): - - tech_colors = snakemake.config['plotting']['tech_colors'] +def plot_map( + network, + components=["links", "stores", "storage_units", "generators"], + bus_size_factor=1.7e10, + transmission=False, + with_legend=True, +): + tech_colors = snakemake.config["plotting"]["tech_colors"] n = network.copy() assign_location(n) @@ -73,19 +77,24 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator attr = "e_nom_opt" if comp == "stores" else "p_nom_opt" - costs_c = ((df_c.capital_cost * df_c[attr]) - .groupby([df_c.location, df_c.nice_group]).sum() - .unstack().fillna(0.)) + costs_c = ( + (df_c.capital_cost * df_c[attr]) + .groupby([df_c.location, df_c.nice_group]) + .sum() + .unstack() + .fillna(0.0) + ) costs = pd.concat([costs, costs_c], axis=1) logger.debug(f"{comp}, {costs}") costs = costs.groupby(costs.columns, axis=1).sum() - costs.drop(list(costs.columns[(costs == 0.).all()]), axis=1, inplace=True) + costs.drop(list(costs.columns[(costs == 0.0).all()]), axis=1, inplace=True) - new_columns = (preferred_order.intersection(costs.columns) - .append(costs.columns.difference(preferred_order))) + new_columns = preferred_order.intersection(costs.columns).append( + costs.columns.difference(preferred_order) + ) costs = costs[new_columns] for item in new_columns: @@ -95,12 +104,16 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator costs = costs.stack() # .sort_index() # hack because impossible to drop buses... 
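# --- editor's note (illustration, not part of the patch) ---------------------
# The reformatted aggregation in plot_map() above pivots annualised capital
# cost per component into a (location x technology) table via
# groupby(...).sum().unstack(). A toy sketch; all names and numbers below are
# hypothetical:
import pandas as pd

df_c = pd.DataFrame(
    {
        "location": ["DE0", "DE0", "FR0"],
        "nice_group": ["solar", "battery", "solar"],
        "capital_cost": [10.0, 5.0, 8.0],  # EUR/MW/a
        "p_nom_opt": [100.0, 20.0, 50.0],  # MW
    }
)
costs_c = (
    (df_c.capital_cost * df_c.p_nom_opt)
    .groupby([df_c.location, df_c.nice_group])
    .sum()
    .unstack()
    .fillna(0.0)
)
print(costs_c)  # rows: locations; columns: technologies; values: EUR/a
# --- end editor's note --------------------------------------------------------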
- eu_location = snakemake.config["plotting"].get("eu_node_location", dict(x=-5.5, y=46)) + eu_location = snakemake.config["plotting"].get( + "eu_node_location", dict(x=-5.5, y=46) + ) n.buses.loc["EU gas", "x"] = eu_location["x"] n.buses.loc["EU gas", "y"] = eu_location["y"] - n.links.drop(n.links.index[(n.links.carrier != "DC") & ( - n.links.carrier != "B2B")], inplace=True) + n.links.drop( + n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")], + inplace=True, + ) # drop non-bus to_drop = costs.index.levels[0].symmetric_difference(n.buses.index) @@ -111,13 +124,13 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator # make sure they are removed from index costs.index = pd.MultiIndex.from_tuples(costs.index.values) - threshold = 100e6 # 100 mEUR/a + threshold = 100e6 # 100 mEUR/a carriers = costs.groupby(level=1).sum() carriers = carriers.where(carriers > threshold).dropna() carriers = list(carriers.index) # PDF has minimum width, so set these to zero - line_lower_threshold = 500. + line_lower_threshold = 500.0 line_upper_threshold = 1e4 linewidth_factor = 4e3 ac_color = "rosybrown" @@ -133,7 +146,7 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator line_widths = n.lines.s_nom_opt link_widths = n.links.p_nom_opt linewidth_factor = 2e3 - line_lower_threshold = 0. + line_lower_threshold = 0.0 title = "current grid" else: line_widths = n.lines.s_nom_opt - n.lines.s_nom_min @@ -144,12 +157,12 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator line_widths = n.lines.s_nom_opt link_widths = n.links.p_nom_opt title = "total grid" - - line_widths = line_widths.clip(line_lower_threshold,line_upper_threshold) - link_widths = link_widths.clip(line_lower_threshold,line_upper_threshold) - line_widths = line_widths.replace(line_lower_threshold,0) - link_widths = link_widths.replace(line_lower_threshold,0) + line_widths = line_widths.clip(line_lower_threshold, line_upper_threshold) + link_widths = link_widths.clip(line_lower_threshold, line_upper_threshold) + + line_widths = line_widths.replace(line_lower_threshold, 0) + link_widths = link_widths.replace(line_lower_threshold, 0) fig, ax = plt.subplots(subplot_kw={"projection": ccrs.EqualEarth()}) fig.set_size_inches(7, 6) @@ -161,12 +174,13 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator link_colors=dc_color, line_widths=line_widths / linewidth_factor, link_widths=link_widths / linewidth_factor, - ax=ax, **map_opts + ax=ax, + **map_opts, ) sizes = [20, 10, 5] labels = [f"{s} bEUR/a" for s in sizes] - sizes = [s/bus_size_factor*1e9 for s in sizes] + sizes = [s / bus_size_factor * 1e9 for s in sizes] legend_kw = dict( loc="upper left", @@ -174,7 +188,7 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator labelspacing=0.8, frameon=False, handletextpad=0, - title='system cost', + title="system cost", ) add_legend_circles( @@ -183,13 +197,13 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator labels, srid=n.srid, patch_kw=dict(facecolor="lightgrey"), - legend_kw=legend_kw + legend_kw=legend_kw, ) sizes = [10, 5] labels = [f"{s} GW" for s in sizes] scale = 1e3 / linewidth_factor - sizes = [s*scale for s in sizes] + sizes = [s * scale for s in sizes] legend_kw = dict( loc="upper left", @@ -197,24 +211,19 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator frameon=False, labelspacing=0.8, handletextpad=1, - title=title + 
title=title,
     )

     add_legend_lines(
-        ax,
-        sizes,
-        labels,
-        patch_kw=dict(color='lightgrey'),
-        legend_kw=legend_kw
+        ax, sizes, labels, patch_kw=dict(color="lightgrey"), legend_kw=legend_kw
     )
-
+
     legend_kw = dict(
         bbox_to_anchor=(1.52, 1.04),
         frameon=False,
     )

     if with_legend:
-
         colors = [tech_colors[c] for c in carriers] + [ac_color, dc_color]
         labels = carriers + ["HVAC line", "HVDC link"]

@@ -225,14 +234,12 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator
             legend_kw=legend_kw,
         )

-    fig.savefig(
-        snakemake.output.map,
-        transparent=True,
-        bbox_inches="tight"
-    )
+    fig.savefig(snakemake.output.map, transparent=True, bbox_inches="tight")
+

 def group_pipes(df, drop_direction=False):
-    """Group pipes which connect same buses and return overall capacity.
+    """
+    Group pipes which connect same buses and return overall capacity.
     """
     if drop_direction:
         positive_order = df.bus0 < df.bus1
@@ -244,16 +251,17 @@ def group_pipes(df, drop_direction=False):
     # there are pipes for each investment period; rename to AC bus names for plotting
     df.index = df.apply(
         lambda x: f"H2 pipeline {x.bus0.replace(' H2', '')} -> {x.bus1.replace(' H2', '')}",
-        axis=1
+        axis=1,
     )
     # group pipelines connecting the same buses and rename them for plotting
-    pipe_capacity = df.groupby(level=0).agg({"p_nom_opt": sum, "bus0": "first", "bus1": "first"})
+    pipe_capacity = df.groupby(level=0).agg(
+        {"p_nom_opt": sum, "bus0": "first", "bus1": "first"}
+    )

     return pipe_capacity


 def plot_h2_map(network, regions):
-
     n = network.copy()
     if "H2 pipeline" not in n.links.carrier.unique():
         return
@@ -261,7 +269,11 @@ def plot_h2_map(network, regions):
     assign_location(n)

     h2_storage = n.stores.query("carrier == 'H2'")
-    regions["H2"] = h2_storage.rename(index=h2_storage.bus.map(n.buses.location)).e_nom_opt.div(1e6) # TWh
+    regions["H2"] = h2_storage.rename(
+        index=h2_storage.bus.map(n.buses.location)
+    ).e_nom_opt.div(
+        1e6
+    )  # TWh
     regions["H2"] = regions["H2"].where(regions["H2"] > 0.1)

     bus_size_factor = 1e5
@@ -276,26 +288,33 @@ def plot_h2_map(network, regions):

     elec = n.links[n.links.carrier.isin(carriers)].index

-    bus_sizes = n.links.loc[elec,"p_nom_opt"].groupby([n.links["bus0"], n.links.carrier]).sum() / bus_size_factor
+    bus_sizes = (
+        n.links.loc[elec, "p_nom_opt"].groupby([n.links["bus0"], n.links.carrier]).sum()
+        / bus_size_factor
+    )

     # make a fake MultiIndex so that area is correct for legend
     bus_sizes.rename(index=lambda x: x.replace(" H2", ""), level=0, inplace=True)

     # drop all links which are not H2 pipelines
-    n.links.drop(n.links.index[~n.links.carrier.str.contains("H2 pipeline")], inplace=True)
+    n.links.drop(
+        n.links.index[~n.links.carrier.str.contains("H2 pipeline")], inplace=True
+    )

-    h2_new = n.links[n.links.carrier=="H2 pipeline"]
-    h2_retro = n.links[n.links.carrier=='H2 pipeline retrofitted']
+    h2_new = n.links[n.links.carrier == "H2 pipeline"]
+    h2_retro = n.links[n.links.carrier == "H2 pipeline retrofitted"]

-    if snakemake.config['foresight'] == 'myopic':
+    if snakemake.config["foresight"] == "myopic":
         # sum capacity for pipelines from different investment periods
         h2_new = group_pipes(h2_new)

         if not h2_retro.empty:
-            h2_retro = group_pipes(h2_retro, drop_direction=True).reindex(h2_new.index).fillna(0)
-
+            h2_retro = (
+                group_pipes(h2_retro, drop_direction=True)
+                .reindex(h2_new.index)
+                .fillna(0)
+            )

     if not h2_retro.empty:
-
         positive_order = h2_retro.bus0 < h2_retro.bus1
         h2_retro_p = h2_retro[positive_order]
         swap_buses = {"bus0": "bus1", "bus1": "bus0"}
@@ -305,7 
+324,7 @@ def plot_h2_map(network, regions): h2_retro["index_orig"] = h2_retro.index h2_retro.index = h2_retro.apply( lambda x: f"H2 pipeline {x.bus0.replace(' H2', '')} -> {x.bus1.replace(' H2', '')}", - axis=1 + axis=1, ) retro_w_new_i = h2_retro.index.intersection(h2_new.index) @@ -319,19 +338,20 @@ def plot_h2_map(network, regions): h2_total = pd.concat(to_concat).p_nom_opt.groupby(level=0).sum() else: - h2_total = h2_new.p_nom_opt link_widths_total = h2_total / linewidth_factor n.links.rename(index=lambda x: x.split("-2")[0], inplace=True) n.links = n.links.groupby(level=0).first() - link_widths_total = link_widths_total.reindex(n.links.index).fillna(0.) - link_widths_total[n.links.p_nom_opt < line_lower_threshold] = 0. + link_widths_total = link_widths_total.reindex(n.links.index).fillna(0.0) + link_widths_total[n.links.p_nom_opt < line_lower_threshold] = 0.0 - retro = n.links.p_nom_opt.where(n.links.carrier=='H2 pipeline retrofitted', other=0.) + retro = n.links.p_nom_opt.where( + n.links.carrier == "H2 pipeline retrofitted", other=0.0 + ) link_widths_retro = retro / linewidth_factor - link_widths_retro[n.links.p_nom_opt < line_lower_threshold] = 0. + link_widths_retro[n.links.p_nom_opt < line_lower_threshold] = 0.0 n.links.bus0 = n.links.bus0.str.replace(" H2", "") n.links.bus1 = n.links.bus1.str.replace(" H2", "") @@ -339,18 +359,12 @@ def plot_h2_map(network, regions): proj = ccrs.EqualEarth() regions = regions.to_crs(proj.proj4_init) - fig, ax = plt.subplots( - figsize=(7, 6), - subplot_kw={"projection": proj} - ) + fig, ax = plt.subplots(figsize=(7, 6), subplot_kw={"projection": proj}) - color_h2_pipe = '#b3f3f4' - color_retrofit = '#499a9c' - - bus_colors = { - "H2 Electrolysis": "#ff29d9", - "H2 Fuel Cell": '#805394' - } + color_h2_pipe = "#b3f3f4" + color_retrofit = "#499a9c" + + bus_colors = {"H2 Electrolysis": "#ff29d9", "H2 Fuel Cell": "#805394"} n.plot( geomap=True, @@ -360,7 +374,7 @@ def plot_h2_map(network, regions): link_widths=link_widths_total, branch_components=["Link"], ax=ax, - **map_opts + **map_opts, ) n.plot( @@ -371,13 +385,13 @@ def plot_h2_map(network, regions): branch_components=["Link"], ax=ax, color_geomap=False, - boundaries=map_opts["boundaries"] + boundaries=map_opts["boundaries"], ) regions.plot( ax=ax, column="H2", - cmap='Blues', + cmap="Blues", linewidths=0, legend=True, vmax=6, @@ -391,7 +405,7 @@ def plot_h2_map(network, regions): sizes = [50, 10] labels = [f"{s} GW" for s in sizes] - sizes = [s/bus_size_factor*1e3 for s in sizes] + sizes = [s / bus_size_factor * 1e3 for s in sizes] legend_kw = dict( loc="upper left", @@ -401,16 +415,19 @@ def plot_h2_map(network, regions): frameon=False, ) - add_legend_circles(ax, sizes, labels, + add_legend_circles( + ax, + sizes, + labels, srid=n.srid, - patch_kw=dict(facecolor='lightgrey'), - legend_kw=legend_kw + patch_kw=dict(facecolor="lightgrey"), + legend_kw=legend_kw, ) sizes = [30, 10] labels = [f"{s} GW" for s in sizes] scale = 1e3 / linewidth_factor - sizes = [s*scale for s in sizes] + sizes = [s * scale for s in sizes] legend_kw = dict( loc="upper left", @@ -424,7 +441,7 @@ def plot_h2_map(network, regions): ax, sizes, labels, - patch_kw=dict(color='lightgrey'), + patch_kw=dict(color="lightgrey"), legend_kw=legend_kw, ) @@ -438,23 +455,16 @@ def plot_h2_map(network, regions): frameon=False, ) - add_legend_patches( - ax, - colors, - labels, - legend_kw=legend_kw - ) + add_legend_patches(ax, colors, labels, legend_kw=legend_kw) ax.set_facecolor("white") fig.savefig( - 
snakemake.output.map.replace("-costs-all","-h2_network"), - bbox_inches="tight" + snakemake.output.map.replace("-costs-all", "-h2_network"), bbox_inches="tight" ) def plot_ch4_map(network): - n = network.copy() if "gas pipeline" not in n.links.carrier.unique(): @@ -470,22 +480,54 @@ def plot_ch4_map(network): # Drop non-electric buses so they don't clutter the plot n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) - fossil_gas_i = n.generators[n.generators.carrier=="gas"].index - fossil_gas = n.generators_t.p.loc[:,fossil_gas_i].mul(n.snapshot_weightings.generators, axis=0).sum().groupby(n.generators.loc[fossil_gas_i,"bus"]).sum() / bus_size_factor + fossil_gas_i = n.generators[n.generators.carrier == "gas"].index + fossil_gas = ( + n.generators_t.p.loc[:, fossil_gas_i] + .mul(n.snapshot_weightings.generators, axis=0) + .sum() + .groupby(n.generators.loc[fossil_gas_i, "bus"]) + .sum() + / bus_size_factor + ) fossil_gas.rename(index=lambda x: x.replace(" gas", ""), inplace=True) fossil_gas = fossil_gas.reindex(n.buses.index).fillna(0) # make a fake MultiIndex so that area is correct for legend fossil_gas.index = pd.MultiIndex.from_product([fossil_gas.index, ["fossil gas"]]) methanation_i = n.links[n.links.carrier.isin(["helmeth", "Sabatier"])].index - methanation = abs(n.links_t.p1.loc[:,methanation_i].mul(n.snapshot_weightings.generators, axis=0)).sum().groupby(n.links.loc[methanation_i,"bus1"]).sum() / bus_size_factor - methanation = methanation.groupby(methanation.index).sum().rename(index=lambda x: x.replace(" gas", "")) + methanation = ( + abs( + n.links_t.p1.loc[:, methanation_i].mul( + n.snapshot_weightings.generators, axis=0 + ) + ) + .sum() + .groupby(n.links.loc[methanation_i, "bus1"]) + .sum() + / bus_size_factor + ) + methanation = ( + methanation.groupby(methanation.index) + .sum() + .rename(index=lambda x: x.replace(" gas", "")) + ) # make a fake MultiIndex so that area is correct for legend methanation.index = pd.MultiIndex.from_product([methanation.index, ["methanation"]]) - biogas_i = n.stores[n.stores.carrier=="biogas"].index - biogas = n.stores_t.p.loc[:,biogas_i].mul(n.snapshot_weightings.generators, axis=0).sum().groupby(n.stores.loc[biogas_i,"bus"]).sum() / bus_size_factor - biogas = biogas.groupby(biogas.index).sum().rename(index=lambda x: x.replace(" biogas", "")) + biogas_i = n.stores[n.stores.carrier == "biogas"].index + biogas = ( + n.stores_t.p.loc[:, biogas_i] + .mul(n.snapshot_weightings.generators, axis=0) + .sum() + .groupby(n.stores.loc[biogas_i, "bus"]) + .sum() + / bus_size_factor + ) + biogas = ( + biogas.groupby(biogas.index) + .sum() + .rename(index=lambda x: x.replace(" biogas", "")) + ) # make a fake MultiIndex so that area is correct for legend biogas.index = pd.MultiIndex.from_product([biogas.index, ["biogas"]]) @@ -496,22 +538,22 @@ def plot_ch4_map(network): n.links.drop(to_remove, inplace=True) link_widths_rem = n.links.p_nom_opt / linewidth_factor - link_widths_rem[n.links.p_nom_opt < line_lower_threshold] = 0. + link_widths_rem[n.links.p_nom_opt < line_lower_threshold] = 0.0 link_widths_orig = n.links.p_nom / linewidth_factor - link_widths_orig[n.links.p_nom < line_lower_threshold] = 0. + link_widths_orig[n.links.p_nom < line_lower_threshold] = 0.0 max_usage = n.links_t.p0.abs().max(axis=0) - link_widths_used = max_usage / linewidth_factor - link_widths_used[max_usage < line_lower_threshold] = 0. 
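# --- editor's note (illustration, not part of the patch) ---------------------
# The width assignments above (and the clip/replace pattern in plot_map) zero
# out capacities below line_lower_threshold so that thin links vanish from the
# map instead of being rendered at the PDF's minimum stroke width. A toy
# sketch with hypothetical values:
import pandas as pd

linewidth_factor, line_lower_threshold = 1e4, 500.0
max_usage = pd.Series({"link_a": 100.0, "link_b": 8000.0})  # MW
link_widths_used = max_usage / linewidth_factor
link_widths_used[max_usage < line_lower_threshold] = 0.0  # hide sub-threshold links
print(link_widths_used.to_dict())  # {'link_a': 0.0, 'link_b': 0.8}
# --- end editor's note --------------------------------------------------------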
+ link_widths_used = max_usage / linewidth_factor + link_widths_used[max_usage < line_lower_threshold] = 0.0 - tech_colors = snakemake.config['plotting']['tech_colors'] + tech_colors = snakemake.config["plotting"]["tech_colors"] pipe_colors = { "gas pipeline": "#f08080", "gas pipeline new": "#c46868", - "gas pipeline (in 2020)": 'lightgrey', - "gas pipeline (available)": '#e8d1d1', + "gas pipeline (in 2020)": "lightgrey", + "gas pipeline (available)": "#e8d1d1", } link_color_used = n.links.carrier.map(pipe_colors) @@ -522,88 +564,88 @@ def plot_ch4_map(network): bus_colors = { "fossil gas": tech_colors["fossil gas"], "methanation": tech_colors["methanation"], - "biogas": "seagreen" + "biogas": "seagreen", } - fig, ax = plt.subplots(figsize=(7,6), subplot_kw={"projection": ccrs.EqualEarth()}) + fig, ax = plt.subplots(figsize=(7, 6), subplot_kw={"projection": ccrs.EqualEarth()}) n.plot( bus_sizes=bus_sizes, bus_colors=bus_colors, - link_colors=pipe_colors['gas pipeline (in 2020)'], + link_colors=pipe_colors["gas pipeline (in 2020)"], link_widths=link_widths_orig, branch_components=["Link"], ax=ax, - **map_opts + **map_opts, ) n.plot( ax=ax, - bus_sizes=0., - link_colors=pipe_colors['gas pipeline (available)'], + bus_sizes=0.0, + link_colors=pipe_colors["gas pipeline (available)"], link_widths=link_widths_rem, branch_components=["Link"], color_geomap=False, - boundaries=map_opts["boundaries"] + boundaries=map_opts["boundaries"], ) n.plot( ax=ax, - bus_sizes=0., + bus_sizes=0.0, link_colors=link_color_used, link_widths=link_widths_used, branch_components=["Link"], color_geomap=False, - boundaries=map_opts["boundaries"] + boundaries=map_opts["boundaries"], ) sizes = [100, 10] labels = [f"{s} TWh" for s in sizes] - sizes = [s/bus_size_factor*1e6 for s in sizes] - + sizes = [s / bus_size_factor * 1e6 for s in sizes] + legend_kw = dict( loc="upper left", bbox_to_anchor=(0, 1.03), labelspacing=0.8, frameon=False, handletextpad=1, - title='gas sources', + title="gas sources", ) - + add_legend_circles( ax, sizes, labels, srid=n.srid, - patch_kw=dict(facecolor='lightgrey'), + patch_kw=dict(facecolor="lightgrey"), legend_kw=legend_kw, ) sizes = [50, 10] labels = [f"{s} GW" for s in sizes] scale = 1e3 / linewidth_factor - sizes = [s*scale for s in sizes] - + sizes = [s * scale for s in sizes] + legend_kw = dict( loc="upper left", bbox_to_anchor=(0.25, 1.03), frameon=False, labelspacing=0.8, handletextpad=1, - title='gas pipeline' + title="gas pipeline", ) - + add_legend_lines( ax, sizes, labels, - patch_kw=dict(color='lightgrey'), + patch_kw=dict(color="lightgrey"), legend_kw=legend_kw, ) colors = list(pipe_colors.values()) + list(bus_colors.values()) labels = list(pipe_colors.keys()) + list(bus_colors.keys()) - + # legend on the side # legend_kw = dict( # bbox_to_anchor=(1.47, 1.04), @@ -611,7 +653,7 @@ def plot_ch4_map(network): # ) legend_kw = dict( - loc='upper left', + loc="upper left", bbox_to_anchor=(0, 1.24), ncol=2, frameon=False, @@ -625,26 +667,21 @@ def plot_ch4_map(network): ) fig.savefig( - snakemake.output.map.replace("-costs-all","-ch4_network"), - bbox_inches="tight" + snakemake.output.map.replace("-costs-all", "-ch4_network"), bbox_inches="tight" ) def plot_map_without(network): - n = network.copy() assign_location(n) # Drop non-electric buses so they don't clutter the plot n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) - fig, ax = plt.subplots( - figsize=(7, 6), - subplot_kw={"projection": ccrs.EqualEarth()} - ) + fig, ax = plt.subplots(figsize=(7, 6), 
subplot_kw={"projection": ccrs.EqualEarth()}) # PDF has minimum width, so set these to zero - line_lower_threshold = 200. + line_lower_threshold = 200.0 line_upper_threshold = 1e4 linewidth_factor = 3e3 ac_color = "rosybrown" @@ -652,9 +689,11 @@ def plot_map_without(network): # hack because impossible to drop buses... if "EU gas" in n.buses.index: - eu_location = snakemake.config["plotting"].get("eu_node_location", dict(x=-5.5, y=46)) - n.buses.loc["EU gas", "x"] = eu_location["x"] - n.buses.loc["EU gas", "y"] = eu_location["y"] + eu_location = snakemake.config["plotting"].get( + "eu_node_location", dict(x=-5.5, y=46) + ) + n.buses.loc["EU gas", "x"] = eu_location["x"] + n.buses.loc["EU gas", "y"] = eu_location["y"] to_drop = n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")] n.links.drop(to_drop, inplace=True) @@ -666,11 +705,11 @@ def plot_map_without(network): line_widths = n.lines.s_nom_min link_widths = n.links.p_nom_min - line_widths = line_widths.clip(line_lower_threshold,line_upper_threshold) - link_widths = link_widths.clip(line_lower_threshold,line_upper_threshold) + line_widths = line_widths.clip(line_lower_threshold, line_upper_threshold) + link_widths = link_widths.clip(line_lower_threshold, line_upper_threshold) - line_widths = line_widths.replace(line_lower_threshold,0) - link_widths = link_widths.replace(line_lower_threshold,0) + line_widths = line_widths.replace(line_lower_threshold, 0) + link_widths = link_widths.replace(line_lower_threshold, 0) n.plot( bus_colors="k", @@ -678,32 +717,34 @@ def plot_map_without(network): link_colors=dc_color, line_widths=line_widths / linewidth_factor, link_widths=link_widths / linewidth_factor, - ax=ax, **map_opts + ax=ax, + **map_opts, ) handles = [] labels = [] for s in (10, 5): - handles.append(plt.Line2D([0], [0], color=ac_color, - linewidth=s * 1e3 / linewidth_factor)) + handles.append( + plt.Line2D([0], [0], color=ac_color, linewidth=s * 1e3 / linewidth_factor) + ) labels.append(f"{s} GW") - l1_1 = ax.legend(handles, labels, - loc="upper left", bbox_to_anchor=(0.05, 1.01), - frameon=False, - labelspacing=0.8, handletextpad=1.5, - title='Today\'s transmission') + l1_1 = ax.legend( + handles, + labels, + loc="upper left", + bbox_to_anchor=(0.05, 1.01), + frameon=False, + labelspacing=0.8, + handletextpad=1.5, + title="Today's transmission", + ) ax.add_artist(l1_1) - fig.savefig( - snakemake.output.today, - transparent=True, - bbox_inches="tight" - ) + fig.savefig(snakemake.output.today, transparent=True, bbox_inches="tight") def plot_series(network, carrier="AC", name="test"): - n = network.copy() assign_location(n) assign_carriers(n) @@ -712,28 +753,41 @@ def plot_series(network, carrier="AC", name="test"): supply = pd.DataFrame(index=n.snapshots) for c in n.iterate_components(n.branch_components): - n_port = 4 if c.name=='Link' else 2 + n_port = 4 if c.name == "Link" else 2 for i in range(n_port): - supply = pd.concat((supply, - (-1) * c.pnl["p" + str(i)].loc[:, - c.df.index[c.df["bus" + str(i)].isin(buses)]].groupby(c.df.carrier, - axis=1).sum()), - axis=1) + supply = pd.concat( + ( + supply, + (-1) + * c.pnl["p" + str(i)] + .loc[:, c.df.index[c.df["bus" + str(i)].isin(buses)]] + .groupby(c.df.carrier, axis=1) + .sum(), + ), + axis=1, + ) for c in n.iterate_components(n.one_port_components): comps = c.df.index[c.df.bus.isin(buses)] - supply = pd.concat((supply, ((c.pnl["p"].loc[:, comps]).multiply( - c.df.loc[comps, "sign"])).groupby(c.df.carrier, axis=1).sum()), axis=1) + supply = pd.concat( + ( + supply, + 
((c.pnl["p"].loc[:, comps]).multiply(c.df.loc[comps, "sign"])) + .groupby(c.df.carrier, axis=1) + .sum(), + ), + axis=1, + ) supply = supply.groupby(rename_techs_tyndp, axis=1).sum() - both = supply.columns[(supply < 0.).any() & (supply > 0.).any()] + both = supply.columns[(supply < 0.0).any() & (supply > 0.0).any()] positive_supply = supply[both] negative_supply = supply[both] - positive_supply[positive_supply < 0.] = 0. - negative_supply[negative_supply > 0.] = 0. + positive_supply[positive_supply < 0.0] = 0.0 + negative_supply[negative_supply > 0.0] = 0.0 supply[both] = positive_supply @@ -761,48 +815,61 @@ def plot_series(network, carrier="AC", name="test"): supply = supply / 1e3 - supply.rename(columns={"electricity": "electric demand", - "heat": "heat demand"}, - inplace=True) + supply.rename( + columns={"electricity": "electric demand", "heat": "heat demand"}, inplace=True + ) supply.columns = supply.columns.str.replace("residential ", "") supply.columns = supply.columns.str.replace("services ", "") supply.columns = supply.columns.str.replace("urban decentral ", "decentral ") - preferred_order = pd.Index(["electric demand", - "transmission lines", - "hydroelectricity", - "hydro reservoir", - "run of river", - "pumped hydro storage", - "CHP", - "onshore wind", - "offshore wind", - "solar PV", - "solar thermal", - "building retrofitting", - "ground heat pump", - "air heat pump", - "resistive heater", - "OCGT", - "gas boiler", - "gas", - "natural gas", - "methanation", - "hydrogen storage", - "battery storage", - "hot water storage"]) + preferred_order = pd.Index( + [ + "electric demand", + "transmission lines", + "hydroelectricity", + "hydro reservoir", + "run of river", + "pumped hydro storage", + "CHP", + "onshore wind", + "offshore wind", + "solar PV", + "solar thermal", + "building retrofitting", + "ground heat pump", + "air heat pump", + "resistive heater", + "OCGT", + "gas boiler", + "gas", + "natural gas", + "methanation", + "hydrogen storage", + "battery storage", + "hot water storage", + ] + ) - new_columns = (preferred_order.intersection(supply.columns) - .append(supply.columns.difference(preferred_order))) + new_columns = preferred_order.intersection(supply.columns).append( + supply.columns.difference(preferred_order) + ) - supply = supply.groupby(supply.columns, axis=1).sum() + supply = supply.groupby(supply.columns, axis=1).sum() fig, ax = plt.subplots() fig.set_size_inches((8, 5)) - (supply.loc[start:stop, new_columns] - .plot(ax=ax, kind="area", stacked=True, linewidth=0., - color=[snakemake.config['plotting']['tech_colors'][i.replace(suffix, "")] - for i in new_columns])) + ( + supply.loc[start:stop, new_columns].plot( + ax=ax, + kind="area", + stacked=True, + linewidth=0.0, + color=[ + snakemake.config["plotting"]["tech_colors"][i.replace(suffix, "")] + for i in new_columns + ], + ) + ) handles, labels = ax.get_legend_handles_labels() @@ -824,44 +891,53 @@ def plot_series(network, carrier="AC", name="test"): ax.set_ylabel("Power [GW]") fig.tight_layout() - fig.savefig("{}{}/maps/series-{}-{}-{}-{}-{}.pdf".format( - snakemake.config['results_dir'], snakemake.config['run'], - snakemake.wildcards["lv"], - carrier, start, stop, name), - transparent=True) + fig.savefig( + "{}{}/maps/series-{}-{}-{}-{}-{}.pdf".format( + snakemake.config["results_dir"], + snakemake.config["run"], + snakemake.wildcards["lv"], + carrier, + start, + stop, + name, + ), + transparent=True, + ) if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from 
helper import mock_snakemake + snakemake = mock_snakemake( - 'plot_network', - simpl='', + "plot_network", + simpl="", clusters="181", - lv='opt', - opts='', - sector_opts='Co2L0-730H-T-H-B-I-A-solar+p3-linemaxext10', + lv="opt", + opts="", + sector_opts="Co2L0-730H-T-H-B-I-A-solar+p3-linemaxext10", planning_horizons="2050", ) - logging.basicConfig(level=snakemake.config['logging_level']) + logging.basicConfig(level=snakemake.config["logging_level"]) overrides = override_component_attrs(snakemake.input.overrides) n = pypsa.Network(snakemake.input.network, override_component_attrs=overrides) regions = gpd.read_file(snakemake.input.regions).set_index("name") - map_opts = snakemake.config['plotting']['map'] + map_opts = snakemake.config["plotting"]["map"] - plot_map(n, + plot_map( + n, components=["generators", "links", "stores", "storage_units"], bus_size_factor=2e10, - transmission=False + transmission=False, ) plot_h2_map(n, regions) plot_ch4_map(n) plot_map_without(n) - #plot_series(n, carrier="AC", name=suffix) - #plot_series(n, carrier="heat", name=suffix) + # plot_series(n, carrier="AC", name=suffix) + # plot_series(n, carrier="heat", name=suffix) diff --git a/scripts/plot_summary.py b/scripts/plot_summary.py index eadf427c..7d14c902 100644 --- a/scripts/plot_summary.py +++ b/scripts/plot_summary.py @@ -1,25 +1,27 @@ +# -*- coding: utf-8 -*- import logging + logger = logging.getLogger(__name__) +import matplotlib.pyplot as plt import numpy as np import pandas as pd -import matplotlib.pyplot as plt -plt.style.use('ggplot') +plt.style.use("ggplot") -from prepare_sector_network import co2_emissions_year from helper import update_config_with_sector_opts +from prepare_sector_network import co2_emissions_year -#consolidate and rename + +# consolidate and rename def rename_techs(label): - prefix_to_remove = [ "residential ", "services ", "urban ", "rural ", "central ", - "decentral " + "decentral ", ] rename_if_contains = [ @@ -30,7 +32,7 @@ def rename_techs(label): "air heat pump", "ground heat pump", "resistive heater", - "Fischer-Tropsch" + "Fischer-Tropsch", ] rename_if_contains_dict = { @@ -58,151 +60,159 @@ def rename_techs(label): "co2 stored": "CO2 sequestration", "AC": "transmission lines", "DC": "transmission lines", - "B2B": "transmission lines" + "B2B": "transmission lines", } for ptr in prefix_to_remove: - if label[:len(ptr)] == ptr: - label = label[len(ptr):] + if label[: len(ptr)] == ptr: + label = label[len(ptr) :] for rif in rename_if_contains: if rif in label: label = rif - for old,new in rename_if_contains_dict.items(): + for old, new in rename_if_contains_dict.items(): if old in label: label = new - for old,new in rename.items(): + for old, new in rename.items(): if old == label: label = new return label -preferred_order = pd.Index([ - "transmission lines", - "hydroelectricity", - "hydro reservoir", - "run of river", - "pumped hydro storage", - "solid biomass", - "biogas", - "onshore wind", - "offshore wind", - "offshore wind (AC)", - "offshore wind (DC)", - "solar PV", - "solar thermal", - "solar rooftop", - "solar", - "building retrofitting", - "ground heat pump", - "air heat pump", - "heat pump", - "resistive heater", - "power-to-heat", - "gas-to-power/heat", - "CHP", - "OCGT", - "gas boiler", - "gas", - "natural gas", - "helmeth", - "methanation", - "ammonia", - "hydrogen storage", - "power-to-gas", - "power-to-liquid", - "battery storage", - "hot water storage", - "CO2 sequestration" -]) +preferred_order = pd.Index( + [ + "transmission lines", + "hydroelectricity", + 
"hydro reservoir", + "run of river", + "pumped hydro storage", + "solid biomass", + "biogas", + "onshore wind", + "offshore wind", + "offshore wind (AC)", + "offshore wind (DC)", + "solar PV", + "solar thermal", + "solar rooftop", + "solar", + "building retrofitting", + "ground heat pump", + "air heat pump", + "heat pump", + "resistive heater", + "power-to-heat", + "gas-to-power/heat", + "CHP", + "OCGT", + "gas boiler", + "gas", + "natural gas", + "helmeth", + "methanation", + "ammonia", + "hydrogen storage", + "power-to-gas", + "power-to-liquid", + "battery storage", + "hot water storage", + "CO2 sequestration", + ] +) + def plot_costs(): - - cost_df = pd.read_csv( - snakemake.input.costs, - index_col=list(range(3)), - header=list(range(n_header)) + snakemake.input.costs, index_col=list(range(3)), header=list(range(n_header)) ) df = cost_df.groupby(cost_df.index.get_level_values(2)).sum() - #convert to billions + # convert to billions df = df / 1e9 df = df.groupby(df.index.map(rename_techs)).sum() - to_drop = df.index[df.max(axis=1) < snakemake.config['plotting']['costs_threshold']] + to_drop = df.index[df.max(axis=1) < snakemake.config["plotting"]["costs_threshold"]] - logger.info(f"Dropping technology with costs below {snakemake.config['plotting']['costs_threshold']} EUR billion per year") + logger.info( + f"Dropping technology with costs below {snakemake.config['plotting']['costs_threshold']} EUR billion per year" + ) logger.debug(df.loc[to_drop]) df = df.drop(to_drop) logger.info(f"Total system cost of {round(df.sum()[0])} EUR billion per year") - new_index = preferred_order.intersection(df.index).append(df.index.difference(preferred_order)) + new_index = preferred_order.intersection(df.index).append( + df.index.difference(preferred_order) + ) new_columns = df.sum().sort_values().index - fig, ax = plt.subplots(figsize=(12,8)) + fig, ax = plt.subplots(figsize=(12, 8)) - df.loc[new_index,new_columns].T.plot( + df.loc[new_index, new_columns].T.plot( kind="bar", ax=ax, stacked=True, - color=[snakemake.config['plotting']['tech_colors'][i] for i in new_index] + color=[snakemake.config["plotting"]["tech_colors"][i] for i in new_index], ) - handles,labels = ax.get_legend_handles_labels() + handles, labels = ax.get_legend_handles_labels() handles.reverse() labels.reverse() - ax.set_ylim([0,snakemake.config['plotting']['costs_max']]) + ax.set_ylim([0, snakemake.config["plotting"]["costs_max"]]) ax.set_ylabel("System Cost [EUR billion per year]") ax.set_xlabel("") - ax.grid(axis='x') + ax.grid(axis="x") - ax.legend(handles, labels, ncol=1, loc="upper left", bbox_to_anchor=[1,1], frameon=False) + ax.legend( + handles, labels, ncol=1, loc="upper left", bbox_to_anchor=[1, 1], frameon=False + ) - fig.savefig(snakemake.output.costs, bbox_inches='tight') + fig.savefig(snakemake.output.costs, bbox_inches="tight") def plot_energy(): - energy_df = pd.read_csv( - snakemake.input.energy, - index_col=list(range(2)), - header=list(range(n_header)) + snakemake.input.energy, index_col=list(range(2)), header=list(range(n_header)) ) df = energy_df.groupby(energy_df.index.get_level_values(1)).sum() - #convert MWh to TWh + # convert MWh to TWh df = df / 1e6 df = df.groupby(df.index.map(rename_techs)).sum() - to_drop = df.index[df.abs().max(axis=1) < snakemake.config['plotting']['energy_threshold']] + to_drop = df.index[ + df.abs().max(axis=1) < snakemake.config["plotting"]["energy_threshold"] + ] - logger.info(f"Dropping all technology with energy consumption or production below 
{snakemake.config['plotting']['energy_threshold']} TWh/a") + logger.info( + f"Dropping all technology with energy consumption or production below {snakemake.config['plotting']['energy_threshold']} TWh/a" + ) logger.debug(df.loc[to_drop]) df = df.drop(to_drop) logger.info(f"Total energy of {round(df.sum()[0])} TWh/a") - new_index = preferred_order.intersection(df.index).append(df.index.difference(preferred_order)) + new_index = preferred_order.intersection(df.index).append( + df.index.difference(preferred_order) + ) new_columns = df.columns.sort_values() - fig, ax = plt.subplots(figsize=(12,8)) + fig, ax = plt.subplots(figsize=(12, 8)) logger.debug(df.loc[new_index, new_columns]) @@ -210,15 +220,20 @@ def plot_energy(): kind="bar", ax=ax, stacked=True, - color=[snakemake.config['plotting']['tech_colors'][i] for i in new_index] + color=[snakemake.config["plotting"]["tech_colors"][i] for i in new_index], ) - handles,labels = ax.get_legend_handles_labels() + handles, labels = ax.get_legend_handles_labels() handles.reverse() labels.reverse() - ax.set_ylim([snakemake.config['plotting']['energy_min'], snakemake.config['plotting']['energy_max']]) + ax.set_ylim( + [ + snakemake.config["plotting"]["energy_min"], + snakemake.config["plotting"]["energy_max"], + ] + ) ax.set_ylabel("Energy [TWh/a]") @@ -226,48 +241,56 @@ def plot_energy(): ax.grid(axis="x") - ax.legend(handles, labels, ncol=1, loc="upper left", bbox_to_anchor=[1, 1], frameon=False) - - fig.savefig(snakemake.output.energy, bbox_inches='tight') + ax.legend( + handles, labels, ncol=1, loc="upper left", bbox_to_anchor=[1, 1], frameon=False + ) + fig.savefig(snakemake.output.energy, bbox_inches="tight") def plot_balances(): - co2_carriers = ["co2", "co2 stored", "process emissions"] balances_df = pd.read_csv( - snakemake.input.balances, - index_col=list(range(3)), - header=list(range(n_header)) + snakemake.input.balances, index_col=list(range(3)), header=list(range(n_header)) ) - balances = {i.replace(" ","_"): [i] for i in balances_df.index.levels[0]} - balances["energy"] = [i for i in balances_df.index.levels[0] if i not in co2_carriers] - - fig, ax = plt.subplots(figsize=(12,8)) + balances = {i.replace(" ", "_"): [i] for i in balances_df.index.levels[0]} + balances["energy"] = [ + i for i in balances_df.index.levels[0] if i not in co2_carriers + ] + + fig, ax = plt.subplots(figsize=(12, 8)) for k, v in balances.items(): - df = balances_df.loc[v] df = df.groupby(df.index.get_level_values(2)).sum() - #convert MWh to TWh + # convert MWh to TWh df = df / 1e6 - #remove trailing link ports - df.index = [i[:-1] if ((i not in ["co2", "NH3"]) and (i[-1:] in ["0","1","2","3"])) else i for i in df.index] + # remove trailing link ports + df.index = [ + i[:-1] + if ((i not in ["co2", "NH3"]) and (i[-1:] in ["0", "1", "2", "3"])) + else i + for i in df.index + ] df = df.groupby(df.index.map(rename_techs)).sum() - to_drop = df.index[df.abs().max(axis=1) < snakemake.config['plotting']['energy_threshold']/10] + to_drop = df.index[ + df.abs().max(axis=1) < snakemake.config["plotting"]["energy_threshold"] / 10 + ] if v[0] in co2_carriers: units = "MtCO2/a" else: units = "TWh/a" - logger.debug(f"Dropping technology energy balance smaller than {snakemake.config['plotting']['energy_threshold']/10} {units}") + logger.debug( + f"Dropping technology energy balance smaller than {snakemake.config['plotting']['energy_threshold']/10} {units}" + ) logger.debug(df.loc[to_drop]) df = df.drop(to_drop) @@ -277,14 +300,20 @@ def plot_balances(): if df.empty: continue - 
new_index = preferred_order.intersection(df.index).append(df.index.difference(preferred_order)) + new_index = preferred_order.intersection(df.index).append( + df.index.difference(preferred_order) + ) new_columns = df.columns.sort_values() - df.loc[new_index,new_columns].T.plot(kind="bar",ax=ax,stacked=True,color=[snakemake.config['plotting']['tech_colors'][i] for i in new_index]) + df.loc[new_index, new_columns].T.plot( + kind="bar", + ax=ax, + stacked=True, + color=[snakemake.config["plotting"]["tech_colors"][i] for i in new_index], + ) - - handles,labels = ax.get_legend_handles_labels() + handles, labels = ax.get_legend_handles_labels() handles.reverse() labels.reverse() @@ -298,153 +327,225 @@ def plot_balances(): ax.grid(axis="x") - ax.legend(handles, labels, ncol=1, loc="upper left", bbox_to_anchor=[1, 1], frameon=False) + ax.legend( + handles, + labels, + ncol=1, + loc="upper left", + bbox_to_anchor=[1, 1], + frameon=False, + ) + fig.savefig(snakemake.output.balances[:-10] + k + ".pdf", bbox_inches="tight") - fig.savefig(snakemake.output.balances[:-10] + k + ".pdf", bbox_inches='tight') - plt.cla() def historical_emissions(cts): """ - read historical emissions to add them to the carbon budget plot + Read historical emissions to add them to the carbon budget plot. """ - #https://www.eea.europa.eu/data-and-maps/data/national-emissions-reported-to-the-unfccc-and-to-the-eu-greenhouse-gas-monitoring-mechanism-16 - #downloaded 201228 (modified by EEA last on 201221) + # https://www.eea.europa.eu/data-and-maps/data/national-emissions-reported-to-the-unfccc-and-to-the-eu-greenhouse-gas-monitoring-mechanism-16 + # downloaded 201228 (modified by EEA last on 201221) fn = "data/eea/UNFCCC_v23.csv" df = pd.read_csv(fn, encoding="latin-1") - df.loc[df["Year"] == "1985-1987","Year"] = 1986 + df.loc[df["Year"] == "1985-1987", "Year"] = 1986 df["Year"] = df["Year"].astype(int) - df = df.set_index(['Year', 'Sector_name', 'Country_code', 'Pollutant_name']).sort_index() + df = df.set_index( + ["Year", "Sector_name", "Country_code", "Pollutant_name"] + ).sort_index() e = pd.Series() - e["electricity"] = '1.A.1.a - Public Electricity and Heat Production' - e['residential non-elec'] = '1.A.4.b - Residential' - e['services non-elec'] = '1.A.4.a - Commercial/Institutional' - e['rail non-elec'] = "1.A.3.c - Railways" - e["road non-elec"] = '1.A.3.b - Road Transportation' + e["electricity"] = "1.A.1.a - Public Electricity and Heat Production" + e["residential non-elec"] = "1.A.4.b - Residential" + e["services non-elec"] = "1.A.4.a - Commercial/Institutional" + e["rail non-elec"] = "1.A.3.c - Railways" + e["road non-elec"] = "1.A.3.b - Road Transportation" e["domestic navigation"] = "1.A.3.d - Domestic Navigation" - e['international navigation'] = '1.D.1.b - International Navigation' - e["domestic aviation"] = '1.A.3.a - Domestic Aviation' - e["international aviation"] = '1.D.1.a - International Aviation' - e['total energy'] = '1 - Energy' - e['industrial processes'] = '2 - Industrial Processes and Product Use' - e['agriculture'] = '3 - Agriculture' - e['LULUCF'] = '4 - Land Use, Land-Use Change and Forestry' - e['waste management'] = '5 - Waste management' - e['other'] = '6 - Other Sector' - e['indirect'] = 'ind_CO2 - Indirect CO2' + e["international navigation"] = "1.D.1.b - International Navigation" + e["domestic aviation"] = "1.A.3.a - Domestic Aviation" + e["international aviation"] = "1.D.1.a - International Aviation" + e["total energy"] = "1 - Energy" + e["industrial processes"] = "2 - Industrial 
Processes and Product Use" + e["agriculture"] = "3 - Agriculture" + e["LULUCF"] = "4 - Land Use, Land-Use Change and Forestry" + e["waste management"] = "5 - Waste management" + e["other"] = "6 - Other Sector" + e["indirect"] = "ind_CO2 - Indirect CO2" e["total wL"] = "Total (with LULUCF)" e["total woL"] = "Total (without LULUCF)" - pol = ["CO2"] # ["All greenhouse gases - (CO2 equivalent)"] + pol = ["CO2"] # ["All greenhouse gases - (CO2 equivalent)"] cts if "GB" in cts: cts.remove("GB") cts.append("UK") - year = np.arange(1990,2018).tolist() + year = np.arange(1990, 2018).tolist() idx = pd.IndexSlice - co2_totals = df.loc[idx[year,e.values,cts,pol],"emissions"].unstack("Year").rename(index=pd.Series(e.index,e.values)) + co2_totals = ( + df.loc[idx[year, e.values, cts, pol], "emissions"] + .unstack("Year") + .rename(index=pd.Series(e.index, e.values)) + ) - co2_totals = (1/1e6)*co2_totals.groupby(level=0, axis=0).sum() #Gton CO2 + co2_totals = (1 / 1e6) * co2_totals.groupby(level=0, axis=0).sum() # Gton CO2 - co2_totals.loc['industrial non-elec'] = co2_totals.loc['total energy'] - co2_totals.loc[['electricity', 'services non-elec','residential non-elec', 'road non-elec', - 'rail non-elec', 'domestic aviation', 'international aviation', 'domestic navigation', - 'international navigation']].sum() + co2_totals.loc["industrial non-elec"] = ( + co2_totals.loc["total energy"] + - co2_totals.loc[ + [ + "electricity", + "services non-elec", + "residential non-elec", + "road non-elec", + "rail non-elec", + "domestic aviation", + "international aviation", + "domestic navigation", + "international navigation", + ] + ].sum() + ) emissions = co2_totals.loc["electricity"] if "T" in opts: - emissions += co2_totals.loc[[i+ " non-elec" for i in ["rail","road"]]].sum() + emissions += co2_totals.loc[[i + " non-elec" for i in ["rail", "road"]]].sum() if "H" in opts: - emissions += co2_totals.loc[[i+ " non-elec" for i in ["residential","services"]]].sum() + emissions += co2_totals.loc[ + [i + " non-elec" for i in ["residential", "services"]] + ].sum() if "I" in opts: - emissions += co2_totals.loc[["industrial non-elec","industrial processes", - "domestic aviation","international aviation", - "domestic navigation","international navigation"]].sum() + emissions += co2_totals.loc[ + [ + "industrial non-elec", + "industrial processes", + "domestic aviation", + "international aviation", + "domestic navigation", + "international navigation", + ] + ].sum() return emissions - def plot_carbon_budget_distribution(input_eurostat): """ - Plot historical carbon emissions in the EU and decarbonization path + Plot historical carbon emissions in the EU and decarbonization path. 
""" import matplotlib.gridspec as gridspec - import seaborn as sns; sns.set() - sns.set_style('ticks') - plt.style.use('seaborn-ticks') - plt.rcParams['xtick.direction'] = 'in' - plt.rcParams['ytick.direction'] = 'in' - plt.rcParams['xtick.labelsize'] = 20 - plt.rcParams['ytick.labelsize'] = 20 + + import seaborn as sns + + sns.set() + sns.set_style("ticks") + plt.style.use("seaborn-ticks") + plt.rcParams["xtick.direction"] = "in" + plt.rcParams["ytick.direction"] = "in" + plt.rcParams["xtick.labelsize"] = 20 + plt.rcParams["ytick.labelsize"] = 20 plt.figure(figsize=(10, 7)) gs1 = gridspec.GridSpec(1, 1) - ax1 = plt.subplot(gs1[0,0]) - ax1.set_ylabel('CO$_2$ emissions (Gt per year)',fontsize=22) - ax1.set_ylim([0,5]) - ax1.set_xlim([1990,snakemake.config['scenario']['planning_horizons'][-1]+1]) + ax1 = plt.subplot(gs1[0, 0]) + ax1.set_ylabel("CO$_2$ emissions (Gt per year)", fontsize=22) + ax1.set_ylim([0, 5]) + ax1.set_xlim([1990, snakemake.config["scenario"]["planning_horizons"][-1] + 1]) - path_cb = snakemake.config['results_dir'] + snakemake.config['run'] + '/csvs/' + path_cb = snakemake.config["results_dir"] + snakemake.config["run"] + "/csvs/" countries = pd.read_csv(snakemake.input.country_codes, index_col=1) cts = countries.index.to_list() e_1990 = co2_emissions_year(cts, input_eurostat, opts, year=1990) - CO2_CAP=pd.read_csv(path_cb + 'carbon_budget_distribution.csv', - index_col=0) + CO2_CAP = pd.read_csv(path_cb + "carbon_budget_distribution.csv", index_col=0) - - ax1.plot(e_1990*CO2_CAP[o],linewidth=3, - color='dodgerblue', label=None) + ax1.plot(e_1990 * CO2_CAP[o], linewidth=3, color="dodgerblue", label=None) emissions = historical_emissions(cts) - ax1.plot(emissions, color='black', linewidth=3, label=None) + ax1.plot(emissions, color="black", linewidth=3, label=None) - #plot committed and uder-discussion targets - #(notice that historical emissions include all countries in the + # plot committed and uder-discussion targets + # (notice that historical emissions include all countries in the # network, but targets refer to EU) - ax1.plot([2020],[0.8*emissions[1990]], - marker='*', markersize=12, markerfacecolor='black', - markeredgecolor='black') + ax1.plot( + [2020], + [0.8 * emissions[1990]], + marker="*", + markersize=12, + markerfacecolor="black", + markeredgecolor="black", + ) - ax1.plot([2030],[0.45*emissions[1990]], - marker='*', markersize=12, markerfacecolor='white', - markeredgecolor='black') + ax1.plot( + [2030], + [0.45 * emissions[1990]], + marker="*", + markersize=12, + markerfacecolor="white", + markeredgecolor="black", + ) - ax1.plot([2030],[0.6*emissions[1990]], - marker='*', markersize=12, markerfacecolor='black', - markeredgecolor='black') + ax1.plot( + [2030], + [0.6 * emissions[1990]], + marker="*", + markersize=12, + markerfacecolor="black", + markeredgecolor="black", + ) - ax1.plot([2050, 2050],[x*emissions[1990] for x in [0.2, 0.05]], - color='gray', linewidth=2, marker='_', alpha=0.5) + ax1.plot( + [2050, 2050], + [x * emissions[1990] for x in [0.2, 0.05]], + color="gray", + linewidth=2, + marker="_", + alpha=0.5, + ) - ax1.plot([2050],[0.01*emissions[1990]], - marker='*', markersize=12, markerfacecolor='white', - linewidth=0, markeredgecolor='black', - label='EU under-discussion target', zorder=10, - clip_on=False) + ax1.plot( + [2050], + [0.01 * emissions[1990]], + marker="*", + markersize=12, + markerfacecolor="white", + linewidth=0, + markeredgecolor="black", + label="EU under-discussion target", + zorder=10, + clip_on=False, + ) - 
ax1.plot([2050],[0.125*emissions[1990]],'ro', - marker='*', markersize=12, markerfacecolor='black', - markeredgecolor='black', label='EU committed target') + ax1.plot( + [2050], + [0.125 * emissions[1990]], + "ro", + marker="*", + markersize=12, + markerfacecolor="black", + markeredgecolor="black", + label="EU committed target", + ) - ax1.legend(fancybox=True, fontsize=18, loc=(0.01,0.01), - facecolor='white', frameon=True) + ax1.legend( + fancybox=True, fontsize=18, loc=(0.01, 0.01), facecolor="white", frameon=True + ) - path_cb_plot = snakemake.config['results_dir'] + snakemake.config['run'] + '/graphs/' - plt.savefig(path_cb_plot+'carbon_budget_plot.pdf', dpi=300) + path_cb_plot = ( + snakemake.config["results_dir"] + snakemake.config["run"] + "/graphs/" + ) + plt.savefig(path_cb_plot + "carbon_budget_plot.pdf", dpi=300) if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from helper import mock_snakemake - snakemake = mock_snakemake('plot_summary') - logging.basicConfig(level=snakemake.config['logging_level']) + snakemake = mock_snakemake("plot_summary") + + logging.basicConfig(level=snakemake.config["logging_level"]) n_header = 4 @@ -454,8 +555,8 @@ if __name__ == "__main__": plot_balances() - for sector_opts in snakemake.config['scenario']['sector_opts']: - opts=sector_opts.split('-') + for sector_opts in snakemake.config["scenario"]["sector_opts"]: + opts = sector_opts.split("-") for o in opts: if "cb" in o: plot_carbon_budget_distribution(snakemake.input.eurostat) diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py index 6426031b..062d598a 100644 --- a/scripts/prepare_sector_network.py +++ b/scripts/prepare_sector_network.py @@ -1,39 +1,43 @@ -# coding: utf-8 +# -*- coding: utf-8 -*- -import pypsa -import re +import logging import os - -import pandas as pd -import numpy as np -import xarray as xr -import networkx as nx - +import re from itertools import product + +import networkx as nx +import numpy as np +import pandas as pd +import pypsa +import xarray as xr +from build_energy_totals import build_co2_totals, build_eea_co2, build_eurostat_co2 +from helper import ( + generate_periodic_profiles, + override_component_attrs, + update_config_with_sector_opts, +) +from networkx.algorithms import complement +from networkx.algorithms.connectivity.edge_augmentation import k_edge_augmentation +from pypsa.geo import haversine_pts +from pypsa.io import import_components_from_dataframe from scipy.stats import beta from vresutils.costdata import annuity -from build_energy_totals import build_eea_co2, build_eurostat_co2, build_co2_totals -from helper import override_component_attrs, generate_periodic_profiles, update_config_with_sector_opts - -from networkx.algorithms.connectivity.edge_augmentation import k_edge_augmentation -from networkx.algorithms import complement -from pypsa.geo import haversine_pts -from pypsa.io import import_components_from_dataframe - -import logging logger = logging.getLogger(__name__) from types import SimpleNamespace + spatial = SimpleNamespace() from packaging.version import Version, parse + pd_version = parse(pd.__version__) agg_group_kwargs = dict(numeric_only=False) if pd_version >= Version("1.3") else {} + def define_spatial(nodes, options): """ - Namespace for spatial + Namespace for spatial. 
Parameters ---------- @@ -104,7 +108,7 @@ def define_spatial(nodes, options): # ammonia - if options.get('ammonia'): + if options.get("ammonia"): spatial.ammonia = SimpleNamespace() if options.get("ammonia") == "regional": spatial.ammonia.nodes = nodes + " NH3" @@ -149,22 +153,16 @@ def define_spatial(nodes, options): from types import SimpleNamespace + spatial = SimpleNamespace() def emission_sectors_from_opts(opts): - sectors = ["electricity"] if "T" in opts: - sectors += [ - "rail non-elec", - "road non-elec" - ] + sectors += ["rail non-elec", "road non-elec"] if "H" in opts: - sectors += [ - "residential non-elec", - "services non-elec" - ] + sectors += ["residential non-elec", "services non-elec"] if "I" in opts: sectors += [ "industrial non-elec", @@ -172,25 +170,27 @@ def emission_sectors_from_opts(opts): "domestic aviation", "international aviation", "domestic navigation", - "international navigation" + "international navigation", ] if "A" in opts: - sectors += [ - "agriculture" - ] + sectors += ["agriculture"] return sectors def get(item, investment_year=None): - """Check whether item depends on investment year""" + """ + Check whether item depends on investment year. + """ if isinstance(item, dict): return item[investment_year] else: return item -def co2_emissions_year(countries, input_eurostat, opts, emissions_scope, report_year, year): +def co2_emissions_year( + countries, input_eurostat, opts, emissions_scope, report_year, year +): """ Calculate CO2 emissions in one specific year (e.g. 1990 or 2018). """ @@ -201,7 +201,9 @@ def co2_emissions_year(countries, input_eurostat, opts, emissions_scope, report_ # this only affects the estimation of CO2 emissions for BA, RS, AL, ME, MK report_year = snakemake.config["energy"]["eurostat_report_year"] if year > 2014: - eurostat_co2 = build_eurostat_co2(input_eurostat, countries, report_year, year=2014) + eurostat_co2 = build_eurostat_co2( + input_eurostat, countries, report_year, year=2014 + ) else: eurostat_co2 = build_eurostat_co2(input_eurostat, countries, report_year, year) @@ -225,28 +227,29 @@ def build_carbon_budget(o, input_eurostat, fn, emissions_scope, report_year): # opts? 
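# --- editor's note (illustration, not part of the patch) ---------------------
# The branches below parse a sector_opts token of the form "cb<budget>be<beta>"
# or "cb<budget>ex<rate>" and turn it into an emissions path relative to 1990.
# A self-contained sketch of the exponential branch with made-up numbers
# (e_0, e_1990 and the token "cb30ex0" are hypothetical):
import math

o = "cb30ex0"
carbon_budget = float(o[o.find("cb") + 2 : o.find("ex")])  # 30.0 (GtCO2)
r = float(o[o.find("ex") + 2 :])  # 0.0

e_0, e_1990, t_0 = 3.0, 4.0, 2020
T = carbon_budget / e_0
m = (1 + math.sqrt(1 + r * T)) / T

def exponential_decay(t):
    # emissions in year t relative to 1990
    return (e_0 / e_1990) * (1 + (m + r) * (t - t_0)) * math.exp(-m * (t - t_0))

print({t: round(exponential_decay(t), 3) for t in (2020, 2030, 2050)})
# {2020: 0.75, 2030: 0.305, 2050: 0.013}
# --- end editor's note --------------------------------------------------------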
if "be" in o: - #beta decay - carbon_budget = float(o[o.find("cb")+2:o.find("be")]) - be = float(o[o.find("be")+2:]) + # beta decay + carbon_budget = float(o[o.find("cb") + 2 : o.find("be")]) + be = float(o[o.find("be") + 2 :]) if "ex" in o: - #exponential decay - carbon_budget = float(o[o.find("cb")+2:o.find("ex")]) - r = float(o[o.find("ex")+2:]) + # exponential decay + carbon_budget = float(o[o.find("cb") + 2 : o.find("ex")]) + r = float(o[o.find("ex") + 2 :]) countries = n.buses.country.dropna().unique() - e_1990 = co2_emissions_year(countries, input_eurostat, opts, emissions_scope, - report_year, year=1990) + e_1990 = co2_emissions_year( + countries, input_eurostat, opts, emissions_scope, report_year, year=1990 + ) - #emissions at the beginning of the path (last year available 2018) - e_0 = co2_emissions_year(countries, input_eurostat, opts, emissions_scope, - report_year,year=2018) + # emissions at the beginning of the path (last year available 2018) + e_0 = co2_emissions_year( + countries, input_eurostat, opts, emissions_scope, report_year, year=2018 + ) - planning_horizons = snakemake.config['scenario']['planning_horizons'] + planning_horizons = snakemake.config["scenario"]["planning_horizons"] t_0 = planning_horizons[0] if "be" in o: - # final year in the path t_f = t_0 + (2 * carbon_budget / e_0).round(0) @@ -254,42 +257,48 @@ def build_carbon_budget(o, input_eurostat, fn, emissions_scope, report_year): cdf_term = (t - t_0) / (t_f - t_0) return (e_0 / e_1990) * (1 - beta.cdf(cdf_term, be, be)) - #emissions (relative to 1990) + # emissions (relative to 1990) co2_cap = pd.Series({t: beta_decay(t) for t in planning_horizons}, name=o) if "ex" in o: - T = carbon_budget / e_0 m = (1 + np.sqrt(1 + r * T)) / T def exponential_decay(t): return (e_0 / e_1990) * (1 + (m + r) * (t - t_0)) * np.exp(-m * (t - t_0)) - co2_cap = pd.Series({t: exponential_decay(t) for t in planning_horizons}, name=o) + co2_cap = pd.Series( + {t: exponential_decay(t) for t in planning_horizons}, name=o + ) # TODO log in Snakefile csvs_folder = fn.rsplit("/", 1)[0] if not os.path.exists(csvs_folder): os.makedirs(csvs_folder) - co2_cap.to_csv(fn, float_format='%.3f') + co2_cap.to_csv(fn, float_format="%.3f") def add_lifetime_wind_solar(n, costs): - """Add lifetime for solar and wind generators.""" - for carrier in ['solar', 'onwind', 'offwind']: + """ + Add lifetime for solar and wind generators. + """ + for carrier in ["solar", "onwind", "offwind"]: gen_i = n.generators.index.str.contains(carrier) - n.generators.loc[gen_i, "lifetime"] = costs.at[carrier, 'lifetime'] + n.generators.loc[gen_i, "lifetime"] = costs.at[carrier, "lifetime"] def haversine(p): - coord0 = n.buses.loc[p.bus0, ['x', 'y']].values - coord1 = n.buses.loc[p.bus1, ['x', 'y']].values + coord0 = n.buses.loc[p.bus0, ["x", "y"]].values + coord1 = n.buses.loc[p.bus1, ["x", "y"]].values return 1.5 * haversine_pts(coord0, coord1) -def create_network_topology(n, prefix, carriers=["DC"], connector=" -> ", bidirectional=True): +def create_network_topology( + n, prefix, carriers=["DC"], connector=" -> ", bidirectional=True +): """ - Create a network topology from transmission lines and link carrier selection. + Create a network topology from transmission lines and link carrier + selection. 
Parameters
     ----------
@@ -310,10 +319,9 @@ def create_network_topology(n, prefix, carriers=["DC"], connector=" -> ", bidire
     lk_attrs = ["bus0", "bus1", "length", "underwater_fraction"]
     lk_attrs = n.links.columns.intersection(lk_attrs)

-    candidates = pd.concat([
-        n.lines[ln_attrs],
-        n.links.loc[n.links.carrier.isin(carriers), lk_attrs]
-    ]).fillna(0)
+    candidates = pd.concat(
+        [n.lines[ln_attrs], n.links.loc[n.links.carrier.isin(carriers), lk_attrs]]
+    ).fillna(0)

     # base network topology purely on location not carrier
     candidates["bus0"] = candidates.bus0.map(n.buses.location)
@@ -344,68 +352,85 @@ def create_network_topology(n, prefix, carriers=["DC"], connector=" -> ", bidire
 def update_wind_solar_costs(n, costs):
     """
     Update costs for wind and solar generators added with pypsa-eur to those
-    cost in the planning year
+    cost in the planning year.
     """

-    #NB: solar costs are also manipulated for rooftop
-    #when distribution grid is inserted
-    n.generators.loc[n.generators.carrier=='solar', 'capital_cost'] = costs.at['solar-utility', 'fixed']
+    # NB: solar costs are also manipulated for rooftop
+    # when distribution grid is inserted
+    n.generators.loc[n.generators.carrier == "solar", "capital_cost"] = costs.at[
+        "solar-utility", "fixed"
+    ]

-    n.generators.loc[n.generators.carrier=='onwind', 'capital_cost'] = costs.at['onwind', 'fixed']
+    n.generators.loc[n.generators.carrier == "onwind", "capital_cost"] = costs.at[
+        "onwind", "fixed"
+    ]

-    #for offshore wind, need to calculated connection costs
+    # for offshore wind, need to calculate connection costs

-    #assign clustered bus
-    #map initial network -> simplified network
+    # assign clustered bus
+    # map initial network -> simplified network
     busmap_s = pd.read_csv(snakemake.input.busmap_s, index_col=0).squeeze()
     busmap_s.index = busmap_s.index.astype(str)
     busmap_s = busmap_s.astype(str)

-    #map simplified network -> clustered network
+    # map simplified network -> clustered network
     busmap = pd.read_csv(snakemake.input.busmap, index_col=0).squeeze()
     busmap.index = busmap.index.astype(str)
     busmap = busmap.astype(str)

-    #map initial network -> clustered network
+    # map initial network -> clustered network
     clustermaps = busmap_s.map(busmap)

-    #code adapted from pypsa-eur/scripts/add_electricity.py
-    for connection in ['dc', 'ac']:
+    # code adapted from pypsa-eur/scripts/add_electricity.py
+    for connection in ["dc", "ac"]:
         tech = "offwind-" + connection
-        profile = snakemake.input['profile_offwind_' + connection]
+        profile = snakemake.input["profile_offwind_" + connection]

         with xr.open_dataset(profile) as ds:
-            underwater_fraction = ds['underwater_fraction'].to_pandas()
-            connection_cost = (snakemake.config['costs']['lines']['length_factor'] *
-                            ds['average_distance'].to_pandas() *
-                            (underwater_fraction *
-                            costs.at[tech + '-connection-submarine', 'fixed'] +
-                            (1. - underwater_fraction) *
-                            costs.at[tech + '-connection-underground', 'fixed']))
+            underwater_fraction = ds["underwater_fraction"].to_pandas()
+            connection_cost = (
+                snakemake.config["costs"]["lines"]["length_factor"]
+                * ds["average_distance"].to_pandas()
+                * (
+                    underwater_fraction
+                    * costs.at[tech + "-connection-submarine", "fixed"]
+                    + (1.0 - underwater_fraction)
+                    * costs.at[tech + "-connection-underground", "fixed"]
+                )
+            )

-            #convert to aggregated clusters with weighting
-            weight = ds['weight'].to_pandas()
+            # convert to aggregated clusters with weighting
+            weight = ds["weight"].to_pandas()

-            #e.g. 
clusters == 37m means that VRE generators are left - #at clustering of simplified network, but that they are - #connected to 37-node network + # e.g. clusters == 37m means that VRE generators are left + # at clustering of simplified network, but that they are + # connected to 37-node network if snakemake.wildcards.clusters[-1:] == "m": genmap = busmap_s else: genmap = clustermaps - connection_cost = (connection_cost*weight).groupby(genmap).sum()/weight.groupby(genmap).sum() + connection_cost = (connection_cost * weight).groupby( + genmap + ).sum() / weight.groupby(genmap).sum() - capital_cost = (costs.at['offwind', 'fixed'] + - costs.at[tech + '-station', 'fixed'] + - connection_cost) + capital_cost = ( + costs.at["offwind", "fixed"] + + costs.at[tech + "-station", "fixed"] + + connection_cost + ) - logger.info("Added connection cost of {:0.0f}-{:0.0f} Eur/MW/a to {}" - .format(connection_cost[0].min(), connection_cost[0].max(), tech)) + logger.info( + "Added connection cost of {:0.0f}-{:0.0f} Eur/MW/a to {}".format( + connection_cost[0].min(), connection_cost[0].max(), tech + ) + ) - n.generators.loc[n.generators.carrier==tech, 'capital_cost'] = capital_cost.rename(index=lambda node: node + ' ' + tech) + n.generators.loc[ + n.generators.carrier == tech, "capital_cost" + ] = capital_cost.rename(index=lambda node: node + " " + tech) def add_carrier_buses(n, carrier, nodes=None): """ - Add buses to connect e.g. coal, nuclear and oil plants + Add buses to connect e.g. coal, nuclear and oil plants. """ if nodes is None: @@ -423,36 +448,36 @@ def add_carrier_buses(n, carrier, nodes=None): unit = "MWh_LHV" if carrier == "gas" else "MWh_th" - n.madd("Bus", - nodes, - location=location, - carrier=carrier, - unit=unit - ) + n.madd("Bus", nodes, location=location, carrier=carrier, unit=unit) - #capital cost could be corrected to e.g. 0.2 EUR/kWh * annuity and O&M - n.madd("Store", + # capital cost could be corrected to e.g. 0.2 EUR/kWh * annuity and O&M + n.madd( + "Store", nodes + " Store", bus=nodes, e_nom_extendable=True, e_cyclic=True, carrier=carrier, - capital_cost=0.2 * costs.at[carrier, "discount rate"] # preliminary value to avoid zeros + capital_cost=0.2 + * costs.at[carrier, "discount rate"], # preliminary value to avoid zeros ) - n.madd("Generator", + n.madd( + "Generator", nodes, bus=nodes, p_nom_extendable=True, carrier=carrier, - marginal_cost=costs.at[carrier, 'fuel'] + marginal_cost=costs.at[carrier, "fuel"], ) # TODO: PyPSA-Eur merge issue def remove_elec_base_techs(n): - """remove conventional generators (e.g. OCGT) and storage units (e.g. batteries and H2) - from base electricity-only network, since they're added here differently using links + """ + Remove conventional generators (e.g. OCGT) and storage units (e.g. + batteries and H2) from base electricity-only network, since they're added + here differently using links. """ for c in n.iterate_components(snakemake.config["pypsa_eur"]): @@ -469,7 +494,7 @@ def remove_elec_base_techs(n): # TODO: PyPSA-Eur merge issue def remove_non_electric_buses(n): """ - remove buses from pypsa-eur with carriers which are not AC buses + Remove buses from pypsa-eur with carriers which are not AC buses. """ to_drop = list(n.buses.query("carrier not in ['AC', 'DC']").carrier.unique()) if to_drop: @@ -490,79 +515,93 @@ def patch_electricity_network(n): def add_co2_tracking(n, options): - # minus sign because opposite to how fossil fuels used: # CH4 burning puts CH4 down, atmosphere up - n.add("Carrier", "co2", - co2_emissions=-1.) 
+ n.add("Carrier", "co2", co2_emissions=-1.0) # this tracks CO2 in the atmosphere - n.add("Bus", - "co2 atmosphere", - location="EU", - carrier="co2", - unit="t_co2" - ) + n.add("Bus", "co2 atmosphere", location="EU", carrier="co2", unit="t_co2") # can also be negative - n.add("Store", + n.add( + "Store", "co2 atmosphere", e_nom_extendable=True, e_min_pu=-1, carrier="co2", - bus="co2 atmosphere" + bus="co2 atmosphere", ) # this tracks CO2 stored, e.g. underground - n.madd("Bus", + n.madd( + "Bus", spatial.co2.nodes, location=spatial.co2.locations, carrier="co2 stored", - unit="t_co2" + unit="t_co2", ) if options["regional_co2_sequestration_potential"]["enable"]: - upper_limit = options["regional_co2_sequestration_potential"]["max_size"] * 1e3 # Mt + upper_limit = ( + options["regional_co2_sequestration_potential"]["max_size"] * 1e3 + ) # Mt annualiser = options["regional_co2_sequestration_potential"]["years_of_storage"] - e_nom_max = pd.read_csv(snakemake.input.sequestration_potential, index_col=0).squeeze() - e_nom_max = e_nom_max.reindex(spatial.co2.locations).fillna(0.).clip(upper=upper_limit).mul(1e6) / annualiser # t + e_nom_max = pd.read_csv( + snakemake.input.sequestration_potential, index_col=0 + ).squeeze() + e_nom_max = ( + e_nom_max.reindex(spatial.co2.locations) + .fillna(0.0) + .clip(upper=upper_limit) + .mul(1e6) + / annualiser + ) # t e_nom_max = e_nom_max.rename(index=lambda x: x + " co2 stored") else: e_nom_max = np.inf - n.madd("Store", + n.madd( + "Store", spatial.co2.nodes, e_nom_extendable=True, e_nom_max=e_nom_max, - capital_cost=options['co2_sequestration_cost'], + capital_cost=options["co2_sequestration_cost"], carrier="co2 stored", - bus=spatial.co2.nodes + bus=spatial.co2.nodes, ) n.add("Carrier", "co2 stored") - if options['co2_vent']: - - n.madd("Link", + if options["co2_vent"]: + n.madd( + "Link", spatial.co2.vents, bus0=spatial.co2.nodes, bus1="co2 atmosphere", carrier="co2 vent", - efficiency=1., - p_nom_extendable=True + efficiency=1.0, + p_nom_extendable=True, ) def add_co2_network(n, costs): - logger.info("Adding CO2 network.") co2_links = create_network_topology(n, "CO2 pipeline ") - cost_onshore = (1 - co2_links.underwater_fraction) * costs.at['CO2 pipeline', 'fixed'] * co2_links.length - cost_submarine = co2_links.underwater_fraction * costs.at['CO2 submarine pipeline', 'fixed'] * co2_links.length + cost_onshore = ( + (1 - co2_links.underwater_fraction) + * costs.at["CO2 pipeline", "fixed"] + * co2_links.length + ) + cost_submarine = ( + co2_links.underwater_fraction + * costs.at["CO2 submarine pipeline", "fixed"] + * co2_links.length + ) capital_cost = cost_onshore + cost_submarine - n.madd("Link", + n.madd( + "Link", co2_links.index, bus0=co2_links.bus0.values + " co2 stored", bus1=co2_links.bus1.values + " co2 stored", @@ -571,60 +610,65 @@ def add_co2_network(n, costs): length=co2_links.length.values, capital_cost=capital_cost.values, carrier="CO2 pipeline", - lifetime=costs.at['CO2 pipeline', 'lifetime'] + lifetime=costs.at["CO2 pipeline", "lifetime"], ) -def add_allam(n, costs): - - logger.info("Adding Allam cycle gas power plants.") - - nodes = pop_layout.index - - n.madd("Link", +def add_allam(n, costs): + logger.info("Adding Allam cycle gas power plants.") + + nodes = pop_layout.index + + n.madd( + "Link", nodes, - suffix=" allam", - bus0=spatial.gas.df.loc[nodes, "nodes"].values, - bus1=nodes, - bus2=spatial.co2.df.loc[nodes, "nodes"].values, - carrier="allam", - p_nom_extendable=True, + suffix=" allam", + bus0=spatial.gas.df.loc[nodes, 
"nodes"].values, + bus1=nodes, + bus2=spatial.co2.df.loc[nodes, "nodes"].values, + carrier="allam", + p_nom_extendable=True, # TODO: add costs to technology-data - capital_cost=0.6*1.5e6*0.1, # efficiency * EUR/MW * annuity - marginal_cost=2, - efficiency=0.6, - efficiency2=costs.at['gas', 'CO2 intensity'], - lifetime=30., + capital_cost=0.6 * 1.5e6 * 0.1, # efficiency * EUR/MW * annuity + marginal_cost=2, + efficiency=0.6, + efficiency2=costs.at["gas", "CO2 intensity"], + lifetime=30.0, ) - + def add_dac(n, costs): - heat_carriers = ["urban central heat", "services urban decentral heat"] heat_buses = n.buses.index[n.buses.carrier.isin(heat_carriers)] locations = n.buses.location[heat_buses] - efficiency2 = -(costs.at['direct air capture', 'electricity-input'] + costs.at['direct air capture', 'compression-electricity-input']) - efficiency3 = -(costs.at['direct air capture', 'heat-input'] - costs.at['direct air capture', 'compression-heat-output']) + efficiency2 = -( + costs.at["direct air capture", "electricity-input"] + + costs.at["direct air capture", "compression-electricity-input"] + ) + efficiency3 = -( + costs.at["direct air capture", "heat-input"] + - costs.at["direct air capture", "compression-heat-output"] + ) - n.madd("Link", + n.madd( + "Link", heat_buses.str.replace(" heat", " DAC"), bus0="co2 atmosphere", bus1=spatial.co2.df.loc[locations, "nodes"].values, bus2=locations.values, bus3=heat_buses, carrier="DAC", - capital_cost=costs.at['direct air capture', 'fixed'], - efficiency=1., + capital_cost=costs.at["direct air capture", "fixed"], + efficiency=1.0, efficiency2=efficiency2, efficiency3=efficiency3, p_nom_extendable=True, - lifetime=costs.at['direct air capture', 'lifetime'] + lifetime=costs.at["direct air capture", "lifetime"], ) -def add_co2limit(n, Nyears=1., limit=0.): - +def add_co2limit(n, Nyears=1.0, limit=0.0): logger.info(f"Adding CO2 budget limit as per unit of 1990 levels of {limit}") countries = n.buses.country.dropna().unique() @@ -638,16 +682,18 @@ def add_co2limit(n, Nyears=1., limit=0.): co2_limit *= limit * Nyears - n.add("GlobalConstraint", + n.add( + "GlobalConstraint", "CO2Limit", carrier_attribute="co2_emissions", sense="<=", - constant=co2_limit + constant=co2_limit, ) + # TODO PyPSA-Eur merge issue def average_every_nhours(n, offset): - logger.info(f'Resampling the network to {offset}') + logger.info(f"Resampling the network to {offset}") m = n.copy(with_time=False) snapshot_weightings = n.snapshot_weightings.resample(offset).sum() @@ -655,7 +701,7 @@ def average_every_nhours(n, offset): m.snapshot_weightings = snapshot_weightings for c in n.iterate_components(): - pnl = getattr(m, c.list_name+"_t") + pnl = getattr(m, c.list_name + "_t") for k, df in c.pnl.items(): if not df.empty: if c.list_name == "stores" and k == "e_max_pu": @@ -669,7 +715,9 @@ def average_every_nhours(n, offset): def cycling_shift(df, steps=1): - """Cyclic shift on index of pd.Series|pd.DataFrame by number of steps""" + """ + Cyclic shift on index of pd.Series|pd.DataFrame by number of steps. 
+ """ df = df.copy() new_index = np.roll(df.index, steps) df.values[:] = df.reindex(index=new_index).values @@ -678,34 +726,41 @@ def cycling_shift(df, steps=1): # TODO checkout PyPSA-Eur script def prepare_costs(cost_file, USD_to_EUR, discount_rate, Nyears, lifetime): + # set all asset costs and other parameters + costs = pd.read_csv(cost_file, index_col=[0, 1]).sort_index() - #set all asset costs and other parameters - costs = pd.read_csv(cost_file, index_col=[0,1]).sort_index() - - #correct units to MW and EUR + # correct units to MW and EUR costs.loc[costs.unit.str.contains("/kW"), "value"] *= 1e3 costs.loc[costs.unit.str.contains("USD"), "value"] *= USD_to_EUR - #min_count=1 is important to generate NaNs which are then filled by fillna - costs = costs.loc[:, "value"].unstack(level=1).groupby("technology").sum(min_count=1) - costs = costs.fillna({"CO2 intensity" : 0, - "FOM" : 0, - "VOM" : 0, - "discount rate" : discount_rate, - "efficiency" : 1, - "fuel" : 0, - "investment" : 0, - "lifetime" : lifetime - }) + # min_count=1 is important to generate NaNs which are then filled by fillna + costs = ( + costs.loc[:, "value"].unstack(level=1).groupby("technology").sum(min_count=1) + ) + costs = costs.fillna( + { + "CO2 intensity": 0, + "FOM": 0, + "VOM": 0, + "discount rate": discount_rate, + "efficiency": 1, + "fuel": 0, + "investment": 0, + "lifetime": lifetime, + } + ) - annuity_factor = lambda v: annuity(v["lifetime"], v["discount rate"]) + v["FOM"] / 100 - costs["fixed"] = [annuity_factor(v) * v["investment"] * Nyears for i, v in costs.iterrows()] + annuity_factor = ( + lambda v: annuity(v["lifetime"], v["discount rate"]) + v["FOM"] / 100 + ) + costs["fixed"] = [ + annuity_factor(v) * v["investment"] * Nyears for i, v in costs.iterrows() + ] return costs def add_generation(n, costs): - logger.info("Adding electricity generation") nodes = pop_layout.index @@ -714,29 +769,29 @@ def add_generation(n, costs): conventionals = options.get("conventional_generation", fallback) for generator, carrier in conventionals.items(): - - carrier_nodes = vars(spatial)[carrier].nodes add_carrier_buses(n, carrier, carrier_nodes) - n.madd("Link", + n.madd( + "Link", nodes + " " + generator, bus0=carrier_nodes, bus1=nodes, bus2="co2 atmosphere", - marginal_cost=costs.at[generator, 'efficiency'] * costs.at[generator, 'VOM'], #NB: VOM is per MWel - capital_cost=costs.at[generator, 'efficiency'] * costs.at[generator, 'fixed'], #NB: fixed cost is per MWel + marginal_cost=costs.at[generator, "efficiency"] + * costs.at[generator, "VOM"], # NB: VOM is per MWel + capital_cost=costs.at[generator, "efficiency"] + * costs.at[generator, "fixed"], # NB: fixed cost is per MWel p_nom_extendable=True, carrier=generator, - efficiency=costs.at[generator, 'efficiency'], - efficiency2=costs.at[carrier, 'CO2 intensity'], - lifetime=costs.at[generator, 'lifetime'] + efficiency=costs.at[generator, "efficiency"], + efficiency2=costs.at[carrier, "CO2 intensity"], + lifetime=costs.at[generator, "lifetime"], ) def add_ammonia(n, costs): - logger.info("Adding ammonia carrier with synthesis, cracking and storage") nodes = pop_layout.index @@ -745,13 +800,12 @@ def add_ammonia(n, costs): n.add("Carrier", "NH3") - n.madd("Bus", - spatial.ammonia.nodes, - location=spatial.ammonia.locations, - carrier="NH3" + n.madd( + "Bus", spatial.ammonia.nodes, location=spatial.ammonia.locations, carrier="NH3" ) - n.madd("Link", + n.madd( + "Link", nodes, suffix=" Haber-Bosch", bus0=nodes, @@ -759,13 +813,19 @@ def add_ammonia(n, costs): bus2=nodes + " 
H2", p_nom_extendable=True, carrier="Haber-Bosch", - efficiency=1 / (cf_industry["MWh_elec_per_tNH3_electrolysis"] / cf_industry["MWh_NH3_per_tNH3"]), # output: MW_NH3 per MW_elec - efficiency2=-cf_industry["MWh_H2_per_tNH3_electrolysis"] / cf_industry["MWh_elec_per_tNH3_electrolysis"], # input: MW_H2 per MW_elec + efficiency=1 + / ( + cf_industry["MWh_elec_per_tNH3_electrolysis"] + / cf_industry["MWh_NH3_per_tNH3"] + ), # output: MW_NH3 per MW_elec + efficiency2=-cf_industry["MWh_H2_per_tNH3_electrolysis"] + / cf_industry["MWh_elec_per_tNH3_electrolysis"], # input: MW_H2 per MW_elec capital_cost=costs.at["Haber-Bosch", "fixed"], - lifetime=costs.at["Haber-Bosch", 'lifetime'] + lifetime=costs.at["Haber-Bosch", "lifetime"], ) - n.madd("Link", + n.madd( + "Link", nodes, suffix=" ammonia cracker", bus0=spatial.ammonia.nodes, @@ -773,12 +833,14 @@ def add_ammonia(n, costs): p_nom_extendable=True, carrier="ammonia cracker", efficiency=1 / cf_industry["MWh_NH3_per_MWh_H2_cracker"], - capital_cost=costs.at["Ammonia cracker", "fixed"] / cf_industry["MWh_NH3_per_MWh_H2_cracker"], # given per MW_H2 - lifetime=costs.at['Ammonia cracker', 'lifetime'] + capital_cost=costs.at["Ammonia cracker", "fixed"] + / cf_industry["MWh_NH3_per_MWh_H2_cracker"], # given per MW_H2 + lifetime=costs.at["Ammonia cracker", "lifetime"], ) # Ammonia Storage - n.madd("Store", + n.madd( + "Store", spatial.ammonia.nodes, suffix=" ammonia store", bus=spatial.ammonia.nodes, @@ -786,42 +848,48 @@ def add_ammonia(n, costs): e_cyclic=True, carrier="ammonia store", capital_cost=costs.at["NH3 (l) storage tank incl. liquefaction", "fixed"], - lifetime=costs.at['NH3 (l) storage tank incl. liquefaction', 'lifetime'] + lifetime=costs.at["NH3 (l) storage tank incl. liquefaction", "lifetime"], ) def add_wave(n, wave_cost_factor): - # TODO: handle in Snakefile wave_fn = "data/WindWaveWEC_GLTB.xlsx" - #in kW - capacity = pd.Series({"Attenuator": 750, - "F2HB": 1000, - "MultiPA": 600}) + # in kW + capacity = pd.Series({"Attenuator": 750, "F2HB": 1000, "MultiPA": 600}) - #in EUR/MW - annuity_factor = annuity(25,0.07) + 0.03 - costs = 1e6 * wave_cost_factor * annuity_factor * pd.Series({"Attenuator": 2.5, - "F2HB": 2, - "MultiPA": 1.5}) + # in EUR/MW + annuity_factor = annuity(25, 0.07) + 0.03 + costs = ( + 1e6 + * wave_cost_factor + * annuity_factor + * pd.Series({"Attenuator": 2.5, "F2HB": 2, "MultiPA": 1.5}) + ) - sheets = pd.read_excel(wave_fn, sheet_name=["FirthForth", "Hebrides"], - usecols=["Attenuator", "F2HB", "MultiPA"], - index_col=0, skiprows=[0], parse_dates=True) + sheets = pd.read_excel( + wave_fn, + sheet_name=["FirthForth", "Hebrides"], + usecols=["Attenuator", "F2HB", "MultiPA"], + index_col=0, + skiprows=[0], + parse_dates=True, + ) - wave = pd.concat([sheets[l].divide(capacity, axis=1) for l in locations], - keys=locations, - axis=1) + wave = pd.concat( + [sheets[l].divide(capacity, axis=1) for l in locations], keys=locations, axis=1 + ) for wave_type in costs.index: - n.add("Generator", + n.add( + "Generator", "Hebrides " + wave_type, - bus="GB4 0", # TODO this location is hardcoded + bus="GB4 0", # TODO this location is hardcoded p_nom_extendable=True, carrier="wave", capital_cost=costs[wave_type], - p_max_pu=wave["Hebrides", wave_type] + p_max_pu=wave["Hebrides", wave_type], ) @@ -829,21 +897,24 @@ def insert_electricity_distribution_grid(n, costs): # TODO pop_layout? # TODO options? 
- cost_factor = options['electricity_distribution_grid_cost_factor'] + cost_factor = options["electricity_distribution_grid_cost_factor"] - logger.info(f"Inserting electricity distribution grid with investment cost factor of {cost_factor:.2f}") + logger.info( + f"Inserting electricity distribution grid with investment cost factor of {cost_factor:.2f}" + ) nodes = pop_layout.index - - n.madd("Bus", + n.madd( + "Bus", nodes + " low voltage", location=nodes, carrier="low voltage", - unit="MWh_el" + unit="MWh_el", ) - n.madd("Link", + n.madd( + "Link", nodes + " electricity distribution grid", bus0=nodes, bus1=nodes + " low voltage", @@ -851,8 +922,8 @@ def insert_electricity_distribution_grid(n, costs): p_min_pu=-1, carrier="electricity distribution grid", efficiency=1, - lifetime=costs.at['electricity distribution grid', 'lifetime'], - capital_cost=costs.at['electricity distribution grid', 'fixed'] * cost_factor + lifetime=costs.at["electricity distribution grid", "lifetime"], + capital_cost=costs.at["electricity distribution grid", "fixed"] * cost_factor, ) # this catches regular electricity load and "industry electricity" and @@ -877,117 +948,124 @@ def insert_electricity_distribution_grid(n, costs): # set existing solar to cost of utility cost rather the 50-50 rooftop-utility solar = n.generators.index[n.generators.carrier == "solar"] - n.generators.loc[solar, "capital_cost"] = costs.at['solar-utility', 'fixed'] + n.generators.loc[solar, "capital_cost"] = costs.at["solar-utility", "fixed"] if snakemake.wildcards.clusters[-1:] == "m": - simplified_pop_layout = pd.read_csv(snakemake.input.simplified_pop_layout, index_col=0) - pop_solar = simplified_pop_layout.total.rename(index = lambda x: x + " solar") + simplified_pop_layout = pd.read_csv( + snakemake.input.simplified_pop_layout, index_col=0 + ) + pop_solar = simplified_pop_layout.total.rename(index=lambda x: x + " solar") else: - pop_solar = pop_layout.total.rename(index = lambda x: x + " solar") + pop_solar = pop_layout.total.rename(index=lambda x: x + " solar") # add max solar rooftop potential assuming 0.1 kW/m2 and 10 m2/person, # i.e. 
1 kW/person (population data is in thousands of people) so we get MW potential = 0.1 * 10 * pop_solar - n.madd("Generator", + n.madd( + "Generator", solar, suffix=" rooftop", bus=n.generators.loc[solar, "bus"] + " low voltage", carrier="solar rooftop", p_nom_extendable=True, p_nom_max=potential, - marginal_cost=n.generators.loc[solar, 'marginal_cost'], - capital_cost=costs.at['solar-rooftop', 'fixed'], - efficiency=n.generators.loc[solar, 'efficiency'], + marginal_cost=n.generators.loc[solar, "marginal_cost"], + capital_cost=costs.at["solar-rooftop", "fixed"], + efficiency=n.generators.loc[solar, "efficiency"], p_max_pu=n.generators_t.p_max_pu[solar], - lifetime=costs.at['solar-rooftop', 'lifetime'] + lifetime=costs.at["solar-rooftop", "lifetime"], ) n.add("Carrier", "home battery") - n.madd("Bus", + n.madd( + "Bus", nodes + " home battery", location=nodes, carrier="home battery", - unit="MWh_el" + unit="MWh_el", ) - n.madd("Store", + n.madd( + "Store", nodes + " home battery", bus=nodes + " home battery", e_cyclic=True, e_nom_extendable=True, carrier="home battery", - capital_cost=costs.at['home battery storage', 'fixed'], - lifetime=costs.at['battery storage', 'lifetime'] + capital_cost=costs.at["home battery storage", "fixed"], + lifetime=costs.at["battery storage", "lifetime"], ) - n.madd("Link", + n.madd( + "Link", nodes + " home battery charger", bus0=nodes + " low voltage", bus1=nodes + " home battery", carrier="home battery charger", - efficiency=costs.at['battery inverter', 'efficiency']**0.5, - capital_cost=costs.at['home battery inverter', 'fixed'], + efficiency=costs.at["battery inverter", "efficiency"] ** 0.5, + capital_cost=costs.at["home battery inverter", "fixed"], p_nom_extendable=True, - lifetime=costs.at['battery inverter', 'lifetime'] + lifetime=costs.at["battery inverter", "lifetime"], ) - n.madd("Link", + n.madd( + "Link", nodes + " home battery discharger", bus0=nodes + " home battery", bus1=nodes + " low voltage", carrier="home battery discharger", - efficiency=costs.at['battery inverter', 'efficiency']**0.5, - marginal_cost=options['marginal_cost_storage'], + efficiency=costs.at["battery inverter", "efficiency"] ** 0.5, + marginal_cost=options["marginal_cost_storage"], p_nom_extendable=True, - lifetime=costs.at['battery inverter', 'lifetime'] + lifetime=costs.at["battery inverter", "lifetime"], ) def insert_gas_distribution_costs(n, costs): # TODO options? 
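# A quick units check on the rooftop solar potential above (a sketch with
# hypothetical node names): pop_layout.total is given in thousands of people,
# and 0.1 kW/m^2 * 10 m^2/person = 1 kW/person, i.e. 1 MW per thousand
# people, so 0.1 * 10 * pop_solar already yields MW.
import pandas as pd

pop_solar = pd.Series({"DE0 0 solar": 820.0, "FR0 0 solar": 640.0})  # thousands of people
potential = 0.1 * 10 * pop_solar  # 820 MW resp. 640 MW of rooftop potential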
- f_costs = options['gas_distribution_grid_cost_factor'] + f_costs = options["gas_distribution_grid_cost_factor"] - logger.info(f"Inserting gas distribution grid with investment cost factor of {f_costs}") + logger.info( + f"Inserting gas distribution grid with investment cost factor of {f_costs}" + ) - capital_cost = costs.loc['electricity distribution grid']["fixed"] * f_costs + capital_cost = costs.loc["electricity distribution grid"]["fixed"] * f_costs # gas boilers - gas_b = n.links.index[n.links.carrier.str.contains("gas boiler") & - (~n.links.carrier.str.contains("urban central"))] + gas_b = n.links.index[ + n.links.carrier.str.contains("gas boiler") + & (~n.links.carrier.str.contains("urban central")) + ] n.links.loc[gas_b, "capital_cost"] += capital_cost # micro CHPs mchp = n.links.index[n.links.carrier.str.contains("micro gas")] - n.links.loc[mchp, "capital_cost"] += capital_cost + n.links.loc[mchp, "capital_cost"] += capital_cost def add_electricity_grid_connection(n, costs): - carriers = ["onwind", "solar"] gens = n.generators.index[n.generators.carrier.isin(carriers)] - n.generators.loc[gens, "capital_cost"] += costs.at['electricity grid connection', 'fixed'] + n.generators.loc[gens, "capital_cost"] += costs.at[ + "electricity grid connection", "fixed" + ] def add_storage_and_grids(n, costs): - logger.info("Add hydrogen storage") nodes = pop_layout.index n.add("Carrier", "H2") - n.madd("Bus", - nodes + " H2", - location=nodes, - carrier="H2", - unit="MWh_LHV" - ) + n.madd("Bus", nodes + " H2", location=nodes, carrier="H2", unit="MWh_LHV") - n.madd("Link", + n.madd( + "Link", nodes + " H2 Electrolysis", bus1=nodes + " H2", bus0=nodes, @@ -995,25 +1073,26 @@ def add_storage_and_grids(n, costs): carrier="H2 Electrolysis", efficiency=costs.at["electrolysis", "efficiency"], capital_cost=costs.at["electrolysis", "fixed"], - lifetime=costs.at['electrolysis', 'lifetime'] + lifetime=costs.at["electrolysis", "lifetime"], ) - n.madd("Link", + n.madd( + "Link", nodes + " H2 Fuel Cell", bus0=nodes + " H2", bus1=nodes, p_nom_extendable=True, - carrier ="H2 Fuel Cell", + carrier="H2 Fuel Cell", efficiency=costs.at["fuel cell", "efficiency"], - capital_cost=costs.at["fuel cell", "fixed"] * costs.at["fuel cell", "efficiency"], #NB: fixed cost is per MWel - lifetime=costs.at['fuel cell', 'lifetime'] + capital_cost=costs.at["fuel cell", "fixed"] + * costs.at["fuel cell", "efficiency"], # NB: fixed cost is per MWel + lifetime=costs.at["fuel cell", "lifetime"], ) cavern_types = snakemake.config["sector"]["hydrogen_underground_storage_locations"] h2_caverns = pd.read_csv(snakemake.input.h2_cavern, index_col=0) - if not h2_caverns.empty and options['hydrogen_underground_storage']: - + if not h2_caverns.empty and options["hydrogen_underground_storage"]: h2_caverns = h2_caverns[cavern_types].sum(axis=1) # only use sites with at least 2 TWh potential @@ -1029,7 +1108,8 @@ def add_storage_and_grids(n, costs): h2_capital_cost = costs.at["hydrogen storage underground", "fixed"] - n.madd("Store", + n.madd( + "Store", h2_caverns.index + " H2 Store", bus=h2_caverns.index + " H2", e_nom_extendable=True, @@ -1037,42 +1117,48 @@ def add_storage_and_grids(n, costs): e_cyclic=True, carrier="H2 Store", capital_cost=h2_capital_cost, - lifetime=costs.at["hydrogen storage underground", "lifetime"] + lifetime=costs.at["hydrogen storage underground", "lifetime"], ) # hydrogen stored overground (where not already underground) - h2_capital_cost = costs.at["hydrogen storage tank type 1 including compressor", "fixed"] + 
h2_capital_cost = costs.at[ + "hydrogen storage tank type 1 including compressor", "fixed" + ] nodes_overground = h2_caverns.index.symmetric_difference(nodes) - n.madd("Store", + n.madd( + "Store", nodes_overground + " H2 Store", bus=nodes_overground + " H2", e_nom_extendable=True, e_cyclic=True, carrier="H2 Store", - capital_cost=h2_capital_cost + capital_cost=h2_capital_cost, ) if options["gas_network"] or options["H2_retrofit"]: - fn = snakemake.input.clustered_gas_network gas_pipes = pd.read_csv(fn, index_col=0) if options["gas_network"]: - - logger.info("Add natural gas infrastructure, incl. LNG terminals, production and entry-points.") + logger.info( + "Add natural gas infrastructure, incl. LNG terminals, production and entry-points." + ) if options["H2_retrofit"]: gas_pipes["p_nom_max"] = gas_pipes.p_nom - gas_pipes["p_nom_min"] = 0. + gas_pipes["p_nom_min"] = 0.0 # 0.1 EUR/MWkm/a to prefer decommissioning to address degeneracy gas_pipes["capital_cost"] = 0.1 * gas_pipes.length else: gas_pipes["p_nom_max"] = np.inf gas_pipes["p_nom_min"] = gas_pipes.p_nom - gas_pipes["capital_cost"] = gas_pipes.length * costs.at['CH4 (g) pipeline', 'fixed'] + gas_pipes["capital_cost"] = ( + gas_pipes.length * costs.at["CH4 (g) pipeline", "fixed"] + ) - n.madd("Link", + n.madd( + "Link", gas_pipes.index, bus0=gas_pipes.bus0 + " gas", bus1=gas_pipes.bus1 + " gas", @@ -1085,7 +1171,7 @@ def add_storage_and_grids(n, costs): capital_cost=gas_pipes.capital_cost, tags=gas_pipes.name, carrier="gas pipeline", - lifetime=costs.at['CH4 (g) pipeline', 'lifetime'] + lifetime=costs.at["CH4 (g) pipeline", "lifetime"], ) # remove fossil generators where there is neither @@ -1095,7 +1181,7 @@ def add_storage_and_grids(n, costs): gas_input_nodes = pd.read_csv(fn, index_col=0) unique = gas_input_nodes.index.unique() - gas_i = n.generators.carrier == 'gas' + gas_i = n.generators.carrier == "gas" internal_i = ~n.generators.bus.map(n.buses.location).isin(unique) remove_i = n.generators[gas_i & internal_i].index @@ -1109,7 +1195,7 @@ def add_storage_and_grids(n, costs): G = nx.Graph() - gas_buses = n.buses.loc[n.buses.carrier=='gas', 'location'] + gas_buses = n.buses.loc[n.buses.carrier == "gas", "location"] G.add_nodes_from(np.unique(gas_buses.values)) sel = gas_pipes.p_nom > 1500 @@ -1122,113 +1208,118 @@ def add_storage_and_grids(n, costs): # apply k_edge_augmentation weighted by length of complement edges k_edge = options.get("gas_network_connectivity_upgrade", 3) - augmentation = list(k_edge_augmentation(G, k_edge, avail=complement_edges.values)) + augmentation = list( + k_edge_augmentation(G, k_edge, avail=complement_edges.values) + ) if augmentation: - new_gas_pipes = pd.DataFrame(augmentation, columns=["bus0", "bus1"]) new_gas_pipes["length"] = new_gas_pipes.apply(haversine, axis=1) new_gas_pipes.index = new_gas_pipes.apply( - lambda x: f"gas pipeline new {x.bus0} <-> {x.bus1}", axis=1) + lambda x: f"gas pipeline new {x.bus0} <-> {x.bus1}", axis=1 + ) - n.madd("Link", + n.madd( + "Link", new_gas_pipes.index, bus0=new_gas_pipes.bus0 + " gas", bus1=new_gas_pipes.bus1 + " gas", - p_min_pu=-1, # new gas pipes are bidirectional + p_min_pu=-1, # new gas pipes are bidirectional p_nom_extendable=True, length=new_gas_pipes.length, - capital_cost=new_gas_pipes.length * costs.at['CH4 (g) pipeline', 'fixed'], + capital_cost=new_gas_pipes.length + * costs.at["CH4 (g) pipeline", "fixed"], carrier="gas pipeline new", - lifetime=costs.at['CH4 (g) pipeline', 'lifetime'] + lifetime=costs.at["CH4 (g) pipeline", "lifetime"], ) if 
options["H2_retrofit"]: - logger.info("Add retrofitting options of existing CH4 pipes to H2 pipes.") fr = "gas pipeline" to = "H2 pipeline retrofitted" h2_pipes = gas_pipes.rename(index=lambda x: x.replace(fr, to)) - n.madd("Link", + n.madd( + "Link", h2_pipes.index, bus0=h2_pipes.bus0 + " H2", bus1=h2_pipes.bus1 + " H2", - p_min_pu=-1., # allow that all H2 retrofit pipelines can be used in both directions + p_min_pu=-1.0, # allow that all H2 retrofit pipelines can be used in both directions p_nom_max=h2_pipes.p_nom * options["H2_retrofit_capacity_per_CH4"], p_nom_extendable=True, length=h2_pipes.length, - capital_cost=costs.at['H2 (g) pipeline repurposed', 'fixed'] * h2_pipes.length, + capital_cost=costs.at["H2 (g) pipeline repurposed", "fixed"] + * h2_pipes.length, tags=h2_pipes.name, carrier="H2 pipeline retrofitted", - lifetime=costs.at['H2 (g) pipeline repurposed', 'lifetime'] + lifetime=costs.at["H2 (g) pipeline repurposed", "lifetime"], ) if options.get("H2_network", True): - logger.info("Add options for new hydrogen pipelines.") - h2_pipes = create_network_topology(n, "H2 pipeline ", carriers=["DC", "gas pipeline"]) + h2_pipes = create_network_topology( + n, "H2 pipeline ", carriers=["DC", "gas pipeline"] + ) # TODO Add efficiency losses - n.madd("Link", + n.madd( + "Link", h2_pipes.index, bus0=h2_pipes.bus0.values + " H2", bus1=h2_pipes.bus1.values + " H2", p_min_pu=-1, p_nom_extendable=True, length=h2_pipes.length.values, - capital_cost=costs.at['H2 (g) pipeline', 'fixed'] * h2_pipes.length.values, + capital_cost=costs.at["H2 (g) pipeline", "fixed"] * h2_pipes.length.values, carrier="H2 pipeline", - lifetime=costs.at['H2 (g) pipeline', 'lifetime'] + lifetime=costs.at["H2 (g) pipeline", "lifetime"], ) n.add("Carrier", "battery") - n.madd("Bus", - nodes + " battery", - location=nodes, - carrier="battery", - unit="MWh_el" - ) + n.madd("Bus", nodes + " battery", location=nodes, carrier="battery", unit="MWh_el") - n.madd("Store", + n.madd( + "Store", nodes + " battery", bus=nodes + " battery", e_cyclic=True, e_nom_extendable=True, carrier="battery", - capital_cost=costs.at['battery storage', 'fixed'], - lifetime=costs.at['battery storage', 'lifetime'] + capital_cost=costs.at["battery storage", "fixed"], + lifetime=costs.at["battery storage", "lifetime"], ) - n.madd("Link", + n.madd( + "Link", nodes + " battery charger", bus0=nodes, bus1=nodes + " battery", carrier="battery charger", - efficiency=costs.at['battery inverter', 'efficiency']**0.5, - capital_cost=costs.at['battery inverter', 'fixed'], + efficiency=costs.at["battery inverter", "efficiency"] ** 0.5, + capital_cost=costs.at["battery inverter", "fixed"], p_nom_extendable=True, - lifetime=costs.at['battery inverter', 'lifetime'] + lifetime=costs.at["battery inverter", "lifetime"], ) - n.madd("Link", + n.madd( + "Link", nodes + " battery discharger", bus0=nodes + " battery", bus1=nodes, carrier="battery discharger", - efficiency=costs.at['battery inverter', 'efficiency']**0.5, - marginal_cost=options['marginal_cost_storage'], + efficiency=costs.at["battery inverter", "efficiency"] ** 0.5, + marginal_cost=options["marginal_cost_storage"], p_nom_extendable=True, - lifetime=costs.at['battery inverter', 'lifetime'] + lifetime=costs.at["battery inverter", "lifetime"], ) - if options['methanation']: - - n.madd("Link", + if options["methanation"]: + n.madd( + "Link", spatial.nodes, suffix=" Sabatier", bus0=nodes + " H2", @@ -1237,14 +1328,16 @@ def add_storage_and_grids(n, costs): p_nom_extendable=True, carrier="Sabatier", 
efficiency=costs.at["methanation", "efficiency"], - efficiency2=-costs.at["methanation", "efficiency"] * costs.at['gas', 'CO2 intensity'], - capital_cost=costs.at["methanation", "fixed"] * costs.at["methanation", "efficiency"], # costs given per kW_gas - lifetime=costs.at['methanation', 'lifetime'] + efficiency2=-costs.at["methanation", "efficiency"] + * costs.at["gas", "CO2 intensity"], + capital_cost=costs.at["methanation", "fixed"] + * costs.at["methanation", "efficiency"], # costs given per kW_gas + lifetime=costs.at["methanation", "lifetime"], ) - if options['helmeth']: - - n.madd("Link", + if options["helmeth"]: + n.madd( + "Link", spatial.nodes, suffix=" helmeth", bus0=nodes, @@ -1253,33 +1346,39 @@ def add_storage_and_grids(n, costs): carrier="helmeth", p_nom_extendable=True, efficiency=costs.at["helmeth", "efficiency"], - efficiency2=-costs.at["helmeth", "efficiency"] * costs.at['gas', 'CO2 intensity'], + efficiency2=-costs.at["helmeth", "efficiency"] + * costs.at["gas", "CO2 intensity"], capital_cost=costs.at["helmeth", "fixed"], - lifetime=costs.at['helmeth', 'lifetime'] + lifetime=costs.at["helmeth", "lifetime"], ) - if options.get('coal_cc'): - - n.madd("Link", + if options.get("coal_cc"): + n.madd( + "Link", spatial.nodes, suffix=" coal CC", bus0=spatial.coal.nodes, bus1=spatial.nodes, bus2="co2 atmosphere", bus3=spatial.co2.nodes, - marginal_cost=costs.at['coal', 'efficiency'] * costs.at['coal', 'VOM'], #NB: VOM is per MWel - capital_cost=costs.at['coal', 'efficiency'] * costs.at['coal', 'fixed'] + costs.at['biomass CHP capture', 'fixed'] * costs.at['coal', 'CO2 intensity'], #NB: fixed cost is per MWel + marginal_cost=costs.at["coal", "efficiency"] + * costs.at["coal", "VOM"], # NB: VOM is per MWel + capital_cost=costs.at["coal", "efficiency"] * costs.at["coal", "fixed"] + + costs.at["biomass CHP capture", "fixed"] + * costs.at["coal", "CO2 intensity"], # NB: fixed cost is per MWel p_nom_extendable=True, carrier="coal", - efficiency=costs.at['coal', 'efficiency'], - efficiency2=costs.at['coal', 'CO2 intensity'] * (1 - costs.at['biomass CHP capture','capture_rate']), - efficiency3=costs.at['coal', 'CO2 intensity'] * costs.at['biomass CHP capture','capture_rate'], - lifetime=costs.at['coal','lifetime'] + efficiency=costs.at["coal", "efficiency"], + efficiency2=costs.at["coal", "CO2 intensity"] + * (1 - costs.at["biomass CHP capture", "capture_rate"]), + efficiency3=costs.at["coal", "CO2 intensity"] + * costs.at["biomass CHP capture", "capture_rate"], + lifetime=costs.at["coal", "lifetime"], ) - if options['SMR']: - - n.madd("Link", + if options["SMR"]: + n.madd( + "Link", spatial.nodes, suffix=" SMR CC", bus0=spatial.gas.nodes, @@ -1289,13 +1388,14 @@ def add_storage_and_grids(n, costs): p_nom_extendable=True, carrier="SMR CC", efficiency=costs.at["SMR CC", "efficiency"], - efficiency2=costs.at['gas', 'CO2 intensity'] * (1 - options["cc_fraction"]), - efficiency3=costs.at['gas', 'CO2 intensity'] * options["cc_fraction"], + efficiency2=costs.at["gas", "CO2 intensity"] * (1 - options["cc_fraction"]), + efficiency3=costs.at["gas", "CO2 intensity"] * options["cc_fraction"], capital_cost=costs.at["SMR CC", "fixed"], - lifetime=costs.at['SMR CC', 'lifetime'] + lifetime=costs.at["SMR CC", "lifetime"], ) - n.madd("Link", + n.madd( + "Link", nodes + " SMR", bus0=spatial.gas.nodes, bus1=nodes + " H2", @@ -1303,9 +1403,9 @@ def add_storage_and_grids(n, costs): p_nom_extendable=True, carrier="SMR", efficiency=costs.at["SMR", "efficiency"], - efficiency2=costs.at['gas', 'CO2 intensity'], + 
efficiency2=costs.at["gas", "CO2 intensity"], capital_cost=costs.at["SMR", "fixed"], - lifetime=costs.at['SMR', 'lifetime'] + lifetime=costs.at["SMR", "lifetime"], ) @@ -1314,18 +1414,28 @@ def add_land_transport(n, costs): logger.info("Add land transport") - transport = pd.read_csv(snakemake.input.transport_demand, index_col=0, parse_dates=True) - number_cars = pd.read_csv(snakemake.input.transport_data, index_col=0)["number cars"] - avail_profile = pd.read_csv(snakemake.input.avail_profile, index_col=0, parse_dates=True) - dsm_profile = pd.read_csv(snakemake.input.dsm_profile, index_col=0, parse_dates=True) + transport = pd.read_csv( + snakemake.input.transport_demand, index_col=0, parse_dates=True + ) + number_cars = pd.read_csv(snakemake.input.transport_data, index_col=0)[ + "number cars" + ] + avail_profile = pd.read_csv( + snakemake.input.avail_profile, index_col=0, parse_dates=True + ) + dsm_profile = pd.read_csv( + snakemake.input.dsm_profile, index_col=0, parse_dates=True + ) fuel_cell_share = get(options["land_transport_fuel_cell_share"], investment_year) electric_share = get(options["land_transport_electric_share"], investment_year) ice_share = get(options["land_transport_ice_share"], investment_year) - + total_share = fuel_cell_share + electric_share + ice_share if total_share != 1: - logger.warning(f"Total land transport shares sum up to {total_share:.2%}, corresponding to increased or decreased demand assumptions.") + logger.warning( + f"Total land transport shares sum up to {total_share:.2%}, corresponding to increased or decreased demand assumptions." + ) logger.info(f"FCEV share: {fuel_cell_share*100}%") logger.info(f"EV share: {electric_share*100}%") @@ -1334,47 +1444,57 @@ def add_land_transport(n, costs): nodes = pop_layout.index if electric_share > 0: - n.add("Carrier", "Li ion") - n.madd("Bus", + n.madd( + "Bus", nodes, location=nodes, suffix=" EV battery", carrier="Li ion", - unit="MWh_el" + unit="MWh_el", ) - p_set = electric_share * (transport[nodes] + cycling_shift(transport[nodes], 1) + cycling_shift(transport[nodes], 2)) / 3 + p_set = ( + electric_share + * ( + transport[nodes] + + cycling_shift(transport[nodes], 1) + + cycling_shift(transport[nodes], 2) + ) + / 3 + ) - n.madd("Load", + n.madd( + "Load", nodes, suffix=" land transport EV", bus=nodes + " EV battery", carrier="land transport EV", - p_set=p_set + p_set=p_set, ) p_nom = number_cars * options.get("bev_charge_rate", 0.011) * electric_share - n.madd("Link", + n.madd( + "Link", nodes, - suffix= " BEV charger", + suffix=" BEV charger", bus0=nodes, bus1=nodes + " EV battery", p_nom=p_nom, carrier="BEV charger", p_max_pu=avail_profile[nodes], efficiency=options.get("bev_charge_efficiency", 0.9), - #These were set non-zero to find LU infeasibility when availability = 0.25 - #p_nom_extendable=True, - #p_nom_min=p_nom, - #capital_cost=1e6, #i.e. so high it only gets built where necessary + # These were set non-zero to find LU infeasibility when availability = 0.25 + # p_nom_extendable=True, + # p_nom_min=p_nom, + # capital_cost=1e6, #i.e. 
so high it only gets built where necessary ) if electric_share > 0 and options["v2g"]: - - n.madd("Link", + n.madd( + "Link", nodes, suffix=" V2G", bus1=nodes, @@ -1386,10 +1506,15 @@ def add_land_transport(n, costs): ) if electric_share > 0 and options["bev_dsm"]: + e_nom = ( + number_cars + * options.get("bev_energy", 0.05) + * options["bev_availability"] + * electric_share + ) - e_nom = number_cars * options.get("bev_energy", 0.05) * options["bev_availability"] * electric_share - - n.madd("Store", + n.madd( + "Store", nodes, suffix=" battery storage", bus=nodes + " EV battery", @@ -1397,53 +1522,66 @@ def add_land_transport(n, costs): e_cyclic=True, e_nom=e_nom, e_max_pu=1, - e_min_pu=dsm_profile[nodes] + e_min_pu=dsm_profile[nodes], ) if fuel_cell_share > 0: - - n.madd("Load", + n.madd( + "Load", nodes, suffix=" land transport fuel cell", bus=nodes + " H2", carrier="land transport fuel cell", - p_set=fuel_cell_share / options['transport_fuel_cell_efficiency'] * transport[nodes] + p_set=fuel_cell_share + / options["transport_fuel_cell_efficiency"] + * transport[nodes], ) if ice_share > 0: - if "oil" not in n.buses.carrier.unique(): - n.madd("Bus", + n.madd( + "Bus", spatial.oil.nodes, location=spatial.oil.locations, carrier="oil", - unit="MWh_LHV" + unit="MWh_LHV", ) - ice_efficiency = options['transport_internal_combustion_efficiency'] + ice_efficiency = options["transport_internal_combustion_efficiency"] - n.madd("Load", + n.madd( + "Load", nodes, suffix=" land transport oil", bus=spatial.oil.nodes, carrier="land transport oil", - p_set=ice_share / ice_efficiency * transport[nodes] + p_set=ice_share / ice_efficiency * transport[nodes], ) - co2 = ice_share / ice_efficiency * transport[nodes].sum().sum() / 8760 * costs.at["oil", 'CO2 intensity'] + co2 = ( + ice_share + / ice_efficiency + * transport[nodes].sum().sum() + / 8760 + * costs.at["oil", "CO2 intensity"] + ) - n.add("Load", + n.add( + "Load", "land transport oil emissions", bus="co2 atmosphere", carrier="land transport oil emissions", - p_set=-co2 + p_set=-co2, ) def build_heat_demand(n): - # copy forward the daily average heat demand into each hour, so it can be multiplied by the intraday profile - daily_space_heat_demand = xr.open_dataarray(snakemake.input.heat_demand_total).to_pandas().reindex(index=n.snapshots, method="ffill") + daily_space_heat_demand = ( + xr.open_dataarray(snakemake.input.heat_demand_total) + .to_pandas() + .reindex(index=n.snapshots, method="ffill") + ) intraday_profiles = pd.read_csv(snakemake.input.heat_profile, index_col=0) @@ -1459,7 +1597,7 @@ def build_heat_demand(n): intraday_year_profile = generate_periodic_profiles( daily_space_heat_demand.index.tz_localize("UTC"), nodes=daily_space_heat_demand.columns, - weekly_profile=weekly_profile + weekly_profile=weekly_profile, ) if use == "space": @@ -1467,21 +1605,27 @@ def build_heat_demand(n): else: heat_demand_shape = intraday_year_profile - heat_demand[f"{sector} {use}"] = (heat_demand_shape/heat_demand_shape.sum()).multiply(pop_weighted_energy_totals[f"total {sector} {use}"]) * 1e6 - electric_heat_supply[f"{sector} {use}"] = (heat_demand_shape/heat_demand_shape.sum()).multiply(pop_weighted_energy_totals[f"electricity {sector} {use}"]) * 1e6 + heat_demand[f"{sector} {use}"] = ( + heat_demand_shape / heat_demand_shape.sum() + ).multiply(pop_weighted_energy_totals[f"total {sector} {use}"]) * 1e6 + electric_heat_supply[f"{sector} {use}"] = ( + heat_demand_shape / heat_demand_shape.sum() + ).multiply(pop_weighted_energy_totals[f"electricity {sector} 
{use}"]) * 1e6 heat_demand = pd.concat(heat_demand, axis=1) electric_heat_supply = pd.concat(electric_heat_supply, axis=1) # subtract from electricity load since heat demand already in heat_demand electric_nodes = n.loads.index[n.loads.carrier == "electricity"] - n.loads_t.p_set[electric_nodes] = n.loads_t.p_set[electric_nodes] - electric_heat_supply.groupby(level=1, axis=1).sum()[electric_nodes] + n.loads_t.p_set[electric_nodes] = ( + n.loads_t.p_set[electric_nodes] + - electric_heat_supply.groupby(level=1, axis=1).sum()[electric_nodes] + ) return heat_demand def add_heat(n, costs): - logger.info("Add heat sector") sectors = ["residential", "services"] @@ -1490,7 +1634,7 @@ def add_heat(n, costs): nodes, dist_fraction, urban_fraction = create_nodes_for_heat_sector() - #NB: must add costs of central heating afterwards (EUR 400 / kWpeak, 50a, 1% FOM from Fraunhofer ISE) + # NB: must add costs of central heating afterwards (EUR 400 / kWpeak, 50a, 1% FOM from Fraunhofer ISE) # exogenously reduce space heat demand if options["reduce_space_heat_exogenously"]: @@ -1504,30 +1648,38 @@ def add_heat(n, costs): "services rural", "residential urban decentral", "services urban decentral", - "urban central" + "urban central", ] cop = { - "air": xr.open_dataarray(snakemake.input.cop_air_total).to_pandas().reindex(index=n.snapshots), - "ground": xr.open_dataarray(snakemake.input.cop_soil_total).to_pandas().reindex(index=n.snapshots) + "air": xr.open_dataarray(snakemake.input.cop_air_total) + .to_pandas() + .reindex(index=n.snapshots), + "ground": xr.open_dataarray(snakemake.input.cop_soil_total) + .to_pandas() + .reindex(index=n.snapshots), } if options["solar_thermal"]: - solar_thermal = xr.open_dataarray(snakemake.input.solar_thermal_total).to_pandas().reindex(index=n.snapshots) + solar_thermal = ( + xr.open_dataarray(snakemake.input.solar_thermal_total) + .to_pandas() + .reindex(index=n.snapshots) + ) # 1e3 converts from W/m^2 to MW/(1000m^2) = kW/m^2 - solar_thermal = options['solar_cf_correction'] * solar_thermal / 1e3 + solar_thermal = options["solar_cf_correction"] * solar_thermal / 1e3 for name in heat_systems: - name_type = "central" if name == "urban central" else "decentral" n.add("Carrier", name + " heat") - n.madd("Bus", + n.madd( + "Bus", nodes[name] + f" {name} heat", location=nodes[name], carrier=name + " heat", - unit="MWh_th" + unit="MWh_th", ) ## Add heat load @@ -1539,23 +1691,36 @@ def add_heat(n, costs): elif "urban central" in name: factor = dist_fraction[nodes[name]] elif "urban decentral" in name: - factor = urban_fraction[nodes[name]] - \ - dist_fraction[nodes[name]] + factor = urban_fraction[nodes[name]] - dist_fraction[nodes[name]] else: - raise NotImplementedError(f" {name} not in " f"heat systems: {heat_systems}") + raise NotImplementedError( + f" {name} not in " f"heat systems: {heat_systems}" + ) if sector in name: - heat_load = heat_demand[[sector + " water",sector + " space"]].groupby(level=1,axis=1).sum()[nodes[name]].multiply(factor) + heat_load = ( + heat_demand[[sector + " water", sector + " space"]] + .groupby(level=1, axis=1) + .sum()[nodes[name]] + .multiply(factor) + ) if name == "urban central": - heat_load = heat_demand.groupby(level=1,axis=1).sum()[nodes[name]].multiply(factor * (1 + options['district_heating']['district_heating_loss'])) + heat_load = ( + heat_demand.groupby(level=1, axis=1) + .sum()[nodes[name]] + .multiply( + factor * (1 + options["district_heating"]["district_heating_loss"]) + ) + ) - n.madd("Load", + n.madd( + "Load", nodes[name], 
suffix=f" {name} heat", bus=nodes[name] + f" {name} heat", carrier=name + " heat", - p_set=heat_load + p_set=heat_load, ) ## Add heat pumps @@ -1563,118 +1728,130 @@ def add_heat(n, costs): heat_pump_type = "air" if "urban" in name else "ground" costs_name = f"{name_type} {heat_pump_type}-sourced heat pump" - efficiency = cop[heat_pump_type][nodes[name]] if options["time_dep_hp_cop"] else costs.at[costs_name, 'efficiency'] + efficiency = ( + cop[heat_pump_type][nodes[name]] + if options["time_dep_hp_cop"] + else costs.at[costs_name, "efficiency"] + ) - n.madd("Link", + n.madd( + "Link", nodes[name], suffix=f" {name} {heat_pump_type} heat pump", bus0=nodes[name], bus1=nodes[name] + f" {name} heat", carrier=f"{name} {heat_pump_type} heat pump", efficiency=efficiency, - capital_cost=costs.at[costs_name, 'efficiency'] * costs.at[costs_name, 'fixed'], + capital_cost=costs.at[costs_name, "efficiency"] + * costs.at[costs_name, "fixed"], p_nom_extendable=True, - lifetime=costs.at[costs_name, 'lifetime'] + lifetime=costs.at[costs_name, "lifetime"], ) if options["tes"]: - n.add("Carrier", name + " water tanks") - n.madd("Bus", + n.madd( + "Bus", nodes[name] + f" {name} water tanks", location=nodes[name], carrier=name + " water tanks", - unit="MWh_th" + unit="MWh_th", ) - n.madd("Link", + n.madd( + "Link", nodes[name] + f" {name} water tanks charger", bus0=nodes[name] + f" {name} heat", bus1=nodes[name] + f" {name} water tanks", - efficiency=costs.at['water tank charger', 'efficiency'], + efficiency=costs.at["water tank charger", "efficiency"], carrier=name + " water tanks charger", - p_nom_extendable=True + p_nom_extendable=True, ) - n.madd("Link", + n.madd( + "Link", nodes[name] + f" {name} water tanks discharger", bus0=nodes[name] + f" {name} water tanks", bus1=nodes[name] + f" {name} heat", carrier=name + " water tanks discharger", - efficiency=costs.at['water tank discharger', 'efficiency'], - p_nom_extendable=True + efficiency=costs.at["water tank discharger", "efficiency"], + p_nom_extendable=True, ) - if isinstance(options["tes_tau"], dict): tes_time_constant_days = options["tes_tau"][name_type] else: - logger.warning("Deprecated: a future version will require you to specify 'tes_tau' ", - "for 'decentral' and 'central' separately.") - tes_time_constant_days = options["tes_tau"] if name_type == "decentral" else 180. 
+                logger.warning(
+                    "Deprecated: a future version will require you to specify 'tes_tau' "
+                    "for 'decentral' and 'central' separately."
+                )
+                tes_time_constant_days = (
+                    options["tes_tau"] if name_type == "decentral" else 180.0
+                )
 
-            n.madd("Store",
+            n.madd(
+                "Store",
                 nodes[name] + f" {name} water tanks",
                 bus=nodes[name] + f" {name} water tanks",
                 e_cyclic=True,
                 e_nom_extendable=True,
                 carrier=name + " water tanks",
-                standing_loss=1 - np.exp(- 1 / 24 / tes_time_constant_days),
-                capital_cost=costs.at[name_type + ' water tank storage', 'fixed'],
-                lifetime=costs.at[name_type + ' water tank storage', 'lifetime']
+                standing_loss=1 - np.exp(-1 / 24 / tes_time_constant_days),
+                capital_cost=costs.at[name_type + " water tank storage", "fixed"],
+                lifetime=costs.at[name_type + " water tank storage", "lifetime"],
             )
 
         if options["boilers"]:
-
             key = f"{name_type} resistive heater"
 
-            n.madd("Link",
+            n.madd(
+                "Link",
                 nodes[name] + f" {name} resistive heater",
                 bus0=nodes[name],
                 bus1=nodes[name] + f" {name} heat",
                 carrier=name + " resistive heater",
-                efficiency=costs.at[key, 'efficiency'],
-                capital_cost=costs.at[key, 'efficiency'] * costs.at[key, 'fixed'],
+                efficiency=costs.at[key, "efficiency"],
+                capital_cost=costs.at[key, "efficiency"] * costs.at[key, "fixed"],
                 p_nom_extendable=True,
-                lifetime=costs.at[key, 'lifetime']
+                lifetime=costs.at[key, "lifetime"],
             )
 
             key = f"{name_type} gas boiler"
 
-            n.madd("Link",
+            n.madd(
+                "Link",
                 nodes[name] + f" {name} gas boiler",
                 p_nom_extendable=True,
                 bus0=spatial.gas.df.loc[nodes[name], "nodes"].values,
                 bus1=nodes[name] + f" {name} heat",
                 bus2="co2 atmosphere",
                 carrier=name + " gas boiler",
-                efficiency=costs.at[key, 'efficiency'],
-                efficiency2=costs.at['gas', 'CO2 intensity'],
-                capital_cost=costs.at[key, 'efficiency'] * costs.at[key, 'fixed'],
-                lifetime=costs.at[key, 'lifetime']
+                efficiency=costs.at[key, "efficiency"],
+                efficiency2=costs.at["gas", "CO2 intensity"],
+                capital_cost=costs.at[key, "efficiency"] * costs.at[key, "fixed"],
+                lifetime=costs.at[key, "lifetime"],
             )
 
-        if options["solar_thermal"]:
-
+        if options["solar_thermal"]:
             n.add("Carrier", name + " solar thermal")
 
-            n.madd("Generator",
+            n.madd(
+                "Generator",
                 nodes[name],
                 suffix=f" {name} solar thermal collector",
                 bus=nodes[name] + f" {name} heat",
                 carrier=name + " solar thermal",
                 p_nom_extendable=True,
-                capital_cost=costs.at[name_type + ' solar thermal', 'fixed'],
+                capital_cost=costs.at[name_type + " solar thermal", "fixed"],
                 p_max_pu=solar_thermal[nodes[name]],
-                lifetime=costs.at[name_type + ' solar thermal', 'lifetime']
+                lifetime=costs.at[name_type + " solar thermal", "lifetime"],
             )
 
         if options["chp"] and name == "urban central":
-
             # add gas CHP; biomass CHP is added in biomass section
-            n.madd("Link",
+            n.madd(
+                "Link",
                 nodes[name] + " urban central gas CHP",
                 bus0=spatial.gas.df.loc[nodes[name], "nodes"].values,
                 bus1=nodes[name],
@@ -1682,15 +1859,18 @@ def add_heat(n, costs):
                 bus3="co2 atmosphere",
                 carrier="urban central gas CHP",
                 p_nom_extendable=True,
-                capital_cost=costs.at['central gas CHP', 'fixed'] * costs.at['central gas CHP', 'efficiency'],
-                marginal_cost=costs.at['central gas CHP', 'VOM'],
-                efficiency=costs.at['central gas CHP', 'efficiency'],
-                efficiency2=costs.at['central gas CHP', 'efficiency'] / costs.at['central gas CHP', 'c_b'],
-                efficiency3=costs.at['gas', 'CO2 intensity'],
-                lifetime=costs.at['central gas CHP', 'lifetime']
+                capital_cost=costs.at["central gas CHP", "fixed"]
+                * costs.at["central gas CHP", "efficiency"],
+                marginal_cost=costs.at["central gas CHP", "VOM"],
+
efficiency=costs.at["central gas CHP", "efficiency"], + efficiency2=costs.at["central gas CHP", "efficiency"] + / costs.at["central gas CHP", "c_b"], + efficiency3=costs.at["gas", "CO2 intensity"], + lifetime=costs.at["central gas CHP", "lifetime"], ) - n.madd("Link", + n.madd( + "Link", nodes[name] + " urban central gas CHP CC", bus0=spatial.gas.df.loc[nodes[name], "nodes"].values, bus1=nodes[name], @@ -1699,18 +1879,35 @@ def add_heat(n, costs): bus4=spatial.co2.df.loc[nodes[name], "nodes"].values, carrier="urban central gas CHP CC", p_nom_extendable=True, - capital_cost=costs.at['central gas CHP', 'fixed']*costs.at['central gas CHP', 'efficiency'] + costs.at['biomass CHP capture', 'fixed']*costs.at['gas', 'CO2 intensity'], - marginal_cost=costs.at['central gas CHP', 'VOM'], - efficiency=costs.at['central gas CHP', 'efficiency'] - costs.at['gas', 'CO2 intensity'] * (costs.at['biomass CHP capture', 'electricity-input'] + costs.at['biomass CHP capture', 'compression-electricity-input']), - efficiency2=costs.at['central gas CHP', 'efficiency'] / costs.at['central gas CHP', 'c_b'] + costs.at['gas', 'CO2 intensity'] * (costs.at['biomass CHP capture', 'heat-output'] + costs.at['biomass CHP capture', 'compression-heat-output'] - costs.at['biomass CHP capture', 'heat-input']), - efficiency3=costs.at['gas', 'CO2 intensity'] * (1-costs.at['biomass CHP capture', 'capture_rate']), - efficiency4=costs.at['gas', 'CO2 intensity'] * costs.at['biomass CHP capture', 'capture_rate'], - lifetime=costs.at['central gas CHP', 'lifetime'] + capital_cost=costs.at["central gas CHP", "fixed"] + * costs.at["central gas CHP", "efficiency"] + + costs.at["biomass CHP capture", "fixed"] + * costs.at["gas", "CO2 intensity"], + marginal_cost=costs.at["central gas CHP", "VOM"], + efficiency=costs.at["central gas CHP", "efficiency"] + - costs.at["gas", "CO2 intensity"] + * ( + costs.at["biomass CHP capture", "electricity-input"] + + costs.at["biomass CHP capture", "compression-electricity-input"] + ), + efficiency2=costs.at["central gas CHP", "efficiency"] + / costs.at["central gas CHP", "c_b"] + + costs.at["gas", "CO2 intensity"] + * ( + costs.at["biomass CHP capture", "heat-output"] + + costs.at["biomass CHP capture", "compression-heat-output"] + - costs.at["biomass CHP capture", "heat-input"] + ), + efficiency3=costs.at["gas", "CO2 intensity"] + * (1 - costs.at["biomass CHP capture", "capture_rate"]), + efficiency4=costs.at["gas", "CO2 intensity"] + * costs.at["biomass CHP capture", "capture_rate"], + lifetime=costs.at["central gas CHP", "lifetime"], ) if options["chp"] and options["micro_chp"] and name != "urban central": - - n.madd("Link", + n.madd( + "Link", nodes[name] + f" {name} micro gas CHP", p_nom_extendable=True, bus0=spatial.gas.df.loc[nodes[name], "nodes"].values, @@ -1718,31 +1915,32 @@ def add_heat(n, costs): bus2=nodes[name] + f" {name} heat", bus3="co2 atmosphere", carrier=name + " micro gas CHP", - efficiency=costs.at['micro CHP', 'efficiency'], - efficiency2=costs.at['micro CHP', 'efficiency-heat'], - efficiency3=costs.at['gas', 'CO2 intensity'], - capital_cost=costs.at['micro CHP', 'fixed'], - lifetime=costs.at['micro CHP', 'lifetime'] + efficiency=costs.at["micro CHP", "efficiency"], + efficiency2=costs.at["micro CHP", "efficiency-heat"], + efficiency3=costs.at["gas", "CO2 intensity"], + capital_cost=costs.at["micro CHP", "fixed"], + lifetime=costs.at["micro CHP", "lifetime"], ) - - if options['retrofitting']['retro_endogen']: - + if options["retrofitting"]["retro_endogen"]: logger.info("Add 
retrofitting endogenously") # resample heat demand temporal 'heat_demand_r' depending on in config # specified temporal resolution, to not overestimate retrofitting - hours = list(filter(re.compile(r'^\d+h$', re.IGNORECASE).search, opts)) - if len(hours)==0: + hours = list(filter(re.compile(r"^\d+h$", re.IGNORECASE).search, opts)) + if len(hours) == 0: hours = [n.snapshots[1] - n.snapshots[0]] - heat_demand_r = heat_demand.resample(hours[0]).mean() + heat_demand_r = heat_demand.resample(hours[0]).mean() # retrofitting data 'retro_data' with 'costs' [EUR/m^2] and heat # demand 'dE' [per unit of original heat demand] for each country and # different retrofitting strengths [additional insulation thickness in m] - retro_data = pd.read_csv(snakemake.input.retro_cost_energy, - index_col=[0, 1], skipinitialspace=True, - header=[0, 1]) + retro_data = pd.read_csv( + snakemake.input.retro_cost_energy, + index_col=[0, 1], + skipinitialspace=True, + header=[0, 1], + ) # heated floor area [10^6 * m^2] per country floor_area = pd.read_csv(snakemake.input.floor_area, index_col=[0, 1]) @@ -1751,31 +1949,32 @@ def add_heat(n, costs): # share of space heat demand 'w_space' of total heat demand w_space = {} for sector in sectors: - w_space[sector] = heat_demand_r[sector + " space"] / \ - (heat_demand_r[sector + " space"] + heat_demand_r[sector + " water"]) - w_space["tot"] = ((heat_demand_r["services space"] + - heat_demand_r["residential space"]) / - heat_demand_r.groupby(level=[1], axis=1).sum()) - - - for name in n.loads[n.loads.carrier.isin([x + " heat" for x in heat_systems])].index: + w_space[sector] = heat_demand_r[sector + " space"] / ( + heat_demand_r[sector + " space"] + heat_demand_r[sector + " water"] + ) + w_space["tot"] = ( + heat_demand_r["services space"] + heat_demand_r["residential space"] + ) / heat_demand_r.groupby(level=[1], axis=1).sum() + for name in n.loads[ + n.loads.carrier.isin([x + " heat" for x in heat_systems]) + ].index: node = n.buses.loc[name, "location"] ct = pop_layout.loc[node, "ct"] # weighting 'f' depending on the size of the population at the node - f = urban_fraction[node] if "urban" in name else (1-urban_fraction[node]) + f = urban_fraction[node] if "urban" in name else (1 - urban_fraction[node]) if f == 0: continue # get sector name ("residential"/"services"/or both "tot" for urban central) sec = [x if x in name else "tot" for x in sectors][0] # get floor aread at node and region (urban/rural) in m^2 - floor_area_node = ((pop_layout.loc[node].fraction - * floor_area.loc[ct, "value"] * 10**6).loc[sec] * f) + floor_area_node = ( + pop_layout.loc[node].fraction * floor_area.loc[ct, "value"] * 10**6 + ).loc[sec] * f # total heat demand at node [MWh] - demand = (n.loads_t.p_set[name].resample(hours[0]) - .mean()) + demand = n.loads_t.p_set[name].resample(hours[0]).mean() # space heat demand at node [MWh] space_heat_demand = demand * w_space[sec][node] @@ -1786,10 +1985,13 @@ def add_heat(n, costs): # minimum heat demand 'dE' after retrofitting in units of original heat demand (values between 0-1) dE = retro_data.loc[(ct, sec), ("dE")] # get additional energy savings 'dE_diff' between the different retrofitting strengths/generators at one node - dE_diff = abs(dE.diff()).fillna(1-dE.iloc[0]) + dE_diff = abs(dE.diff()).fillna(1 - dE.iloc[0]) # convert costs Euro/m^2 -> Euro/MWh - capital_cost = retro_data.loc[(ct, sec), ("cost")] * floor_area_node / \ - ((1 - dE) * space_heat_demand.max()) + capital_cost = ( + retro_data.loc[(ct, sec), ("cost")] + * floor_area_node + / ((1 - 
dE) * space_heat_demand.max()) + ) # number of possible retrofitting measures 'strengths' (set in list at config.yaml 'l_strength') # given in additional insulation thickness [m] # for each measure, a retrofitting generator is added at the node @@ -1806,17 +2008,20 @@ def add_heat(n, costs): # add for each retrofitting strength a generator with heat generation profile following the profile of the heat demand for strength in strengths: - n.madd('Generator', + n.madd( + "Generator", [node], - suffix=' retrofitting ' + strength + " " + name[6::], + suffix=" retrofitting " + strength + " " + name[6::], bus=name, carrier="retrofitting", p_nom_extendable=True, - p_nom_max=dE_diff[strength] * space_heat_demand.max(), # maximum energy savings for this renovation strength + p_nom_max=dE_diff[strength] + * space_heat_demand.max(), # maximum energy savings for this renovation strength p_max_pu=space_pu, p_min_pu=space_pu, country=ct, - capital_cost=capital_cost[strength] * options['retrofitting']['cost_factor'] + capital_cost=capital_cost[strength] + * options["retrofitting"]["cost_factor"], ) @@ -1843,14 +2048,15 @@ def create_nodes_for_heat_sector(): district_heat_share = pop_weighted_energy_totals["district heat share"] # maximum potential of urban demand covered by district heating - central_fraction = options['district_heating']["potential"] + central_fraction = options["district_heating"]["potential"] # district heating share at each node - dist_fraction_node = district_heat_share * pop_layout["urban_ct_fraction"] / pop_layout["fraction"] + dist_fraction_node = ( + district_heat_share * pop_layout["urban_ct_fraction"] / pop_layout["fraction"] + ) nodes["urban central"] = dist_fraction_node.index # if district heating share larger than urban fraction -> set urban # fraction to district heating share - urban_fraction = pd.concat([urban_fraction, dist_fraction_node], - axis=1).max(axis=1) + urban_fraction = pd.concat([urban_fraction, dist_fraction_node], axis=1).max(axis=1) # difference of max potential and today's share of district heating diff = (urban_fraction * central_fraction) - dist_fraction_node progress = get(options["district_heating"]["progress"], investment_year) @@ -1864,59 +2070,66 @@ def create_nodes_for_heat_sector(): def add_biomass(n, costs): - logger.info("Add biomass") biomass_potentials = pd.read_csv(snakemake.input.biomass_potentials, index_col=0) # need to aggregate potentials if gas not nodally resolved if options["gas_network"]: - biogas_potentials_spatial = biomass_potentials["biogas"].rename(index=lambda x: x + " biogas") + biogas_potentials_spatial = biomass_potentials["biogas"].rename( + index=lambda x: x + " biogas" + ) else: biogas_potentials_spatial = biomass_potentials["biogas"].sum() if options.get("biomass_spatial", options["biomass_transport"]): - solid_biomass_potentials_spatial = biomass_potentials["solid biomass"].rename(index=lambda x: x + " solid biomass") + solid_biomass_potentials_spatial = biomass_potentials["solid biomass"].rename( + index=lambda x: x + " solid biomass" + ) else: solid_biomass_potentials_spatial = biomass_potentials["solid biomass"].sum() - n.add("Carrier", "biogas") n.add("Carrier", "solid biomass") - n.madd("Bus", + n.madd( + "Bus", spatial.gas.biogas, location=spatial.gas.locations, carrier="biogas", - unit="MWh_LHV" + unit="MWh_LHV", ) - n.madd("Bus", + n.madd( + "Bus", spatial.biomass.nodes, location=spatial.biomass.locations, carrier="solid biomass", - unit="MWh_LHV" + unit="MWh_LHV", ) - n.madd("Store", + n.madd( + "Store", 
spatial.gas.biogas, bus=spatial.gas.biogas, carrier="biogas", e_nom=biogas_potentials_spatial, - marginal_cost=costs.at['biogas', 'fuel'], - e_initial=biogas_potentials_spatial + marginal_cost=costs.at["biogas", "fuel"], + e_initial=biogas_potentials_spatial, ) - n.madd("Store", + n.madd( + "Store", spatial.biomass.nodes, bus=spatial.biomass.nodes, carrier="solid biomass", e_nom=solid_biomass_potentials_spatial, - marginal_cost=costs.at['solid biomass', 'fuel'], - e_initial=solid_biomass_potentials_spatial + marginal_cost=costs.at["solid biomass", "fuel"], + e_initial=solid_biomass_potentials_spatial, ) - n.madd("Link", + n.madd( + "Link", spatial.gas.biogas_to_gas, bus0=spatial.gas.biogas, bus1=spatial.gas.nodes, @@ -1924,26 +2137,30 @@ def add_biomass(n, costs): carrier="biogas to gas", capital_cost=costs.loc["biogas upgrading", "fixed"], marginal_cost=costs.loc["biogas upgrading", "VOM"], - efficiency2=-costs.at['gas', 'CO2 intensity'], - p_nom_extendable=True + efficiency2=-costs.at["gas", "CO2 intensity"], + p_nom_extendable=True, ) if options["biomass_transport"]: - transport_costs = pd.read_csv( snakemake.input.biomass_transport_costs, index_col=0, ).squeeze() # add biomass transport - biomass_transport = create_network_topology(n, "biomass transport ", bidirectional=False) + biomass_transport = create_network_topology( + n, "biomass transport ", bidirectional=False + ) # costs bus0_costs = biomass_transport.bus0.apply(lambda x: transport_costs[x[:2]]) bus1_costs = biomass_transport.bus1.apply(lambda x: transport_costs[x[:2]]) - biomass_transport["costs"] = pd.concat([bus0_costs, bus1_costs], axis=1).mean(axis=1) + biomass_transport["costs"] = pd.concat([bus0_costs, bus1_costs], axis=1).mean( + axis=1 + ) - n.madd("Link", + n.madd( + "Link", biomass_transport.index, bus0=biomass_transport.bus0 + " solid biomass", bus1=biomass_transport.bus1 + " solid biomass", @@ -1951,31 +2168,33 @@ def add_biomass(n, costs): p_nom=5e4, length=biomass_transport.length.values, marginal_cost=biomass_transport.costs * biomass_transport.length.values, - carrier="solid biomass transport" + carrier="solid biomass transport", ) - #AC buses with district heating + # AC buses with district heating urban_central = n.buses.index[n.buses.carrier == "urban central heat"] if not urban_central.empty and options["chp"]: - urban_central = urban_central.str[:-len(" urban central heat")] + urban_central = urban_central.str[: -len(" urban central heat")] - key = 'central solid biomass CHP' + key = "central solid biomass CHP" - n.madd("Link", + n.madd( + "Link", urban_central + " urban central solid biomass CHP", bus0=spatial.biomass.df.loc[urban_central, "nodes"].values, bus1=urban_central, bus2=urban_central + " urban central heat", carrier="urban central solid biomass CHP", p_nom_extendable=True, - capital_cost=costs.at[key, 'fixed'] * costs.at[key, 'efficiency'], - marginal_cost=costs.at[key, 'VOM'], - efficiency=costs.at[key, 'efficiency'], - efficiency2=costs.at[key, 'efficiency-heat'], - lifetime=costs.at[key, 'lifetime'] + capital_cost=costs.at[key, "fixed"] * costs.at[key, "efficiency"], + marginal_cost=costs.at[key, "VOM"], + efficiency=costs.at[key, "efficiency"], + efficiency2=costs.at[key, "efficiency-heat"], + lifetime=costs.at[key, "lifetime"], ) - n.madd("Link", + n.madd( + "Link", urban_central + " urban central solid biomass CHP CC", bus0=spatial.biomass.df.loc[urban_central, "nodes"].values, bus1=urban_central, @@ -1984,86 +2203,114 @@ def add_biomass(n, costs): bus4=spatial.co2.df.loc[urban_central, 
"nodes"].values, carrier="urban central solid biomass CHP CC", p_nom_extendable=True, - capital_cost=costs.at[key, 'fixed'] * costs.at[key, 'efficiency'] + costs.at['biomass CHP capture', 'fixed'] * costs.at['solid biomass', 'CO2 intensity'], - marginal_cost=costs.at[key, 'VOM'], - efficiency=costs.at[key, 'efficiency'] - costs.at['solid biomass', 'CO2 intensity'] * (costs.at['biomass CHP capture', 'electricity-input'] + costs.at['biomass CHP capture', 'compression-electricity-input']), - efficiency2=costs.at[key, 'efficiency-heat'] + costs.at['solid biomass', 'CO2 intensity'] * (costs.at['biomass CHP capture', 'heat-output'] + costs.at['biomass CHP capture', 'compression-heat-output'] - costs.at['biomass CHP capture', 'heat-input']), - efficiency3=-costs.at['solid biomass', 'CO2 intensity'] * costs.at['biomass CHP capture', 'capture_rate'], - efficiency4=costs.at['solid biomass', 'CO2 intensity'] * costs.at['biomass CHP capture', 'capture_rate'], - lifetime=costs.at[key, 'lifetime'] + capital_cost=costs.at[key, "fixed"] * costs.at[key, "efficiency"] + + costs.at["biomass CHP capture", "fixed"] + * costs.at["solid biomass", "CO2 intensity"], + marginal_cost=costs.at[key, "VOM"], + efficiency=costs.at[key, "efficiency"] + - costs.at["solid biomass", "CO2 intensity"] + * ( + costs.at["biomass CHP capture", "electricity-input"] + + costs.at["biomass CHP capture", "compression-electricity-input"] + ), + efficiency2=costs.at[key, "efficiency-heat"] + + costs.at["solid biomass", "CO2 intensity"] + * ( + costs.at["biomass CHP capture", "heat-output"] + + costs.at["biomass CHP capture", "compression-heat-output"] + - costs.at["biomass CHP capture", "heat-input"] + ), + efficiency3=-costs.at["solid biomass", "CO2 intensity"] + * costs.at["biomass CHP capture", "capture_rate"], + efficiency4=costs.at["solid biomass", "CO2 intensity"] + * costs.at["biomass CHP capture", "capture_rate"], + lifetime=costs.at[key, "lifetime"], ) if options["biomass_boiler"]: - #TODO: Add surcharge for pellets + # TODO: Add surcharge for pellets nodes_heat = create_nodes_for_heat_sector()[0] - for name in ["residential rural", "services rural", - "residential urban decentral", "services urban decentral"]: - - n.madd("Link", + for name in [ + "residential rural", + "services rural", + "residential urban decentral", + "services urban decentral", + ]: + n.madd( + "Link", nodes_heat[name] + f" {name} biomass boiler", p_nom_extendable=True, bus0=spatial.biomass.df.loc[nodes_heat[name], "nodes"].values, bus1=nodes_heat[name] + f" {name} heat", carrier=name + " biomass boiler", - efficiency=costs.at['biomass boiler', 'efficiency'], - capital_cost=costs.at['biomass boiler', 'efficiency'] * costs.at['biomass boiler', 'fixed'], - lifetime=costs.at['biomass boiler', 'lifetime'] + efficiency=costs.at["biomass boiler", "efficiency"], + capital_cost=costs.at["biomass boiler", "efficiency"] + * costs.at["biomass boiler", "fixed"], + lifetime=costs.at["biomass boiler", "lifetime"], ) - #Solid biomass to liquid fuel + # Solid biomass to liquid fuel if options["biomass_to_liquid"]: - n.madd("Link", - spatial.biomass.nodes, - suffix=" biomass to liquid", - bus0=spatial.biomass.nodes, - bus1=spatial.oil.nodes, - bus2="co2 atmosphere", - carrier="biomass to liquid", - lifetime=costs.at['BtL', 'lifetime'], - efficiency=costs.at['BtL', 'efficiency'], - efficiency2=-costs.at['solid biomass', 'CO2 intensity'] + costs.at['BtL', 'CO2 stored'], - p_nom_extendable=True, - capital_cost=costs.at['BtL', 'fixed'], - marginal_cost=costs.at['BtL', 
'efficiency']*costs.loc["BtL", "VOM"] + n.madd( + "Link", + spatial.biomass.nodes, + suffix=" biomass to liquid", + bus0=spatial.biomass.nodes, + bus1=spatial.oil.nodes, + bus2="co2 atmosphere", + carrier="biomass to liquid", + lifetime=costs.at["BtL", "lifetime"], + efficiency=costs.at["BtL", "efficiency"], + efficiency2=-costs.at["solid biomass", "CO2 intensity"] + + costs.at["BtL", "CO2 stored"], + p_nom_extendable=True, + capital_cost=costs.at["BtL", "fixed"], + marginal_cost=costs.at["BtL", "efficiency"] * costs.loc["BtL", "VOM"], ) - #TODO: Update with energy penalty - n.madd("Link", - spatial.biomass.nodes, - suffix=" biomass to liquid CC", - bus0=spatial.biomass.nodes, - bus1=spatial.oil.nodes, - bus2="co2 atmosphere", - bus3=spatial.co2.nodes, - carrier="biomass to liquid", - lifetime=costs.at['BtL', 'lifetime'], - efficiency=costs.at['BtL', 'efficiency'], - efficiency2=-costs.at['solid biomass', 'CO2 intensity'] + costs.at['BtL', 'CO2 stored'] * (1 - costs.at['BtL', 'capture rate']), - efficiency3=costs.at['BtL', 'CO2 stored'] * costs.at['BtL', 'capture rate'], - p_nom_extendable=True, - capital_cost=costs.at['BtL', 'fixed'] + costs.at['biomass CHP capture', 'fixed'] * costs.at[ - "BtL", "CO2 stored"], - marginal_cost=costs.at['BtL', 'efficiency'] * costs.loc["BtL", "VOM"]) + # TODO: Update with energy penalty + n.madd( + "Link", + spatial.biomass.nodes, + suffix=" biomass to liquid CC", + bus0=spatial.biomass.nodes, + bus1=spatial.oil.nodes, + bus2="co2 atmosphere", + bus3=spatial.co2.nodes, + carrier="biomass to liquid", + lifetime=costs.at["BtL", "lifetime"], + efficiency=costs.at["BtL", "efficiency"], + efficiency2=-costs.at["solid biomass", "CO2 intensity"] + + costs.at["BtL", "CO2 stored"] * (1 - costs.at["BtL", "capture rate"]), + efficiency3=costs.at["BtL", "CO2 stored"] * costs.at["BtL", "capture rate"], + p_nom_extendable=True, + capital_cost=costs.at["BtL", "fixed"] + + costs.at["biomass CHP capture", "fixed"] * costs.at["BtL", "CO2 stored"], + marginal_cost=costs.at["BtL", "efficiency"] * costs.loc["BtL", "VOM"], + ) - #BioSNG from solid biomass + # BioSNG from solid biomass if options["biosng"]: - n.madd("Link", + n.madd( + "Link", spatial.biomass.nodes, suffix=" solid biomass to gas", bus0=spatial.biomass.nodes, bus1=spatial.gas.nodes, bus3="co2 atmosphere", carrier="BioSNG", - lifetime=costs.at['BioSNG', 'lifetime'], - efficiency=costs.at['BioSNG', 'efficiency'], - efficiency3=-costs.at['solid biomass', 'CO2 intensity'] + costs.at['BioSNG', 'CO2 stored'], + lifetime=costs.at["BioSNG", "lifetime"], + efficiency=costs.at["BioSNG", "efficiency"], + efficiency3=-costs.at["solid biomass", "CO2 intensity"] + + costs.at["BioSNG", "CO2 stored"], p_nom_extendable=True, - capital_cost=costs.at['BioSNG', 'fixed'], - marginal_cost=costs.at['BioSNG', 'efficiency']*costs.loc["BioSNG", "VOM"] + capital_cost=costs.at["BioSNG", "fixed"], + marginal_cost=costs.at["BioSNG", "efficiency"] * costs.loc["BioSNG", "VOM"], ) - #TODO: Update with energy penalty for CC - n.madd("Link", + # TODO: Update with energy penalty for CC + n.madd( + "Link", spatial.biomass.nodes, suffix=" solid biomass to gas CC", bus0=spatial.biomass.nodes, @@ -2071,56 +2318,69 @@ def add_biomass(n, costs): bus2=spatial.co2.nodes, bus3="co2 atmosphere", carrier="BioSNG", - lifetime=costs.at['BioSNG', 'lifetime'], - efficiency=costs.at['BioSNG', 'efficiency'], - efficiency2=costs.at['BioSNG', 'CO2 stored'] * costs.at['BioSNG', 'capture rate'], - efficiency3=-costs.at['solid biomass', 'CO2 intensity'] + 
costs.at['BioSNG', 'CO2 stored'] * (1 - costs.at['BioSNG', 'capture rate']), + lifetime=costs.at["BioSNG", "lifetime"], + efficiency=costs.at["BioSNG", "efficiency"], + efficiency2=costs.at["BioSNG", "CO2 stored"] + * costs.at["BioSNG", "capture rate"], + efficiency3=-costs.at["solid biomass", "CO2 intensity"] + + costs.at["BioSNG", "CO2 stored"] + * (1 - costs.at["BioSNG", "capture rate"]), p_nom_extendable=True, - capital_cost=costs.at['BioSNG', 'fixed'] + costs.at['biomass CHP capture', 'fixed'] * costs.at[ - "BioSNG", "CO2 stored"], - marginal_cost=costs.at['BioSNG', 'efficiency']*costs.loc["BioSNG", "VOM"] - + capital_cost=costs.at["BioSNG", "fixed"] + + costs.at["biomass CHP capture", "fixed"] + * costs.at["BioSNG", "CO2 stored"], + marginal_cost=costs.at["BioSNG", "efficiency"] * costs.loc["BioSNG", "VOM"], ) def add_industry(n, costs): - logger.info("Add industrial demand") nodes = pop_layout.index # 1e6 to convert TWh to MWh - industrial_demand = pd.read_csv(snakemake.input.industrial_demand, index_col=0) * 1e6 + industrial_demand = ( + pd.read_csv(snakemake.input.industrial_demand, index_col=0) * 1e6 + ) - n.madd("Bus", + n.madd( + "Bus", spatial.biomass.industry, location=spatial.biomass.locations, carrier="solid biomass for industry", - unit="MWh_LHV" + unit="MWh_LHV", ) if options.get("biomass_spatial", options["biomass_transport"]): - p_set = industrial_demand.loc[spatial.biomass.locations, "solid biomass"].rename(index=lambda x: x + " solid biomass for industry") / 8760 + p_set = ( + industrial_demand.loc[spatial.biomass.locations, "solid biomass"].rename( + index=lambda x: x + " solid biomass for industry" + ) + / 8760 + ) else: p_set = industrial_demand["solid biomass"].sum() / 8760 - n.madd("Load", + n.madd( + "Load", spatial.biomass.industry, bus=spatial.biomass.industry, carrier="solid biomass for industry", - p_set=p_set + p_set=p_set, ) - n.madd("Link", + n.madd( + "Link", spatial.biomass.industry, bus0=spatial.biomass.nodes, bus1=spatial.biomass.industry, carrier="solid biomass for industry", p_nom_extendable=True, - efficiency=1. + efficiency=1.0, ) - n.madd("Link", + n.madd( + "Link", spatial.biomass.industry_cc, bus0=spatial.biomass.nodes, bus1=spatial.biomass.industry, @@ -2128,45 +2388,53 @@ def add_industry(n, costs): bus3=spatial.co2.nodes, carrier="solid biomass for industry CC", p_nom_extendable=True, - capital_cost=costs.at["cement capture", "fixed"] * costs.at['solid biomass', 'CO2 intensity'], - efficiency=0.9, # TODO: make config option - efficiency2=-costs.at['solid biomass', 'CO2 intensity'] * costs.at["cement capture", "capture_rate"], - efficiency3=costs.at['solid biomass', 'CO2 intensity'] * costs.at["cement capture", "capture_rate"], - lifetime=costs.at['cement capture', 'lifetime'] + capital_cost=costs.at["cement capture", "fixed"] + * costs.at["solid biomass", "CO2 intensity"], + efficiency=0.9, # TODO: make config option + efficiency2=-costs.at["solid biomass", "CO2 intensity"] + * costs.at["cement capture", "capture_rate"], + efficiency3=costs.at["solid biomass", "CO2 intensity"] + * costs.at["cement capture", "capture_rate"], + lifetime=costs.at["cement capture", "lifetime"], ) - n.madd("Bus", + n.madd( + "Bus", spatial.gas.industry, location=spatial.gas.locations, carrier="gas for industry", - unit="MWh_LHV") + unit="MWh_LHV", + ) - gas_demand = industrial_demand.loc[nodes, "methane"] / 8760. 
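# The industry loads above and below convert annual energy statistics to a
# constant hourly power: TWh/a * 1e6 -> MWh/a, then / 8760 h -> MW. A
# minimal self-contained sketch of that conversion with a made-up number:
annual_demand_twh = 50.0                   # hypothetical annual demand [TWh/a]
p_set_mw = annual_demand_twh * 1e6 / 8760  # constant load [MW]
assert abs(p_set_mw * 8760 / 1e6 - annual_demand_twh) < 1e-9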
+ gas_demand = industrial_demand.loc[nodes, "methane"] / 8760.0 if options["gas_network"]: spatial_gas_demand = gas_demand.rename(index=lambda x: x + " gas for industry") else: spatial_gas_demand = gas_demand.sum() - n.madd("Load", + n.madd( + "Load", spatial.gas.industry, bus=spatial.gas.industry, carrier="gas for industry", - p_set=spatial_gas_demand + p_set=spatial_gas_demand, ) - n.madd("Link", + n.madd( + "Link", spatial.gas.industry, bus0=spatial.gas.nodes, bus1=spatial.gas.industry, bus2="co2 atmosphere", carrier="gas for industry", p_nom_extendable=True, - efficiency=1., - efficiency2=costs.at['gas', 'CO2 intensity'] + efficiency=1.0, + efficiency2=costs.at["gas", "CO2 intensity"], ) - n.madd("Link", + n.madd( + "Link", spatial.gas.industry_cc, bus0=spatial.gas.nodes, bus1=spatial.gas.industry, @@ -2174,86 +2442,104 @@ def add_industry(n, costs): bus3=spatial.co2.nodes, carrier="gas for industry CC", p_nom_extendable=True, - capital_cost=costs.at["cement capture", "fixed"] * costs.at['gas', 'CO2 intensity'], + capital_cost=costs.at["cement capture", "fixed"] + * costs.at["gas", "CO2 intensity"], efficiency=0.9, - efficiency2=costs.at['gas', 'CO2 intensity'] * (1 - costs.at["cement capture", "capture_rate"]), - efficiency3=costs.at['gas', 'CO2 intensity'] * costs.at["cement capture", "capture_rate"], - lifetime=costs.at['cement capture', 'lifetime'] + efficiency2=costs.at["gas", "CO2 intensity"] + * (1 - costs.at["cement capture", "capture_rate"]), + efficiency3=costs.at["gas", "CO2 intensity"] + * costs.at["cement capture", "capture_rate"], + lifetime=costs.at["cement capture", "lifetime"], ) - n.madd("Load", + n.madd( + "Load", nodes, suffix=" H2 for industry", bus=nodes + " H2", carrier="H2 for industry", - p_set=industrial_demand.loc[nodes, "hydrogen"] / 8760 + p_set=industrial_demand.loc[nodes, "hydrogen"] / 8760, ) - shipping_hydrogen_share = get(options['shipping_hydrogen_share'], investment_year) - shipping_methanol_share = get(options['shipping_methanol_share'], investment_year) - shipping_oil_share = get(options['shipping_oil_share'], investment_year) + shipping_hydrogen_share = get(options["shipping_hydrogen_share"], investment_year) + shipping_methanol_share = get(options["shipping_methanol_share"], investment_year) + shipping_oil_share = get(options["shipping_oil_share"], investment_year) total_share = shipping_hydrogen_share + shipping_methanol_share + shipping_oil_share if total_share != 1: - logger.warning(f"Total shipping shares sum up to {total_share:.2%}, corresponding to increased or decreased demand assumptions.") + logger.warning( + f"Total shipping shares sum up to {total_share:.2%}, corresponding to increased or decreased demand assumptions." 
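# A sketch of the share bookkeeping checked above: the per-fuel shipping
# shares are looked up for the investment year and should sum to one; any
# other total implicitly rescales shipping demand. Illustrative values
# only, not the config defaults:
shares = {"hydrogen": 0.2, "methanol": 0.3, "oil": 0.5}
total_share = sum(shares.values())
if total_share != 1:
    print(f"Total shipping shares sum up to {total_share:.2%}")
p_set_total = 1000.0  # hypothetical total shipping demand [MW]
p_by_fuel = {fuel: share * p_set_total for fuel, share in shares.items()}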
+ ) - domestic_navigation = pop_weighted_energy_totals.loc[nodes, "total domestic navigation"].squeeze() - international_navigation = pd.read_csv(snakemake.input.shipping_demand, index_col=0).squeeze() + domestic_navigation = pop_weighted_energy_totals.loc[ + nodes, "total domestic navigation" + ].squeeze() + international_navigation = pd.read_csv( + snakemake.input.shipping_demand, index_col=0 + ).squeeze() all_navigation = domestic_navigation + international_navigation p_set = all_navigation * 1e6 / 8760 if shipping_hydrogen_share: - - oil_efficiency = options.get('shipping_oil_efficiency', options.get('shipping_average_efficiency', 0.4)) + oil_efficiency = options.get( + "shipping_oil_efficiency", options.get("shipping_average_efficiency", 0.4) + ) efficiency = oil_efficiency / costs.at["fuel cell", "efficiency"] - shipping_hydrogen_share = get(options['shipping_hydrogen_share'], investment_year) + shipping_hydrogen_share = get( + options["shipping_hydrogen_share"], investment_year + ) if options["shipping_hydrogen_liquefaction"]: - - n.madd("Bus", + n.madd( + "Bus", nodes, suffix=" H2 liquid", carrier="H2 liquid", location=nodes, - unit="MWh_LHV" + unit="MWh_LHV", ) - n.madd("Link", + n.madd( + "Link", nodes + " H2 liquefaction", bus0=nodes + " H2", bus1=nodes + " H2 liquid", carrier="H2 liquefaction", - efficiency=costs.at["H2 liquefaction", 'efficiency'], - capital_cost=costs.at["H2 liquefaction", 'fixed'], + efficiency=costs.at["H2 liquefaction", "efficiency"], + capital_cost=costs.at["H2 liquefaction", "fixed"], p_nom_extendable=True, - lifetime=costs.at['H2 liquefaction', 'lifetime'] + lifetime=costs.at["H2 liquefaction", "lifetime"], ) shipping_bus = nodes + " H2 liquid" else: shipping_bus = nodes + " H2" - efficiency = options['shipping_oil_efficiency'] / costs.at["fuel cell", "efficiency"] + efficiency = ( + options["shipping_oil_efficiency"] / costs.at["fuel cell", "efficiency"] + ) p_set_hydrogen = shipping_hydrogen_share * p_set * efficiency - n.madd("Load", + n.madd( + "Load", nodes, suffix=" H2 for shipping", bus=shipping_bus, carrier="H2 for shipping", - p_set=p_set_hydrogen + p_set=p_set_hydrogen, ) if shipping_methanol_share: - - n.madd("Bus", + n.madd( + "Bus", spatial.methanol.nodes, carrier="methanol", location=spatial.methanol.locations, - unit="MWh_LHV" + unit="MWh_LHV", ) - n.madd("Store", + n.madd( + "Store", spatial.methanol.nodes, suffix=" Store", bus=spatial.methanol.nodes, @@ -2262,7 +2548,8 @@ def add_industry(n, costs): carrier="methanol", ) - n.madd("Link", + n.madd( + "Link", spatial.h2.locations + " methanolisation", bus0=spatial.h2.nodes, bus1=spatial.methanol.nodes, @@ -2271,17 +2558,21 @@ def add_industry(n, costs): carrier="methanolisation", p_nom_extendable=True, p_min_pu=options.get("min_part_load_methanolisation", 0), - capital_cost=costs.at["methanolisation", 'fixed'] * options["MWh_MeOH_per_MWh_H2"], # EUR/MW_H2/a - lifetime=costs.at["methanolisation", 'lifetime'], + capital_cost=costs.at["methanolisation", "fixed"] + * options["MWh_MeOH_per_MWh_H2"], # EUR/MW_H2/a + lifetime=costs.at["methanolisation", "lifetime"], efficiency=options["MWh_MeOH_per_MWh_H2"], - efficiency2=- options["MWh_MeOH_per_MWh_H2"] / options["MWh_MeOH_per_MWh_e"], - efficiency3=- options["MWh_MeOH_per_MWh_H2"] / options["MWh_MeOH_per_tCO2"], + efficiency2=-options["MWh_MeOH_per_MWh_H2"] / options["MWh_MeOH_per_MWh_e"], + efficiency3=-options["MWh_MeOH_per_MWh_H2"] / options["MWh_MeOH_per_tCO2"], ) - efficiency = options["shipping_oil_efficiency"] / 
options["shipping_methanol_efficiency"] + efficiency = ( + options["shipping_oil_efficiency"] / options["shipping_methanol_efficiency"] + ) p_set_methanol = shipping_methanol_share * p_set.sum() * efficiency - n.madd("Load", + n.madd( + "Load", spatial.methanol.nodes, suffix=" shipping methanol", bus=spatial.methanol.nodes, @@ -2292,7 +2583,8 @@ def add_industry(n, costs): # CO2 intensity methanol based on stoichiometric calculation with 22.7 GJ/t methanol (32 g/mol), CO2 (44 g/mol), 277.78 MWh/TJ = 0.218 t/MWh co2 = p_set_methanol / options["MWh_MeOH_per_tCO2"] - n.add("Load", + n.add( + "Load", "shipping methanol emissions", bus="co2 atmosphere", carrier="shipping methanol emissions", @@ -2300,38 +2592,40 @@ def add_industry(n, costs): ) if shipping_oil_share: - p_set_oil = shipping_oil_share * p_set.sum() - n.madd("Load", + n.madd( + "Load", spatial.oil.nodes, suffix=" shipping oil", bus=spatial.oil.nodes, carrier="shipping oil", - p_set=p_set_oil + p_set=p_set_oil, ) co2 = p_set_oil * costs.at["oil", "CO2 intensity"] - n.add("Load", + n.add( + "Load", "shipping oil emissions", bus="co2 atmosphere", carrier="shipping oil emissions", - p_set=-co2 + p_set=-co2, ) if "oil" not in n.buses.carrier.unique(): - n.madd("Bus", + n.madd( + "Bus", spatial.oil.nodes, location=spatial.oil.locations, carrier="oil", - unit="MWh_LHV" + unit="MWh_LHV", ) if "oil" not in n.stores.carrier.unique(): - - #could correct to e.g. 0.001 EUR/kWh * annuity and O&M - n.madd("Store", + # could correct to e.g. 0.001 EUR/kWh * annuity and O&M + n.madd( + "Store", [oil_bus + " Store" for oil_bus in spatial.oil.nodes], bus=spatial.oil.nodes, e_nom_extendable=True, @@ -2340,46 +2634,54 @@ def add_industry(n, costs): ) if "oil" not in n.generators.carrier.unique(): - - n.madd("Generator", + n.madd( + "Generator", spatial.oil.nodes, bus=spatial.oil.nodes, p_nom_extendable=True, carrier="oil", - marginal_cost=costs.at["oil", 'fuel'] + marginal_cost=costs.at["oil", "fuel"], ) if options["oil_boilers"]: - nodes_heat = create_nodes_for_heat_sector()[0] - for name in ["residential rural", "services rural", "residential urban decentral", "services urban decentral"]: - - n.madd("Link", + for name in [ + "residential rural", + "services rural", + "residential urban decentral", + "services urban decentral", + ]: + n.madd( + "Link", nodes_heat[name] + f" {name} oil boiler", p_nom_extendable=True, bus0=spatial.oil.nodes, bus1=nodes_heat[name] + f" {name} heat", bus2="co2 atmosphere", carrier=f"{name} oil boiler", - efficiency=costs.at['decentral oil boiler', 'efficiency'], - efficiency2=costs.at['oil', 'CO2 intensity'], - capital_cost=costs.at['decentral oil boiler', 'efficiency'] * costs.at['decentral oil boiler', 'fixed'], - lifetime=costs.at['decentral oil boiler', 'lifetime'] + efficiency=costs.at["decentral oil boiler", "efficiency"], + efficiency2=costs.at["oil", "CO2 intensity"], + capital_cost=costs.at["decentral oil boiler", "efficiency"] + * costs.at["decentral oil boiler", "fixed"], + lifetime=costs.at["decentral oil boiler", "lifetime"], ) - n.madd("Link", + n.madd( + "Link", nodes + " Fischer-Tropsch", bus0=nodes + " H2", bus1=spatial.oil.nodes, bus2=spatial.co2.nodes, carrier="Fischer-Tropsch", - efficiency=costs.at["Fischer-Tropsch", 'efficiency'], - capital_cost=costs.at["Fischer-Tropsch", 'fixed'] * costs.at["Fischer-Tropsch", 'efficiency'], # EUR/MW_H2/a - efficiency2=-costs.at["oil", 'CO2 intensity'] * costs.at["Fischer-Tropsch", 'efficiency'], + efficiency=costs.at["Fischer-Tropsch", "efficiency"], + 
capital_cost=costs.at["Fischer-Tropsch", "fixed"] + * costs.at["Fischer-Tropsch", "efficiency"], # EUR/MW_H2/a + efficiency2=-costs.at["oil", "CO2 intensity"] + * costs.at["Fischer-Tropsch", "efficiency"], p_nom_extendable=True, p_min_pu=options.get("min_part_load_fischer_tropsch", 0), - lifetime=costs.at['Fischer-Tropsch', 'lifetime'] + lifetime=costs.at["Fischer-Tropsch", "lifetime"], ) demand_factor = options.get("HVC_demand_factor", 1) @@ -2387,97 +2689,131 @@ def add_industry(n, costs): if demand_factor != 1: logger.warning(f"Changing HVC demand by {demand_factor*100-100:+.2f}%.") - n.madd("Load", + n.madd( + "Load", ["naphtha for industry"], bus=spatial.oil.nodes, carrier="naphtha for industry", - p_set=p_set + p_set=p_set, ) demand_factor = options.get("aviation_demand_factor", 1) all_aviation = ["total international aviation", "total domestic aviation"] - p_set = demand_factor * pop_weighted_energy_totals.loc[nodes, all_aviation].sum(axis=1).sum() * 1e6 / 8760 + p_set = ( + demand_factor + * pop_weighted_energy_totals.loc[nodes, all_aviation].sum(axis=1).sum() + * 1e6 + / 8760 + ) if demand_factor != 1: logger.warning(f"Changing aviation demand by {demand_factor*100-100:+.2f}%.") - n.madd("Load", + n.madd( + "Load", ["kerosene for aviation"], bus=spatial.oil.nodes, carrier="kerosene for aviation", - p_set=p_set + p_set=p_set, ) - #NB: CO2 gets released again to atmosphere when plastics decay or kerosene is burned - #except for the process emissions when naphtha is used for petrochemicals, which can be captured with other industry process emissions - #tco2 per hour + # NB: CO2 gets released again to atmosphere when plastics decay or kerosene is burned + # except for the process emissions when naphtha is used for petrochemicals, which can be captured with other industry process emissions + # tco2 per hour co2_release = ["naphtha for industry", "kerosene for aviation"] - co2 = n.loads.loc[co2_release, "p_set"].sum() * costs.at["oil", 'CO2 intensity'] - industrial_demand.loc[nodes, "process emission from feedstock"].sum() / 8760 + co2 = ( + n.loads.loc[co2_release, "p_set"].sum() * costs.at["oil", "CO2 intensity"] + - industrial_demand.loc[nodes, "process emission from feedstock"].sum() / 8760 + ) - n.add("Load", + n.add( + "Load", "oil emissions", bus="co2 atmosphere", carrier="oil emissions", - p_set=-co2 + p_set=-co2, ) # TODO simplify bus expression - n.madd("Load", + n.madd( + "Load", nodes, suffix=" low-temperature heat for industry", - bus=[node + " urban central heat" if node + " urban central heat" in n.buses.index else node + " services urban decentral heat" for node in nodes], + bus=[ + node + " urban central heat" + if node + " urban central heat" in n.buses.index + else node + " services urban decentral heat" + for node in nodes + ], carrier="low-temperature heat for industry", - p_set=industrial_demand.loc[nodes, "low-temperature heat"] / 8760 + p_set=industrial_demand.loc[nodes, "low-temperature heat"] / 8760, ) # remove today's industrial electricity demand by scaling down total electricity demand for ct in n.buses.country.dropna().unique(): # TODO map onto n.bus.country - loads_i = n.loads.index[(n.loads.index.str[:2] == ct) & (n.loads.carrier == "electricity")] - if n.loads_t.p_set[loads_i].empty: continue - factor = 1 - industrial_demand.loc[loads_i, "current electricity"].sum() / n.loads_t.p_set[loads_i].sum().sum() + loads_i = n.loads.index[ + (n.loads.index.str[:2] == ct) & (n.loads.carrier == "electricity") + ] + if n.loads_t.p_set[loads_i].empty: + continue + 
factor = ( + 1 + - industrial_demand.loc[loads_i, "current electricity"].sum() + / n.loads_t.p_set[loads_i].sum().sum() + ) n.loads_t.p_set[loads_i] *= factor - n.madd("Load", + n.madd( + "Load", nodes, suffix=" industry electricity", bus=nodes, carrier="industry electricity", - p_set=industrial_demand.loc[nodes, "electricity"] / 8760 + p_set=industrial_demand.loc[nodes, "electricity"] / 8760, ) - n.madd("Bus", + n.madd( + "Bus", spatial.co2.process_emissions, location=spatial.co2.locations, carrier="process emissions", - unit="t_co2" + unit="t_co2", ) sel = ["process emission", "process emission from feedstock"] if options["co2_spatial"] or options["co2network"]: - p_set = -industrial_demand.loc[nodes, sel].sum(axis=1).rename(index=lambda x: x + " process emissions") / 8760 + p_set = ( + -industrial_demand.loc[nodes, sel] + .sum(axis=1) + .rename(index=lambda x: x + " process emissions") + / 8760 + ) else: p_set = -industrial_demand.loc[nodes, sel].sum(axis=1).sum() / 8760 # this should be process emissions fossil+feedstock # then need load on atmosphere for feedstock emissions that are currently going to atmosphere via Link Fischer-Tropsch demand - n.madd("Load", + n.madd( + "Load", spatial.co2.process_emissions, bus=spatial.co2.process_emissions, carrier="process emissions", p_set=p_set, ) - n.madd("Link", + n.madd( + "Link", spatial.co2.process_emissions, bus0=spatial.co2.process_emissions, bus1="co2 atmosphere", carrier="process emissions", p_nom_extendable=True, - efficiency=1. + efficiency=1.0, ) - #assume enough local waste heat for CC - n.madd("Link", + # assume enough local waste heat for CC + n.madd( + "Link", spatial.co2.locations, suffix=" process emissions CC", bus0=spatial.co2.process_emissions, @@ -2488,21 +2824,26 @@ def add_industry(n, costs): capital_cost=costs.at["cement capture", "fixed"], efficiency=1 - costs.at["cement capture", "capture_rate"], efficiency2=costs.at["cement capture", "capture_rate"], - lifetime=costs.at['cement capture', 'lifetime'] + lifetime=costs.at["cement capture", "lifetime"], ) if options.get("ammonia"): - - if options["ammonia"] == 'regional': - p_set = industrial_demand.loc[spatial.ammonia.locations, "ammonia"].rename(index=lambda x: x + " NH3") / 8760 + if options["ammonia"] == "regional": + p_set = ( + industrial_demand.loc[spatial.ammonia.locations, "ammonia"].rename( + index=lambda x: x + " NH3" + ) + / 8760 + ) else: p_set = industrial_demand["ammonia"].sum() / 8760 - n.madd("Load", + n.madd( + "Load", spatial.ammonia.nodes, bus=spatial.ammonia.nodes, carrier="NH3", - p_set=p_set + p_set=p_set, ) @@ -2511,146 +2852,194 @@ def add_waste_heat(n): logger.info("Add possibility to use industrial waste heat in district heating") - #AC buses with district heating + # AC buses with district heating urban_central = n.buses.index[n.buses.carrier == "urban central heat"] if not urban_central.empty: - urban_central = urban_central.str[:-len(" urban central heat")] + urban_central = urban_central.str[: -len(" urban central heat")] # TODO what is the 0.95 and should it be a config option? 
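# The waste-heat couplings below reuse a spare output bus of each link:
# the recoverable heat per unit of fuel input is modelled as a fixed cap
# minus the link's main efficiency (0.95 for Fischer-Tropsch and fuel
# cells, 0.84 for electrolysis), so product output plus recovered heat
# never exceeds that cap. A quick sanity check with an assumed efficiency:
main_efficiency = 0.7  # assumed product output per unit of fuel input
waste_heat_efficiency = 0.95 - main_efficiency
assert 0 <= waste_heat_efficiency
assert main_efficiency + waste_heat_efficiency <= 0.95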
- if options['use_fischer_tropsch_waste_heat']: - n.links.loc[urban_central + " Fischer-Tropsch", "bus3"] = urban_central + " urban central heat" - n.links.loc[urban_central + " Fischer-Tropsch", "efficiency3"] = 0.95 - n.links.loc[urban_central + " Fischer-Tropsch", "efficiency"] + if options["use_fischer_tropsch_waste_heat"]: + n.links.loc[urban_central + " Fischer-Tropsch", "bus3"] = ( + urban_central + " urban central heat" + ) + n.links.loc[urban_central + " Fischer-Tropsch", "efficiency3"] = ( + 0.95 - n.links.loc[urban_central + " Fischer-Tropsch", "efficiency"] + ) # TODO integrate usable waste heat efficiency into technology-data from DEA - if options.get('use_electrolysis_waste_heat', False): - n.links.loc[urban_central + " H2 Electrolysis", "bus2"] = urban_central + " urban central heat" - n.links.loc[urban_central + " H2 Electrolysis", "efficiency2"] = 0.84 - n.links.loc[urban_central + " H2 Electrolysis", "efficiency"] + if options.get("use_electrolysis_waste_heat", False): + n.links.loc[urban_central + " H2 Electrolysis", "bus2"] = ( + urban_central + " urban central heat" + ) + n.links.loc[urban_central + " H2 Electrolysis", "efficiency2"] = ( + 0.84 - n.links.loc[urban_central + " H2 Electrolysis", "efficiency"] + ) - if options['use_fuel_cell_waste_heat']: - n.links.loc[urban_central + " H2 Fuel Cell", "bus2"] = urban_central + " urban central heat" - n.links.loc[urban_central + " H2 Fuel Cell", "efficiency2"] = 0.95 - n.links.loc[urban_central + " H2 Fuel Cell", "efficiency"] + if options["use_fuel_cell_waste_heat"]: + n.links.loc[urban_central + " H2 Fuel Cell", "bus2"] = ( + urban_central + " urban central heat" + ) + n.links.loc[urban_central + " H2 Fuel Cell", "efficiency2"] = ( + 0.95 - n.links.loc[urban_central + " H2 Fuel Cell", "efficiency"] + ) def add_agriculture(n, costs): - - logger.info('Add agriculture, forestry and fishing sector.') + logger.info("Add agriculture, forestry and fishing sector.") nodes = pop_layout.index # electricity - n.madd("Load", + n.madd( + "Load", nodes, suffix=" agriculture electricity", bus=nodes, - carrier='agriculture electricity', - p_set=pop_weighted_energy_totals.loc[nodes, "total agriculture electricity"] * 1e6 / 8760 + carrier="agriculture electricity", + p_set=pop_weighted_energy_totals.loc[nodes, "total agriculture electricity"] + * 1e6 + / 8760, ) # heat - n.madd("Load", + n.madd( + "Load", nodes, suffix=" agriculture heat", bus=nodes + " services rural heat", carrier="agriculture heat", - p_set=pop_weighted_energy_totals.loc[nodes, "total agriculture heat"] * 1e6 / 8760 + p_set=pop_weighted_energy_totals.loc[nodes, "total agriculture heat"] + * 1e6 + / 8760, ) # machinery - electric_share = get(options["agriculture_machinery_electric_share"], investment_year) + electric_share = get( + options["agriculture_machinery_electric_share"], investment_year + ) oil_share = get(options["agriculture_machinery_oil_share"], investment_year) total_share = electric_share + oil_share if total_share != 1: - logger.warning(f"Total agriculture machinery shares sum up to {total_share:.2%}, corresponding to increased or decreased demand assumptions.") + logger.warning( + f"Total agriculture machinery shares sum up to {total_share:.2%}, corresponding to increased or decreased demand assumptions." 
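# The conversion applied in the machinery block below: if the config
# "efficiency" entries are read as final energy per unit of machinery
# work, dividing the oil-based statistic by their ratio yields the
# (smaller) electricity demand. Illustrative figures, not the config
# defaults:
fuel_per_use = 0.7          # assumed oil input per unit of work
electric_per_use = 0.3      # assumed electricity input per unit of work
efficiency_gain = fuel_per_use / electric_per_use
electric_share = 0.4        # assumed electrified share
machinery_energy_twh = 3.0  # hypothetical annual statistic [TWh/a]
p_set_mw = electric_share / efficiency_gain * machinery_energy_twh * 1e6 / 8760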
+ ) - machinery_nodal_energy = pop_weighted_energy_totals.loc[nodes, "total agriculture machinery"] + machinery_nodal_energy = pop_weighted_energy_totals.loc[ + nodes, "total agriculture machinery" + ] if electric_share > 0: + efficiency_gain = ( + options["agriculture_machinery_fuel_efficiency"] + / options["agriculture_machinery_electric_efficiency"] + ) - efficiency_gain = options["agriculture_machinery_fuel_efficiency"] / options["agriculture_machinery_electric_efficiency"] - - n.madd("Load", + n.madd( + "Load", nodes, suffix=" agriculture machinery electric", bus=nodes, carrier="agriculture machinery electric", - p_set=electric_share / efficiency_gain * machinery_nodal_energy * 1e6 / 8760, + p_set=electric_share + / efficiency_gain + * machinery_nodal_energy + * 1e6 + / 8760, ) if oil_share > 0: - - n.madd("Load", + n.madd( + "Load", ["agriculture machinery oil"], bus=spatial.oil.nodes, carrier="agriculture machinery oil", - p_set=oil_share * machinery_nodal_energy.sum() * 1e6 / 8760 + p_set=oil_share * machinery_nodal_energy.sum() * 1e6 / 8760, ) - co2 = oil_share * machinery_nodal_energy.sum() * 1e6 / 8760 * costs.at["oil", 'CO2 intensity'] + co2 = ( + oil_share + * machinery_nodal_energy.sum() + * 1e6 + / 8760 + * costs.at["oil", "CO2 intensity"] + ) - n.add("Load", + n.add( + "Load", "agriculture machinery oil emissions", bus="co2 atmosphere", carrier="agriculture machinery oil emissions", - p_set=-co2 + p_set=-co2, ) def decentral(n): - """Removes the electricity transmission system.""" + """ + Removes the electricity transmission system. + """ n.lines.drop(n.lines.index, inplace=True) n.links.drop(n.links.index[n.links.carrier.isin(["DC", "B2B"])], inplace=True) def remove_h2_network(n): - - n.links.drop(n.links.index[n.links.carrier.str.contains("H2 pipeline")], inplace=True) + n.links.drop( + n.links.index[n.links.carrier.str.contains("H2 pipeline")], inplace=True + ) if "EU H2 Store" in n.stores.index: n.stores.drop("EU H2 Store", inplace=True) def maybe_adjust_costs_and_potentials(n, opts): - for o in opts: - if "+" not in o: continue + if "+" not in o: + continue oo = o.split("+") - carrier_list = np.hstack((n.generators.carrier.unique(), n.links.carrier.unique(), - n.stores.carrier.unique(), n.storage_units.carrier.unique())) + carrier_list = np.hstack( + ( + n.generators.carrier.unique(), + n.links.carrier.unique(), + n.stores.carrier.unique(), + n.storage_units.carrier.unique(), + ) + ) suptechs = map(lambda c: c.split("-", 2)[0], carrier_list) if oo[0].startswith(tuple(suptechs)): carrier = oo[0] attr_lookup = {"p": "p_nom_max", "e": "e_nom_max", "c": "capital_cost"} attr = attr_lookup[oo[1][0]] factor = float(oo[1][1:]) - #beware if factor is 0 and p_nom_max is np.inf, 0*np.inf is nan + # beware if factor is 0 and p_nom_max is np.inf, 0*np.inf is nan if carrier == "AC": # lines do not have carrier n.lines[attr] *= factor else: - if attr == 'p_nom_max': + if attr == "p_nom_max": comps = {"Generator", "Link", "StorageUnit"} - elif attr == 'e_nom_max': + elif attr == "e_nom_max": comps = {"Store"} else: comps = {"Generator", "Link", "StorageUnit", "Store"} for c in n.iterate_components(comps): - if carrier=='solar': - sel = c.df.carrier.str.contains(carrier) & ~c.df.carrier.str.contains("solar rooftop") + if carrier == "solar": + sel = c.df.carrier.str.contains( + carrier + ) & ~c.df.carrier.str.contains("solar rooftop") else: sel = c.df.carrier.str.contains(carrier) - c.df.loc[sel,attr] *= factor + c.df.loc[sel, attr] *= factor logger.info(f"changing {attr} for {carrier} 
by factor {factor}") # TODO this should rather be a config no wildcard def limit_individual_line_extension(n, maxext): logger.info(f"Limiting new HVAC and HVDC extensions to {maxext} MW") - n.lines['s_nom_max'] = n.lines['s_nom'] + maxext - hvdc = n.links.index[n.links.carrier == 'DC'] - n.links.loc[hvdc, 'p_nom_max'] = n.links.loc[hvdc, 'p_nom'] + maxext + n.lines["s_nom_max"] = n.lines["s_nom"] + maxext + hvdc = n.links.index[n.links.carrier == "DC"] + n.links.loc[hvdc, "p_nom_max"] = n.links.loc[hvdc, "p_nom"] + maxext aggregate_dict = { @@ -2663,9 +3052,9 @@ aggregate_dict = { "s_nom_max": "sum", "p_nom_min": "sum", "s_nom_min": "sum", - 'v_ang_min': "max", - "v_ang_max":"min", - "terrain_factor":"mean", + "v_ang_min": "max", + "v_ang_max": "min", + "terrain_factor": "mean", "num_parallel": "sum", "p_set": "sum", "e_initial": "sum", @@ -2677,11 +3066,14 @@ aggregate_dict = { "inflow": "sum", "p_max_pu": "first", "x": "mean", - "y": "mean" + "y": "mean", } + def cluster_heat_buses(n): - """Cluster residential and service heat buses to one representative bus. + """ + Cluster residential and service heat buses to one representative bus. + This can be done to save memory and speed up optimisation """ @@ -2711,15 +3103,16 @@ def cluster_heat_buses(n): for c in n.iterate_components(components): df = c.df - cols = df.columns[df.columns.str.contains("bus") | (df.columns=="carrier")] + cols = df.columns[df.columns.str.contains("bus") | (df.columns == "carrier")] # rename columns and index - df[cols] = (df[cols] - .apply(lambda x: x.str.replace("residential ","") - .str.replace("services ", ""), axis=1)) - df = df.rename(index=lambda x: x.replace("residential ","") - .replace("services ", "")) - + df[cols] = df[cols].apply( + lambda x: x.str.replace("residential ", "").str.replace("services ", ""), + axis=1, + ) + df = df.rename( + index=lambda x: x.replace("residential ", "").replace("services ", "") + ) # cluster heat nodes # static dataframe @@ -2729,13 +3122,13 @@ def cluster_heat_buses(n): pnl = c.pnl agg = define_clustering(pd.Index(pnl.keys()), aggregate_dict) for k in pnl.keys(): - pnl[k].rename(columns=lambda x: x.replace("residential ","") - .replace("services ", ""), inplace=True) - pnl[k] = ( - pnl[k] - .groupby(level=0, axis=1) - .agg(agg[k], **agg_group_kwargs) + pnl[k].rename( + columns=lambda x: x.replace("residential ", "").replace( + "services ", "" + ), + inplace=True, ) + pnl[k] = pnl[k].groupby(level=0, axis=1).agg(agg[k], **agg_group_kwargs) # remove unclustered assets of service/residential to_drop = c.df.index.difference(df.index) @@ -2745,9 +3138,11 @@ def cluster_heat_buses(n): import_components_from_dataframe(n, df.loc[to_add], c.name) -def apply_time_segmentation(n, segments, solver_name="cbc", - overwrite_time_dependent=True): - """Aggregating time series to segments with different lengths +def apply_time_segmentation( + n, segments, solver_name="cbc", overwrite_time_dependent=True +): + """ + Aggregating time series to segments with different lengths. Input: n: pypsa Network @@ -2760,36 +3155,43 @@ def apply_time_segmentation(n, segments, solver_name="cbc", try: import tsam.timeseriesaggregation as tsam except: - raise ModuleNotFoundError("Optional dependency 'tsam' not found." - "Install via 'pip install tsam'") + raise ModuleNotFoundError( + "Optional dependency 'tsam' not found." 
"Install via 'pip install tsam'" + ) # get all time-dependent data - columns = pd.MultiIndex.from_tuples([],names=['component', 'key', 'asset']) - raw = pd.DataFrame(index=n.snapshots,columns=columns) + columns = pd.MultiIndex.from_tuples([], names=["component", "key", "asset"]) + raw = pd.DataFrame(index=n.snapshots, columns=columns) for c in n.iterate_components(): for attr, pnl in c.pnl.items(): # exclude e_min_pu which is used for SOC of EVs in the morning - if not pnl.empty and attr != 'e_min_pu': + if not pnl.empty and attr != "e_min_pu": df = pnl.copy() df.columns = pd.MultiIndex.from_product([[c.name], [attr], df.columns]) raw = pd.concat([raw, df], axis=1) # normalise all time-dependent data - annual_max = raw.max().replace(0,1) + annual_max = raw.max().replace(0, 1) raw = raw.div(annual_max, level=0) # get representative segments - agg = tsam.TimeSeriesAggregation(raw, hoursPerPeriod=len(raw), - noTypicalPeriods=1, noSegments=int(segments), - segmentation=True, solver=solver_name) + agg = tsam.TimeSeriesAggregation( + raw, + hoursPerPeriod=len(raw), + noTypicalPeriods=1, + noSegments=int(segments), + segmentation=True, + solver=solver_name, + ) segmented = agg.createTypicalPeriods() - weightings = segmented.index.get_level_values("Segment Duration") offsets = np.insert(np.cumsum(weightings[:-1]), 0, 0) timesteps = [raw.index[0] + pd.Timedelta(f"{offset}h") for offset in offsets] - snapshots = pd.DatetimeIndex(timesteps) - sn_weightings = pd.Series(weightings, index=snapshots, name="weightings", dtype="float64") + snapshots = pd.DatetimeIndex(timesteps) + sn_weightings = pd.Series( + weightings, index=snapshots, name="weightings", dtype="float64" + ) n.set_snapshots(sn_weightings.index) n.snapshot_weightings = n.snapshot_weightings.mul(sn_weightings, axis=0) @@ -2802,8 +3204,11 @@ def apply_time_segmentation(n, segments, solver_name="cbc", return n + def set_temporal_aggregation(n, opts, solver_name): - """Aggregate network temporally.""" + """ + Aggregate network temporally. 
+ """ for o in opts: # temporal averaging m = re.match(r"^\d+h$", o, re.IGNORECASE) @@ -2827,27 +3232,29 @@ def set_temporal_aggregation(n, opts, solver_name): break return n -#%% + +# %% if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from helper import mock_snakemake + snakemake = mock_snakemake( - 'prepare_sector_network', - simpl='', + "prepare_sector_network", + simpl="", opts="", clusters="37", lv=1.5, - sector_opts='cb40ex0-365H-T-H-B-I-A-solar+p3-dist1', + sector_opts="cb40ex0-365H-T-H-B-I-A-solar+p3-dist1", planning_horizons="2020", ) - logging.basicConfig(level=snakemake.config['logging_level']) + logging.basicConfig(level=snakemake.config["logging_level"]) update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts) options = snakemake.config["sector"] - opts = snakemake.wildcards.sector_opts.split('-') + opts = snakemake.wildcards.sector_opts.split("-") investment_year = int(snakemake.wildcards.planning_horizons[-4:]) @@ -2857,23 +3264,26 @@ if __name__ == "__main__": pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) Nyears = n.snapshot_weightings.generators.sum() / 8760 - costs = prepare_costs(snakemake.input.costs, - snakemake.config['costs']['USD2013_to_EUR2013'], - snakemake.config['costs']['discountrate'], - Nyears, - snakemake.config['costs']['lifetime']) + costs = prepare_costs( + snakemake.input.costs, + snakemake.config["costs"]["USD2013_to_EUR2013"], + snakemake.config["costs"]["discountrate"], + Nyears, + snakemake.config["costs"]["lifetime"], + ) - pop_weighted_energy_totals = pd.read_csv(snakemake.input.pop_weighted_energy_totals, index_col=0) + pop_weighted_energy_totals = pd.read_csv( + snakemake.input.pop_weighted_energy_totals, index_col=0 + ) patch_electricity_network(n) spatial = define_spatial(pop_layout.index, options) - if snakemake.config["foresight"] == 'myopic': - + if snakemake.config["foresight"] == "myopic": add_lifetime_wind_solar(n, costs) - conventional = snakemake.config['existing_capacities']['conventional_carriers'] + conventional = snakemake.config["existing_capacities"]["conventional_carriers"] for carrier in conventional: add_carrier_buses(n, carrier) @@ -2887,11 +3297,15 @@ if __name__ == "__main__": for o in opts: if o[:4] == "wave": wave_cost_factor = float(o[4:].replace("p", ".").replace("m", "-")) - logger.info(f"Including wave generators with cost factor of {wave_cost_factor}") + logger.info( + f"Including wave generators with cost factor of {wave_cost_factor}" + ) add_wave(n, wave_cost_factor) if o[:4] == "dist": - options['electricity_distribution_grid'] = True - options['electricity_distribution_grid_cost_factor'] = float(o[4:].replace("p", ".").replace("m", "-")) + options["electricity_distribution_grid"] = True + options["electricity_distribution_grid_cost_factor"] = float( + o[4:].replace("p", ".").replace("m", "-") + ) if o == "biomasstransport": options["biomass_transport"] = True @@ -2907,7 +3321,7 @@ if __name__ == "__main__": if "B" in opts: add_biomass(n, costs) - if options['ammonia']: + if options["ammonia"]: add_ammonia(n, costs) if "I" in opts: @@ -2919,7 +3333,7 @@ if __name__ == "__main__": if "A" in opts: # requires H and I add_agriculture(n, costs) - if options['dac']: + if options["dac"]: add_dac(n, costs) if "decentral" in opts: @@ -2940,49 +3354,58 @@ if __name__ == "__main__": limit_type = "config" limit = get(snakemake.config["co2_budget"], investment_year) for o in opts: - if not "cb" in o: continue + if not "cb" in 
o: + continue limit_type = "carbon budget" - fn = snakemake.config['results_dir'] + snakemake.config['run'] + '/csvs/carbon_budget_distribution.csv' + fn = ( + snakemake.config["results_dir"] + + snakemake.config["run"] + + "/csvs/carbon_budget_distribution.csv" + ) if not os.path.exists(fn): emissions_scope = snakemake.config["energy"]["emissions"] report_year = snakemake.config["energy"]["eurostat_report_year"] - build_carbon_budget(o, snakemake.input.eurostat, fn, emissions_scope, report_year) + build_carbon_budget( + o, snakemake.input.eurostat, fn, emissions_scope, report_year + ) co2_cap = pd.read_csv(fn, index_col=0).squeeze() limit = co2_cap.loc[investment_year] break for o in opts: - if not "Co2L" in o: continue + if not "Co2L" in o: + continue limit_type = "wildcard" - limit = o[o.find("Co2L")+4:] + limit = o[o.find("Co2L") + 4 :] limit = float(limit.replace("p", ".").replace("m", "-")) break logger.info(f"Add CO2 limit from {limit_type}") add_co2limit(n, Nyears, limit) for o in opts: - if not o[:10] == 'linemaxext': continue + if not o[:10] == "linemaxext": + continue maxext = float(o[10:]) * 1e3 limit_individual_line_extension(n, maxext) break - if options['electricity_distribution_grid']: + if options["electricity_distribution_grid"]: insert_electricity_distribution_grid(n, costs) maybe_adjust_costs_and_potentials(n, opts) - if options['gas_distribution_grid']: + if options["gas_distribution_grid"]: insert_gas_distribution_costs(n, costs) - if options['electricity_grid_connection']: + if options["electricity_grid_connection"]: add_electricity_grid_connection(n, costs) - first_year_myopic = ((snakemake.config["foresight"] == 'myopic') and - (snakemake.config["scenario"]["planning_horizons"][0]==investment_year)) + first_year_myopic = (snakemake.config["foresight"] == "myopic") and ( + snakemake.config["scenario"]["planning_horizons"][0] == investment_year + ) if options.get("cluster_heat_buses", False) and not first_year_myopic: cluster_heat_buses(n) - n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards))) n.export_to_netcdf(snakemake.output[0]) diff --git a/scripts/retrieve_gas_infrastructure_data.py b/scripts/retrieve_gas_infrastructure_data.py index 4bae3e29..bdb9509f 100644 --- a/scripts/retrieve_gas_infrastructure_data.py +++ b/scripts/retrieve_gas_infrastructure_data.py @@ -1,23 +1,26 @@ +# -*- coding: utf-8 -*- """ -Retrieve gas infrastructure data from https://zenodo.org/record/4767098/files/IGGIELGN.zip +Retrieve gas infrastructure data from +https://zenodo.org/record/4767098/files/IGGIELGN.zip. """ import logging -from helper import progress_retrieve - import zipfile from pathlib import Path +from helper import progress_retrieve + logger = logging.getLogger(__name__) if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from helper import mock_snakemake - snakemake = mock_snakemake('retrieve_gas_network_data') - rootpath = '..' + + snakemake = mock_snakemake("retrieve_gas_network_data") + rootpath = ".." else: - rootpath = '.' + rootpath = "." url = "https://zenodo.org/record/4767098/files/IGGIELGN.zip" diff --git a/scripts/retrieve_sector_databundle.py b/scripts/retrieve_sector_databundle.py index 9fba27ea..ef3e79e9 100644 --- a/scripts/retrieve_sector_databundle.py +++ b/scripts/retrieve_sector_databundle.py @@ -1,8 +1,10 @@ +# -*- coding: utf-8 -*- """ Retrieve and extract sector data bundle. 
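# Numeric values are embedded in the wildcard options parsed above with
# "p" standing for the decimal point and "m" for a minus sign, e.g.
# "Co2L0p05" encodes a limit of 0.05. The decoding in isolation:
def decode_wildcard_value(val: str) -> float:
    return float(val.replace("p", ".").replace("m", "-"))

assert decode_wildcard_value("0p05") == 0.05
assert decode_wildcard_value("m0p5") == -0.5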
""" import logging + logger = logging.getLogger(__name__) import os @@ -13,8 +15,7 @@ from pathlib import Path # Add pypsa-eur scripts to path for import of _helpers sys.path.insert(0, os.getcwd() + "/../pypsa-eur/scripts") -from _helpers import progress_retrieve, configure_logging - +from _helpers import configure_logging, progress_retrieve if __name__ == "__main__": configure_logging(snakemake) @@ -32,4 +33,4 @@ if __name__ == "__main__": tarball_fn.unlink() - logger.info(f"Databundle available in '{to_fn}'.") \ No newline at end of file + logger.info(f"Databundle available in '{to_fn}'.") diff --git a/scripts/solve_network.py b/scripts/solve_network.py index ddac9196..41725012 100644 --- a/scripts/solve_network.py +++ b/scripts/solve_network.py @@ -1,40 +1,51 @@ -"""Solve network.""" - -import pypsa -import numpy as np - -from vresutils.benchmark import memory_logger -from helper import override_component_attrs, update_config_with_sector_opts +# -*- coding: utf-8 -*- +""" +Solve network. +""" import logging + +import numpy as np +import pypsa +from helper import override_component_attrs, update_config_with_sector_opts +from vresutils.benchmark import memory_logger + logger = logging.getLogger(__name__) pypsa.pf.logger.setLevel(logging.WARNING) def add_land_use_constraint(n): - - if 'm' in snakemake.wildcards.clusters: + if "m" in snakemake.wildcards.clusters: _add_land_use_constraint_m(n) else: _add_land_use_constraint(n) def _add_land_use_constraint(n): - #warning: this will miss existing offwind which is not classed AC-DC and has carrier 'offwind' + # warning: this will miss existing offwind which is not classed AC-DC and has carrier 'offwind' - for carrier in ['solar', 'onwind', 'offwind-ac', 'offwind-dc']: - ext_i = (n.generators.carrier==carrier) & ~n.generators.p_nom_extendable - existing = n.generators.loc[ext_i,"p_nom"].groupby(n.generators.bus.map(n.buses.location)).sum() + for carrier in ["solar", "onwind", "offwind-ac", "offwind-dc"]: + ext_i = (n.generators.carrier == carrier) & ~n.generators.p_nom_extendable + existing = ( + n.generators.loc[ext_i, "p_nom"] + .groupby(n.generators.bus.map(n.buses.location)) + .sum() + ) existing.index += " " + carrier + "-" + snakemake.wildcards.planning_horizons - n.generators.loc[existing.index,"p_nom_max"] -= existing + n.generators.loc[existing.index, "p_nom_max"] -= existing # check if existing capacities are larger than technical potential - existing_large = n.generators[n.generators["p_nom_min"] > n.generators["p_nom_max"]].index + existing_large = n.generators[ + n.generators["p_nom_min"] > n.generators["p_nom_max"] + ].index if len(existing_large): - logger.warning(f"Existing capacities larger than technical potential for {existing_large},\ - adjust technical potential to existing capacities") - n.generators.loc[existing_large, "p_nom_max"] = n.generators.loc[existing_large, "p_nom_min"] - + logger.warning( + f"Existing capacities larger than technical potential for {existing_large},\ + adjust technical potential to existing capacities" + ) + n.generators.loc[existing_large, "p_nom_max"] = n.generators.loc[ + existing_large, "p_nom_min" + ] n.generators.p_nom_max.clip(lower=0, inplace=True) @@ -46,80 +57,109 @@ def _add_land_use_constraint_m(n): grouping_years = snakemake.config["existing_capacities"]["grouping_years"] current_horizon = snakemake.wildcards.planning_horizons - for carrier in ['solar', 'onwind', 'offwind-ac', 'offwind-dc']: - - existing = n.generators.loc[n.generators.carrier==carrier,"p_nom"] - ind = 
list(set([i.split(sep=" ")[0] + ' ' + i.split(sep=" ")[1] for i in existing.index])) + for carrier in ["solar", "onwind", "offwind-ac", "offwind-dc"]: + existing = n.generators.loc[n.generators.carrier == carrier, "p_nom"] + ind = list( + set( + [ + i.split(sep=" ")[0] + " " + i.split(sep=" ")[1] + for i in existing.index + ] + ) + ) previous_years = [ - str(y) for y in - planning_horizons + grouping_years + str(y) + for y in planning_horizons + grouping_years if y < int(snakemake.wildcards.planning_horizons) ] for p_year in previous_years: - ind2 = [i for i in ind if i + " " + carrier + "-" + p_year in existing.index] + ind2 = [ + i for i in ind if i + " " + carrier + "-" + p_year in existing.index + ] sel_current = [i + " " + carrier + "-" + current_horizon for i in ind2] sel_p_year = [i + " " + carrier + "-" + p_year for i in ind2] - n.generators.loc[sel_current, "p_nom_max"] -= existing.loc[sel_p_year].rename(lambda x: x[:-4] + current_horizon) + n.generators.loc[sel_current, "p_nom_max"] -= existing.loc[ + sel_p_year + ].rename(lambda x: x[:-4] + current_horizon) n.generators.p_nom_max.clip(lower=0, inplace=True) def add_co2_sequestration_limit(n, limit=200): - """Add a global constraint on the amount of Mt CO2 that can be sequestered.""" + """ + Add a global constraint on the amount of Mt CO2 that can be sequestered. + """ n.carriers.loc["co2 stored", "co2_absorptions"] = -1 n.carriers.co2_absorptions = n.carriers.co2_absorptions.fillna(0) limit = limit * 1e6 for o in opts: - if not "seq" in o: continue - limit = float(o[o.find("seq")+3:]) * 1e6 + if not "seq" in o: + continue + limit = float(o[o.find("seq") + 3 :]) * 1e6 break - n.add("GlobalConstraint", 'co2_sequestration_limit', sense="<=", constant=limit, - type="primary_energy", carrier_attribute="co2_absorptions") + n.add( + "GlobalConstraint", + "co2_sequestration_limit", + sense="<=", + constant=limit, + type="primary_energy", + carrier_attribute="co2_absorptions", + ) def prepare_network(n, solve_opts=None, config=None): + if "clip_p_max_pu" in solve_opts: + for df in ( + n.generators_t.p_max_pu, + n.generators_t.p_min_pu, + n.storage_units_t.inflow, + ): + df.where(df > solve_opts["clip_p_max_pu"], other=0.0, inplace=True) - if 'clip_p_max_pu' in solve_opts: - for df in (n.generators_t.p_max_pu, n.generators_t.p_min_pu, n.storage_units_t.inflow): - df.where(df>solve_opts['clip_p_max_pu'], other=0., inplace=True) - - if solve_opts.get('load_shedding'): + if solve_opts.get("load_shedding"): # intersect between macroeconomic and surveybased willingness to pay # http://journal.frontiersin.org/article/10.3389/fenrg.2015.00055/full n.add("Carrier", "Load") - n.madd("Generator", n.buses.index, " load", - bus=n.buses.index, - carrier='load', - sign=1e-3, # Adjust sign to measure p and p_nom in kW instead of MW - marginal_cost=1e2, # Eur/kWh - p_nom=1e9 # kW + n.madd( + "Generator", + n.buses.index, + " load", + bus=n.buses.index, + carrier="load", + sign=1e-3, # Adjust sign to measure p and p_nom in kW instead of MW + marginal_cost=1e2, # Eur/kWh + p_nom=1e9, # kW ) - if solve_opts.get('noisy_costs'): + if solve_opts.get("noisy_costs"): for t in n.iterate_components(): - #if 'capital_cost' in t.df: + # if 'capital_cost' in t.df: # t.df['capital_cost'] += 1e1 + 2.*(np.random.random(len(t.df)) - 0.5) - if 'marginal_cost' in t.df: + if "marginal_cost" in t.df: np.random.seed(174) - t.df['marginal_cost'] += 1e-2 + 2e-3 * (np.random.random(len(t.df)) - 0.5) + t.df["marginal_cost"] += 1e-2 + 2e-3 * ( + np.random.random(len(t.df)) - 0.5 + 
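# The seeded noise added in prepare_network perturbs costs by a tiny,
# reproducible amount to break ties between otherwise identical assets,
# which stabilises the solver. The same idea in isolation:
import numpy as np
import pandas as pd

np.random.seed(174)
marginal_cost = pd.Series(50.0, index=["gen1", "gen2", "gen3"])
marginal_cost += 1e-2 + 2e-3 * (np.random.random(len(marginal_cost)) - 0.5)
# values now differ in the third decimal, well below economic significance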
@@ -134,23 +174,29 @@ def add_battery_constraints(n):
     discharger_bool = n.links.index.str.contains("battery discharger")
     charger_bool = n.links.index.str.contains("battery charger")

-    dischargers_ext= n.links[discharger_bool].query("p_nom_extendable").index
-    chargers_ext= n.links[charger_bool].query("p_nom_extendable").index
+    dischargers_ext = n.links[discharger_bool].query("p_nom_extendable").index
+    chargers_ext = n.links[charger_bool].query("p_nom_extendable").index

     eff = n.links.efficiency[dischargers_ext].values
-    lhs = n.model["Link-p_nom"].loc[chargers_ext] - n.model["Link-p_nom"].loc[dischargers_ext] * eff
+    lhs = (
+        n.model["Link-p_nom"].loc[chargers_ext]
+        - n.model["Link-p_nom"].loc[dischargers_ext] * eff
+    )

     n.model.add_constraints(lhs == 0, name="Link-charger_ratio")


 def add_chp_constraints(n):
-
-    electric = (n.links.index.str.contains("urban central")
-                & n.links.index.str.contains("CHP")
-                & n.links.index.str.contains("electric"))
-    heat = (n.links.index.str.contains("urban central")
-            & n.links.index.str.contains("CHP")
-            & n.links.index.str.contains("heat"))
+    electric = (
+        n.links.index.str.contains("urban central")
+        & n.links.index.str.contains("CHP")
+        & n.links.index.str.contains("electric")
+    )
+    heat = (
+        n.links.index.str.contains("urban central")
+        & n.links.index.str.contains("CHP")
+        & n.links.index.str.contains("heat")
+    )

     electric_ext = n.links[electric].query("p_nom_extendable").index
     heat_ext = n.links[heat].query("p_nom_extendable").index
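The hunk below reshapes the CHP operating-region constraints. For orientation: with `p` the fuel input to each link branch and `eta_*` the link efficiencies, a back-pressure unit must satisfy the back-pressure line, c_b * eta_heat * p_heat <= eta_elec * p_elec (so the lhs built below should be <= 0), and the top iso fuel line, p_elec + p_heat <= p_nom. A small numeric check of that feasible region, with invented parameters:

```python
# Numeric sketch of the CHP feasible region encoded by add_chp_constraints.
# c_b is the back-pressure coefficient, eta_* are link efficiencies,
# p_elec/p_heat are fuel inputs to the two branches, p_nom the capacity.
# All numbers are made up for illustration.
c_b, eta_heat, eta_elec = 0.75, 0.45, 0.40
p_nom = 100.0

def feasible(p_elec, p_heat):
    backpressure = c_b * eta_heat * p_heat - eta_elec * p_elec <= 0
    top_iso_fuel_line = p_elec + p_heat <= p_nom
    return backpressure and top_iso_fuel_line

assert feasible(60.0, 40.0)      # on the iso fuel line, enough electricity
assert not feasible(10.0, 90.0)  # too heat-heavy: violates back-pressure
```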
@@ -158,38 +204,50 @@ def add_chp_constraints(n):
     electric_fix = n.links[electric].query("~p_nom_extendable").index
     heat_fix = n.links[heat].query("~p_nom_extendable").index

-    p = n.model["Link-p"] # dimension: [time, link]
+    p = n.model["Link-p"]  # dimension: [time, link]

     # output ratio between heat and electricity and top_iso_fuel_line for extendable
     if not electric_ext.empty:
         p_nom = n.model["Link-p_nom"]

-        lhs = (p_nom.loc[electric_ext] * (n.links.p_nom_ratio * n.links.efficiency)[electric_ext].values -
-               p_nom.loc[heat_ext] * n.links.efficiency[heat_ext].values)
-        n.model.add_constraints(lhs == 0, name='chplink-fix_p_nom_ratio')
+        lhs = (
+            p_nom.loc[electric_ext]
+            * (n.links.p_nom_ratio * n.links.efficiency)[electric_ext].values
+            - p_nom.loc[heat_ext] * n.links.efficiency[heat_ext].values
+        )
+        n.model.add_constraints(lhs == 0, name="chplink-fix_p_nom_ratio")

         rename = {"Link-ext": "Link"}
-        lhs = p.loc[:, electric_ext] + p.loc[:, heat_ext] - p_nom.rename(rename).loc[electric_ext]
-        n.model.add_constraints(lhs <= 0, name='chplink-top_iso_fuel_line_ext')
-
+        lhs = (
+            p.loc[:, electric_ext]
+            + p.loc[:, heat_ext]
+            - p_nom.rename(rename).loc[electric_ext]
+        )
+        n.model.add_constraints(lhs <= 0, name="chplink-top_iso_fuel_line_ext")

     # top_iso_fuel_line for fixed
     if not electric_fix.empty:
         lhs = p.loc[:, electric_fix] + p.loc[:, heat_fix]
         rhs = n.links.p_nom[electric_fix]
-        n.model.add_constraints(lhs <= rhs, name='chplink-top_iso_fuel_line_fix')
+        n.model.add_constraints(lhs <= rhs, name="chplink-top_iso_fuel_line_fix")

     # back-pressure
     if not electric.empty:
-        lhs = (p.loc[:, heat] * (n.links.efficiency[heat] * n.links.c_b[electric].values)
-               - p.loc[:, electric] * n.links.efficiency[electric])
-        n.model.add_constraints(lhs <= rhs, name='chplink-backpressure')
+        lhs = (
+            p.loc[:, heat] * (n.links.efficiency[heat] * n.links.c_b[electric].values)
+            - p.loc[:, electric] * n.links.efficiency[electric]
+        )
+        n.model.add_constraints(lhs <= 0, name="chplink-backpressure")


 def add_pipe_retrofit_constraint(n):
-    """Add constraint for retrofitting existing CH4 pipelines to H2 pipelines."""
+    """
+    Add constraint for retrofitting existing CH4 pipelines to H2 pipelines.
+    """
     gas_pipes_i = n.links.query("carrier == 'gas pipeline' and p_nom_extendable").index
-    h2_retrofitted_i = n.links.query("carrier == 'H2 pipeline retrofitted' and p_nom_extendable").index
+    h2_retrofitted_i = n.links.query(
+        "carrier == 'H2 pipeline retrofitted' and p_nom_extendable"
+    ).index

     if h2_retrofitted_i.empty or gas_pipes_i.empty:
         return
@@ -200,7 +258,7 @@ def add_pipe_retrofit_constraint(n):
     lhs = p_nom.loc[gas_pipes_i] + CH4_per_H2 * p_nom.loc[h2_retrofitted_i]
     rhs = n.links.p_nom[gas_pipes_i].rename_axis("Link-ext")

-    n.model.add_constraints(lhs == rhs, name='Link-pipe_retrofit')
+    n.model.add_constraints(lhs == rhs, name="Link-pipe_retrofit")


 def extra_functionality(n, snapshots):
@@ -209,9 +267,11 @@ def extra_functionality(n, snapshots):


 def solve_network(n, config, opts="", **kwargs):
-    set_of_options = config['solving']['solver']['options']
-    solver_options = config['solving']["solver_options"][set_of_options] if set_of_options else {}
-    solver_name = config['solving']['solver']['name']
+    set_of_options = config["solving"]["solver"]["options"]
+    solver_options = (
+        config["solving"]["solver_options"][set_of_options] if set_of_options else {}
+    )
+    solver_name = config["solving"]["solver"]["name"]
     cf_solving = config["solving"]["options"]
     track_iterations = cf_solving.get("track_iterations", False)
     min_iterations = cf_solving.get("min_iterations", 4)
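The lookup at the top of `solve_network` resolves a named solver-options block from the config. A sketch with an assumed config dict shaped like the `solving:` section of the test YAML files (the inner solver settings are made-up values, not part of this patch):

```python
# Sketch of the solver-option resolution in solve_network, using an invented
# config dict. "cbc" and "cbc-default" mirror the test configs; the ratioGap
# and seconds entries are assumed example settings.
config = {
    "solving": {
        "solver": {"name": "cbc", "options": "cbc-default"},
        "solver_options": {"cbc-default": {"ratioGap": 0.01, "seconds": 3600}},
        "options": {"min_iterations": 4},
    }
}

set_of_options = config["solving"]["solver"]["options"]
solver_options = (
    config["solving"]["solver_options"][set_of_options] if set_of_options else {}
)
solver_name = config["solving"]["solver"]["name"]

assert solver_name == "cbc" and solver_options["seconds"] == 3600
```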
@@ -245,46 +305,52 @@ def solve_network(n, config, opts="", **kwargs):
     )

     if status != "ok":
-        logger.warning(f"Solving status '{status}' with termination condition '{condition}'")
+        logger.warning(
+            f"Solving status '{status}' with termination condition '{condition}'"
+        )

     return n


-#%%
+# %%
 if __name__ == "__main__":
-    if 'snakemake' not in globals():
+    if "snakemake" not in globals():
         from helper import mock_snakemake
+
         snakemake = mock_snakemake(
-            'solve_network_myopic',
-            simpl='',
+            "solve_network_myopic",
+            simpl="",
             opts="",
             clusters="45",
             lv=1.0,
-            sector_opts='8760H-T-H-B-I-A-solar+p3-dist1',
+            sector_opts="8760H-T-H-B-I-A-solar+p3-dist1",
             planning_horizons="2020",
         )

-    logging.basicConfig(filename=snakemake.log.python,
-                        level=snakemake.config['logging_level'])
+    logging.basicConfig(
+        filename=snakemake.log.python, level=snakemake.config["logging_level"]
+    )

     update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts)

-    tmpdir = snakemake.config['solving'].get('tmpdir')
+    tmpdir = snakemake.config["solving"].get("tmpdir")
     if tmpdir is not None:
         from pathlib import Path
+
         Path(tmpdir).mkdir(parents=True, exist_ok=True)

-    opts = snakemake.wildcards.sector_opts.split('-')
-    solve_opts = snakemake.config['solving']['options']
-
-    fn = getattr(snakemake.log, 'memory', None)
-    with memory_logger(filename=fn, interval=30.) as mem:
+    opts = snakemake.wildcards.sector_opts.split("-")
+    solve_opts = snakemake.config["solving"]["options"]

+    fn = getattr(snakemake.log, "memory", None)
+    with memory_logger(filename=fn, interval=30.0) as mem:
         overrides = override_component_attrs(snakemake.input.overrides)
         n = pypsa.Network(snakemake.input.network, override_component_attrs=overrides)

         n = prepare_network(n, solve_opts, config=snakemake.config)

-        n = solve_network(n, config=snakemake.config, opts=opts, log_fn=snakemake.log.solver)
+        n = solve_network(
+            n, config=snakemake.config, opts=opts, log_fn=snakemake.log.solver
+        )

         if "lv_limit" in n.global_constraints.index:
             n.line_volume_limit = n.global_constraints.at["lv_limit", "constant"]
diff --git a/test/config.myopic.yaml b/test/config.myopic.yaml
index 40daced7..34ad488b 100644
--- a/test/config.myopic.yaml
+++ b/test/config.myopic.yaml
@@ -3,15 +3,15 @@ foresight: myopic

 scenario:
   lv:
-  - 1.5
+  - 1.5
   clusters:
-  - 5
+  - 5
   sector_opts:
-  - 191H-T-H-B-I-A-solar+p3-dist1
+  - 191H-T-H-B-I-A-solar+p3-dist1
   planning_horizons:
-  - 2030
-  - 2040
-  - 2050
+  - 2030
+  - 2040
+  - 2050

 snapshots:
   start: "2013-03-01"
@@ -25,4 +25,3 @@ solving:
     name: cbc
     options: cbc-default
   mem: 4000
-
diff --git a/test/config.overnight.yaml b/test/config.overnight.yaml
index 7214906b..e8f8194a 100644
--- a/test/config.overnight.yaml
+++ b/test/config.overnight.yaml
@@ -3,13 +3,13 @@ foresight: overnight

 scenario:
   lv:
-  - 1.5
+  - 1.5
   clusters:
-  - 5
+  - 5
   sector_opts:
-  - CO2L0-191H-T-H-B-I-A-solar+p3-dist1
+  - CO2L0-191H-T-H-B-I-A-solar+p3-dist1
   planning_horizons:
-  - 2030
+  - 2030

 snapshots:
   start: "2013-03-01"
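The `sector_opts` strings in these test configs are what the `__main__` block above splits into option tokens via `snakemake.wildcards.sector_opts.split("-")`. A sketch of that tokenisation, using the string from `config.overnight.yaml` (token semantics such as temporal resolution or sector switches are interpreted elsewhere in the workflow; this only shows the split):

```python
# Sketch: tokenising a sector_opts wildcard exactly as the __main__ block
# above does before handing opts to solve_network.
sector_opts = "CO2L0-191H-T-H-B-I-A-solar+p3-dist1"
opts = sector_opts.split("-")

assert opts[0] == "CO2L0"   # CO2 limit option
assert "191H" in opts       # 191-hourly temporal resolution
assert "solar+p3" in opts   # carrier modifier token, parsed downstream
```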