diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 0b78b5b6..3f1edbd8 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -6,3 +6,4 @@ 5d1ef8a64055a039aa4a0834d2d26fe7752fe9a0 92080b1cd2ca5f123158571481722767b99c2b27 13769f90af4500948b0376d57df4cceaa13e78b5 +9865a970893d9e515786f33c629b14f71645bf1e diff --git a/.gitattributes b/.gitattributes index 3f5e771d..b82aaff1 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index c0fb745d..c17c0425 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2021-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2021-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 @@ -19,7 +19,7 @@ on: - cron: "0 5 * * TUE" env: - DATA_CACHE_NUMBER: 2 + DATA_CACHE_NUMBER: 1 jobs: build: @@ -32,7 +32,14 @@ jobs: - ubuntu-latest - macos-latest - windows-latest - + inhouse: + - stable + - master + exclude: + - os: macos-latest + inhouse: master + - os: windows-latest + inhouse: master runs-on: ${{ matrix.os }} defaults: @@ -46,16 +53,6 @@ jobs: run: | echo -ne "url: ${CDSAPI_URL}\nkey: ${CDSAPI_TOKEN}\n" > ~/.cdsapirc - - name: Add solver to environment - run: | - echo -e "- glpk\n- ipopt<3.13.3" >> envs/environment.yaml - if: ${{ matrix.os }} == 'windows-latest' - - - name: Add solver to environment - run: | - echo -e "- glpk\n- ipopt" >> envs/environment.yaml - if: ${{ matrix.os }} != 'windows-latest' - - name: Setup micromamba uses: mamba-org/setup-micromamba@v1 with: @@ -66,6 +63,11 @@ jobs: cache-environment: true cache-downloads: true + - name: Install inhouse packages + run: | + pip install git+https://github.com/PyPSA/atlite.git@master git+https://github.com/PyPSA/powerplantmatching.git@master git+https://github.com/PyPSA/linopy.git@master + if: ${{ matrix.inhouse }} == 'master' + - name: Set cache dates run: | echo "WEEK=$(date +'%Y%U')" >> $GITHUB_ENV @@ -79,14 +81,10 @@ jobs: key: data-cutouts-${{ env.WEEK }}-${{ env.DATA_CACHE_NUMBER }} - name: Test snakemake workflow - run: | - snakemake -call solve_elec_networks --configfile config/test/config.electricity.yaml --rerun-triggers=mtime - snakemake -call all --configfile config/test/config.overnight.yaml --rerun-triggers=mtime - snakemake -call all --configfile config/test/config.myopic.yaml --rerun-triggers=mtime - snakemake -call all --configfile config/test/config.perfect.yaml --rerun-triggers=mtime + run: ./test.sh - name: Upload artifacts - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4.3.0 with: name: resources-results path: | @@ -94,3 +92,4 @@ jobs: results if-no-files-found: warn retention-days: 1 + if: matrix.os == 'ubuntu' && matrix.inhouse == 'stable' diff --git a/.gitignore b/.gitignore index c9d2e171..3336fca7 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 @@ -20,10 +20,18 @@ gurobi.log /notebooks /data /cutouts - +/tmp doc/_build +/scripts/old +/scripts/create_scenarios.py +/config/create_scenarios.py + +config/config.yaml +config/scenarios.yaml + config.yaml +config/config.yaml dconf /data/links_p_nom.csv @@ -53,25 +61,15 @@ d1gam3xoknrgr2.cloudfront.net/ *.nc *~ 
-/scripts/old *.pyc -/cutouts -/tmp -/pypsa *.xlsx -config.yaml - -doc/_build - *.xls *.geojson *.ipynb -data/costs_* - merger-todos.md diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 78e70b57..3ffe8d9e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -5,7 +5,7 @@ exclude: "^LICENSES" repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.5.0 + rev: v4.6.0 hooks: - id: check-merge-conflict - id: end-of-file-fixer @@ -51,7 +51,7 @@ repos: # Formatting with "black" coding style - repo: https://github.com/psf/black-pre-commit-mirror - rev: 23.12.1 + rev: 24.4.2 hooks: # Format Python files - id: black @@ -67,14 +67,14 @@ repos: # Do YAML formatting (before the linter checks it for misses) - repo: https://github.com/macisamuele/language-formatters-pre-commit-hooks - rev: v2.12.0 + rev: v2.13.0 hooks: - id: pretty-format-yaml args: [--autofix, --indent, "2", --preserve-quotes] # Format Snakemake rule / workflow files - repo: https://github.com/snakemake/snakefmt - rev: v0.8.5 + rev: v0.10.1 hooks: - id: snakefmt @@ -87,6 +87,6 @@ repos: # Check for FSFE REUSE compliance (licensing) - repo: https://github.com/fsfe/reuse-tool - rev: v2.1.0 + rev: v3.0.2 hooks: - id: reuse diff --git a/.readthedocs.yml b/.readthedocs.yml index 30684052..ca388d80 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 diff --git a/.reuse/dep5 b/.reuse/dep5 index cd8e2660..27edd808 100644 --- a/.reuse/dep5 +++ b/.reuse/dep5 @@ -4,33 +4,33 @@ Upstream-Contact: Tom Brown Source: https://github.com/pypsa/pypsa-eur Files: doc/img/* -Copyright: 2019-2023 The PyPSA-Eur Authors +Copyright: 2019-2024 The PyPSA-Eur Authors License: CC-BY-4.0 Files: doc/data.csv -Copyright: 2019-2023 The PyPSA-Eur Authors +Copyright: 2019-2024 The PyPSA-Eur Authors License: CC-BY-4.0 Files: doc/configtables/* -Copyright: 2019-2023 The PyPSA-Eur Authors +Copyright: 2019-2024 The PyPSA-Eur Authors License: CC-BY-4.0 Files: data/* -Copyright: 2017-2023 The PyPSA-Eur Authors +Copyright: 2017-2024 The PyPSA-Eur Authors License: CC-BY-4.0 Files: .github/* -Copyright: 2019-2023 The PyPSA-Eur Authors +Copyright: 2019-2024 The PyPSA-Eur Authors License: CC0-1.0 Files: matplotlibrc -Copyright: 2017-2023 The PyPSA-Eur Authors +Copyright: 2017-2024 The PyPSA-Eur Authors License: CC0-1.0 Files: borg-it -Copyright: 2017-2023 The PyPSA-Eur Authors +Copyright: 2017-2024 The PyPSA-Eur Authors License: CC0-1.0 Files: graphics/* -Copyright: 2017-2023 The PyPSA-Eur Authors +Copyright: 2017-2024 The PyPSA-Eur Authors License: CC-BY-4.0 diff --git a/.sync-send b/.sync-send index 72252956..483c7a99 100644 --- a/.sync-send +++ b/.sync-send @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2021-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2021-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 diff --git a/CITATION.cff b/CITATION.cff index f8b28b5f..af26fd53 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -6,7 +6,7 @@ cff-version: 1.1.0 message: "If you use this package, please cite it in the following way." 
title: "PyPSA-Eur: An open sector-coupled optimisation model of the European energy system" repository: https://github.com/pypsa/pypsa-eur -version: 0.9.0 +version: 0.10.0 license: MIT authors: - family-names: Brown diff --git a/LICENSES/MIT.txt b/LICENSES/MIT.txt index 87f6d959..baf15333 100644 --- a/LICENSES/MIT.txt +++ b/LICENSES/MIT.txt @@ -1,6 +1,6 @@ MIT License -Copyright 2017-2023 The PyPSA-Eur Authors +Copyright 2017-2024 The PyPSA-Eur Authors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in diff --git a/README.md b/README.md index 4a58d75c..b4c03574 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ diff --git a/Snakefile b/Snakefile index 7c16ff9f..e6980708 100644 --- a/Snakefile +++ b/Snakefile @@ -1,36 +1,34 @@ -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT +from pathlib import Path +import yaml from os.path import normpath, exists from shutil import copyfile, move, rmtree - -from snakemake.remote.HTTP import RemoteProvider as HTTPRemoteProvider - -HTTP = HTTPRemoteProvider() - from snakemake.utils import min_version -min_version("7.7") +min_version("8.11") + +from scripts._helpers import path_provider, copy_default_files, get_scenarios, get_rdir -if not exists("config/config.yaml") and exists("config/config.default.yaml"): - copyfile("config/config.default.yaml", "config/config.yaml") +copy_default_files(workflow) +configfile: "config/config.default.yaml" configfile: "config/config.yaml" -COSTS = f"data/costs_{config['costs']['year']}.csv" -ATLITE_NPROCESSES = config["atlite"].get("nprocesses", 4) +run = config["run"] +scenarios = get_scenarios(run) +RDIR = get_rdir(run) -run = config.get("run", {}) -RDIR = run["name"] + "/" if run.get("name") else "" -CDIR = RDIR if not run.get("shared_cutouts") else "" +logs = path_provider("logs/", RDIR, run["shared_resources"]) +benchmarks = path_provider("benchmarks/", RDIR, run["shared_resources"]) +resources = path_provider("resources/", RDIR, run["shared_resources"]) -LOGS = "logs/" + RDIR -BENCHMARKS = "benchmarks/" + RDIR -RESOURCES = "resources/" + RDIR if not run.get("shared_resources") else "resources/" +CDIR = "" if run["shared_cutouts"] else RDIR RESULTS = "results/" + RDIR @@ -41,9 +39,9 @@ localrules: wildcard_constraints: simpl="[a-zA-Z0-9]*", clusters="[0-9]+(m|c)?|all", - ll="(v|c)([0-9\.]+|opt)", - opts="[-+a-zA-Z0-9\.]*", - sector_opts="[-+a-zA-Z0-9\.\s]*", + ll=r"(v|c)([0-9\.]+|opt)", + opts=r"[-+a-zA-Z0-9\.]*", + sector_opts=r"[-+a-zA-Z0-9\.\s]*", include: "rules/common.smk" @@ -73,10 +71,19 @@ if config["foresight"] == "perfect": rule all: input: - RESULTS + "graphs/costs.pdf", + expand(RESULTS + "graphs/costs.pdf", run=config["run"]["name"]), default_target: True +rule create_scenarios: + output: + config["run"]["scenarios"]["file"], + conda: + "envs/retrieve.yaml" + script: + "config/create_scenarios.py" + + rule purge: run: import builtins @@ -97,13 +104,13 @@ rule dag: message: "Creating DAG of workflow." 
output: - dot=RESOURCES + "dag.dot", - pdf=RESOURCES + "dag.pdf", - png=RESOURCES + "dag.png", + dot=resources("dag.dot"), + pdf=resources("dag.pdf"), + png=resources("dag.png"), conda: "envs/environment.yaml" shell: - """ + r""" snakemake --rulegraph all | sed -n "/digraph/,\$p" > {output.dot} dot -Tpdf -o {output.pdf} {output.dot} dot -Tpng -o {output.png} {output.dot} diff --git a/config/config.default.yaml b/config/config.default.yaml index eb190c55..dad45cba 100644 --- a/config/config.default.yaml +++ b/config/config.default.yaml @@ -1,9 +1,9 @@ -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#top-level-configuration -version: 0.9.0 +version: 0.10.0 tutorial: false logging: @@ -20,7 +20,11 @@ remote: # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#run run: + prefix: "" name: "" + scenarios: + enable: false + file: config/scenarios.yaml disable_progressbar: false shared_resources: false shared_cutouts: true @@ -44,7 +48,7 @@ scenario: opts: - '' sector_opts: - - Co2L0-3H-T-H-B-I-A-solar+p3-dist1 + - Co2L0-3H-T-H-B-I-A-dist1 planning_horizons: # - 2020 # - 2030 @@ -59,9 +63,6 @@ snapshots: start: "2013-01-01" end: "2014-01-01" inclusive: 'left' - resolution: false - segmentation: false - #representative: false # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#enable enable: @@ -71,11 +72,11 @@ enable: retrieve_sector_databundle: true retrieve_cost_data: true build_cutout: false - retrieve_irena: false retrieve_cutout: true build_natura_raster: false retrieve_natura_raster: true custom_busmap: false + drop_leap_day: true # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#co2-budget @@ -114,7 +115,7 @@ electricity: Store: [battery, H2] Link: [] # H2 pipeline - powerplants_filter: (DateOut >= 2022 or DateOut != DateOut) + powerplants_filter: (DateOut >= 2023 or DateOut != DateOut) and not (Country == 'Germany' and Fueltype == 'Nuclear') custom_powerplants: false everywhere_powerplants: [nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass] @@ -257,6 +258,9 @@ renewable: flatten_dispatch: false flatten_dispatch_buffer: 0.2 clip_min_inflow: 1.0 + eia_norm_year: false + eia_correct_by_capacity: false + eia_approximate_missing: false # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#conventional conventional: @@ -302,11 +306,12 @@ transformers: # docs-load in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#load load: - power_statistics: true interpolate_limit: 3 time_shift_for_large_gaps: 1w manual_adjustments: true # false scaling_factor: 1.0 + fixed_year: false # false or year (e.g. 2013) + supplement_synthetic: true # docs # TODO: PyPSA-Eur merge issue in prepare_sector_network.py @@ -334,9 +339,8 @@ pypsa_eur: # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#energy energy: - energy_totals_year: 2011 + energy_totals_year: 2019 base_emissions_year: 1990 - eurostat_report_year: 2016 emissions: CO2 # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#biomass @@ -371,12 +375,14 @@ solar_thermal: orientation: slope: 45. azimuth: 180. 
+ cutout: default # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#existing-capacities existing_capacities: - grouping_years_power: [1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2020, 2025, 2030] - grouping_years_heat: [1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2019] # these should not extend 2020 + grouping_years_power: [1895, 1920, 1950, 1955, 1960, 1965, 1970, 1975, 1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2020, 2025, 2030] + grouping_years_heat: [1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2020] # heat grouping years >= baseyear will be ignored threshold_capacity: 10 + default_heating_lifetime: 20 conventional_carriers: - lignite - coal @@ -385,6 +391,11 @@ existing_capacities: # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#sector sector: + transport: true + heating: true + biomass: true + industry: true + agriculture: true district_heating: potential: 0.6 progress: @@ -396,7 +407,8 @@ sector: 2045: 0.8 2050: 1.0 district_heating_loss: 0.15 - cluster_heat_buses: false + cluster_heat_buses: true + heat_demand_cutout: default bev_dsm_restriction_value: 0.75 bev_dsm_restriction_time: 7 transport_heating_deadband_upper: 20. @@ -502,6 +514,7 @@ sector: resistive_heaters: true oil_boilers: false biomass_boiler: true + overdimension_individual_heating: 1.1 #to cover demand peaks bigger than data chp: true micro_chp: false solar_thermal: true @@ -519,9 +532,14 @@ sector: SMR_cc: true regional_methanol_demand: false regional_oil_demand: false + regional_coal_demand: false regional_co2_sequestration_potential: enable: false - attribute: 'conservative estimate Mt' + attribute: + - conservative estimate Mt + - conservative estimate GAS Mt + - conservative estimate OIL Mt + - conservative estimate aquifer Mt include_onshore: false min_size: 3 max_size: 25 @@ -548,6 +566,7 @@ sector: use_methanation_waste_heat: true use_fuel_cell_waste_heat: true use_electrolysis_waste_heat: true + electricity_transmission_grid: true electricity_distribution_grid: true electricity_distribution_grid_cost_factor: 1.0 electricity_grid_connection: true @@ -556,8 +575,8 @@ sector: efficiency_static: 0.98 efficiency_per_1000km: 0.977 H2 pipeline: - efficiency_per_1000km: 1 # 0.979 - compression_per_1000km: 0.019 + efficiency_per_1000km: 1 # 0.982 + compression_per_1000km: 0.018 gas pipeline: efficiency_per_1000km: 1 #0.977 compression_per_1000km: 0.01 @@ -626,9 +645,43 @@ industry: MWh_NH3_per_MWh_H2_cracker: 1.46 # https://github.com/euronion/trace/blob/44a5ff8401762edbef80eff9cfe5a47c8d3c8be4/data/efficiencies.csv NH3_process_emissions: 24.5 petrochemical_process_emissions: 25.5 - HVC_primary_fraction: 1. - HVC_mechanical_recycling_fraction: 0. - HVC_chemical_recycling_fraction: 0. 
+ #HVC primary/recycling based on values used in Neumann et al https://doi.org/10.1016/j.joule.2023.06.016, linearly interpolated between 2020 and 2050 + #2020 recycling rates based on Agora https://static.agora-energiewende.de/fileadmin/Projekte/2021/2021_02_EU_CEAP/A-EW_254_Mobilising-circular-economy_study_WEB.pdf + #fractions refer to the total primary HVC production in 2020 + #assumes 6.7 Mtplastics produced from recycling in 2020 + HVC_primary_fraction: + 2020: 1.0 + 2025: 0.9 + 2030: 0.8 + 2035: 0.7 + 2040: 0.6 + 2045: 0.5 + 2050: 0.4 + HVC_mechanical_recycling_fraction: + 2020: 0.12 + 2025: 0.15 + 2030: 0.18 + 2035: 0.21 + 2040: 0.24 + 2045: 0.27 + 2050: 0.30 + HVC_chemical_recycling_fraction: + 2020: 0.0 + 2025: 0.0 + 2030: 0.04 + 2035: 0.08 + 2040: 0.12 + 2045: 0.16 + 2050: 0.20 + sector_ratios_fraction_future: + 2020: 0.0 + 2025: 0.1 + 2030: 0.3 + 2035: 0.5 + 2040: 0.7 + 2045: 0.9 + 2050: 1.0 + basic_chemicals_without_NH3_production_today: 69. #Mt/a, = 86 Mtethylene-equiv - 17 MtNH3 HVC_production_today: 52. MWh_elec_per_tHVC_mechanical_recycling: 0.547 MWh_elec_per_tHVC_chemical_recycling: 6.9 @@ -645,7 +698,7 @@ industry: # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#costs costs: year: 2030 - version: 1e6e79a + version: v0.8.1 rooftop_share: 0.14 # based on the potentials, assuming (0.1 kW/m2 and 10 m2/person) social_discountrate: 0.02 fill_values: @@ -695,6 +748,14 @@ clustering: committable: any ramp_limit_up: max ramp_limit_down: max + temporal: + resolution_elec: false + resolution_sector: false + +# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#adjustments +adjustments: + electricity: false + sector: false # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#solving solving: @@ -707,6 +768,7 @@ solving: rolling_horizon: false seed: 123 custom_extra_functionality: "../data/custom_extra_functionality.py" + # io_api: "direct" # Increases performance but only supported for the highs and gurobi solvers # options that go into the optimize function track_iterations: false min_iterations: 4 @@ -727,7 +789,7 @@ solving: solver_options: highs-default: - # refer to https://ergo-code.github.io/HiGHS/options/definitions.html#solver + # refer to https://ergo-code.github.io/HiGHS/dev/options/definitions/ threads: 4 solver: "ipm" run_crossover: "off" @@ -748,7 +810,6 @@ solving: PreDual: 0 GURO_PAR_BARDENSETHRESH: 200 gurobi-numeric-focus: - name: gurobi NumericFocus: 3 # Favour numeric stability over speed method: 2 # barrier crossover: 0 # do not use crossover @@ -760,7 +821,6 @@ solving: threads: 8 Seed: 123 gurobi-fallback: # Use gurobi defaults - name: gurobi crossover: 0 method: 2 # barrier BarHomogeneous: 1 # Use homogeneous barrier if standard does not converge @@ -775,11 +835,16 @@ solving: solutiontype: 2 # non basic solution, ie no crossover barrier.convergetol: 1.e-5 feasopt.tolerance: 1.e-6 + copt-default: + Threads: 8 + LpMethod: 2 + Crossover: 0 cbc-default: {} # Used in CI glpk-default: {} # Used in CI - mem: 30000 #memory in MB; 20 GB enough for 50+B+I+H2; 100 GB for 181+B+I+H2 - walltime: "12:00:00" + mem_mb: 30000 #memory in MB; 20 GB enough for 50+B+I+H2; 100 GB for 181+B+I+H2 + runtime: 6h #runtime in humanfriendly style https://humanfriendly.readthedocs.io/en/latest/ + # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#plotting plotting: @@ -788,6 +853,13 @@ plotting: color_geomap: ocean: white land: white + projection: + name: "EqualEarth" + # See 
https://scitools.org.uk/cartopy/docs/latest/reference/projections.html for alternatives, for example: + # name: "LambertConformal" + # central_longitude: 10. + # central_latitude: 50. + # standard_parallels: [35, 65] eu_node_location: x: -5.5 y: 46. @@ -828,7 +900,7 @@ plotting: offshore wind (DC): "#74c6f2" offshore wind dc: "#74c6f2" offwind-float: "#b5e2fa" - offshore wind (float): "#b5e2fa" + offshore wind (Float): "#b5e2fa" offshore wind float: "#b5e2fa" # water hydro: '#298c81' @@ -838,7 +910,6 @@ plotting: hydroelectricity: '#298c81' PHS: '#51dbcc' hydro+PHS: "#08ad97" - wave: '#a7d4cf' # solar solar: "#f9d002" solar PV: "#f9d002" @@ -987,9 +1058,11 @@ plotting: air heat pump: '#36eb41' residential urban decentral air heat pump: '#48f74f' services urban decentral air heat pump: '#5af95d' + services rural air heat pump: '#5af95d' urban central air heat pump: '#6cfb6b' ground heat pump: '#2fb537' residential rural ground heat pump: '#48f74f' + residential rural air heat pump: '#48f74f' services rural ground heat pump: '#5af95d' Ambient: '#98eb9d' CHP: '#8a5751' diff --git a/config/config.entsoe-all.yaml b/config/config.entsoe-all.yaml index dd19d2c7..40e3c0a5 100644 --- a/config/config.entsoe-all.yaml +++ b/config/config.entsoe-all.yaml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 diff --git a/config/config.perfect.yaml b/config/config.perfect.yaml index f355763c..7bfdbdd2 100644 --- a/config/config.perfect.yaml +++ b/config/config.perfect.yaml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 run: @@ -19,13 +19,16 @@ scenario: opts: - '' sector_opts: - - 1p5-4380H-T-H-B-I-A-solar+p3-dist1 - - 1p7-4380H-T-H-B-I-A-solar+p3-dist1 - - 2p0-4380H-T-H-B-I-A-solar+p3-dist1 + - 1p5-4380H-T-H-B-I-A-dist1 + - 1p7-4380H-T-H-B-I-A-dist1 + - 2p0-4380H-T-H-B-I-A-dist1 planning_horizons: - 2020 + - 2025 - 2030 + - 2035 - 2040 + - 2045 - 2050 diff --git a/config/config.validation.yaml b/config/config.validation.yaml index 5bcd5c31..062e82f5 100644 --- a/config/config.validation.yaml +++ b/config/config.validation.yaml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 run: @@ -69,9 +69,6 @@ conventional: biomass: p_max_pu: 0.65 -load: - power_statistics: false - lines: s_max_pu: 0.23 under_construction: 'remove' diff --git a/config/create_scenarios.py b/config/create_scenarios.py new file mode 100644 index 00000000..cccc29bc --- /dev/null +++ b/config/create_scenarios.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2023-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT + +# This script helps to generate a scenarios.yaml file for PyPSA-Eur. +# You can modify the template to your needs and define all possible combinations of config values that should be considered. + +if "snakemake" in globals(): + filename = snakemake.output[0] +else: + filename = "../config/scenarios.yaml" + +import itertools + +# Insert your config values that should be altered in the template. +# Change `config_section` and `config_section2` to the actual config sections. 
+template = """ +scenario{scenario_number}: + config_section: + config_key: {config_value} + config_section2: + config_key2: {config_value2} +""" + +# Define all possible combinations of config values. +# This must define all config values that are used in the template. +config_values = dict(config_value=["true", "false"], config_value2=[1, 2, 3, 4]) + +combinations = [ + dict(zip(config_values.keys(), values)) + for values in itertools.product(*config_values.values()) +] + +with open(filename, "w") as f: + for i, config in enumerate(combinations): + f.write(template.format(scenario_number=i, **config)) diff --git a/config/scenarios.template.yaml b/config/scenarios.template.yaml new file mode 100644 index 00000000..0eba9d75 --- /dev/null +++ b/config/scenarios.template.yaml @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT + +# This file is used to define the scenarios that are run by snakemake. Each entry on the first level is a scenario. Each scenario can contain configuration overrides with respect to the config/config.yaml settings. +# +# Example +# +# custom-scenario: # name of the scenario +# electricity: +# renewable_carriers: [wind, solar] # override the list of renewable carriers + +normal: + electricity: + renewable_carriers: + - solar + - onwind + - offwind-ac + - offwind-dc + - hydro + +no-offwind: + electricity: + renewable_carriers: + - solar + - onwind + - hydro diff --git a/config/test/config.electricity.yaml b/config/test/config.electricity.yaml index cfb04f0e..979453cc 100644 --- a/config/test/config.electricity.yaml +++ b/config/test/config.electricity.yaml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 @@ -8,14 +8,14 @@ tutorial: true run: name: "test-elec" # use this to keep track of runs with different settings disable_progressbar: true - shared_resources: true + shared_resources: "test" shared_cutouts: true scenario: clusters: - 5 opts: - - Co2L-24H + - Co2L-24h countries: ['BE'] diff --git a/config/test/config.myopic.yaml b/config/test/config.myopic.yaml index 2d6779d7..a9335046 100644 --- a/config/test/config.myopic.yaml +++ b/config/test/config.myopic.yaml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 @@ -7,7 +7,7 @@ tutorial: true run: name: "test-sector-myopic" disable_progressbar: true - shared_resources: true + shared_resources: "test" shared_cutouts: true foresight: myopic @@ -18,7 +18,7 @@ scenario: clusters: - 5 sector_opts: - - 24H-T-H-B-I-A-solar+p3-dist1 + - 24h-T-H-B-I-A-dist1 planning_horizons: - 2030 - 2040 diff --git a/config/test/config.overnight.yaml b/config/test/config.overnight.yaml index f0ca6172..b02c0449 100644 --- a/config/test/config.overnight.yaml +++ b/config/test/config.overnight.yaml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 @@ -7,7 +7,7 @@ tutorial: true run: name: "test-sector-overnight" disable_progressbar: true - shared_resources: true + shared_resources: "test" shared_cutouts: true @@ -17,7 +17,7 @@ scenario: clusters: - 5 sector_opts: - - CO2L0-24H-T-H-B-I-A-solar+p3-dist1 + - CO2L0-24h-T-H-B-I-A-dist1 planning_horizons: - 2030 diff --git 
a/config/test/config.perfect.yaml b/config/test/config.perfect.yaml index 78e9ba27..7fbfb630 100644 --- a/config/test/config.perfect.yaml +++ b/config/test/config.perfect.yaml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 @@ -7,7 +7,7 @@ tutorial: true run: name: "test-sector-perfect" disable_progressbar: true - shared_resources: true + shared_resources: "test" shared_cutouts: true foresight: perfect @@ -18,7 +18,7 @@ scenario: clusters: - 5 sector_opts: - - 8760H-T-H-B-I-A-solar+p3-dist1 + - 8760h-T-H-B-I-A-dist1 planning_horizons: - 2030 - 2040 diff --git a/config/test/config.scenarios.yaml b/config/test/config.scenarios.yaml new file mode 100644 index 00000000..8ecbb91b --- /dev/null +++ b/config/test/config.scenarios.yaml @@ -0,0 +1,60 @@ +# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: CC0-1.0 + +tutorial: true + +run: + name: + - test-elec-no-offshore-wind + - test-elec-no-onshore-wind + scenarios: + enable: true + file: "config/test/scenarios.yaml" + disable_progressbar: true + shared_resources: base + shared_cutouts: true + +scenario: + clusters: + - 5 + opts: + - Co2L-24H + +countries: ['BE'] + +snapshots: + start: "2013-03-01" + end: "2013-03-08" + +electricity: + extendable_carriers: + Generator: [OCGT] + StorageUnit: [battery, H2] + Store: [] + +atlite: + default_cutout: be-03-2013-era5 + cutouts: + be-03-2013-era5: + module: era5 + x: [4., 15.] + y: [46., 56.] + time: ["2013-03-01", "2013-03-08"] + +renewable: + onwind: + cutout: be-03-2013-era5 + offwind-ac: + cutout: be-03-2013-era5 + max_depth: false + offwind-dc: + cutout: be-03-2013-era5 + max_depth: false + solar: + cutout: be-03-2013-era5 + +solving: + solver: + name: glpk + options: "glpk-default" diff --git a/config/test/scenarios.yaml b/config/test/scenarios.yaml new file mode 100644 index 00000000..962cc91e --- /dev/null +++ b/config/test/scenarios.yaml @@ -0,0 +1,11 @@ +# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: CC0-1.0 + +test-elec-no-offshore-wind: + electricity: + renewable_carriers: [solar, onwind] + +test-elec-no-onshore-wind: + electricity: + renewable_carriers: [solar, offwind-ac, offwind-dc] diff --git a/data/custom_powerplants.csv b/data/custom_powerplants.csv index 4fd47498..84553a74 100644 --- a/data/custom_powerplants.csv +++ b/data/custom_powerplants.csv @@ -2,36 +2,36 @@ 1266,Khmelnitskiy,Nuclear,,PP,UA,1901.8916595755832,,0.0,0.0,0.0,0.0,1988.0,2005.0,,,50.3023,26.6466,[nan],"{'GEO': ['GEO3842'], 'GPD': ['WRI1005111'], 'CARMA': ['CARMA22000']}" 1268,Kaniv,Hydro,Reservoir,PP,UA,452.1656050955414,,0.0,0.0,0.0,0.0,1972.0,2003.0,,,49.76653,31.47165,[nan],"{'GEO': ['GEO43017'], 'GPD': ['WRI1005122'], 'CARMA': ['CARMA21140']}" 1269,Kahovska kakhovka,Hydro,Reservoir,PP,UA,352.45222929936307,,0.0,0.0,0.0,0.0,1955.0,1956.0,,,46.77858,33.36965,[nan],"{'GEO': ['GEO43018'], 'GPD': ['WRI1005118'], 'CARMA': ['CARMA20855']}" -1347,Kharkiv,Natural Gas,Steam Turbine,CHP,UA,494.94274967602314,,0.0,0.0,0.0,0.0,1979.0,1980.0,,,49.9719,36107,[nan],"{'GEO': ['GEO43027'], 'GPD': ['WRI1005126'], 'CARMA': ['CARMA21972']}" +1347,Kharkiv,Natural Gas,Steam Turbine,CHP,UA,494.94274967602314,,0.0,0.0,0.0,0.0,1979.0,1980.0,,,49.9719,36.107,[nan],"{'GEO': ['GEO43027'], 'GPD': ['WRI1005126'], 'CARMA': ['CARMA21972']}" 
1348,Kremenchuk,Hydro,Reservoir,PP,UA,617.0382165605096,,0.0,0.0,0.0,0.0,1959.0,1960.0,,,49.07759,33.2505,[nan],"{'GEO': ['GEO43019'], 'GPD': ['WRI1005121'], 'CARMA': ['CARMA23072']}" 1377,Krivorozhskaya,Hard Coal,Steam Turbine,PP,UA,2600.0164509342876,,0.0,0.0,0.0,0.0,1965.0,1992.0,,,47.5432,33.6583,[nan],"{'GEO': ['GEO42989'], 'GPD': ['WRI1005100'], 'CARMA': ['CARMA23176']}" 1407,Zmiyevskaya zmiivskaya,Hard Coal,Steam Turbine,PP,UA,2028.3816283884514,,0.0,0.0,0.0,0.0,1960.0,2005.0,,,49.5852,36.5231,[nan],"{'GEO': ['GEO42999'], 'GPD': ['WRI1005103'], 'CARMA': ['CARMA51042']}" 1408,Pridneprovskaya,Hard Coal,Steam Turbine,CHP,UA,1627.3152609570984,,0.0,0.0,0.0,0.0,1959.0,1966.0,,,48.4051,35.1131,[nan],"{'GEO': ['GEO42990'], 'GPD': ['WRI1005102'], 'CARMA': ['CARMA35874']}" 1409,Kurakhovskaya,Hard Coal,Steam Turbine,PP,UA,1371.0015824607397,,0.0,0.0,0.0,0.0,1972.0,2003.0,,,47.9944,37.24022,[nan],"{'GEO': ['GEO42994'], 'GPD': ['WRI1005104'], 'CARMA': ['CARMA23339']}" -1410,Dobrotvorsky,Hard Coal,Steam Turbine,PP,UA,553.1949895604868,,0.0,0.0,0.0,0.0,1960.0,1964.0,,,50.2133,24375,[nan],"{'GEO': ['GEO42992'], 'GPD': ['WRI1005096'], 'CARMA': ['CARMA10971']}" +1410,Dobrotvorsky,Hard Coal,Steam Turbine,PP,UA,553.1949895604868,,0.0,0.0,0.0,0.0,1960.0,1964.0,,,50.2133,24.375,[nan],"{'GEO': ['GEO42992'], 'GPD': ['WRI1005096'], 'CARMA': ['CARMA10971']}" 1422,Zuyevskaya,Hard Coal,Steam Turbine,PP,UA,1147.87960333801,,0.0,0.0,0.0,0.0,1982.0,2007.0,,,48.0331,38.28615,[nan],"{'GEO': ['GEO42995'], 'GPD': ['WRI1005106'], 'CARMA': ['CARMA51083']}" 1423,Zaporozhye,Nuclear,,PP,UA,5705.67497872675,,0.0,0.0,0.0,0.0,1985.0,1996.0,,,47.5119,34.5863,[nan],"{'GEO': ['GEO6207'], 'GPD': ['WRI1005114'], 'CARMA': ['CARMA50875']}" 1424,Trypilska,Hard Coal,Steam Turbine,PP,UA,1659.5849686814602,,0.0,0.0,0.0,0.0,1969.0,1972.0,,,50.1344,30.7468,[nan],"{'GEO': ['GEO43000'], 'GPD': ['WRI1005099'], 'CARMA': ['CARMA46410']}" 1425,Tashlyk,Hydro,Pumped Storage,Store,UA,285.55968954109585,,0.0,0.0,0.0,0.0,2006.0,2007.0,,,47.7968,31.1811,[nan],"{'GEO': ['GEO43025'], 'GPD': ['WRI1005117'], 'CARMA': ['CARMA44696']}" 1426,Starobeshivska,Hard Coal,Steam Turbine,PP,UA,1636.5351774497733,,0.0,0.0,0.0,0.0,1961.0,1967.0,,,47.7997,38.00612,[nan],"{'GEO': ['GEO43003'], 'GPD': ['WRI1005105'], 'CARMA': ['CARMA43083']}" -1427,South,Nuclear,,PP,UA,2852.837489363375,,0.0,0.0,0.0,0.0,1983.0,1989.0,,,47812,31.22,[nan],"{'GEO': ['GEO5475'], 'GPD': ['WRI1005113'], 'CARMA': ['CARMA42555']}" +1427,South,Nuclear,,PP,UA,2852.837489363375,,0.0,0.0,0.0,0.0,1983.0,1989.0,,,47.812,31.22,[nan],"{'GEO': ['GEO5475'], 'GPD': ['WRI1005113'], 'CARMA': ['CARMA42555']}" 1428,Rovno rivne,Nuclear,,PP,UA,2695.931427448389,,0.0,0.0,0.0,0.0,1981.0,2006.0,,,51.3245,25.89744,[nan],"{'GEO': ['GEO5174'], 'GPD': ['WRI1005112'], 'CARMA': ['CARMA38114']}" -1429,Ladyzhinska,Hard Coal,Steam Turbine,PP,UA,1659.5849686814602,,0.0,0.0,0.0,0.0,1970.0,1971.0,,,48706,29.2202,[nan],"{'GEO': ['GEO42993'], 'GPD': ['WRI1005098'], 'CARMA': ['CARMA24024']}" -1430,Kiev,Hydro,Pumped Storage,PP,UA,635.8694635681177,,0.0,0.0,0.0,0.0,1964.0,1972.0,,,50.5998,30501,"[nan, nan]","{'GEO': ['GEO43024', 'GEO43023'], 'GPD': ['WRI1005123', 'WRI1005124'], 'CARMA': ['CARMA23516', 'CARMA23517']}" +1429,Ladyzhinska,Hard Coal,Steam Turbine,PP,UA,1659.5849686814602,,0.0,0.0,0.0,0.0,1970.0,1971.0,,,48.706,29.2202,[nan],"{'GEO': ['GEO42993'], 'GPD': ['WRI1005098'], 'CARMA': ['CARMA24024']}" +1430,Kiev,Hydro,Pumped Storage,PP,UA,635.8694635681177,,0.0,0.0,0.0,0.0,1964.0,1972.0,,,50.5998,30.501,"[nan, 
nan]","{'GEO': ['GEO43024', 'GEO43023'], 'GPD': ['WRI1005123', 'WRI1005124'], 'CARMA': ['CARMA23516', 'CARMA23517']}" 2450,Cet chisinau,Natural Gas,,PP,MD,306.0,,0.0,0.0,0.0,0.0,,,,,47.027550000000005,28.8801,"[nan, nan]","{'GPD': ['WRI1002985', 'WRI1002984'], 'CARMA': ['CARMA8450', 'CARMA8451']}" 2460,Hydropower che costesti,Hydro,,PP,MD,16.0,,0.0,0.0,0.0,0.0,1978.0,,,,47.8381,27.2246,[nan],"{'GPD': ['WRI1002987'], 'CARMA': ['CARMA9496']}" 2465,Moldavskaya gres,Hard Coal,,PP,MD,2520.0,,0.0,0.0,0.0,0.0,,,,,46.6292,29.9407,[nan],"{'GPD': ['WRI1002989'], 'CARMA': ['CARMA28979']}" -2466,Hydropower dubasari,Hydro,,PP,MD,48.0,,0.0,0.0,0.0,0.0,,,,,47.2778,29123,[nan],"{'GPD': ['WRI1002988'], 'CARMA': ['CARMA11384']}" +2466,Hydropower dubasari,Hydro,,PP,MD,48.0,,0.0,0.0,0.0,0.0,,,,,47.2778,29.123,[nan],"{'GPD': ['WRI1002988'], 'CARMA': ['CARMA11384']}" 2676,Cet nord balti,Natural Gas,,PP,MD,24.0,,0.0,0.0,0.0,0.0,,,,,47.7492,27.8938,[nan],"{'GPD': ['WRI1002986'], 'CARMA': ['CARMA3071']}" 2699,Dniprodzerzhynsk,Hydro,Reservoir,PP,UA,360.3503184713376,,0.0,0.0,0.0,0.0,1963.0,1964.0,,,48.5485,34.541015,[nan],"{'GEO': ['GEO43020'], 'GPD': ['WRI1005119']}" 2707,Burshtynska tes,Hard Coal,Steam Turbine,PP,UA,2212.779958241947,,0.0,0.0,0.0,0.0,1965.0,1984.0,,,49.21038,24.66654,[nan],"{'GEO': ['GEO42991'], 'GPD': ['WRI1005097']}" 2708,Danipro dnieper,Hydro,Reservoir,PP,UA,1484.8407643312103,,0.0,0.0,0.0,0.0,1932.0,1947.0,,,47.86944,35.08611,[nan],"{'GEO': ['GEO43016'], 'GPD': ['WRI1005120']}" 2709,Dniester,Hydro,Pumped Storage,Store,UA,612.7241020616891,,0.0,0.0,0.0,0.0,2009.0,2011.0,,,48.51361,27.47333,[nan],"{'GEO': ['GEO43022'], 'GPD': ['WRI1005116', 'WRI1005115']}" -2710,Kiev,Natural Gas,Steam Turbine,CHP,UA,458.2803237740955,,0.0,0.0,0.0,0.0,1982.0,1984.0,,,50532,30.6625,[nan],"{'GEO': ['GEO42998'], 'GPD': ['WRI1005125']}" +2710,Kiev,Natural Gas,Steam Turbine,CHP,UA,458.2803237740955,,0.0,0.0,0.0,0.0,1982.0,1984.0,,,50.532,30.6625,[nan],"{'GEO': ['GEO42998'], 'GPD': ['WRI1005125']}" 2712,Luganskaya,Hard Coal,Steam Turbine,PP,UA,1060.2903966575996,,0.0,0.0,0.0,0.0,1962.0,1969.0,,,48.74781,39.2624,[nan],"{'GEO': ['GEO42996'], 'GPD': ['WRI1005110']}" -2713,Slavyanskaya,Hard Coal,Steam Turbine,PP,UA,737.5933194139823,,0.0,0.0,0.0,0.0,1971.0,1971.0,,,48872,37.76567,[nan],"{'GEO': ['GEO43002'], 'GPD': ['WRI1005109']}" +2713,Slavyanskaya,Hard Coal,Steam Turbine,PP,UA,737.5933194139823,,0.0,0.0,0.0,0.0,1971.0,1971.0,,,48.872,37.76567,[nan],"{'GEO': ['GEO43002'], 'GPD': ['WRI1005109']}" 2714,Vuhlehirska uglegorskaya,Hard Coal,Steam Turbine,PP,UA,3319.1699373629203,,0.0,0.0,0.0,0.0,1972.0,1977.0,,,48.4633,38.20328,[nan],"{'GEO': ['GEO43001'], 'GPD': ['WRI1005107']}" 2715,Zaporiska,Hard Coal,Steam Turbine,PP,UA,3319.1699373629203,,0.0,0.0,0.0,0.0,1972.0,1977.0,,,47.5089,34.6253,[nan],"{'GEO': ['GEO42988'], 'GPD': ['WRI1005101']}" 3678,Mironovskaya,Hard Coal,,PP,UA,815.0,,0.0,0.0,0.0,0.0,,,,,48.3407,38.4049,[nan],"{'GPD': ['WRI1005108'], 'CARMA': ['CARMA28679']}" 3679,Kramatorskaya,Hard Coal,,PP,UA,120.0,,0.0,0.0,0.0,0.0,1974.0,,,,48.7477,37.5723,[nan],"{'GPD': ['WRI1075856'], 'CARMA': ['CARMA54560']}" -3680,Chernihiv,Hard Coal,,PP,UA,200.0,,0.0,0.0,0.0,0.0,1968.0,,,,51455,31.2602,[nan],"{'GPD': ['WRI1075853'], 'CARMA': ['CARMA8190']}" +3680,Chernihiv,Hard Coal,,PP,UA,200.0,,0.0,0.0,0.0,0.0,1968.0,,,,51.455,31.2602,[nan],"{'GPD': ['WRI1075853'], 'CARMA': ['CARMA8190']}" diff --git a/data/eia_hydro_annual_capacity.csv b/data/eia_hydro_annual_capacity.csv new file mode 100644 index 00000000..42c0cc6f --- /dev/null 
+++ b/data/eia_hydro_annual_capacity.csv @@ -0,0 +1,53 @@ +# https://www.eia.gov/international/data/world/electricity/electricity-generation?pd=2&p=00000000000000000000008&u=1&f=A&v=mapbubble&a=-&i=none&vo=value&t=R&g=000000000000002&l=73-1028i008017kg6368g80a4k000e0ag00gg0004g8g0ho00g000400008&l=72-00000000000000000000000000080000000000000000000g&s=315532800000&e=1609459200000&ev=false& +Report generated on: 03-14-2024 13:39:49 +"API","","1980","1981","1982","1983","1984","1985","1986","1987","1988","1989","1990","1991","1992","1993","1994","1995","1996","1997","1998","1999","2000","2001","2002","2003","2004","2005","2006","2007","2008","2009","2010","2011","2012","2013","2014","2015","2016","2017","2018","2019","2020","2021" +"","hydroelectricity installed capacity (million kW)","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","" +"INTL.33-7-EURO-MK.A"," Europe","136.143","137.425","141.734","143.763","142.894","144.738","147.631","150.428","153.428","153.345","139.346","134.524","137.463","138.338","139.688","141.47","142.121","143.595","143.957","146.4204","147.3512","147.736","152.173","152.938","150.4894","151.424","152.276","154.198","155.405","156.988","159.495","162.112","165.452","170.146","171.588","174.906","176.998","178.221","180.212","181.039","184.728","185.46" +"INTL.33-7-ALB-MK.A"," Albania","0.5","0.525","0.55","0.6","0.625","0.65","0.675","0.68","0.69","0.69","1.668","1.668","1.668","1.668","1.668","1.445","1.445","1.445","1.445","1.445","1.445","1.445","1.445","1.445","1.445","1.432","1.432","1.432","1.45","1.45","1.461","1.508","1.628","1.781","1.725","1.798","1.913","2.047","2.105","2.193","2.39","2.39" +"INTL.33-7-AUT-MK.A"," Austria","8.206","9.157","9.51","9.582","10.034","10.171","10.423","10.691","10.762","10.858","7.028","7.129","7.204","7.202","7.245","7.323","7.385","7.54","7.685","7.669","7.676","7.703","7.567","7.607","7.613","7.667","7.684","7.845","7.848","7.827","7.913","7.947","7.97","8.272","8.321","8.457","8.493","8.506","8.591","8.63","9.001","9.151" +"INTL.33-7-BEL-MK.A"," Belgium","0.073","0.08","0.086","0.086","0.086","0.087","0.089","0.09","0.093","0.095","0.094","0.094","0.094","0.095","0.095","0.096","0.096","0.096","0.097","0.103","0.103","0.111","0.111","0.11","0.115","0.105","0.107","0.11","0.111","0.11","0.118","0.119","0.12","0.119","0.121","0.112","0.115","0.107","0.108","0.108","0.12","0.12" +"INTL.33-7-BIH-MK.A"," Bosnia and Herzegovina","--","--","--","--","--","--","--","--","--","--","--","--","1.2","1.2","1.139","1.219","1.219","1.219","1.624","1.983","1.983","1.993","2.38","2.38","2.38","2.38","2.411","2.411","2.117","2.117","2.117","2.117","2.12","2.12","2.049","2.055","2.084","2.084","2.09","2.09","2.093","1.747" +"INTL.33-7-BGR-MK.A"," Bulgaria","1.895","1.895","1.895","1.975","1.975","1.975","1.975","1.975","1.975","1.973","1.973","1.401","1.401","1.401","1.401","1.401","1.401","1.803","1.803","1.803","1.881","1.706","1.948","1.984","1.984","1.984","1.984","2.012","2.12","2.137","2.184","2.035","2.095","2.165","2.19","2.206","2.206","2.21","1.725","1.725","1.725","1.725" +"INTL.33-7-HRV-MK.A"," Croatia","--","--","--","--","--","--","--","--","--","--","--","--","1.769","1.77","1.77","1.781","1.785","1.785","1.785","1.785","1.785","1.785","1.775","1.783","1.79","1.804","1.804","1.782","1.782","1.799","1.848","1.848","1.848","1.897","1.9","1.915","1.912","1.912","1.913","1.913","1.848","1.874" +"INTL.33-7-CYP-MK.A"," 
Cyprus","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0" +"INTL.33-7-CZE-MK.A"," Czechia","--","--","--","--","--","--","--","--","--","--","--","--","--","0.911","0.906","0.908","0.868","0.905","0.888","1.008","0.952","1","1","1.004","1.015","1.02","1.016","1.024","1.029","1.037","1.049","1.05","1.065","1.08","1.08","1.088","1.09","1.093","1.094","1.096","1.097","1.109" +"INTL.33-7-DNK-MK.A"," Denmark","0.008","0.008","0.01","0.009","0.009","0.009","0.009","0.009","0.009","0.011","0.01","0.01","0.01","0.01","0.008","0.01","0.01","0.01","0.011","0.011","0.01","0.011","0.011","0.011","0.011","0.011","0.009","0.009","0.009","0.009","0.009","0.009","0.009","0.009","0.009","0.007","0.009","0.009","0.009","0.009","0.009","0.007" +"INTL.33-7-EST-MK.A"," Estonia","--","--","--","--","--","--","--","--","--","--","--","--","0.001","0.001","0.001","0.001","0.001","0.001","0.001","0.0012","0.0012","0.003","0.003","0.004","0.004","0.005","0.005","0.005","0.005","0.007","0.006","0.005","0.008","0.008","0.005","0.006","0.006","0.007","0.007","0.007","0.008","0.004" +"INTL.33-7-FRO-MK.A"," Faroe Islands","0.018","0.018","0.018","0.018","0.018","0.018","0.018","0.031","0.031","0.031","0.031","0.031","0.031","0.031","0.031","0.031","0.031","0.031","0.031","0.0314","0.032","0.032","0.032","0.032","0.0314","0.031","0.031","0.031","0.031","0.031","0.031","0.031","0.039","0.039","0.04","0.04","0.04","0.04","0.04","0.04","0.039","0.039" +"INTL.33-7-FIN-MK.A"," Finland","2.42","2.467","2.474","2.503","2.497","2.505","2.555","2.586","2.597","2.586","2.621","2.648","2.679","2.731","2.736","2.777","2.785","2.861","2.881","2.881","2.882","2.926","2.964","2.966","2.999","3.035","3.062","3.102","3.122","3.145","3.155","3.196","3.196","3.224","3.248","3.249","3.249","3.272","3.287","3.287","3.263","3.263" +"INTL.33-7-CSK-MK.A"," Former Czechoslovakia","2.578","2.832","2.84","2.84","2.875","2.897","2.89","2.975","2.988","3.042","3.036","3.061","3.061","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--" +"INTL.33-7-SCG-MK.A"," Former Serbia and Montenegro","--","--","--","--","--","--","--","--","--","--","--","--","2.25","2.25","2.25","2.25","2.25","2.25","2.25","2.296","2.296","2.296","2.296","2.296","2.206","2.206","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--" +"INTL.33-7-YUG-MK.A"," Former Yugoslavia","6.2","6.25","5.886","5.886","6.386","6.736","7.086","7.386","7.625","7.686","7.386","7.386","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--" +"INTL.33-7-FRA-MK.A"," France","17.431","17.63","18.247","18.4","18.661","19.034","19.786","19.991","20.174","20.338","17.717","17.655","17.767","17.837","17.902","17.898","18","18.018","18.024","17.947","17.646","17.674","17.775","17.927","17.812","17.808","17.82","17.832","17.922","18.009","18.156","18.373","18.388","18.379","18.392","18.415","18.486","18.561","18.857","18.88","19.671","19.657" +"INTL.33-7-DEU-MK.A"," Germany","--","--","--","--","--","--","--","--","--","--","--","3.31","3.317","3.385","3.471","3.624","3.563","3.569","3.642","3.802","4.086","4.101","4.193","4.088","4.209","4.134","4.117","4.083","4.104","4.283","4.252","4.469","4.451","4.433","4.424","4.433","4.442","4.449","4.456","4.456","4.658","4.684" 
+"INTL.33-7-DDR-MK.A"," Germany, East","1.852","1.845","1.852","1.851","1.845","1.844","1.844","1.844","1.844","1.844","1.844","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--" +"INTL.33-7-DEUW-MK.A"," Germany, West","6.45","6.509","6.531","6.631","6.668","6.71","6.71","6.71","6.85","6.86","6.86","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--" +"INTL.33-7-GIB-MK.A"," Gibraltar","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0" +"INTL.33-7-GRC-MK.A"," Greece","1.415","1.714","1.714","1.714","1.714","1.822","1.822","1.822","1.836","1.986","2.093","2.197","2.208","2.208","2.208","2.208","2.207","2.412","2.241","2.344","2.373","2.377","2.379","2.38","2.4","2.407","2.435","2.451","2.477","2.502","2.516","2.525","2.537","2.539","2.69","2.693","2.693","2.693","2.71","2.71","2.697","2.722" +"INTL.33-7-HUN-MK.A"," Hungary","0.048","0.048","0.048","0.048","0.048","0.048","0.048","0.048","0.048","0.048","0.048","0.048","0.048","0.048","0.048","0.048","0.048","0.048","0.048","0.048","0.048","0.048","0.048","0.054","0.049","0.049","0.049","0.049","0.051","0.053","0.053","0.055","0.056","0.057","0.057","0.057","0.057","0.057","0.057","0.057","0.056","0.058" +"INTL.33-7-ISL-MK.A"," Iceland","0.545","0.615","0.755","0.755","0.755","0.755","0.756","0.756","0.756","0.756","0.756","0.779","0.879","0.879","0.884","0.884","0.884","0.923","0.956","1.016","1.064","1.109","1.155","1.155","1.163","1.163","1.163","1.758","1.879","1.875","1.883","1.884","1.877","1.984","1.984","1.987","1.987","1.995","2.099","2.099","2.086","2.086" +"INTL.33-7-IRL-MK.A"," Ireland","0.224","0.224","0.225","0.225","0.226","0.226","0.221","0.222","0.222","0.222","0.223","0.226","0.226","0.226","0.227","0.227","0.232","0.233","0.233","0.236","0.236","0.238","0.24","0.24","0.24","0.234","0.234","0.234","0.234","0.234","0.237","0.237","0.237","0.237","0.237","0.237","0.237","0.237","0.237","0.237","0.237","0.216" +"INTL.33-7-ITA-MK.A"," Italy","15.826","15.766","16.877","17.125","12.166","12.16","12.419","12.435","12.495","12.547","12.582","12.692","12.718","12.788","12.864","12.964","12.999","13.06","13.058","13.417","13.389","13.456","13.557","13.703","13.789","13.89","13.528","13.573","13.732","13.827","13.976","14.193","14.325","14.454","14.506","14.628","14.991","15.109","15.182","15.583","14.908","14.908" +"INTL.33-7-XKS-MK.A"," Kosovo","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","0.043","0.043","0.045","0.046","0.046","0.046","0.046","0.046","0.078","0.08","0.096","0.096","0.092","0.092" +"INTL.33-7-LVA-MK.A"," Latvia","--","--","--","--","--","--","--","--","--","--","--","--","1.499","1.504","1.506","1.521","1.521","1.487","1.517","1.523","1.523","1.565","1.565","1.537","1.536","1.536","1.536","1.536","1.536","1.536","1.576","1.576","1.576","1.587","1.588","1.588","1.564","1.564","1.565","1.565","1.576","1.588" +"INTL.33-7-LTU-MK.A"," 
Lithuania","--","--","--","--","--","--","--","--","--","--","--","--","0.106","0.106","0.108","0.108","0.108","0.108","0.108","0.112","0.112","0.113","0.103","0.109","0.11","0.117","0.117","0.115","0.115","0.116","0.116","0.116","0.116","0.116","0.117","0.117","0.117","0.117","0.117","0.117","0.116","0.128" +"INTL.33-7-LUX-MK.A"," Luxembourg","0.029","0.029","0.029","0.032","0.032","0.032","0.032","0.032","0.032","0.032","0.034","0.034","0.034","0.034","0.034","0.034","0.034","0.039","0.039","0.039","0.039","0.034","0.034","0.034","0.034","0.034","0.034","0.034","0.034","0.034","0.034","0.034","0.034","0.034","0.034","0.034","0.034","0.035","0.034","0.034","0.034","0.037" +"INTL.33-7-MLT-MK.A"," Malta","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0" +"INTL.33-7-MNE-MK.A"," Montenegro","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","0.658","0.658","0.658","0.658","0.658","0.658","0.658","0.651","0.651","0.651","0.651","0.652","0.652","0.652","0.658","0.649" +"INTL.33-7-NLD-MK.A"," Netherlands","0","0","0","0","0","0.002","0.002","0.002","0.002","0.025","0.037","0.037","0.037","0.037","0.037","0.037","0.037","0.037","0.037","0.037","0.037","0.037","0.037","0.037","0.037","0.037","0.037","0.037","0.037","0.037","0.037","0.037","0.037","0.037","0.037","0.037","0.037","0.037","0.037","0.037","0.038","0.038" +"INTL.33-7-MKD-MK.A"," North Macedonia","--","--","--","--","--","--","--","--","--","--","--","--","0.426","0.426","0.413","0.423","0.423","0.434","0.434","0.4338","0.434","0.436","0.436","0.448","0.516","0.54","0.545","0.546","0.552","0.553","0.555","0.556","0.595","0.603","0.617","0.632","0.66","0.674","0.674","0.674","0.674","0.644" +"INTL.33-7-NOR-MK.A"," Norway","19.443","20.963","21.789","22.37","22.628","23.076","23.555","24.358","24.453","25.728","25.817","25.823","25.963","25.983","26.215","27.379","27.494","27.327","26.982","27.54","26.766","26.319","26.604","26.947","26.721","27.222","27.398","27.647","28.062","28.188","28.367","28.618","29.158","29.682","29.889","29.939","30.281","30.382","31.12","31.182","31.556","31.952" +"INTL.33-7-POL-MK.A"," Poland","0.647","0.647","0.647","0.647","0.645","0.646","0.646","0.646","0.772","0.647","0.467","0.467","0.468","0.475","0.489","0.482","0.492","0.495","0.501","0.505","0.509","0.517","0.517","0.524","0.535","0.542","0.549","0.546","0.553","0.556","0.56","0.564","0.569","0.573","0.582","0.588","0.597","0.591","0.592","0.592","0.605","0.605" +"INTL.33-7-PRT-MK.A"," Portugal","2.516","2.615","2.854","2.944","3.016","2.721","2.818","2.82","2.722","2.799","2.783","2.772","3.146","3.613","3.697","3.848","3.867","3.877","3.94","3.93","3.918","3.943","3.966","3.966","3.974","3.968","4.004","4.012","4.009","4.042","4.057","4.49","4.414","4.363","4.368","4.446","4.458","4.462","4.484","4.484","4.373","4.372" +"INTL.33-7-ROU-MK.A"," Romania","3.455","3.533","3.734","3.885","4.062","4.42","4.706","5.057","5.421","5.583","5.666","5.723","5.687","5.872","5.938","6.011","6.038","6.074","6.081","6.082","6.12","6.122","6.242","6.248","6.279","6.289","6.282","6.331","6.362","6.358","6.382","6.391","6.456","6.249","6.256","6.359","6.377","6.328","6.328","6.328","6.221","6.221" +"INTL.33-7-SRB-MK.A"," 
Serbia","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","2.206","2.206","2.206","2.221","2.293","2.322","2.352","2.317","2.403","2.408","2.416","2.424","2.424","2.424","2.484","2.491" +"INTL.33-7-SVK-MK.A"," Slovakia","--","--","--","--","--","--","--","--","--","--","--","--","--","0.925","1.208","1.525","1.655","1.664","1.682","1.684","1.685","1.586","1.586","1.588","1.602","1.596","1.597","1.599","1.632","1.571","1.6","1.607","1.606","1.607","1.607","1.606","1.608","1.607","1.612","1.612","1.505","1.505" +"INTL.33-7-SVN-MK.A"," Slovenia","--","--","--","--","--","--","--","--","--","--","--","--","0.755","0.755","0.756","0.757","0.734","0.734","0.861","0.846","0.846","0.839","0.983","0.974","0.974","0.979","1.009","1.018","1.027","1.07","1.074","1.073","1.074","1.119","1.116","1.115","1.113","1.167","1.163","1.163","1.163","1.121" +"INTL.33-7-ESP-MK.A"," Spain","13.473","10.869","10.945","10.917","10.935","10.959","11.153","10.556","10.984","11.597","11.32","11.429","11.484","11.484","11.545","11.689","11.793","11.596","11.537","11.802","12.672","12.744","15.55","15.525","12.82","12.808","12.907","12.961","13.04","13.069","13.275","13.283","13.293","14.076","14.081","14.086","14.053","14.052","14.053","14.053","14.292","14.308" +"INTL.33-7-SWE-MK.A"," Sweden","14.859","14.919","15.215","15.29","15.445","15.69","15.813","15.996","16.112","15.759","15.904","15.891","16.021","15.867","16.072","15.725","15.776","16.371","16.169","16.432","16.506","16.523","16.187","16.098","16.302","16.302","16.234","16.592","16.352","16.544","16.624","16.478","16.315","16.395","15.897","16.23","16.367","16.403","16.332","16.332","16.379","16.379" +"INTL.33-7-CHE-MK.A"," Switzerland","11.45","11.46","11.47","11.47","11.48","11.48","11.51","11.51","11.52","11.58","3.474","3.484","3.504","3.509","3.526","3.541","3.55","3.553","3.584","3.614","3.636","3.642","3.653","3.669","3.65","3.682","3.694","3.7","3.709","3.749","3.81","3.852","3.882","3.896","3.948","3.996","4.06","4.112","4.193","4.193","4.193","4.193" +"INTL.33-7-TUR-MK.A"," Turkiye","2.131","2.356","3.082","3.239","3.875","3.875","3.878","5.003","6.219","6.598","6.764","7.114","8.379","9.682","9.865","9.863","9.935","10.102","10.307","10.537","11.175","11.673","12.241","12.579","12.645","12.906","13.063","13.395","13.829","14.553","15.831","17.137","19.609","22.289","23.643","25.868","26.681","27.273","28.291","28.503","30.984","31.497" +"INTL.33-7-GBR-MK.A"," United Kingdom","2.451","2.451","2.451","2.721","4.188","4.19","4.192","4.197","4.196","1.424","1.11","1.415","1.423","1.425","1.425","1.432","1.455","1.488","1.475","1.477","1.485","1.629","1.59","1.486","1.499","1.501","1.515","1.522","1.626","1.638","1.637","1.673","1.693","1.709","1.73","1.777","1.836","1.873","1.878","1.878","1.879","1.88" +""," Eurasia","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","" +"INTL.33-7-MDA-MK.A"," Moldova","--","--","--","--","--","--","--","--","--","--","--","--","0.064","0.064","0.064","0.056","0.056","0.064","0.064","0.064","0.064","0.06","0.06","0.06","0.059","0.059","0.056","0.056","0.064","0.064","0.064","0.064","0.064","0.064","0.064","0.064","0.064","0.064","0.064","0.064","0.076","0.076" +"INTL.33-7-UKR-MK.A"," 
Ukraine","--","--","--","--","--","--","--","--","--","--","--","--","4.705","4.706","4.706","4.706","4.706","4.706","4.706","4.7","4.7","4.731","4.758","4.766","4.781","4.717","4.746","4.731","4.798","4.795","4.596","4.607","4.608","4.632","4.665","4.697","4.658","4.668","4.668","4.668","4.666","4.43" diff --git a/data/eia_hydro_annual_generation.csv b/data/eia_hydro_annual_generation.csv index 859decf7..b228ddc6 100644 --- a/data/eia_hydro_annual_generation.csv +++ b/data/eia_hydro_annual_generation.csv @@ -1,53 +1,53 @@ -https://www.eia.gov/international/data/world/electricity/electricity-generation?pd=2&p=000000000000000000000000000000g&u=1&f=A&v=mapbubble&a=-&i=none&vo=value&t=R&g=000000000000002&l=73-1028i008017kg6368g80a4k000e0ag00gg0004g8g0ho00g000400008&l=72-00000000000000000000000000080000000000000000000g&s=315532800000&e=1609459200000&ev=false&,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, -Report generated on: 01-06-2023 21:17:46,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, -API,,1980,1981,1982,1983,1984,1985,1986,1987,1988,1989,1990,1991,1992,1993,1994,1995,1996,1997,1998,1999,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014,2015,2016,2017,2018,2019,2020,2021 -,hydroelectricity net generation (billion kWh),,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, -INTL.33-12-EURO-BKWH.A, Europe,"458,018","464,155","459,881","473,685","481,241","476,739","459,535","491,085","534,517","465,365","474,466","475,47","509,041","526,448","531,815","543,743","529,114164","543,845616","562,491501","566,861453","588,644662","584,806195","539,051405","503,7067","542,112443","542,974669","535,006084","538,449707","565,143111","561,761402","617,547148","540,926277","598,055253","629,44709","617,111295","613,079848","627,720566217","560,362524","616,5081462","606,5997419","644,1106599","628,1390143" -INTL.33-12-ALB-BKWH.A, Albania,"2,919","3,018","3,093","3,167","3,241","3,315","3,365","3,979","3,713","3,846","2,82","3,483","3,187","3,281","3,733","4,162","5,669","4,978","4,872","5,231","4,548","3,519","3,477","5,117","5,411","5,319","4,951","2,76","3,759","5,201","7,49133","4,09068","4,67775","6,88941","4,67676","5,83605","7,70418","4,47975","8,46648","5,15394","5,281","8,891943" -INTL.33-12-AUT-BKWH.A, Austria,"28,501","30,008","29,893","29,577","28,384","30,288","30,496","25,401","35,151","34,641","31,179","31,112","34,483","36,336","35,349","36,696","33,874","35,744","36,792","40,292","41,418","40,05","39,825","32,883","36,394","36,31","35,48","36,732","37,969","40,487","36,466","32,511","41,862","40,138","39,001","35,255","37,954","36,462","35,73","40,43655","41,9356096","38,75133" -INTL.33-12-BEL-BKWH.A, Belgium,"0,274","0,377","0,325","0,331","0,348","0,282","0,339","0,425","0,354","0,3","0,263","0,226","0,338","0,252","0,342","0,335","0,237","0,30195","0,38511","0,338","0,455","0,437","0,356","0,245","0,314","0,285","0,355","0,385","0,406","0,325","0,298","0,193","0,353","0,376","0,289","0,314","0,367","0,268","0,3135","0,302","0,2669","0,3933" -INTL.33-12-BIH-BKWH.A, Bosnia and Herzegovina,--,--,--,--,--,--,--,--,--,--,--,--,"3,374","2,343","3,424","3,607","5,104","4,608","4,511","5,477","5,043","5,129","5,215","4,456","5,919","5,938","5,798","3,961","4,818","6,177","7,946","4,343","4,173","7,164","5,876","5,495","5,585","3,7521","6,35382","6,02019","4,58","6,722" -INTL.33-12-BGR-BKWH.A, 
Bulgaria,"3,674","3,58","3,018","3,318","3,226","2,214","2,302","2,512","2,569","2,662","1,859","2,417","2,042","1,923","1,453","2,291","2,89","2,726","3,066","2,725","2,646","1,72","2,172","2,999","3,136","4,294","4,196","2,845","2,796","3,435","4,98168","2,84328","3,14622","3,99564","4,55598","5,59845","3,8412","2,79972","5,09553","2,929499","2,820398","4,819205" -INTL.33-12-HRV-BKWH.A, Croatia,--,--,--,--,--,--,--,--,--,--,--,--,"4,298","4,302","4,881","5,212","7,156","5,234","5,403","6,524","5,794","6,482","5,311","4,827","6,888","6,27","5,94","4,194","5,164","6,663","9,035","4,983","4,789","8,536","8,917","6,327","6,784","5,255","7,62399","5,87268","5,6624","7,1277" -INTL.33-12-CYP-BKWH.A, Cyprus,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 -INTL.33-12-CZE-BKWH.A, Czechia,--,--,--,--,--,--,--,--,--,--,--,--,--,"1,355","1,445","1,982","1,949","1,68201","1,382","1,664","1,7404","2,033","2,467","1,369","1,999","2,356","2,525","2,068","2,004","2,405","2,775","1,95","2,107","2,704","1,909","1,779","1,983","1,852","1,615","1,98792","2,143884","2,40852" -INTL.33-12-DNK-BKWH.A, Denmark,"0,03","0,031","0,028","0,036","0,028","0,027","0,029","0,029","0,032","0,027","0,027","0,026","0,028","0,027","0,033","0,03","0,019","0,019","0,02673","0,031","0,03","0,028","0,032","0,021","0,027","0,023","0,023","0,028","0,026","0,019","0,021","0,017","0,017","0,013","0,015","0,01803","0,01927","0,017871","0,0148621","0,0172171","0,017064","0,016295" -INTL.33-12-EST-BKWH.A, Estonia,--,--,--,--,--,--,--,--,--,--,--,--,"0,001","0,001","0,003","0,002","0,002","0,003","0,004","0,004","0,005","0,007","0,006","0,013","0,022","0,022","0,014","0,021","0,028","0,032","0,027","0,029999","0,042","0,026","0,027","0,027","0,035","0,025999","0,0150003","0,0189999","0,03","0,0248" -INTL.33-12-FRO-BKWH.A, Faroe Islands,"0,049","0,049","0,049","0,049","0,049","0,049","0,049","0,049","0,062","0,071","0,074","0,074","0,083","0,073","0,075","0,075","0,069564","0,075066","0,076501","0,069453","0,075262","0,075195","0,095535","0,08483","0,093443","0,097986","0,099934","0,103407","0,094921","0,091482","0,06676","0,092","0,099","0,091","0,121","0,132","0,105","0,11","0,107","0,102","0,11","0,11" -INTL.33-12-FIN-BKWH.A, Finland,"10,115","13,518","12,958","13,445","13,115","12,211","12,266","13,658","13,229","12,9","10,75","13,065","14,956","13,341","11,669","12,796","11,742","12,11958","14,9","12,652","14,513","13,073","10,668","9,495","14,919","13,646","11,379","14,035","16,941","12,559","12,743","12,278001","16,666998","12,672","13,240001","16,583999","15,634127","14,609473","13,1369998","12,2454823","15,883","15,766" -INTL.33-12-CSK-BKWH.A, Former Czechoslovakia,"4,8","4,2","3,7","3,9","3,2","4,3",4,"4,853","4,355","4,229","3,919","3,119","3,602",--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,-- -INTL.33-12-SCG-BKWH.A, Former Serbia and Montenegro,--,--,--,--,--,--,--,--,--,--,--,--,"11,23","10,395","11,016","12,071","14,266","12,636","12,763","13,243","11,88","12,326","11,633","9,752","11,01","11,912",--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,-- -INTL.33-12-YUG-BKWH.A, Former Yugoslavia,"27,868","25,044","23,295","21,623","25,645","24,363","27,474","25,98","25,612","23,256","19,601","18,929",--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,-- -INTL.33-12-FRA-BKWH.A, 
France,"68,253","70,358","68,6","67,515","64,01","60,248","60,953","68,623","73,952","45,744","52,796","56,277","68,313","64,3","78,057","72,196","64,43","63,151","61,479","71,832","66,466","73,888","59,992","58,567","59,276","50,965","55,741","57,029","63,017","56,428","61,945","45,184","59,099","71,042","62,993","54,876","60,094","49,389","64,485","56,913891","62,06191","58,856657" -INTL.33-12-DEU-BKWH.A, Germany,--,--,--,--,--,--,--,--,--,--,--,"14,742","17,223","17,699","19,731","21,562","21,737","17,18343","17,044","19,451","21,515","22,506","22,893","19,071","20,866","19,442","19,808","20,957","20,239","18,841","20,678","17,323","21,331","22,66","19,31","18,664","20,214","19,985","17,694","19,731","18,322","19,252" -INTL.33-12-DDR-BKWH.A," Germany, East","1,658","1,718","1,748","1,683","1,748","1,758","1,767","1,726","1,719","1,551","1,389",--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,-- -INTL.33-12-DEUW-BKWH.A," Germany, West","17,125","17,889","17,694","16,713","16,434","15,354","16,526","18,36","18,128","16,482","15,769",--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,-- -INTL.33-12-GIB-BKWH.A, Gibraltar,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 -INTL.33-12-GRC-BKWH.A, Greece,"3,396","3,398","3,551","2,331","2,852","2,792","3,222","2,768","2,354","1,888","1,751","3,068","2,181","2,26","2,573","3,494","4,305","3,84318","3,68","4,546","3,656","2,076","2,772","4,718","4,625","4,967","5,806","2,565","3,279","5,32","7,431","3,998","4,387","6,337","4,464","5,782","5,543","3,962","5,035","3,9798","3,343687","5,909225" -INTL.33-12-HUN-BKWH.A, Hungary,"0,111","0,166","0,158","0,153","0,179","0,153","0,152","0,167","0,167","0,156","0,176","0,192","0,156","0,164","0,159","0,161","0,205","0,21384","0,15345","0,179","0,176","0,184","0,192","0,169","0,203","0,2","0,184","0,208","0,211","0,226","0,184","0,215999","0,205999","0,207999","0,294001","0,226719","0,253308","0,213999","0,216","0,2129999","0,238","0,202379" -INTL.33-12-ISL-BKWH.A, Iceland,"3,053","3,085","3,407","3,588","3,738","3,667","3,846","3,918","4,169","4,217","4,162","4,162","4,267","4,421","4,47","4,635","4,724","5,15493","5,565","5,987","6,292","6,512","6,907","7,017","7,063","6,949","7,22","8,31","12,303","12,156","12,509999","12,381999","12,213999","12,747001","12,554","13,541","13,091609","13,891929","13,679377","13,32911","12,9196201","13,5746171" -INTL.33-12-IRL-BKWH.A, Ireland,"0,833","0,855","0,792","0,776","0,68","0,824","0,91","0,673","0,862","0,684","0,69","0,738","0,809","0,757","0,911","0,706","0,715","0,67122","0,907","0,838","0,838","0,59","0,903","0,592","0,624","0,625","0,717","0,66","0,959","0,893","0,593","0,699","0,795","0,593","0,701","0,798","0,674","0,685","0,687","0,87813","0,932656","0,750122" -INTL.33-12-ITA-BKWH.A, Italy,"44,997","42,782","41,216","40,96","41,923","40,616","40,626","39,05","40,205","33,647","31,31","41,817","41,778","41,011","44,212","37,404","41,617","41,18697","40,808","44,911","43,763","46,343","39,125","33,303","41,915","35,706","36,624","32,488","41,207","48,647","50,506","45,36477","41,45625","52,24626","57,95955","45,08163","42,00768","35,83701","48,29913","45,31824","47,551784","44,739" -INTL.33-12-XKS-BKWH.A, Kosovo,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,"0,075","0,119","0,154","0,104","0,095","0,142","0,149","0,139","0,243","0,177","0,27027","0,2079","0,262826","0,300635" -INTL.33-12-LVA-BKWH.A, 
Latvia,--,--,--,--,--,--,--,--,--,--,--,--,"2,498","2,846","3,272","2,908","1,841","2,922","2,99","2,729","2,791","2,805","2,438","2,243","3,078","3,293","2,671","2,706","3,078","3,422","3,487998","2,8568","3,677","2,838","1,953","1,841","2,522819","4,355513","2,4170639","2,0958919","2,5840101","2,6889293" -INTL.33-12-LTU-BKWH.A, Lithuania,--,--,--,--,--,--,--,--,--,--,--,--,"0,308","0,389","0,447","0,369","0,323","0,291","0,413","0,409","0,336","0,322","0,35","0,323","0,417","0,446193","0,393","0,417","0,398","0,42","0,535","0,475","0,419","0,516","0,395","0,346","0,45","0,597","0,427","0,34254","0,3006","0,3837" -INTL.33-12-LUX-BKWH.A, Luxembourg,"0,086","0,095","0,084","0,083","0,088","0,071","0,084","0,101","0,097","0,072","0,07","0,083","0,069","0,066","0,117","0,087","0,059","0,082","0,114","0,084","0,119","0,117","0,098","0,078","0,103","0,093","0,11","0,116","0,131","0,105","0,104","0,061","0,095","0,114","0,104","0,095","0,111","0,082","0,089","0,10593","0,091602","0,1068" -INTL.33-12-MLT-BKWH.A, Malta,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 -INTL.33-12-MNE-BKWH.A, Montenegro,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,"1,733","1,271","1,524","2,05","2,723","1,192","1,462","2,479","1,734","1,476","1,825","1,014","1,693443","1,262781","0,867637","1,212652" -INTL.33-12-NLD-BKWH.A, Netherlands,0,0,0,0,0,"0,003","0,003","0,001","0,002","0,037","0,119","0,079","0,119","0,091","0,1","0,087","0,079","0,09108","0,111","0,089","0,141","0,116","0,109","0,071","0,094","0,087","0,105","0,106","0,101","0,097","0,105","0,057","0,104389","0,11431","0,112202","0,0927","0,100078","0,060759","0,0723481","0,074182","0,0462851","0,0838927" -INTL.33-12-MKD-BKWH.A, North Macedonia,--,--,--,--,--,--,--,--,--,--,--,--,"0,817","0,517","0,696","0,793","0,842","0,891","1,072","1,375","1,158","0,62","0,749","1,36","1,467","1,477","1,634",1,"0,832","1,257","2,407","1,419","1,031","1,568","1,195","1,846","1,878","1,099","1,773","1,15236","1,277144","1,451623" -INTL.33-12-NOR-BKWH.A, Norway,"82,717","91,876","91,507","104,704","104,895","101,464","95,321","102,341","107,919","117,369","119,933","109,032","115,505","118,024","110,398","120,315","102,823","108,677","114,546","120,237","140,4","119,258","128,078","104,425","107,693","134,331","118,175","132,319","137,654","124,03","116,257","119,78","141,189","127,551","134,844","136,662","142,244","141,651","138,202","123,66288","141,69",144 -INTL.33-12-POL-BKWH.A, Poland,"2,326","2,116","1,528","1,658","1,394","1,833","1,534","1,644","1,775","1,593","1,403","1,411","1,492","1,473","1,716","1,868","1,912","1,941","2,286","2,133","2,085","2,302","2,256","1,654","2,06","2,179","2,022","2,328","2,13","2,351","2,9","2,313","2,02","2,421","2,165","1,814","2,117","2,552","1,949","1,93842","2,118337","2,339192" -INTL.33-12-PRT-BKWH.A, Portugal,"7,873","4,934","6,82","7,897","9,609","10,512","8,364","9,005","12,037","5,72","9,065","8,952","4,599","8,453","10,551","8,26","14,613","12,97395","12,853","7,213","11,21","13,894","7,722","15,566","9,77","4,684","10,892","9,991","6,73","8,201","15,954","11,423","5,589","13,652","15,471","8,615","15,608","5,79","12,316","8,6526","12,082581","11,846464" -INTL.33-12-ROU-BKWH.A, 
Romania,"12,506","12,605","11,731","9,934","11,208","11,772","10,688","11,084","13,479","12,497","10,87","14,107","11,583","12,64","12,916","16,526","15,597","17,334","18,69","18,107","14,63","14,774","15,886","13,126","16,348","20,005","18,172","15,806","17,023","15,379","19,684","14,581","11,945","14,807","18,618","16,467","17,848","14,349","17,48736","15,580622","15,381243","17,376933" -INTL.33-12-SRB-BKWH.A, Serbia,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,"10,855","9,937","9,468","10,436","11,772","8,58","9,193","10,101","10,893","9,979","10,684","9,061","10,53261","9,457175","9,034496","11,284232" -INTL.33-12-SVK-BKWH.A, Slovakia,--,--,--,--,--,--,--,--,--,--,--,--,--,"3,432","4,311","4,831","4,185","4,023","4,224","4,429","4,569","4,878","5,215","3,4452","4,059","4,592","4,355","4,406",4,"4,324","5,184","3,211","3,687","4,329","3,762","3,701","4,302","4,321","3,506","4,27383","4,517","4,17" -INTL.33-12-SVN-BKWH.A, Slovenia,--,--,--,--,--,--,--,--,--,--,--,--,"3,379","2,974","3,348","3,187","3,616","3,046","3,4","3,684","3,771","3,741","3,265","2,916","4,033","3,426","3,555","3,233","3,978","4,666","4,452","3,506","3,841","4,562","6,011","3,75","4,443","3,814","4,643","4,43421","4,93406","4,711944" -INTL.33-12-ESP-BKWH.A, Spain,"29,16","21,64","25,99","26,696","31,088","30,895","26,105","27,016","34,76","19,046","25,16","27,01","18,731","24,133","27,898","22,881","39,404","34,43","33,665","22,634","29,274","40,617","22,691","40,643","31,359","18,209","25,699","27,036","23,13","26,147","41,576","30,07","20,192","36,45","38,815","27,656","35,77","18,007","33,743","24,23025","30,507","29,626" -INTL.33-12-SWE-BKWH.A, Sweden,"58,133","59,006","54,369","62,801","67,106","70,095","60,134","70,95","69,016","70,911","71,778","62,603","73,588","73,905","58,508","67,421","51,2226","68,365","74,25","70,974","77,798","78,269","65,696","53,005","59,522","72,075","61,106","65,497","68,378","65,193","66,279","66,047","78,333","60,81","63,227","74,734","61,645","64,651","61,79","64,46583","71,6","71,086" -INTL.33-12-CHE-BKWH.A, Switzerland,"32,481","35,13","35,974","35,069","29,871","31,731","32,576","34,328","35,437","29,477","29,497","31,756","32,373","35,416","38,678","34,817","28,458","33,70257","33,136","37,104","33,854","38,29","32,323","31,948","30,938","28,664","28,273","32,362","33,214","32,833","33,261","29,906","35,783","35,628","35,122","35,378","31,984","31,47968","32,095881","35,156989","37,867647","36,964485" -INTL.33-12-TUR-BKWH.A, Turkey,"11,159","12,308","13,81","11,13","13,19","11,822","11,637","18,314","28,447","17,61","22,917","22,456","26,302","33,611","30,28","35,186","40,07","39,41784","41,80671","34,33","30,57","23,77","33,346","34,977","45,623","39,165","43,802","35,492","32,937","35,598","51,423001","51,154999","56,668998","58,225","39,750001","65,856","66,685883","57,823851","59,490211","88,2094218","78,094369","55,1755392" -INTL.33-12-GBR-BKWH.A, United Kingdom,"3,921","4,369","4,543","4,548","3,992","4,08","4,767","4,13","4,915","4,732","5,119","4,534","5,329","4,237","5,043","4,79","3,359","4,127","5,117","5,336","5,085","4,055","4,78787","3,22767","4,844","4,92149","4,59315","5,0773","5,14119","5,22792","3,59138","5,69175","5,30965","4,70147","5,8878","6,29727","5,370412217","5,88187","5,44327","5,84628","6,75391","5,0149" -, Eurasia,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, -INTL.33-12-MDA-BKWH.A, 
Moldova,--,--,--,--,--,--,--,--,--,--,--,--,"0,255","0,371","0,275","0,321","0,362","0,378","0,387","0,363","0,392","0,359","0,348","0,358","0,35","0,359","0,365","0,354","0,385","0,354","0,403","0,348","0,266","0,311","0,317","0,265","0,228","0,282","0,27324","0,29799","0,276","0,316" -INTL.33-12-UKR-BKWH.A, Ukraine,--,--,--,--,--,--,--,--,--,--,--,--,"7,725","10,929","11,997","9,853","8,546","9,757","15,756","14,177","11,161","11,912","9,531","9,146","11,635","12,239","12,757","10,042","11,397","11,817","13,02","10,837","10,374","13,663","8,393","5,343","7,594","8,856","10,32372","6,5083","7,5638","10,3326" +# https://www.eia.gov/international/data/world/electricity/electricity-generation?pd=2&p=000000000000000000000000000000g&u=1&f=A&v=mapbubble&a=-&i=none&vo=value&t=R&g=000000000000002&l=73-1028i008017kg6368g80a4k000e0ag00gg0004g8g0ho00g000400008&l=72-00000000000000000000000000080000000000000000000g&s=315532800000&e=1609459200000&ev=false& +Report generated on: 03-14-2024 13:40:38 +"API","","1980","1981","1982","1983","1984","1985","1986","1987","1988","1989","1990","1991","1992","1993","1994","1995","1996","1997","1998","1999","2000","2001","2002","2003","2004","2005","2006","2007","2008","2009","2010","2011","2012","2013","2014","2015","2016","2017","2018","2019","2020","2021" +"","hydroelectricity net generation (billion kWh)","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","" +"INTL.33-12-EURO-BKWH.A"," Europe","458.018","464.155","459.881","473.685","481.241","476.739","459.535","491.085","534.517","465.365","474.466","475.47","509.041","526.448","531.815","543.743","529.114164","543.845616","562.491501","566.861453","588.644662","584.806195","539.051405","503.7067","542.112443","542.974669","535.006084","538.449707","565.143111","561.761402","617.547148","540.926277","598.055253","629.44709","617.111295","613.079848","627.720566217","560.362524","616.5081462","606.5997419","644.1106599","628.1390143" +"INTL.33-12-ALB-BKWH.A"," Albania","2.919","3.018","3.093","3.167","3.241","3.315","3.365","3.979","3.713","3.846","2.82","3.483","3.187","3.281","3.733","4.162","5.669","4.978","4.872","5.231","4.548","3.519","3.477","5.117","5.411","5.319","4.951","2.76","3.759","5.201","7.49133","4.09068","4.67775","6.88941","4.67676","5.83605","7.70418","4.47975","8.46648","5.15394","5.281","8.891943" +"INTL.33-12-AUT-BKWH.A"," Austria","28.501","30.008","29.893","29.577","28.384","30.288","30.496","25.401","35.151","34.641","31.179","31.112","34.483","36.336","35.349","36.696","33.874","35.744","36.792","40.292","41.418","40.05","39.825","32.883","36.394","36.31","35.48","36.732","37.969","40.487","36.466","32.511","41.862","40.138","39.001","35.255","37.954","36.462","35.73","40.43655","41.9356096","38.75133" +"INTL.33-12-BEL-BKWH.A"," Belgium","0.274","0.377","0.325","0.331","0.348","0.282","0.339","0.425","0.354","0.3","0.263","0.226","0.338","0.252","0.342","0.335","0.237","0.30195","0.38511","0.338","0.455","0.437","0.356","0.245","0.314","0.285","0.355","0.385","0.406","0.325","0.298","0.193","0.353","0.376","0.289","0.314","0.367","0.268","0.3135","0.302","0.2669","0.3933" +"INTL.33-12-BIH-BKWH.A"," Bosnia and 
Herzegovina","--","--","--","--","--","--","--","--","--","--","--","--","3.374","2.343","3.424","3.607","5.104","4.608","4.511","5.477","5.043","5.129","5.215","4.456","5.919","5.938","5.798","3.961","4.818","6.177","7.946","4.343","4.173","7.164","5.876","5.495","5.585","3.7521","6.35382","6.02019","4.58","6.722" +"INTL.33-12-BGR-BKWH.A"," Bulgaria","3.674","3.58","3.018","3.318","3.226","2.214","2.302","2.512","2.569","2.662","1.859","2.417","2.042","1.923","1.453","2.291","2.89","2.726","3.066","2.725","2.646","1.72","2.172","2.999","3.136","4.294","4.196","2.845","2.796","3.435","4.98168","2.84328","3.14622","3.99564","4.55598","5.59845","3.8412","2.79972","5.09553","2.929499","2.820398","4.819205" +"INTL.33-12-HRV-BKWH.A"," Croatia","--","--","--","--","--","--","--","--","--","--","--","--","4.298","4.302","4.881","5.212","7.156","5.234","5.403","6.524","5.794","6.482","5.311","4.827","6.888","6.27","5.94","4.194","5.164","6.663","9.035","4.983","4.789","8.536","8.917","6.327","6.784","5.255","7.62399","5.87268","5.6624","7.1277" +"INTL.33-12-CYP-BKWH.A"," Cyprus","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0" +"INTL.33-12-CZE-BKWH.A"," Czechia","--","--","--","--","--","--","--","--","--","--","--","--","--","1.355","1.445","1.982","1.949","1.68201","1.382","1.664","1.7404","2.033","2.467","1.369","1.999","2.356","2.525","2.068","2.004","2.405","2.775","1.95","2.107","2.704","1.909","1.779","1.983","1.852","1.615","1.98792","2.143884","2.40852" +"INTL.33-12-DNK-BKWH.A"," Denmark","0.03","0.031","0.028","0.036","0.028","0.027","0.029","0.029","0.032","0.027","0.027","0.026","0.028","0.027","0.033","0.03","0.019","0.019","0.02673","0.031","0.03","0.028","0.032","0.021","0.027","0.023","0.023","0.028","0.026","0.019","0.021","0.017","0.017","0.013","0.015","0.01803","0.01927","0.017871","0.0148621","0.0172171","0.017064","0.016295" +"INTL.33-12-EST-BKWH.A"," Estonia","--","--","--","--","--","--","--","--","--","--","--","--","0.001","0.001","0.003","0.002","0.002","0.003","0.004","0.004","0.005","0.007","0.006","0.013","0.022","0.022","0.014","0.021","0.028","0.032","0.027","0.029999","0.042","0.026","0.027","0.027","0.035","0.025999","0.0150003","0.0189999","0.03","0.0248" +"INTL.33-12-FRO-BKWH.A"," Faroe Islands","0.049","0.049","0.049","0.049","0.049","0.049","0.049","0.049","0.062","0.071","0.074","0.074","0.083","0.073","0.075","0.075","0.069564","0.075066","0.076501","0.069453","0.075262","0.075195","0.095535","0.08483","0.093443","0.097986","0.099934","0.103407","0.094921","0.091482","0.06676","0.092","0.099","0.091","0.121","0.132","0.105","0.11","0.107","0.102","0.11","0.11" +"INTL.33-12-FIN-BKWH.A"," Finland","10.115","13.518","12.958","13.445","13.115","12.211","12.266","13.658","13.229","12.9","10.75","13.065","14.956","13.341","11.669","12.796","11.742","12.11958","14.9","12.652","14.513","13.073","10.668","9.495","14.919","13.646","11.379","14.035","16.941","12.559","12.743","12.278001","16.666998","12.672","13.240001","16.583999","15.634127","14.609473","13.1369998","12.2454823","15.883","15.766" +"INTL.33-12-CSK-BKWH.A"," Former Czechoslovakia","4.8","4.2","3.7","3.9","3.2","4.3","4","4.853","4.355","4.229","3.919","3.119","3.602","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--" +"INTL.33-12-SCG-BKWH.A"," Former Serbia and 
Montenegro","--","--","--","--","--","--","--","--","--","--","--","--","11.23","10.395","11.016","12.071","14.266","12.636","12.763","13.243","11.88","12.326","11.633","9.752","11.01","11.912","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--" +"INTL.33-12-YUG-BKWH.A"," Former Yugoslavia","27.868","25.044","23.295","21.623","25.645","24.363","27.474","25.98","25.612","23.256","19.601","18.929","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--" +"INTL.33-12-FRA-BKWH.A"," France","68.253","70.358","68.6","67.515","64.01","60.248","60.953","68.623","73.952","45.744","52.796","56.277","68.313","64.3","78.057","72.196","64.43","63.151","61.479","71.832","66.466","73.888","59.992","58.567","59.276","50.965","55.741","57.029","63.017","56.428","61.945","45.184","59.099","71.042","62.993","54.876","60.094","49.389","64.485","56.913891","62.06191","58.856657" +"INTL.33-12-DEU-BKWH.A"," Germany","--","--","--","--","--","--","--","--","--","--","--","14.742","17.223","17.699","19.731","21.562","21.737","17.18343","17.044","19.451","21.515","22.506","22.893","19.071","20.866","19.442","19.808","20.957","20.239","18.841","20.678","17.323","21.331","22.66","19.31","18.664","20.214","19.985","17.694","19.731","18.322","19.252" +"INTL.33-12-DDR-BKWH.A"," Germany, East","1.658","1.718","1.748","1.683","1.748","1.758","1.767","1.726","1.719","1.551","1.389","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--" +"INTL.33-12-DEUW-BKWH.A"," Germany, West","17.125","17.889","17.694","16.713","16.434","15.354","16.526","18.36","18.128","16.482","15.769","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--" +"INTL.33-12-GIB-BKWH.A"," Gibraltar","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0" +"INTL.33-12-GRC-BKWH.A"," Greece","3.396","3.398","3.551","2.331","2.852","2.792","3.222","2.768","2.354","1.888","1.751","3.068","2.181","2.26","2.573","3.494","4.305","3.84318","3.68","4.546","3.656","2.076","2.772","4.718","4.625","4.967","5.806","2.565","3.279","5.32","7.431","3.998","4.387","6.337","4.464","5.782","5.543","3.962","5.035","3.9798","3.343687","5.909225" +"INTL.33-12-HUN-BKWH.A"," Hungary","0.111","0.166","0.158","0.153","0.179","0.153","0.152","0.167","0.167","0.156","0.176","0.192","0.156","0.164","0.159","0.161","0.205","0.21384","0.15345","0.179","0.176","0.184","0.192","0.169","0.203","0.2","0.184","0.208","0.211","0.226","0.184","0.215999","0.205999","0.207999","0.294001","0.226719","0.253308","0.213999","0.216","0.2129999","0.238","0.202379" +"INTL.33-12-ISL-BKWH.A"," Iceland","3.053","3.085","3.407","3.588","3.738","3.667","3.846","3.918","4.169","4.217","4.162","4.162","4.267","4.421","4.47","4.635","4.724","5.15493","5.565","5.987","6.292","6.512","6.907","7.017","7.063","6.949","7.22","8.31","12.303","12.156","12.509999","12.381999","12.213999","12.747001","12.554","13.541","13.091609","13.891929","13.679377","13.32911","12.9196201","13.5746171" +"INTL.33-12-IRL-BKWH.A"," 
Ireland","0.833","0.855","0.792","0.776","0.68","0.824","0.91","0.673","0.862","0.684","0.69","0.738","0.809","0.757","0.911","0.706","0.715","0.67122","0.907","0.838","0.838","0.59","0.903","0.592","0.624","0.625","0.717","0.66","0.959","0.893","0.593","0.699","0.795","0.593","0.701","0.798","0.674","0.685","0.687","0.87813","0.932656","0.750122" +"INTL.33-12-ITA-BKWH.A"," Italy","44.997","42.782","41.216","40.96","41.923","40.616","40.626","39.05","40.205","33.647","31.31","41.817","41.778","41.011","44.212","37.404","41.617","41.18697","40.808","44.911","43.763","46.343","39.125","33.303","41.915","35.706","36.624","32.488","41.207","48.647","50.506","45.36477","41.45625","52.24626","57.95955","45.08163","42.00768","35.83701","48.29913","45.31824","47.551784","44.739" +"INTL.33-12-XKS-BKWH.A"," Kosovo","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","0.075","0.119","0.154","0.104","0.095","0.142","0.149","0.139","0.243","0.177","0.27027","0.2079","0.262826","0.300635" +"INTL.33-12-LVA-BKWH.A"," Latvia","--","--","--","--","--","--","--","--","--","--","--","--","2.498","2.846","3.272","2.908","1.841","2.922","2.99","2.729","2.791","2.805","2.438","2.243","3.078","3.293","2.671","2.706","3.078","3.422","3.487998","2.8568","3.677","2.838","1.953","1.841","2.522819","4.355513","2.4170639","2.0958919","2.5840101","2.6889293" +"INTL.33-12-LTU-BKWH.A"," Lithuania","--","--","--","--","--","--","--","--","--","--","--","--","0.308","0.389","0.447","0.369","0.323","0.291","0.413","0.409","0.336","0.322","0.35","0.323","0.417","0.446193","0.393","0.417","0.398","0.42","0.535","0.475","0.419","0.516","0.395","0.346","0.45","0.597","0.427","0.34254","0.3006","0.3837" +"INTL.33-12-LUX-BKWH.A"," Luxembourg","0.086","0.095","0.084","0.083","0.088","0.071","0.084","0.101","0.097","0.072","0.07","0.083","0.069","0.066","0.117","0.087","0.059","0.082","0.114","0.084","0.119","0.117","0.098","0.078","0.103","0.093","0.11","0.116","0.131","0.105","0.104","0.061","0.095","0.114","0.104","0.095","0.111","0.082","0.089","0.10593","0.091602","0.1068" +"INTL.33-12-MLT-BKWH.A"," Malta","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0" +"INTL.33-12-MNE-BKWH.A"," Montenegro","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","1.733","1.271","1.524","2.05","2.723","1.192","1.462","2.479","1.734","1.476","1.825","1.014","1.693443","1.262781","0.867637","1.212652" +"INTL.33-12-NLD-BKWH.A"," Netherlands","0","0","0","0","0","0.003","0.003","0.001","0.002","0.037","0.119","0.079","0.119","0.091","0.1","0.087","0.079","0.09108","0.111","0.089","0.141","0.116","0.109","0.071","0.094","0.087","0.105","0.106","0.101","0.097","0.105","0.057","0.104389","0.11431","0.112202","0.0927","0.100078","0.060759","0.0723481","0.074182","0.0462851","0.0838927" +"INTL.33-12-MKD-BKWH.A"," North Macedonia","--","--","--","--","--","--","--","--","--","--","--","--","0.817","0.517","0.696","0.793","0.842","0.891","1.072","1.375","1.158","0.62","0.749","1.36","1.467","1.477","1.634","1","0.832","1.257","2.407","1.419","1.031","1.568","1.195","1.846","1.878","1.099","1.773","1.15236","1.277144","1.451623" +"INTL.33-12-NOR-BKWH.A"," 
Norway","82.717","91.876","91.507","104.704","104.895","101.464","95.321","102.341","107.919","117.369","119.933","109.032","115.505","118.024","110.398","120.315","102.823","108.677","114.546","120.237","140.4","119.258","128.078","104.425","107.693","134.331","118.175","132.319","137.654","124.03","116.257","119.78","141.189","127.551","134.844","136.662","142.244","141.651","138.202","123.66288","141.69","144" +"INTL.33-12-POL-BKWH.A"," Poland","2.326","2.116","1.528","1.658","1.394","1.833","1.534","1.644","1.775","1.593","1.403","1.411","1.492","1.473","1.716","1.868","1.912","1.941","2.286","2.133","2.085","2.302","2.256","1.654","2.06","2.179","2.022","2.328","2.13","2.351","2.9","2.313","2.02","2.421","2.165","1.814","2.117","2.552","1.949","1.93842","2.118337","2.339192" +"INTL.33-12-PRT-BKWH.A"," Portugal","7.873","4.934","6.82","7.897","9.609","10.512","8.364","9.005","12.037","5.72","9.065","8.952","4.599","8.453","10.551","8.26","14.613","12.97395","12.853","7.213","11.21","13.894","7.722","15.566","9.77","4.684","10.892","9.991","6.73","8.201","15.954","11.423","5.589","13.652","15.471","8.615","15.608","5.79","12.316","8.6526","12.082581","11.846464" +"INTL.33-12-ROU-BKWH.A"," Romania","12.506","12.605","11.731","9.934","11.208","11.772","10.688","11.084","13.479","12.497","10.87","14.107","11.583","12.64","12.916","16.526","15.597","17.334","18.69","18.107","14.63","14.774","15.886","13.126","16.348","20.005","18.172","15.806","17.023","15.379","19.684","14.581","11.945","14.807","18.618","16.467","17.848","14.349","17.48736","15.580622","15.381243","17.376933" +"INTL.33-12-SRB-BKWH.A"," Serbia","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","10.855","9.937","9.468","10.436","11.772","8.58","9.193","10.101","10.893","9.979","10.684","9.061","10.53261","9.457175","9.034496","11.284232" +"INTL.33-12-SVK-BKWH.A"," Slovakia","--","--","--","--","--","--","--","--","--","--","--","--","--","3.432","4.311","4.831","4.185","4.023","4.224","4.429","4.569","4.878","5.215","3.4452","4.059","4.592","4.355","4.406","4","4.324","5.184","3.211","3.687","4.329","3.762","3.701","4.302","4.321","3.506","4.27383","4.517","4.17" +"INTL.33-12-SVN-BKWH.A"," Slovenia","--","--","--","--","--","--","--","--","--","--","--","--","3.379","2.974","3.348","3.187","3.616","3.046","3.4","3.684","3.771","3.741","3.265","2.916","4.033","3.426","3.555","3.233","3.978","4.666","4.452","3.506","3.841","4.562","6.011","3.75","4.443","3.814","4.643","4.43421","4.93406","4.711944" +"INTL.33-12-ESP-BKWH.A"," Spain","29.16","21.64","25.99","26.696","31.088","30.895","26.105","27.016","34.76","19.046","25.16","27.01","18.731","24.133","27.898","22.881","39.404","34.43","33.665","22.634","29.274","40.617","22.691","40.643","31.359","18.209","25.699","27.036","23.13","26.147","41.576","30.07","20.192","36.45","38.815","27.656","35.77","18.007","33.743","24.23025","30.507","29.626" +"INTL.33-12-SWE-BKWH.A"," Sweden","58.133","59.006","54.369","62.801","67.106","70.095","60.134","70.95","69.016","70.911","71.778","62.603","73.588","73.905","58.508","67.421","51.2226","68.365","74.25","70.974","77.798","78.269","65.696","53.005","59.522","72.075","61.106","65.497","68.378","65.193","66.279","66.047","78.333","60.81","63.227","74.734","61.645","64.651","61.79","64.46583","71.6","71.086" +"INTL.33-12-CHE-BKWH.A"," 
Switzerland","32.481","35.13","35.974","35.069","29.871","31.731","32.576","34.328","35.437","29.477","29.497","31.756","32.373","35.416","38.678","34.817","28.458","33.70257","33.136","37.104","33.854","38.29","32.323","31.948","30.938","28.664","28.273","32.362","33.214","32.833","33.261","29.906","35.783","35.628","35.122","35.378","31.984","31.47968","32.095881","35.156989","37.867647","36.964485" +"INTL.33-12-TUR-BKWH.A"," Turkiye","11.159","12.308","13.81","11.13","13.19","11.822","11.637","18.314","28.447","17.61","22.917","22.456","26.302","33.611","30.28","35.186","40.07","39.41784","41.80671","34.33","30.57","23.77","33.346","34.977","45.623","39.165","43.802","35.492","32.937","35.598","51.423001","51.154999","56.668998","58.225","39.750001","65.856","66.685883","57.823851","59.490211","88.2094218","78.094369","55.1755392" +"INTL.33-12-GBR-BKWH.A"," United Kingdom","3.921","4.369","4.543","4.548","3.992","4.08","4.767","4.13","4.915","4.732","5.119","4.534","5.329","4.237","5.043","4.79","3.359","4.127","5.117","5.336","5.085","4.055","4.78787","3.22767","4.844","4.92149","4.59315","5.0773","5.14119","5.22792","3.59138","5.69175","5.30965","4.70147","5.8878","6.29727","5.370412217","5.88187","5.44327","5.84628","6.75391","5.0149" +""," Eurasia","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","" +"INTL.33-12-MDA-BKWH.A"," Moldova","--","--","--","--","--","--","--","--","--","--","--","--","0.255","0.371","0.275","0.321","0.362","0.378","0.387","0.363","0.392","0.359","0.348","0.358","0.35","0.359","0.365","0.354","0.385","0.354","0.403","0.348","0.266","0.311","0.317","0.265","0.228","0.282","0.27324","0.29799","0.276","0.316" +"INTL.33-12-UKR-BKWH.A"," Ukraine","--","--","--","--","--","--","--","--","--","--","--","--","7.725","10.929","11.997","9.853","8.546","9.757","15.756","14.177","11.161","11.912","9.531","9.146","11.635","12.239","12.757","10.042","11.397","11.817","13.02","10.837","10.374","13.663","8.393","5.343","7.594","8.856","10.32372","6.5083","7.5638","10.3326" diff --git a/data/era5-annual-HDD-per-country.csv b/data/era5-annual-HDD-per-country.csv new file mode 100644 index 00000000..5f6ad45d --- /dev/null +++ b/data/era5-annual-HDD-per-country.csv @@ -0,0 +1,34 @@ +name,1941,1942,1943,1944,1945,1946,1947,1948,1949,1950,1951,1952,1953,1954,1955,1956,1957,1958,1959,1960,1961,1962,1963,1964,1965,1966,1967,1968,1969,1970,1971,1972,1973,1974,1975,1976,1977,1978,1979,1980,1981,1982,1983,1984,1985,1986,1987,1988,1989,1990,1991,1992,1993,1994,1995,1996,1997,1998,1999,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014,2015,2016,2017,2018,2019,2020,2021,2022,2023 +AL,73,70,62,70,74,64,59,65,62,57,56,58,65,67,55,70,58,59,61,54,54,62,62,62,68,59,60,57,57,58,61,57,64,60,61,61,54,63,57,65,63,58,61,57,57,58,61,62,56,53,67,62,62,53,61,61,60,60,56,58,57,53,61,58,65,64,55,55,54,53,58,58,50,46,54,50,54,47,49,50,54,74,47 +AT,448,429,391,433,413,406,403,399,376,383,374,416,379,422,419,449,387,393,378,389,367,437,425,406,424,376,385,400,409,413,394,407,408,379,384,400,380,414,405,428,397,384,377,411,420,406,409,378,369,372,415,373,383,334,380,416,371,373,368,343,371,337,370,370,384,362,333,340,351,388,337,353,358,302,324,336,347,315,320,322,358,456,307 
+BE,107,109,89,104,96,100,101,90,92,101,95,105,94,102,109,113,91,97,88,89,84,111,114,100,100,91,94,101,102,101,96,102,101,90,97,97,92,103,108,105,99,94,96,99,112,107,108,85,84,83,101,92,94,82,89,110,90,89,84,79,89,78,89,90,87,86,76,87,87,107,75,90,99,69,81,88,82,81,80,71,89,107,74 +BG,358,373,318,329,339,331,304,319,305,299,283,294,334,367,295,363,306,283,320,268,277,303,324,313,323,259,297,292,320,286,300,306,323,303,296,314,287,310,293,326,301,312,295,306,326,313,333,320,276,272,332,303,318,266,307,335,325,303,273,275,284,272,322,277,298,296,259,266,266,273,313,295,258,250,261,265,281,261,232,250,277,372,228 +BA,192,189,158,181,181,174,168,168,167,155,144,164,167,183,158,194,157,157,152,143,141,173,180,176,176,155,162,157,165,165,163,154,170,156,153,168,145,170,153,176,163,154,157,159,167,160,166,158,146,139,174,152,162,143,157,171,156,158,147,132,145,130,156,146,164,148,134,132,139,147,144,147,133,113,137,134,144,128,125,127,141,179,116 +CH,245,231,213,239,230,222,215,217,210,212,214,225,210,226,221,240,211,215,203,214,200,234,231,214,229,213,217,219,225,227,217,221,224,213,216,220,208,219,220,229,216,208,208,227,224,217,217,200,202,200,217,204,208,183,206,217,195,205,203,192,200,189,199,204,209,193,187,194,193,213,179,196,204,178,181,189,191,174,184,176,197,245,171 +CZ,373,366,311,341,326,330,353,313,299,316,297,344,307,351,350,377,313,322,301,314,294,358,361,345,351,297,296,320,342,336,319,326,322,282,296,315,299,330,328,347,316,308,298,323,350,326,346,297,280,283,333,299,316,279,312,364,319,296,288,265,309,285,308,302,311,300,269,275,289,338,281,298,306,242,261,283,287,262,255,259,302,378,248 +DE,1563,1537,1250,1388,1316,1354,1464,1255,1205,1315,1254,1428,1237,1416,1484,1567,1282,1342,1229,1266,1183,1486,1528,1390,1399,1269,1211,1341,1447,1429,1300,1388,1356,1184,1258,1339,1221,1377,1433,1433,1348,1288,1273,1384,1492,1425,1491,1201,1156,1135,1367,1241,1312,1155,1295,1548,1288,1220,1172,1085,1250,1168,1269,1236,1239,1198,1072,1159,1202,1459,1112,1232,1305,992,1095,1188,1148,1092,1070,1019,1230,1509,1021 +DK,200,198,151,167,158,171,185,158,142,164,160,176,152,175,186,190,163,178,154,169,154,184,194,176,182,181,153,170,182,186,159,166,159,147,147,172,161,171,190,177,175,164,155,162,191,181,190,151,137,132,160,147,166,154,162,191,162,154,147,134,161,148,158,151,150,144,134,137,150,193,144,159,160,125,138,145,139,142,134,125,150,193,138 +ES,1045,986,927,1012,830,994,860,807,904,853,962,911,927,948,785,1092,934,848,821,890,742,952,981,967,949,878,890,839,933,899,971,961,943,931,928,928,797,906,887,911,793,787,807,920,860,891,781,783,684,767,911,852,889,724,627,759,605,735,796,750,757,677,746,813,856,714,776,785,736,856,673,810,830,649,684,722,688,768,708,672,728,933,655 +EE,373,352,266,284,307,298,320,274,250,285,297,310,283,297,321,336,278,310,281,305,253,298,323,292,306,324,283,310,333,317,285,285,289,256,245,327,302,324,299,313,289,278,260,271,328,294,332,281,232,240,257,257,285,288,265,306,281,278,262,235,273,271,278,270,270,260,250,230,268,315,253,286,259,254,225,261,255,254,235,209,270,365,246 +FI,3989,3729,3012,3160,3611,3348,3577,3151,3012,3213,3285,3442,3085,3179,3880,3819,3296,3626,3180,3386,3065,3513,3467,3329,3526,3873,3284,3693,3640,3494,3572,3178,3458,2963,3074,3691,3490,3743,3419,3607,3572,3331,3296,3192,3894,3516,3793,3402,2888,3075,3209,3163,3319,3341,3213,3343,3302,3468,3259,2915,3303,3307,3231,3159,2998,3101,3018,3009,3219,3604,2961,3351,2985,2983,2795,3030,3127,3061,3116,2726,3260,4325,3109 
+FR,1671,1578,1350,1586,1458,1490,1421,1336,1381,1447,1430,1480,1422,1489,1396,1633,1338,1378,1240,1332,1196,1584,1630,1499,1471,1331,1408,1412,1473,1469,1475,1453,1545,1356,1462,1445,1312,1454,1502,1545,1396,1315,1424,1484,1599,1518,1520,1269,1237,1227,1501,1375,1389,1168,1280,1454,1222,1337,1263,1190,1299,1132,1294,1346,1376,1263,1199,1301,1277,1522,1060,1300,1408,1034,1143,1266,1236,1150,1165,1043,1279,1535,1075 +GB,946,965,835,869,773,860,910,819,781,890,906,934,809,901,925,935,813,890,787,849,825,1000,1046,912,970,917,839,874,894,869,805,880,862,842,838,851,863,873,972,880,896,823,824,838,923,944,903,801,764,746,859,824,857,793,794,886,738,752,745,771,816,726,751,743,748,734,706,790,777,930,707,832,841,683,759,769,720,764,750,725,764,985,690 +GR,218,222,205,217,223,201,166,200,192,177,166,162,210,211,160,215,184,169,196,164,172,182,184,192,199,162,185,176,171,164,184,185,195,186,188,190,160,186,172,191,184,192,190,179,174,180,194,193,176,165,206,197,195,161,179,191,192,180,167,177,174,164,191,172,184,189,161,158,155,143,188,179,148,136,165,150,164,139,146,148,156,230,138 +HR,160,166,131,147,151,147,145,138,127,131,116,140,140,155,137,166,131,132,122,119,114,149,158,152,144,124,133,129,143,138,137,131,144,124,124,139,121,140,130,149,133,132,130,133,148,141,144,128,118,114,143,123,138,115,126,145,132,129,124,106,120,109,135,127,140,123,109,110,116,130,123,123,118,92,114,116,119,111,102,105,119,153,96 +HU,304,332,268,281,302,285,297,277,251,262,236,291,282,321,289,333,272,273,246,248,235,288,321,309,298,244,272,268,293,283,276,264,283,244,255,281,267,287,271,311,272,282,265,274,313,294,305,274,243,238,299,262,288,240,271,310,291,272,267,232,269,243,300,267,289,265,229,230,243,275,267,259,250,198,235,251,255,234,211,228,259,331,211 +IE,219,217,194,196,175,204,220,191,176,210,216,215,191,208,213,211,190,204,184,205,195,234,250,208,232,210,216,213,222,214,190,226,204,213,198,211,210,205,239,212,209,204,201,211,229,241,217,200,192,192,209,209,213,201,195,219,180,186,191,204,206,189,192,193,187,188,173,202,204,241,192,205,210,186,205,199,184,200,191,194,189,258,166 +IT,806,757,663,758,734,704,693,675,666,652,652,683,660,700,615,778,640,653,616,631,587,715,726,674,710,665,655,656,682,672,678,639,695,659,655,670,604,681,675,729,684,645,657,695,688,671,676,630,616,610,712,626,650,555,623,639,578,618,619,563,585,546,629,612,675,597,548,568,590,637,562,602,587,481,541,528,570,529,540,520,577,746,495 +LT,439,424,323,336,358,357,382,323,303,336,344,371,342,368,368,405,318,353,329,346,303,355,386,362,371,354,322,365,401,372,330,345,339,312,289,388,348,370,370,382,342,326,301,326,393,352,397,335,275,272,313,311,336,329,325,377,335,325,306,274,323,313,328,324,325,316,299,277,316,366,305,332,314,298,273,308,296,302,267,252,327,431,273 +LU,10,10,8,9,9,9,9,8,8,9,8,9,8,9,10,10,8,9,8,8,7,10,10,9,9,8,8,9,9,9,9,9,9,8,9,9,8,9,10,10,9,9,9,9,10,10,10,8,8,8,9,8,9,8,8,10,8,8,8,7,8,7,8,8,8,8,7,8,8,9,7,8,9,6,7,8,8,7,7,7,8,10,7 +LV,489,461,348,366,392,389,413,352,325,369,383,402,372,394,407,439,351,391,361,384,326,386,420,385,402,401,360,401,434,407,361,372,371,337,316,424,385,411,392,411,372,357,330,354,425,379,428,362,300,301,334,337,366,366,348,402,365,358,334,302,353,346,357,352,354,337,324,300,346,402,330,365,342,329,297,337,330,328,297,272,351,473,308 +MK,83,80,72,80,83,75,69,73,72,67,64,66,74,79,66,79,67,66,71,63,64,72,73,70,77,66,69,66,67,67,70,67,75,69,70,69,62,72,66,73,71,69,70,67,69,67,70,71,65,63,76,68,69,59,69,70,71,68,64,65,65,62,69,64,70,68,60,59,61,59,67,66,56,53,61,58,62,54,55,57,63,85,54 
+ME,52,48,43,48,49,45,43,46,46,41,40,43,45,48,41,50,42,42,42,39,39,44,45,45,48,43,43,42,42,43,43,41,44,43,42,45,39,45,42,46,45,41,43,42,42,41,44,44,42,39,47,43,43,38,43,44,42,43,39,39,40,37,42,41,45,43,38,38,38,38,39,40,36,32,37,36,39,34,35,35,39,53,34 +NL,132,136,106,120,109,119,129,107,106,119,113,126,110,121,130,137,108,118,107,107,104,132,141,121,120,114,105,119,124,122,111,120,116,104,111,116,107,120,133,122,118,112,110,118,134,127,130,99,99,94,119,106,113,102,109,138,111,102,97,93,106,98,109,103,100,100,87,102,103,131,93,108,117,82,95,104,96,98,93,85,105,128,87 +NO,3658,3823,3379,3441,3537,3479,3726,3442,3268,3480,3339,3541,3020,3307,3639,3731,3315,3547,3122,3353,3400,3807,3641,3496,3783,3951,3259,3533,3425,3431,3339,3128,3312,3028,3094,3405,3369,3436,3457,3380,3502,3181,3141,3117,3541,3359,3449,3200,2890,2848,3057,3032,3196,3207,3170,3301,3060,3203,3062,2878,3166,3028,2983,2943,2874,2828,2918,2954,3018,3442,2796,3157,2946,2754,2793,2859,2963,2930,2981,2711,3035,4108,3040 +PL,1615,1584,1275,1356,1364,1378,1517,1290,1230,1296,1249,1438,1294,1478,1446,1586,1279,1349,1269,1305,1215,1435,1527,1450,1467,1280,1193,1321,1504,1431,1293,1347,1314,1200,1184,1419,1270,1393,1431,1491,1336,1265,1178,1304,1494,1377,1507,1255,1077,1078,1312,1252,1310,1197,1302,1512,1326,1249,1171,1052,1268,1192,1296,1230,1262,1244,1117,1092,1220,1426,1177,1274,1251,1068,1063,1163,1158,1120,1019,1017,1252,1599,1033 +PT,114,114,102,113,86,110,95,79,94,96,107,103,99,109,80,131,106,93,93,97,78,106,104,115,103,98,103,97,112,110,115,116,106,109,109,109,99,139,107,102,89,90,95,104,99,108,85,89,75,87,104,96,104,85,66,88,62,82,96,90,90,78,87,97,103,90,91,91,84,96,80,101,100,81,79,87,79,96,81,72,78,98,75 +RO,931,939,801,813,874,851,839,815,799,780,736,811,883,948,822,968,810,789,826,731,744,826,890,867,873,706,791,776,858,792,787,798,838,782,752,855,772,836,780,880,801,811,763,807,917,815,882,827,710,700,846,801,848,701,806,869,842,805,740,710,760,721,844,754,799,780,684,695,695,757,793,781,709,662,681,721,720,690,624,648,740,973,611 +RS,292,305,246,274,285,276,265,263,261,239,222,252,273,300,249,313,248,249,254,231,226,270,283,271,274,233,253,243,267,254,251,246,272,242,243,265,233,265,241,273,251,255,244,253,275,256,268,254,231,220,275,243,263,223,250,270,259,250,237,217,234,217,262,238,265,247,217,214,222,229,251,244,214,192,222,223,232,211,197,212,234,306,192 +SK,221,219,199,207,209,201,212,202,189,189,178,210,195,217,208,228,192,193,185,185,178,208,213,211,219,179,187,192,203,200,192,188,195,178,178,197,188,204,193,215,191,190,180,193,212,198,207,187,172,173,202,186,191,171,186,204,193,184,177,162,184,174,190,184,191,182,163,161,170,188,175,179,174,142,161,170,176,157,152,158,180,231,150 +SI,81,77,66,74,72,71,72,69,61,65,61,72,67,73,69,78,65,66,62,61,59,75,75,73,73,62,65,67,71,69,67,68,71,62,61,69,62,71,68,74,67,65,65,69,73,71,71,63,59,58,70,61,63,55,61,71,63,62,62,54,59,55,65,63,67,61,54,57,57,65,58,59,59,46,55,57,58,53,52,53,61,77,51 +SE,4509,4537,3713,3939,4134,4059,4374,3918,3633,4015,3891,4219,3560,3919,4426,4488,3950,4223,3662,3988,3814,4451,4260,4021,4358,4613,3929,4280,4255,4254,4043,3806,3975,3634,3625,4238,4132,4314,4246,4287,4301,3913,3840,3819,4588,4139,4376,3931,3476,3446,3785,3695,3893,3991,3916,4073,3757,3950,3781,3446,3898,3778,3755,3769,3632,3561,3606,3590,3806,4397,3474,3935,3675,3452,3421,3635,3693,3705,3689,3247,3807,5084,3769 diff --git a/data/era5-annual-runoff-per-country.csv b/data/era5-annual-runoff-per-country.csv new file mode 100644 index 00000000..862ff743 --- /dev/null +++ 
b/data/era5-annual-runoff-per-country.csv @@ -0,0 +1,34 @@ +name,1941,1942,1943,1944,1945,1946,1947,1948,1949,1950,1951,1952,1953,1954,1955,1956,1957,1958,1959,1960,1961,1962,1963,1964,1965,1966,1967,1968,1969,1970,1971,1972,1973,1974,1975,1976,1977,1978,1979,1980,1981,1982,1983,1984,1985,1986,1987,1988,1989,1990,1991,1992,1993,1994,1995,1996,1997,1998,1999,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014,2015,2016,2017,2018,2019,2020,2021,2022,2023 +AL,18102.536984526414,9362.260840506311,6061.862820153789,14283.904408113205,9819.184491956023,10228.254519456263,13788.284450369407,9581.270108803017,7938.384836547995,9040.045836743586,9921.845743062182,13900.722955927391,9710.931748556879,10574.663320350839,19341.246363906943,17818.135264440367,11442.713887022777,18086.801216289034,15090.128262650946,21363.26299103702,9091.698545327283,15645.252953546536,26744.08978638467,15193.032691775385,15984.127347228396,21350.165383112253,12855.252167834218,13420.929958145927,18491.92173822311,23127.97840031047,15647.322475185589,18621.138651647332,15899.845988546795,17394.46706381624,9941.517004312835,15056.517227865626,13546.145796746823,19078.487245825123,21734.889469128113,18681.018783118114,17330.7404590047,11854.58505075189,8621.019687739607,10958.633158319248,11928.114867207425,14053.437518158384,11478.547049310142,9794.162917024925,9309.704198107493,7531.383319836307,13417.032891488734,9396.693750356952,9409.269914411027,9602.245346872442,13721.093186096236,18546.848247572612,10075.274288997707,13160.130634381218,14979.3178626011,11173.442130939595,10656.167120597063,11333.102860367679,12218.649211273396,16968.134523176956,13531.444805168734,12351.485474236735,7777.709342414358,9395.578415692386,13162.74078695902,21279.522933687273,7131.262505063414,9979.8596278608,14852.708775826433,10550.911120633964,9976.081734060937,14930.01135394502,8322.107101243482,14871.511785547276,8735.240828081134,9201.521447196284,15965.927842545672,17077.541703886527,14104.446634983404 +AT,86531.51734236983,70042.82795415918,55673.922583448926,73201.56141483008,77917.51024365649,61393.73261184873,55048.20583071285,91666.41522182143,63572.87951011166,70463.32537160354,82083.36248472283,77811.80736180999,72623.28291775129,92135.9214601233,96990.37023646115,85097.60090834703,90540.2512560922,87926.80279582532,87840.33332785149,95771.51050456028,80100.0597367668,89961.83970292915,94250.95465501286,83836.27750897587,127502.64206362939,114305.16098508416,109841.9915507258,94950.56912780282,78607.44609072198,109558.29937241938,70075.51296406337,91798.91825728992,87743.39136454638,99172.3185471974,112079.92282492586,77403.35720896415,141634.05110901658,127491.44958971704,103513.31397365611,94072.4808516721,90719.06004331182,74178.70850428617,71954.43763473642,75430.93523721115,83080.51606488624,68680.54577957527,88997.37641681913,78365.83036558417,88108.35920540475,70055.7892534604,83590.790849829,80943.05621285089,88054.73669568839,64323.72991620271,76516.31784700113,88537.52869871816,74980.7275243927,76703.23963260197,90699.65433828124,82618.46892777775,70279.06284438398,76864.90473420777,38883.771847486016,64333.37459685728,71147.24783545699,61897.64030624322,56722.92181769331,64364.47029441406,80300.64217575881,73731.45518976511,60840.309523971024,77575.9681355992,74925.98195655535,87076.27014563217,55659.14157544586,71826.27744178762,65790.17399828041,62758.61938425757,63451.07652587882,63443.71962749899,60531.78695214822,73567.34222821795,63977.67627260971 
+BE,1961.4766789766115,1152.2739476527088,818.2925521347668,792.0469723263222,1593.7272156934505,1164.481755817633,873.079191781097,1304.591358175381,1034.616549532147,1222.8331458954544,2265.3933295931465,2299.7187477495568,1601.6287456097295,903.9162052526016,1681.6717933948885,1553.0826978024484,2040.8513201770277,2596.453742857211,1572.1256345899858,1777.5813929080084,2626.96550179367,2109.9419394561014,1095.552430877252,1153.3090754765356,2926.2047527605378,3273.3444811813465,2455.1462895729287,2709.4254348359373,2044.223080485547,3206.6821776003253,1403.4368531879622,1739.01009962226,1509.0861879549796,2489.4865224203572,2440.1073430603583,1152.6261514941527,2218.937185944037,2759.156597708309,2657.6330859359005,2904.764428136616,3657.83503348807,2493.2486286738635,2917.270148862828,2660.3113890881846,1897.0141168954808,2027.0116832611525,2822.5902787959144,2964.8457470488684,1943.761175974426,1192.1713008070133,1419.0119225208746,1475.2557281855488,1769.9742186132778,2647.8931490311224,2842.9882619071104,880.5015070232031,1197.5019163599245,2093.4616667083587,2614.387699318041,2455.605567526133,2964.8707630509653,2512.265466519676,1585.2842393930487,929.2255532772314,1281.2863306004592,1121.936801130382,2183.018089275124,2209.175694512598,1603.8823576630593,1544.7801250586733,1495.8450265096008,1698.4561860883377,1826.3476500921734,1803.4120415623293,1828.4613155494794,2424.4009518987536,1091.1596112517386,1814.2183897620703,1171.4609814371709,2056.8674333110275,2031.8962335362714,2907.836627473,2067.170862372187 +BG,17071.604805088016,14499.026058918833,7232.927769745818,11963.806716811183,6134.985167502051,6976.20752617876,9144.48113078865,10476.930594688338,7275.174931794049,5983.600586215025,7806.165358407907,7617.879584469116,9313.915986883927,12665.103843530671,18165.703005314856,20925.111917073817,12315.95137614649,14307.215196342033,11652.743586791588,16042.778215050708,15000.011504505326,15411.367697482856,23959.284980030086,13749.537818599603,14477.782616320155,19301.913180173906,18779.64649297375,14471.754083245778,21905.976678210158,20337.934359162857,20009.368056844236,22642.8256575709,23291.544303799303,14596.266505770593,16672.574320967968,18465.93977853061,15123.0740003407,16401.953223276385,17509.31598177905,19228.19892146255,14599.539868168713,16192.551398587811,14129.882713144674,15395.87601647586,11853.583066311567,12273.885630367266,13296.879487675578,12962.17913948413,10178.141531948864,10101.748861881337,17833.95419276747,11290.360773903032,7050.797194092144,7109.831367133397,12879.727662239184,15849.231621819281,14036.330428384765,14600.701492135015,14377.532715491632,7705.7385359122845,8414.14109970378,12768.14503989922,11390.56673556146,10492.153643864705,15665.526881990423,13518.231074072748,9896.670696858979,8514.724851169687,10495.74229946015,14331.982881842356,7228.669024198985,6779.132502990354,8418.829006301234,16447.848686603495,15740.35987338571,11179.883037182273,8789.581276892988,11591.504961624405,6993.71096793164,8046.946588923506,10797.966046345515,10462.980330774792,8581.638814176174 
+BA,31614.29255116213,25762.18251896169,12898.683737355397,30643.34571996033,20289.25506629573,16692.532556519098,20416.06449106326,23823.10383354778,16264.762900161431,21522.105898137404,23467.52324346034,27732.028396263388,21355.39127202247,21518.57483169428,38198.02048713121,26864.382517947488,20815.045478598513,30820.77172304343,26041.454718573805,32434.848569531026,20613.68138756547,29427.6067241259,33216.04695823398,35126.00882914745,35434.02869793576,33198.240125653894,27292.13451964922,28870.288475787063,34855.527471322755,40158.11721181478,26000.45986387781,30818.815452611776,25386.059210704487,29741.547122355205,28211.934365516674,30030.474291377286,28262.88980319508,36551.088477619196,29006.131913946814,27659.528119372964,23600.138376555304,18313.888493525537,16009.416242480263,24722.92263318226,19189.890721764787,24683.705818314953,20989.732351804592,20569.866983400992,18470.90664070533,14851.755901997987,25891.954823722892,19819.186826010166,18921.820235243023,20522.576037546467,27525.961934821684,30713.034955646464,21412.918614981576,21344.870107122344,26543.94285666588,19942.69654945001,23180.52553450498,25225.421682786062,17217.841128593398,25342.758536033023,22647.27040540609,20311.586220306715,13360.715771642963,18725.9052067595,21814.111731615874,31110.03238839381,11170.209355889527,11624.972537489217,20618.808518471225,26321.87815471064,18023.46871599901,20293.60245322545,15351.515268991054,23732.8659965382,19641.494693119246,14018.69083089969,18476.061736021245,20106.92588679312,25396.55419340511 +CH,71147.26157637448,54904.32201155619,50017.90208838156,60290.759721214985,73027.45831149221,73481.642346746,55618.537744757035,90032.09246974687,51962.38939916655,67177.84116696635,96134.70662377033,78280.50065735803,75567.82823680741,75624.52378636891,84437.3353706146,82858.22758683441,70824.92066589095,89311.09471805945,65693.73271567532,95736.9646979805,81823.38979168108,71941.93730231738,93291.72439376163,67130.87226350856,103459.16116112942,100678.84780651607,97724.23622167954,103707.47043615916,84031.35407618036,103162.60292359442,67550.72288370335,80436.85045155323,87045.62560332651,91431.67032188186,103676.61542793321,68303.04781353423,112322.8911282786,90003.61666202937,91028.65800738563,79435.56403729375,84362.27602442491,71967.17653868764,71356.17066607098,70327.74412176231,72852.24726422041,60501.17532436925,83372.98013161327,74234.0349385613,61890.14596593614,54336.52856303606,58271.018596959824,62644.46734396249,75790.31562737533,69637.43067630196,63290.99710001813,60360.96649160057,57476.7610131337,62343.013781750116,78693.52307555292,73667.98513148604,75122.7031133837,70327.33051388402,33043.59766466136,48968.55545142022,47632.62274536815,48588.59714178414,58398.342285916435,57742.38347363473,42559.51792407362,59408.23894223828,57129.80243604273,71163.63652517287,70858.4268509181,119263.66699527185,55000.17388464577,60563.73895679769,51249.64855691976,45607.547672961526,60542.786248766475,50488.945849740405,62587.04259357905,67091.5970044437,65549.52567888667 
+CZ,21212.78579882691,10928.058379661825,3384.888399390222,4480.397678992209,6399.719467990146,6768.6515946008185,5799.446185009577,9411.786777037812,5860.23463242501,5599.396992120427,5741.811363153907,6430.8701452824125,6608.002211449021,5547.98412657507,11139.03676570244,9975.869136026628,10340.80877548634,13532.930674695574,7705.735677184478,7812.801872929193,10964.355351867012,10646.504003233118,9719.560436989883,9722.51782291833,19015.37334991238,15549.667939948402,15927.255735311868,12882.876953226223,10353.403919517015,11853.576269226234,10677.61771918624,10413.95103805089,6895.283030051313,9424.903567027312,12922.392855988994,9137.874616036986,17532.348544152504,18533.830228515635,15250.542039808313,10190.043701741004,11537.335951365127,9452.574932230946,5833.228549596612,5329.206957903766,7580.6976645678615,7069.4638232187235,11641.01844987317,9790.145937530902,7320.767578365769,5406.873869997462,5625.637231646826,7239.835546851702,6440.471526964078,10089.617439184036,12468.55796361433,12603.058166229213,11520.381274473184,9364.18293733304,10411.740148030329,8903.525346614548,9434.353552398077,14463.707947162726,8297.335864348554,6305.0428212307515,9383.102792968806,9420.905095490103,7053.530866655102,7934.504156979587,9131.766698685136,11027.709419784474,7973.011990884055,6406.065926850977,10728.184944007047,6645.433895571247,6345.82145823084,6499.675988954033,7016.463805946017,5619.135413529,4807.786670051716,5785.96568493957,7986.497102061061,9207.147123091028,7072.340654546769 +DE,60501.03103951161,38175.41955606091,20879.426305645527,28284.75340336476,36103.32351052005,34891.089702429555,28788.726755549767,47593.248184570766,29278.51011479424,29170.60074491641,37514.09293381361,41854.44001450301,38958.042582957896,35908.26729908446,54302.825387264325,53900.0627595872,49537.22949846474,58655.764340031456,37627.8257805545,42967.11131060519,58392.899587658314,47246.42956098116,36543.949785170116,36184.44504543948,76454.75505815547,77563.28089874369,65706.46393994696,62713.75591115003,50646.052126013274,74501.93068438585,33965.80961685558,36936.195615288314,42537.597915180464,56616.68921207296,56744.68927682596,34807.98202654087,56642.02192030585,72249.78534694383,61866.53633607989,55155.47864420242,60392.13262306467,54016.67652252674,53772.61604029787,44957.681101902934,45340.54136937088,48213.458592237475,66703.44582319073,60862.17019199549,41014.24992391146,32890.656129618095,33561.7114818598,35496.398391557115,43529.12222860253,55750.09930602895,59293.24549493131,37708.06634825865,37073.71374151417,42227.76427477898,55642.269571505705,50261.32889407123,50099.1653624443,63663.80803911536,33932.84222448403,24148.0930626651,39327.588161226195,34424.75266875371,37774.7468462978,39904.36386097259,38761.37664135111,43371.39725264248,35879.97041420226,37163.99533546333,50391.241894760955,37073.97568394232,36139.75677566728,42435.96482981362,34097.15586833773,36767.50341472353,28228.762854078665,31841.550922412007,39703.58319343355,47521.52513933251,37833.900158720804 
+DK,200.778317198711,183.58783222570864,184.92589840547043,103.64850861603549,315.2220887719417,314.1000544312628,214.28103955721934,161.43191114338694,185.2111475627714,358.69279426367336,540.6494487174206,225.87222027671936,240.57818634617777,370.37587945377,297.48979685029366,176.69208006156848,208.79213770008658,418.4554011794129,292.8102234525168,354.1220188699609,405.97396851309696,497.6042583966885,394.062076725445,291.3295691524654,304.5438623304028,591.4756757289098,629.1912050409122,479.19182164984767,353.75286712188796,454.0799499391422,338.48620130918835,357.676218196066,271.5200039604648,288.04880490856954,389.1757124784286,200.03398963977796,391.555913715114,397.29527898061724,379.8043799580585,514.7861201159348,505.7304478209306,381.57816134966373,489.3522161731293,301.717309458533,340.1024114463131,377.00536197863613,349.05617800662674,489.4227950883326,223.23589869898433,269.17110101837187,288.12441629799764,229.5796362727797,293.635637901186,464.89056739838395,386.583686695923,100.62743648672041,155.50324274468716,357.89273417380645,451.04208007822723,403.3432275318107,376.8995908622044,395.3896113505339,169.30459202493293,211.16687292699712,238.87912856891634,225.9981737610454,469.5229598902774,334.5021223766927,213.88993390188736,267.6283833775911,319.04746406617295,376.09256801086406,317.617725512616,353.26516627601364,404.2285931109035,408.5235836651241,351.86517079981496,305.15203377590285,361.8170659996248,441.45504335438466,312.14518786073575,466.99320427604425,510.02659659134173 +ES,78876.635611407,38950.09623429436,33841.47292664868,23156.438140457663,24925.319205598968,36553.92702862067,59420.41145835762,44448.20001757085,23003.718514410986,25989.029770929596,49381.7181466273,42506.42487287232,29823.22966835847,33520.89863652133,42189.87541544434,45018.59980204108,27387.4917761516,45602.58407624375,62626.425244916936,94141.56249288519,66884.65469167147,67115.06386526441,82553.38984370838,62483.82642486853,54633.78478265801,97009.71651609142,47073.863176177474,55800.125131342014,85746.55555964066,72028.42027117872,75395.28981871007,97165.39237066667,52196.13272216457,55183.75674414718,44434.367388636274,45688.29479796517,107175.1885102772,376352.4882722023,122222.92956502433,46652.32256081546,42283.296882354574,45745.627884904505,49745.69531403085,62249.891346371536,58920.93920695715,48705.91089393512,49506.55303300167,60104.870297070986,41417.79510072781,40561.22111509541,48584.70074175896,41980.59062088254,40901.640882458836,43899.80095828514,42846.07384845135,68844.74333042726,58684.98840324564,44556.941393617584,38670.78308215774,46815.51322723987,68212.19494117319,35011.91030341564,50475.76713743041,37227.78564631605,21874.579431867896,35771.95151620915,29462.81813629621,28957.890173503936,32351.242924601538,48787.03990347552,31409.59113067243,24367.630996980257,54440.48961905899,50212.19464108009,33563.81874118821,47043.00929278407,21662.04001863739,49011.344970588914,40719.843457258816,40971.905822060144,45241.676643808205,43434.78481736922,37030.44475893774 
+EE,539.9157561831089,506.40067622283726,641.2579701177458,634.8633672657788,801.3715228148297,966.9758840524717,584.81736437269,583.7023862826877,945.7850249371241,794.7615397417561,1013.5168183220601,680.8524541483096,896.074617741102,657.0814978199126,1138.8891299468455,1033.2360728791764,1360.240006093535,1004.6950457068739,934.8774758153143,610.9977141696028,1048.0576885162222,1477.5046721309996,819.8347754595931,595.9919701101757,664.1717649435486,1178.672290642306,1112.7122616720162,1003.3658496852601,739.8499843054013,970.7396668771637,1156.5986195943308,988.0643633892423,963.0153554345143,973.0118510683724,980.7669857762866,637.7376743732589,1121.0664931638019,1485.3090062847518,1014.4287893275732,701.3843644468006,1196.8127434491068,1136.1215677919786,956.0911286121622,761.6620263988196,1109.6559596693944,945.8137139982223,1246.4435124663337,1081.0063689141978,1180.9951778617858,1433.4526339800395,1044.4962572791317,875.9289554452553,699.3911738148247,770.5276589427534,923.6083617038047,353.16555295606884,572.4961222214871,770.5595680360855,811.3520528619258,480.6204379373417,526.2783934002991,624.0252970776874,317.7449195254128,502.6073570248072,523.2149143909173,201.10844695565802,516.5064948459781,787.1638506300568,929.6905631193786,943.6792182991617,682.831069893961,933.1072250631543,692.097322772721,449.72254117947216,392.4173074585239,635.9676582877272,753.2897252561945,692.2577760060705,496.359994444626,685.8431112730673,512.1207984775516,963.0464221974397,483.833486345915 +FI,19863.633124385666,19889.46363566243,43612.48513693398,40476.15110774358,37963.60387525601,30946.988617446677,26642.94999177761,35266.32524904611,44524.796955378566,33134.502072929165,37605.30609105331,44212.92045299994,43354.55443946037,40909.90249435943,45502.4817028443,31224.476747778986,41198.39417754888,37433.6293362709,28205.152814527217,22733.614552593714,36790.55908786393,45364.155643266684,28818.806516416316,35920.468538637644,43898.27049057533,41064.81684006838,48740.383821508905,37939.33952866352,30918.72594548001,32676.177851908644,33754.05667642144,33605.2810320407,40213.26757169686,46999.65997165162,40988.91471061698,26205.15827640759,41167.74001571865,25866.031354509407,34538.39918373158,28474.82496101674,52752.27216474974,31970.598763267328,35752.18054352296,34628.515223969094,27958.317625583102,31764.196982878522,30015.16030263079,26346.04561795855,37548.80632649389,27745.021972655202,27337.35846234453,42346.93155636462,32896.698992597754,22520.879762748657,23208.41547315833,19884.31515309925,22475.195958848508,33947.011695157635,26939.65172877202,32198.274008146243,28319.0878193112,25068.601753935876,16735.56947336933,29510.104024439464,31518.153863523567,22455.334677451585,28461.18834411878,38706.997170480194,29619.48059832548,29700.763598820195,30127.95453034573,45678.82538666528,31580.034602099753,29256.819253905658,43076.06538538077,49489.04536337738,37281.852766328535,33818.560580958125,35465.53242057822,41410.545080415,41567.72109759498,59630.270126901065,45592.22770628446 
+FR,104970.07553291276,54987.520121202884,58127.609672438644,63952.96688103039,76761.15501948349,76267.11605975665,80986.06867218221,93733.93578796726,58479.255039973585,73543.6737690567,135094.49763612094,106772.95460342853,80280.31347296045,74490.3783289433,106687.24245156738,78827.6552216503,82482.11027901788,110101.24506868717,102231.57173723538,134019.59611286796,107848.78158188045,101612.59295931137,120079.53146371644,89568.66448178052,139585.30725052828,153772.55334552698,108799.90779570332,132530.25240249874,145056.24077647954,157344.40874499045,121182.68180087653,135378.15307040678,100408.75538533325,132164.7891142495,122382.6612084835,103706.50937621434,174240.52624537304,149853.10658224527,139992.58441391244,124892.14391178945,138893.07653615897,136102.17909683278,136298.85434619425,112484.75227834118,103842.75366698352,105453.40277549413,121935.76619549586,138379.45234918175,71924.60006455882,68830.14474115185,82603.77634528138,113556.04015089742,104293.46882473862,133822.28230897718,106263.7222156586,102481.34621013676,87080.48957235571,78895.08319488679,105735.9820525235,105626.7734987112,124313.3006359069,92144.11932119,73537.29646152578,78137.60525599915,58286.814348802,68928.05802270533,75743.7796112372,83489.37862547796,61518.93775410048,71311.23184506799,58810.65089438103,62581.66694545332,108566.1756738168,106810.3035898108,64981.51870529863,80829.27869961815,55203.39988738071,99034.47679069567,69318.43286028007,80169.78942387344,91015.86505778233,86376.11895764538,79293.98493428944 +GB,22702.364220315787,23680.52284249295,26272.032009932474,26333.09456007779,26845.290009259897,23656.475751452526,24568.79566354481,30136.97835561064,27079.44100895935,30643.318549445074,30949.55733300973,26701.9428167181,24834.423901258084,36292.29933386541,21223.79048684261,23695.358277919488,25987.57619398849,29907.138025215598,20532.262685247148,29689.355990879158,29755.7349738537,26456.609914217806,26827.642866860373,23873.145060634775,28779.074240844395,32368.78110647752,35977.394307138864,31071.326823875082,26446.212227329706,32888.07880400224,26015.735528737063,30161.596277105604,23017.874991984525,33287.290870143,28286.810808036254,25581.658649545065,33234.01349421005,31886.616851943854,36147.13303532815,34133.94123938026,36553.158784009356,34512.41082508442,31536.090754720717,28572.314934249887,33275.60304522956,33665.001595891976,29575.879486696827,34513.91395990196,28917.71257427291,33609.176553253485,26240.381135352633,32589.192149993025,28862.78621634025,32807.4074928576,28488.125666145854,20034.93309228305,23116.389736102017,36396.90134863421,35532.192401610075,39288.48780281492,29384.81519197965,36157.931892943554,21622.405196644995,32040.743170655453,30471.91526631006,28610.855281883465,33870.42864117695,37281.10394972126,33227.13302814586,22275.90424618414,30588.672790028984,34121.358436958675,27706.739428180073,37230.134388487466,33699.78058075289,31403.25713954576,26791.643973463473,25928.83232662774,30294.933954035565,36884.703709615125,29006.326905891798,39928.21741502616,32593.01729082114 
+GR,21241.640655847503,17837.993739159116,8331.553303186349,18980.73391736116,15389.589824927982,17680.338927422024,17876.06808235439,10232.387409542045,7698.396804685536,9934.422230608423,10056.853784361187,12820.948661755581,12880.09713386794,18009.367077161973,21378.182658177288,29473.438101212807,11770.791493072751,17561.986308683798,14068.336209442465,21036.22984593777,10600.651816726047,19253.94027167335,33903.71393281602,18899.558983910043,17444.093386496537,23383.013428670623,15695.74724949063,19981.88173635776,22837.72155515553,20477.278624856677,21882.21250746782,25630.65513791745,23207.036995448034,22044.998521881807,12010.599831751206,19432.667576547075,10483.216379681078,17709.119022396855,23856.615736538963,24868.152661457774,21212.948012018598,19349.21918097175,12988.230031417628,16765.76278511076,15711.921661553248,14768.388537620414,15953.652684601051,12434.694106124754,8265.22849298033,8609.897588471382,14667.4570613014,8511.641075667938,10022.119539267658,13526.966234241496,13788.166042309897,20514.65575034389,13890.655920759986,13637.35506363307,17713.539092926785,10603.937587040999,9085.188935046803,13431.534902143163,18237.093734480102,15007.784707319466,15322.74478671021,15189.508916238075,9627.363919501071,7964.610286111404,17730.814440852053,21625.83796282357,10692.331430067703,11630.957871879938,16037.311332863352,13770.795305947142,17211.823293162783,14042.921372422215,9179.230226001768,15781.789228215457,11036.807907632012,8148.5700551254695,17837.056638784255,15741.120020191216,14783.113316616393 +HR,10616.868981781858,8773.16604903597,5244.73498702366,9536.576803956808,6559.078522391053,5970.043528122038,9472.615822894124,8274.401876717038,4545.145600490753,7872.286618923516,11579.119396857217,11616.785819262946,7841.411279590863,7071.274416413621,12992.083400898755,8164.444623202054,7347.788717816701,10332.483465428491,10537.238741811467,14479.677080073134,8479.661662510043,11787.072656273116,12862.758623825672,13371.003032039336,15990.668161905827,14470.61427957732,10644.752476691014,11929.91211438134,13611.162472968235,14159.968181810045,8748.9998436583,15752.17883640428,8576.387915317433,13121.48137750286,9754.915357083264,10574.934113880938,12339.192872828058,13526.82683401755,12407.125261525374,13314.467203516846,10589.407510915647,8240.264291214513,7438.667886688274,10422.72691638502,8167.94266416981,9179.047407118169,8517.079324039581,8574.526688740114,7000.526114186264,6044.501432950472,8424.638032244015,7640.079353180473,8493.38568955205,7673.466816227631,10113.96973112034,10475.583854629525,8161.447547783003,7730.598307694147,9688.392563588315,7721.732973546704,8547.91442119075,9110.630144082634,5654.409914846469,10449.908940883637,8544.776426500835,7703.363550389989,5356.181480736791,7888.76450496884,7981.496673656482,12159.302077747214,4232.3935344941665,4890.509308451722,11184.320641158047,12936.9950624162,6984.475921997467,9245.055929521772,6446.204647314787,9728.03403894923,7468.897556955247,6117.536838412799,7342.634549696792,8157.817278196762,11474.85293662869 
+HU,1086.1958142470096,978.3249526234807,212.97414604675294,340.3983517473703,430.1881201465951,194.65656630284064,473.19850543225607,261.9192709197555,196.10367582160737,215.18183697898402,455.56009955160323,446.6314758325084,756.8984035642842,450.9800559529234,596.2398492658431,618.0772176598714,433.5306920564666,626.6699838105968,736.6623236995524,901.6628598550868,955.0588428000881,1010.9577650474141,1739.1969813800204,989.7527773708749,1794.409065735425,2141.779921355298,1587.224990709883,671.8419282109866,1725.8355118081338,2716.2211930211206,1024.7764676942138,1478.4297579371664,1174.856199808959,1380.70450266654,1412.5023358393119,740.4722294240396,2077.892875467242,1382.2122922975911,1644.3319455706533,1323.8387691522125,1268.4319727220777,1050.1561979992803,938.7640185853782,701.18673897032,1176.2218406698482,1296.5341770126697,1360.4975641937087,1113.61620590335,686.0888933448485,340.9577501900905,585.7771480398817,710.4671894364953,683.5190908209308,901.0768559915952,882.6855336512624,1696.8667744778322,1072.391017559335,1091.9628754375356,1986.255314199332,1224.1891103332646,723.3105004079888,525.4091579835733,484.84230756145246,965.526984284363,1182.1048527111184,1188.8833398485922,413.588170351592,480.2721461089437,695.3047160171476,1875.1966820116616,1424.9690036241302,335.4105482008909,1504.9089491398734,1485.372787096725,1238.0161031995012,1379.3214547239281,769.0933902263423,1164.4795232335282,401.1056623458921,586.967649321741,686.5594518266915,503.19975913371877,1084.6060135452235 +IE,2938.9562959168748,3198.9169680516616,3187.5912945813584,2803.8552573374072,3777.1449651405105,4362.104175198847,3412.9484028266015,4033.7677356343165,3724.1620931207913,4522.92029724601,4646.742323576806,3498.3840852321937,2544.6578209214726,5419.077105100732,3195.826410242899,2135.0862825425293,3896.6031468108818,5166.0575828547335,3059.5430902455864,4420.523688067632,3920.565970045664,2677.658691610785,3238.0778129090736,2814.899041865502,4169.640537659651,4975.659864222032,4131.237385306254,4051.6063579500055,3501.9810380506974,4162.222738923252,3277.97066925379,4149.907266073347,3535.126191101705,5127.397347805918,3672.8392481603883,3233.4968306810615,4670.955668853978,4602.662496185422,5486.794576286672,5974.860239294209,4958.279351735843,5539.220771177308,4814.939874910363,4704.080605250908,5214.372862088697,5579.052373684467,4424.669817520398,5645.170219782959,4099.107666436778,4291.534915722901,4394.7348113611615,4474.842169491055,4574.431496810677,5809.6920444413845,4586.044007664658,4500.049573188483,4158.002042556449,5864.151573833777,5389.317421868625,5602.682945402753,3910.7950042057882,5664.313605042361,3809.963053559503,4119.424688220842,4183.3641097691625,4756.243128847813,4885.19842669336,5839.576896771334,6529.030637616166,3851.468550158534,4739.247453475783,5116.873230654843,4346.868803644192,5780.6460718128965,5283.18966576195,5253.677788447396,4182.318532051922,4579.528423701424,5332.55258145869,6160.501534866154,5064.7000821427855,7011.136828867076,6289.684272273017 
+IT,111872.14392769105,83456.12455924545,65177.42783168641,92174.45776985734,93705.54545517343,102552.6760192628,102231.02170173851,123449.91092719955,75535.87312511557,87009.71778444933,172598.9057049173,100312.33974168803,109683.21050744873,113141.95504995789,104153.60992530867,105687.75931391625,108887.88674499927,120408.08444781008,120463.4718424679,176044.98820314519,110983.47718210719,108661.3888675,154479.99952596257,116866.21274767391,140970.07450678368,148795.1431459142,123467.91624840492,141856.2012294592,135145.74559391223,121025.70232844367,123064.2692081519,158675.8479818625,126408.22188494769,122698.61897984576,134048.90222163082,141351.40587318933,183551.69201916258,152672.0987165877,152463.24070715246,129214.69623803807,126406.52003570941,111266.52387106014,105493.22493839792,124930.0152276459,118811.00097800085,107475.13484215035,108705.61617459086,105553.12911266208,89696.47047566871,79022.57929325203,102862.21537887226,110384.19134436303,114736.91039530469,100806.28542965792,84803.88252804938,120635.19844583895,89284.70639754581,91327.36842742827,112272.49254052607,121214.98394688763,100240.69732658492,110327.24980482439,67667.3134977533,93496.05161894935,74054.55044130566,77775.69469931335,75858.44303177306,92083.61659880447,94760.12813659209,130298.81027721112,113068.08204604911,102862.27823880903,124544.42911673438,173919.9577739064,87247.90399947706,91232.2769509342,68511.40136893332,110593.38607511377,103785.63569256017,87869.12433673623,92697.61009264145,89877.21983741673,96543.33467044581 +LT,2330.311115607803,1278.321859711663,1329.382406669865,1835.9539018834093,1806.4585215317375,2463.9928727279507,1216.0039899784208,1896.293414052239,1974.9185999663864,2624.3584436168967,3126.90226977831,1475.6787373817235,2475.307120672793,1105.461669987713,2895.355169108127,2328.291848466701,2773.100951693534,3565.1300060634676,1726.340017569786,2441.2869150017605,2739.255286641479,3393.627534013301,1948.972938917477,1503.9304931932238,2081.2181025571476,2387.4356286402945,2953.7202011167815,2442.643404228762,1575.9037853337413,3106.077653607497,2438.6998492607095,2321.3335398002305,2455.4955733417105,2703.979150849733,2983.428649152806,1271.81865757996,2291.2165997279003,3624.3054978962264,3066.52482874976,2692.4730181815085,3077.131323187848,2583.193918114293,2346.227785803459,1679.5438811966314,3224.9829735377853,3118.546035682447,2841.2519919875017,2793.689370450914,2688.2457239306805,3241.443663108373,2559.3234295762136,1702.002348908642,2110.46892607847,2728.2093372090403,2507.383271401867,987.8894968242066,1210.6320667482842,2619.0625969612056,2041.7494156189375,1077.1024719580128,1501.0630179433206,2093.0805484121715,761.1714789679556,1438.6161577229564,2052.75406588365,780.7629955514127,1806.053023318598,1637.6952437203324,1294.75789506843,2045.7605537026564,2014.9263059368886,1513.2399919084748,1700.3251444304037,1310.6134461272318,707.068706016363,991.8607300754669,2210.5243412620116,1945.761381971759,656.9514438734058,938.7049363615082,1091.9224609932446,2359.271808740987,1316.3428075862207 
+LU,410.92374814105005,160.31307322354917,118.4486719933025,98.1780404815946,219.99495836090017,162.18791666671726,138.32051761008012,209.31021249802274,188.8584966582342,199.41682878475427,418.42920407960173,402.65326898169104,245.66041042705334,140.5213247873591,281.65083355306945,239.8578158452654,369.22101543741394,483.78479198242695,296.9130979040409,276.1232951910135,459.8791399297912,360.88050800564,165.48000564979304,183.08661927563494,549.4327262552889,615.540914852196,430.5724249074097,464.9649192455573,366.43220700786026,562.8555247965114,228.93628708423907,211.2546971696525,190.68337965968735,337.11947715404034,376.86900759463407,153.06162250371963,361.9412034686643,457.17182659872975,474.4467062976349,501.35146460886807,586.4359860050876,446.87720467222556,554.4763793156474,495.34599197145104,293.22338952418016,311.9404169698979,470.26963100793296,539.1966800252237,318.1508533879876,216.80440341571304,213.96493869659722,161.79868975175182,244.05142665329072,449.452274496719,514.0163752203341,140.3179752581265,173.96942733863747,312.73695003201067,389.83220575942556,401.09422881834865,533.6829497064967,363.9324325652479,243.9448809708681,141.83825708815394,174.8785415794147,180.93947242990794,337.92060394534246,342.3576188750888,243.21751195509597,234.05807708972188,220.32797632404777,264.38928306703747,336.6463620917912,349.32621250528973,289.85643977541105,421.5786735040934,195.16848080033785,348.6723038250213,181.01976524286118,391.4073725411393,335.53788814460387,454.30136063817054,354.9932023222274 +LV,2075.0062187312337,1183.1651412244635,1388.2608755110323,1633.9975845005233,2100.728221746779,2307.307714455485,1183.142585833944,1669.1622848941527,2049.0129879285278,2215.5671267410553,2742.6556866506367,1837.8325027812643,2554.170761349389,1408.345733539928,2983.189997195677,2825.061612760662,3252.3974243255384,2886.2924715658505,1956.4363533791625,2047.5738516633846,2396.823663944175,4066.8157610189646,1865.1937720377932,1213.3814003625498,1799.0008428733133,2700.1570058485836,2685.678969351212,2489.884608424664,1643.1417843905015,2409.13500668555,2592.9272177648704,2235.207139152776,2147.227486416904,2582.289866216919,2592.1022950383003,1400.2389662856958,2360.0229699498527,4017.8529783072768,2384.614549536931,2229.7220033855633,3208.439621353169,2678.7332051326716,2668.5315414635693,1705.3124319563372,2816.1467857724074,2779.7212753206704,2548.8356765576805,2583.5815153824487,2861.3233286998366,3801.9993997175525,2885.329135114707,2265.350140707051,1837.2956559244064,2087.5337557102102,2249.8356476827344,853.5386928200795,1689.684828383291,2653.3751344813045,2109.0096432617606,1182.3316650686795,1487.9132295244858,1974.1178903388873,1027.3242775723352,1736.361793878124,1741.9204323497202,701.3946960765046,1526.0106468283361,1844.9956663852768,1722.690767934754,2194.735679264339,1757.0892689497684,1974.9007736328324,1883.6056453393867,1405.6090440523008,1355.4439688992024,1295.7921755717964,2182.9434502570357,1620.95881111662,955.2966396067089,1490.1655775988,1270.5915407733335,2073.003327028273,1565.9994997763404 
+MK,6885.807602475421,5074.413420314535,2741.8766807152024,6785.078522769966,3818.8527078184798,3928.4886922987157,5350.383608273902,4779.5936944428995,3468.062322874052,3554.9350756385556,3934.258904454459,3692.0813330320566,3668.815892166788,5481.091007849558,8488.277349663611,8514.814821434962,4861.071723840239,6878.625964986481,6399.572820343104,7510.160197503697,3410.068805763449,5394.1653784487335,11852.098687247973,6238.562291909563,5957.282159254423,7239.168545674971,6704.994620967638,5343.254672711665,7690.122171456297,7543.1707758996035,6080.923842798452,9518.22452251825,7931.36999814296,7898.373553315725,5296.06496225987,7158.993856907015,5568.700579027411,6653.618095140364,7658.60790313609,7384.3372015299765,6359.355811805509,6311.4116310067075,4303.248848133382,4384.047563174607,4091.082807237865,4939.915122052565,4226.085259691547,2768.0039025432975,3428.9467644601264,2294.222990992098,4834.495252793617,3325.6396521134557,2522.690466358069,2976.927612194815,4721.490759581051,6218.919122814915,3840.4729075548557,4211.680380445357,5623.802967061423,3683.311545054556,3010.5387111938044,4030.600677147787,3746.016920616422,4369.907262961726,4375.170573617275,4171.022704527618,2702.082556773276,2500.5709190188645,4370.4076446296785,5204.859735574909,2501.3689966568136,2522.4142074268602,3898.568577966785,4120.496629723428,4145.529030725163,3980.5145191045517,2967.8383253548755,4325.530795978238,2351.320803369623,2869.298937488066,4644.825806471018,4443.426947798344,4862.542134317402 +ME,17887.124634490927,10608.377394539566,7624.148328862119,18300.25523347599,11276.651201832965,10903.50785537606,13234.193237094172,12631.716364532,11223.474429487562,12340.195715253074,11787.036249138713,18552.496612764233,11097.228684668844,11567.930061084586,19293.29710814638,13959.20571478613,13046.18440612437,18177.491235848003,15681.709335959564,19349.950981118494,11658.5954682407,15979.557715971854,21966.784229466284,16319.44299708603,17409.40677448169,19267.118980997075,14169.834731486397,15882.152400499632,19649.60512617265,23317.326436161944,15993.83236649745,16724.197023812518,13680.494674103275,16947.38155713745,13396.216553276576,17144.464963358845,16187.204710073258,20417.876685607593,22530.120155126962,16197.908505488407,14818.545420474018,12719.274460270883,9622.464989039141,13369.875886578782,13078.849292203078,14951.85122042927,13681.691957367619,12435.207850184568,11885.89235643587,9516.042262408026,16926.063160570764,13462.909968102742,10042.999218445517,11916.714437943356,15158.632522247093,19735.53511234594,11491.695345503264,14075.890065382568,13952.132515246369,11677.32833931845,13022.018989752623,13358.031703205903,11534.177624564669,17635.48411958614,14124.491791464301,12392.738752899031,9187.905148236658,11106.432128782126,13780.482580236776,19889.70854314474,7004.765863876614,8204.168680103983,13925.208337027952,12712.182323875111,8528.030231315688,13924.890575378206,9037.809310147335,13972.095665379566,11360.358971424077,10280.43536084397,13775.545442858487,18675.41168703515,16242.925847180693 
+NL,99.18845393334344,83.88743307052296,56.20087850777987,37.585642699707755,99.89955066870836,85.24103376999277,67.24342284849837,62.39508342046794,36.26442939184667,68.54685542245758,132.53876435280281,105.25304624029256,97.45390390948565,59.629996943856206,126.17294913858377,100.92785922135535,110.87608266899032,166.47617616014315,115.03831975395074,102.21679880589629,222.71184206736731,164.32395798051633,88.95057195947412,70.61286255591416,116.64203846425596,229.2598570422647,176.71083656110923,163.34614915846544,135.23541074052787,153.03725726999411,84.2996330000842,75.50229060079856,87.79575328623356,149.0911400069578,185.89892961221338,65.86854604814957,122.15247506918442,163.86971747802477,177.38160465294413,173.97671792234624,210.94537197363266,138.15528852664815,157.7989701791954,164.8431327190436,156.27033317337316,144.7881412408737,172.55396177597567,195.57239229094031,113.32208112175368,57.91841778157319,69.80575559844598,81.95247611107445,154.25504072397393,213.16593963287744,185.6861094126344,50.88580789479815,88.6770750590963,180.59511129061775,175.09641999581325,134.73866646771805,175.89672487625361,170.10222446753016,112.93557838603432,69.46018341018433,89.82211025011819,57.645723008783136,122.86182387892966,128.20825436811367,82.72980960226074,90.07937846739043,113.22245968360778,109.86527486394354,113.13774281842522,111.87536788688416,113.53271254663122,140.01537175074324,68.64291161119651,101.90820271032081,52.313311323047174,116.8375292031251,120.98114114085548,164.29761287763637,131.20083352074832 +NO,210079.934684001,272449.26769557595,393598.97632244934,344434.1421584134,309228.23588460323,302048.4910019859,239994.31574717446,322040.9441520717,407801.21594778565,321823.44753004133,268338.23623559956,311516.6976002237,354741.82081862347,287750.3093395867,277336.7794878445,277384.3177020413,343804.7636848019,309909.39898200217,279870.8316011512,240316.6988310159,337135.04130569333,320967.163231472,302346.276731951,373063.16157364036,335043.0981929385,271478.1367425455,378240.8226290755,318135.4534804216,235625.6466770364,244547.82624561962,342177.033729037,330660.24479289196,373161.4848287328,318697.1789732917,345399.49658985186,314419.8189439592,230963.73709983748,236070.00307120252,312676.53469423676,195102.01076090848,231756.2941107393,196457.66930311252,255890.52889680286,207980.18388759278,250218.19793663023,221472.83820330602,212315.28230042476,222774.69138098884,210372.25250462766,210846.3448041017,177438.35447152023,214108.95671057343,190417.02324135366,175558.31404488848,199041.30532793878,154342.13931409398,175902.47007757265,215120.2640019617,216733.8944940455,261370.58822336013,212333.63219332727,174324.40313248086,166539.1770850326,202012.88317391058,238213.58919108333,170401.06338583003,217109.81457913358,185684.48673203168,210764.408869451,180452.94255978477,359959.68381729786,220133.80814962948,201815.91045998363,193256.2376798832,225141.35101977925,194502.14845672756,241700.45583039062,203049.47691663646,189440.21996991744,241270.48318453485,196628.83730256284,369452.758534526,226746.71170530186 
+PL,19040.141287526196,13600.003649707409,4232.940272903036,6263.550300160282,11725.980915175824,9423.536450722735,7520.9931031649085,15128.087437533712,12302.274212909391,11903.809643162125,10323.210329129464,10102.880830361863,15243.556644277647,6316.576710839603,13844.440254769646,15125.266765937784,15697.445163802722,20547.692524530372,11483.751822471613,12736.801698643669,14242.313560202334,19975.533453959804,14197.61841088385,13385.258603233342,17555.946503015533,19526.389852009346,24990.22970093532,18554.95543612019,10610.557656768206,19356.459381427685,19657.930603206125,18732.50311338716,16842.606570419066,21220.5523273715,24157.44207211756,15607.0096942104,21677.41127186343,21298.26291533998,22732.35472759213,23096.919661671975,20539.587419825057,15690.593097829358,10514.527177352287,9757.003902443616,18214.127020951153,14743.470132504332,14160.372458977477,15574.81487531224,12340.373515759937,8851.92371521615,10791.411372967183,10330.247763133175,10795.54750377256,14738.227630388099,14374.438386354948,12177.648415014886,14450.3889852054,16473.394758962142,14491.781691174727,11650.64866730713,13724.64695911459,14468.081620316902,7425.533290488975,8039.550610683221,9468.54351850099,6656.34004885665,7842.310345436549,9593.638690519654,8828.256800239144,15874.058827778785,13537.338641985261,6174.396712827303,9509.500411581042,9436.102173346171,5360.954908688417,6309.399306468634,12402.024613286878,10602.512257049677,4725.103685514244,6963.3130148445025,9124.964758601689,11007.49968811077,9338.109838427506 +PT,17389.910347609384,7956.13238827081,8339.728858321132,2075.556471824434,3081.9400195534536,5428.965709165979,13519.093138173732,8266.183051247071,2305.611210847845,4751.414112923152,8372.129135066036,5969.31083523099,2397.6586063764958,3916.9962424030286,9654.699566273128,9603.363148222781,4163.162870337118,9179.465624121007,12131.854156488425,23406.646477036124,10635.077955993413,11029.404714084018,16475.917880350513,12087.55769685171,9804.492049893286,26178.184180984652,5669.820254013018,7559.528320921835,15405.930172416956,11163.387795923936,7088.025310785357,10191.330496756716,7128.069257230898,9335.631563569985,5236.13128329125,4572.251664471752,32605.577182903042,149323.86523586328,34294.81803755196,6061.11721308257,5836.654497517657,4843.7020037722705,7814.782411245904,9395.759000250582,14057.157961109715,8192.288977642056,7655.760207241933,9203.645337709624,7637.7450966202805,7971.812432879156,7421.37678917759,2935.2871895485287,6329.939163930326,8820.796560748415,8944.109217212255,14719.27363438586,11050.779819799696,8379.75073967695,5590.525909815882,10485.131539563748,20476.028147427794,7715.162648805314,11939.81827695821,5116.2596877306005,3047.0903835389504,9273.489011971804,5135.313604526994,3331.418010762715,6338.048693933388,12105.616460835923,7097.468638389797,3348.213951094401,11773.920694867731,13465.586888738457,4379.602415677836,11394.463621230614,3104.8789319036796,7216.150544702967,6653.765864361187,5720.580737490315,6685.493238874142,8486.079863634486,8329.192253601708 
+RO,46545.50823036229,35517.718755954746,12007.97895282227,19448.903832688356,20440.973051620913,15132.394486734484,21523.767710605425,32075.68337603852,20312.088233477134,14653.654680967496,19346.344374357534,19295.50358149612,27702.617554877004,24644.67023787785,52797.882559506776,42262.7162454671,34689.01143849651,42583.74114324521,28973.147767939397,36882.64868179707,29063.471368254675,39575.626278602576,31898.858414072816,36283.98261870408,42207.50410750205,41714.957670180156,43270.313090823794,40474.54716426176,53565.91349208634,74865.02611452111,48190.11154819992,57257.26135564704,46523.80416861758,44300.83791323246,54901.1701096736,47361.27151969814,49156.09266449866,56461.30009557872,56062.45724028393,58506.511677323855,48499.732173446595,45901.64074770707,33171.12855766741,38310.49272614435,39377.41586450999,28638.803870927226,29424.553978129992,39429.14347226504,39300.73151290347,27525.04968129815,47095.77682827757,30948.90232192865,32939.14262154816,30155.99148400138,35067.04977301419,35592.25066656211,45790.67404045973,42596.77718301701,39223.35618808953,25767.93476452563,30963.38609617001,30034.908201595936,23323.05819720473,28517.276888551194,41560.85308154171,43999.15897330077,21427.19650317167,25084.131072037453,18450.764512377853,38182.39266803458,20422.517191285966,14090.243196787245,21998.55223596247,30809.187151501046,22071.90474198888,31906.454754357772,20763.073093214778,25620.747974742615,19248.368008063957,23289.011422937554,26669.846131314298,27207.74766143675,29386.08741874402 +RS,12898.821957973714,11760.375115749226,5762.823681912337,10868.645457667955,6851.647476795101,6489.091511565737,8016.427124526303,11096.059611708006,8784.092785314235,6844.044739629848,5968.673897869134,7450.873145908507,8457.218084424714,10466.134979346216,18700.932768467817,18783.264457747777,9647.157790116991,14650.396388121844,12360.14810912279,11640.673528541236,8882.446980807292,14762.619600006037,18399.930168952076,13646.806400326906,14489.119148714197,14104.804002759012,15462.34970923992,10894.228431882033,15095.088711819322,17853.281396922546,12916.46431873845,14207.379133135786,14418.834370720482,12752.5957180177,15544.588217112372,15820.904121022999,13534.055257847373,15215.146639151637,12996.626380040323,13029.919161637963,10196.668003968522,10284.440766030819,7022.693160251435,8401.249663323453,8435.803252725058,10265.915943082338,8623.546341166757,7782.429841191159,9309.259412625106,4849.32865233776,9480.40891035497,8237.330612702019,6688.9341497267815,7113.796951355988,8945.392918392941,11391.406447517316,8770.78174795334,7797.074404297926,11205.567652241964,7490.638744688466,6890.626375097134,8721.152535625733,6986.899718851523,10014.515393386338,11863.685949406314,12576.12226693591,6930.903954576986,6933.68818306879,8436.351253425808,11680.836931112251,5673.257881932308,4512.930178739657,6094.8735580904795,11616.485592765608,8423.704814584986,9582.260510301834,6280.960312130538,10196.683253538626,5869.428152205924,6637.126459584064,8223.324863109141,9163.49600405833,11744.973584261521 
+SK,10526.703178382486,6581.841291454129,2779.6437301689157,6043.293021386589,8068.711762338472,4716.572547513843,4263.788112757618,7436.446538154413,5733.7942336519345,5091.94458281141,6885.47211985414,6956.591212284293,7116.928783228905,4573.269245439668,8169.130903254093,6799.092645889461,7190.199390340817,8819.311158887818,6679.080394471699,8681.76846617681,6714.688996801623,8780.135433744465,9674.052829336924,7510.89080672348,12315.672504913706,11551.288938768861,10658.095118642217,7760.467099195778,7334.701885838525,11743.053621084162,9101.455526104724,11252.254293590808,7089.2297957961755,11692.575497480175,11870.620362608648,8320.411050776946,12664.612697391287,10840.053026355938,9544.145158945727,8285.851845920657,6554.955003499832,6014.385824757616,5119.229272436076,5566.197178944401,8283.751441606055,4576.739002618681,5529.727054286627,4884.285979430572,7030.767074231632,6380.185010419688,6092.06334940043,4789.6919845964085,3988.728835990378,6100.445127127096,6174.336930810738,5839.492530664778,6739.897695678873,6073.547495791424,6239.663814901599,5985.626769812626,5946.910319392802,5350.4706690989715,2897.742723724252,3812.447551889599,5110.734824948982,5466.7571969447445,3822.1224333717755,4189.745228198809,3608.9397299109723,8925.718578619328,5520.297117401385,2338.023634704523,3634.014208205775,5079.021357670912,3789.3939110245783,3599.858750509022,4222.707040620564,3096.1701009538024,2158.8092391216674,4542.203594136759,4220.210061470592,2961.7796555712353,3705.0055429796116 +SI,8932.252857660058,6769.672897729603,3971.443731895907,6727.0196008878875,5996.060296331706,4552.96378417224,7787.375795346372,9548.071392837826,4890.204133819321,7094.028640574649,11752.798116380785,9378.602450871665,7169.30972105447,8221.267552376507,10568.142173580174,7705.119699907013,8559.440940655051,9496.2682906945,10641.418485889739,13644.09405840664,8756.865265119286,10877.994032024846,13095.473005855994,10408.222250010325,17110.767148034232,13257.44359305617,10163.420442559087,10743.809603872312,11213.570708968457,12419.028940460184,8518.188189494229,14975.496301661744,10217.449212514946,10521.007791805137,10759.209828867228,9699.803414833696,14530.647692616923,14530.715084460895,12439.143395173185,11990.046930204517,9001.647305603035,10111.423546501868,6824.172499114327,10267.968760960974,10233.709492652128,9562.175707725224,11231.941186997657,8178.692540046128,8568.526412713521,8103.883097000008,9639.895318751553,9157.43571368136,6814.50158997294,8089.913064615838,8170.645053083449,10765.044719853075,6713.170108103339,8936.8362631332,9542.147586182518,8964.110093884974,8653.114721452737,8353.75428643612,4558.589088593759,9432.405903987477,6927.92152111369,7408.760392367348,5834.871486466167,9336.912851251047,8839.494837985663,10766.693939731143,5073.937522450624,6475.598951785893,9482.419717110955,13414.771212079502,5906.5584945059145,9942.151265372298,7696.092255607837,9521.835825354157,8476.124990692872,6361.010018809483,7504.908031281684,6349.444363482254,12494.86342976942 
+SE,97729.33612275522,104893.13349064675,152164.30605691054,147567.81837152236,172633.569885874,142454.06079467706,109264.12507808255,145966.80578917114,158733.59492028368,161670.6171803483,134822.12256461562,136958.6213484757,175679.6273876132,133962.36617697845,126787.36233921513,130861.49290945902,151133.2982760823,140247.52694114187,130508.52271890236,137421.63579450184,164132.0529341604,142467.57286064525,135886.87305352974,163762.3962091317,161128.38161511483,148196.28633931337,167892.87677861055,132017.696130452,105953.99289036485,119407.22923207263,147139.6039546916,151309.55114963037,164893.61323375857,137608.18237925606,149284.78771947857,108749.15747050248,131859.531763871,117290.48017785426,131751.97960784068,111117.27409251402,110551.615114603,81550.1135393761,114677.940027603,102356.46697333359,128977.80283219383,94483.98267503879,114937.96457142044,93256.91327642264,96751.29026217168,91074.34139788701,87931.96567208324,96560.83822983531,116746.94389793588,64626.25905820224,75156.91767736348,51489.37512699482,74729.26978432719,134673.01513808029,110440.65771030058,149033.9827088587,143576.83069821307,93040.02354987273,64075.160200006496,102493.08906248632,106664.82884715112,90351.43771324471,94801.81222609026,92445.93458012305,88919.20770998915,108111.72872590217,127440.80298482065,124444.06618883039,98961.91573013023,87913.90602563386,115956.48217115598,95526.84265190935,103459.08985036932,86058.90377748368,99040.9103276256,110493.22773914765,94182.7249029169,169752.66741900297,99896.09236620825 diff --git a/data/existing_infrastructure/offwind_capacity_IRENA.csv b/data/existing_infrastructure/offwind_capacity_IRENA.csv deleted file mode 100644 index d2a3f0f1..00000000 --- a/data/existing_infrastructure/offwind_capacity_IRENA.csv +++ /dev/null @@ -1,34 +0,0 @@ -Country/area,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014,2015,2016,2017,2018,2019,2020,2021,2022 -Albania,,,,,,,,,,,,,,,,,,,,,,, -Austria,,,,,,,,,,,,,,,,,,,,,,, -Belgium,,,,,,,,,,31.5,196.5,196.5,381.0,707.7,707.7,712.0,712.2,877.2,1185.9,1555.5,2261.8,2261.8,2261.8 -Bosnia Herzg,,,,,,,,,,,,,,,,,,,,,,, -Bulgaria,,,,,,,,,,,,,,,,,,,,,,, -Croatia,,,,,,,,,,,,,,,,,,,,,,, -Czechia,,,,,,,,,,,,,,,,,,,,,,, -Denmark,49.95,49.95,213.95,423.35,423.35,423.35,423.35,423.35,423.35,660.85,867.85,871.45,921.85,1271.05,1271.05,1271.05,1271.05,1263.8,1700.8,1700.8,1700.8,2305.6,2305.6 -Estonia,,,,,,,,,,,,,,,,,,,,,,, -Finland,,,,,,,,,24.0,24.0,26.3,26.3,26.3,26.3,26.3,32.0,32.0,72.7,72.7,73.0,73.0,73.0,73.0 -France,,,,,,,,,,,,,,,,,,2.0,2.0,2.0,2.0,2.0,482.0 -Germany,,,,,,,,,,35.0,80.0,188.0,268.0,508.0,994.0,3283.0,4132.0,5406.0,6393.0,7555.0,7787.0,7787.0,8129.0 -Greece,,,,,,,,,,,,,,,,,,,,,,, -Hungary,,,,,,,,,,,,,,,,,,,,,,, -Ireland,,,,,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2 -Italy,,,,,,,,,,,,,,,,,,,,,,,30.0 -Latvia,,,,,,,,,,,,,,,,,,,,,,, -Lithuania,,,,,,,,,,,,,,,,,,,,,,, -Luxembourg,,,,,,,,,,,,,,,,,,,,,,, -Montenegro,,,,,,,,,,,,,,,,,,,,,,, -Netherlands,,,,,,,108.0,108.0,228.0,228.0,228.0,228.0,228.0,228.0,228.0,357.0,957.0,957.0,957.0,957.0,2459.5,2459.5,2571.0 -North Macedonia,,,,,,,,,,,,,,,,,,,,,,, -Norway,,,,,,,,,,2.3,2.3,2.3,2.3,2.3,2.3,2.3,2.3,2.3,2.3,2.3,2.3,6.3,66.3 -Poland,,,,,,,,,,,,,,,,,,,,,,, -Portugal,,,,,,,,,,,,1.86,2.0,2.0,2.0,2.0,,,,,25.0,25.0,25.0 -Romania,,,,,,,,,,,,,,,,,,,,,,, -Serbia,,,,,,,,,,,,,,,,,,,,,,, -Slovakia,,,,,,,,,,,,,,,,,,,,,,, -Slovenia,,,,,,,,,,,,,,,,,,,,,,, -Spain,,,,,,,,,,,,,,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0 
-Sweden,13.0,22.0,22.0,22.0,22.0,22.0,22.0,131.0,133.0,163.0,163.0,163.0,163.0,212.0,213.0,213.0,203.0,203.0,203.0,203.0,203.0,193.0,193.0 -Switzerland,,,,,,,,,,,,,,,,,,,,,,, -UK,4.0,4.0,4.0,64.0,124.0,214.0,304.0,394.0,596.2,951.0,1341.0,1838.0,2995.0,3696.0,4501.0,5093.0,5293.0,6988.0,8181.0,9888.0,10383.0,11255.0,13928.0 diff --git a/data/existing_infrastructure/onwind_capacity_IRENA.csv b/data/existing_infrastructure/onwind_capacity_IRENA.csv deleted file mode 100644 index cd5ac19c..00000000 --- a/data/existing_infrastructure/onwind_capacity_IRENA.csv +++ /dev/null @@ -1,34 +0,0 @@ -Country/area,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014,2015,2016,2017,2018,2019,2020,2021,2022 -Albania,,,,,,,,,,,,,,,,,,,,,,, -Austria,50.0,67.0,109.0,322.0,581.0,825.22,968.27,991.16,991.97,1000.99,1015.83,1105.97,1337.15,1674.54,2110.28,2488.73,2730.0,2886.7,3132.71,3224.12,3225.98,3407.81,3735.81 -Belgium,14.0,26.0,31.0,67.0,96.0,167.0,212.0,276.0,324.0,576.5,715.5,872.5,985.9,1061.3,1225.0,1469.3,1621.6,1902.2,2119.0,2308.0,2410.9,2686.6,2989.6 -Bosnia Herzg,,,,,,,,,,,,0.3,0.3,0.3,0.3,0.3,0.3,0.3,51.0,87.0,87.0,135.0,135.0 -Bulgaria,,,,,1.0,8.0,27.0,30.0,114.0,333.0,488.0,541.0,677.0,683.0,699.0,699.0,699.0,698.39,698.92,703.12,702.8,704.38,704.38 -Croatia,,,,,6.0,6.0,17.0,17.0,17.0,70.0,79.0,130.0,180.0,254.0,339.0,418.0,483.0,576.1,586.3,646.3,801.3,986.9,1042.9 -Czechia,2.0,,6.4,10.6,16.5,22.0,43.5,113.8,150.0,193.0,213.0,213.0,258.0,262.0,278.0,281.0,282.0,308.21,316.2,339.41,339.42,339.41,339.41 -Denmark,2340.07,2447.2,2680.58,2696.57,2700.36,2704.49,2712.35,2700.86,2739.52,2821.24,2933.98,3080.53,3240.09,3547.87,3615.35,3805.92,3974.09,4225.15,4421.86,4409.74,4566.23,4715.24,4782.24 -Estonia,,,1.0,3.0,7.0,31.0,31.0,50.0,77.0,104.0,108.0,180.0,266.0,248.0,275.0,300.0,310.0,311.8,310.0,316.0,317.0,315.0,315.0 -Finland,38.0,39.0,43.0,52.0,82.0,82.0,86.0,110.0,119.0,123.0,170.7,172.7,230.7,420.7,600.7,973.0,1533.0,1971.3,1968.3,2211.0,2513.0,3184.0,5541.0 -France,38.0,66.0,138.0,218.0,358.0,690.0,1412.0,2223.0,3403.0,4582.0,5912.0,6758.02,7607.5,8155.96,9201.42,10298.18,11566.56,13497.35,14898.14,16424.85,17512.0,18737.98,20637.98 -Germany,6095.0,8754.0,12001.0,14381.0,16419.0,18248.0,20474.0,22116.0,22794.0,25697.0,26823.0,28524.0,30711.0,32969.0,37620.0,41297.0,45303.0,50174.0,52328.0,53187.0,54414.0,56046.0,58165.0 -Greece,226.0,270.0,287.0,371.0,470.0,491.0,749.0,846.0,1022.0,1171.0,1298.0,1640.0,1753.0,1809.0,1978.0,2091.0,2370.0,2624.0,2877.5,3589.0,4119.25,4649.13,4879.13 -Hungary,,1.0,1.0,3.0,3.0,17.0,33.0,61.0,134.0,203.0,293.0,331.0,325.0,329.0,329.0,329.0,329.0,329.0,329.0,323.0,323.0,324.0,324.0 -Ireland,116.5,122.9,134.8,210.3,311.2,468.1,651.3,715.3,917.1,1226.1,1365.2,1559.4,1679.15,1898.1,2258.05,2425.95,2776.45,3293.95,3648.65,4101.25,4281.5,4313.84,4593.84 -Italy,363.0,664.0,780.0,874.0,1127.0,1635.0,1902.0,2702.0,3525.0,4879.0,5794.0,6918.0,8102.0,8542.0,8683.0,9137.0,9384.0,9736.58,10230.25,10679.46,10870.62,11253.73,11749.73 -Latvia,2.0,2.0,22.0,26.0,26.0,26.0,26.0,26.0,28.0,29.0,30.0,36.0,59.0,65.89,68.92,68.17,69.91,77.11,78.17,78.07,78.07,77.13,136.13 -Lithuania,,,,,1.0,1.0,31.0,47.0,54.0,98.0,133.0,202.0,275.0,279.0,288.0,436.0,509.0,518.0,533.0,534.0,540.0,671.0,814.0 -Luxembourg,14.0,13.9,13.9,20.5,34.9,34.9,34.9,34.9,42.92,42.93,43.73,44.53,58.33,58.33,58.34,63.79,119.69,119.69,122.89,135.79,152.74,136.44,165.44 -Montenegro,,,,,,,,,,,,,,,,,,72.0,72.0,118.0,118.0,118.0,118.0 
-Netherlands,447.0,486.0,672.0,905.0,1075.0,1224.0,1453.0,1641.0,1921.0,1994.0,2009.0,2088.0,2205.0,2485.0,2637.0,3033.84,3300.12,3245.0,3436.11,3527.16,4188.38,5309.87,6176.0 -North Macedonia,,,,,,,,,,,,,,,37.0,37.0,37.0,37.0,37.0,37.0,37.0,37.0,37.0 -Norway,13.0,13.0,97.0,97.0,152.0,265.0,284.0,348.0,395.0,420.7,422.7,509.7,702.7,815.7,856.7,864.7,880.7,1204.7,1707.7,2911.7,4027.7,5042.7,5067.7 -Poland,4.0,19.0,32.0,35.0,40.0,121.0,172.0,306.0,526.0,709.0,1108.0,1800.0,2564.0,3429.0,3836.0,4886.0,5747.0,5759.36,5766.08,5837.76,6298.25,6967.34,7987.34 -Portugal,83.0,125.0,190.0,268.0,553.0,1064.0,1681.0,2201.0,2857.0,3326.0,3796.0,4254.35,4409.55,4607.95,4854.56,4934.84,5124.1,5124.1,5172.36,5222.75,5097.26,5402.33,5430.33 -Romania,,,,,,1.0,1.0,3.0,5.0,15.0,389.0,988.0,1822.0,2773.0,3244.0,3130.0,3025.0,3029.8,3032.26,3037.52,3012.53,3014.96,3014.96 -Serbia,,,,,,,,,,,,,0.5,0.5,0.5,10.4,17.0,25.0,227.0,398.0,398.0,398.0,398.0 -Slovakia,,,,3.0,3.0,5.0,5.0,5.0,5.0,3.0,3.0,3.0,3.0,5.0,3.0,3.0,3.0,4.0,3.0,4.0,4.0,4.0,4.0 -Slovenia,,,,,,,,,,,,,2.0,2.0,3.0,3.0,3.0,3.3,3.3,3.3,3.3,3.33,3.33 -Spain,2206.0,3397.0,4891.0,5945.0,8317.0,9918.0,11722.0,14820.0,16555.0,19176.0,20693.0,21529.0,22789.0,22953.0,22920.0,22938.0,22985.0,23119.48,23400.06,25585.08,26814.19,27902.65,29302.84 -Sweden,196.0,273.0,335.0,395.0,453.0,500.0,563.0,692.0,956.0,1312.0,1854.0,2601.0,3443.0,3982.0,4875.0,5606.0,6232.0,6408.0,7097.0,8478.0,9773.0,11923.0,14364.0 -Switzerland,3.0,5.0,5.0,5.0,9.0,12.0,12.0,12.0,14.0,18.0,42.0,46.0,49.0,60.0,60.0,60.0,75.0,75.0,75.0,75.0,87.0,87.0,87.0 -UK,431.0,490.0,531.0,678.0,809.0,1351.0,1651.0,2083.0,2849.8,3468.0,4080.0,4758.0,6035.0,7586.0,8573.0,9212.0,10833.0,12597.0,13425.0,13999.0,14075.0,14492.0,14832.0 diff --git a/data/existing_infrastructure/solar_capacity_IRENA.csv b/data/existing_infrastructure/solar_capacity_IRENA.csv deleted file mode 100644 index 01683f8d..00000000 --- a/data/existing_infrastructure/solar_capacity_IRENA.csv +++ /dev/null @@ -1,34 +0,0 @@ -Country/area,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014,2015,2016,2017,2018,2019,2020,2021,2022 -Albania,,0.1,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.3,0.4,0.56,0.68,0.76,0.87,1.05,1.0,1.0,1.0,14.0,21.0,23.0,28.6 -Austria,5.0,7.0,9.0,23.0,27.0,18.49,19.61,21.42,27.0,45.56,85.27,169.88,333.09,620.78,779.76,931.56,1089.53,1262.01,1447.94,1694.4,2034.74,2773.91,3538.91 -Belgium,,,1.0,1.0,1.0,2.0,2.0,20.0,62.0,386.0,1006.6,1978.6,2646.6,2901.6,3015.0,3131.6,3328.8,3620.6,4000.0,4636.6,5572.8,6012.4,6898.4 -Bosnia Herzg,,,,0.1,0.2,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.35,1.34,7.17,8.17,14.12,16.0,18.15,22.35,34.89,56.51,107.47 -Bulgaria,,,,,,,,0.03,0.1,2.0,25.0,154.0,921.99,1038.54,1028.92,1027.89,1029.89,1030.7,1033.06,1044.39,1100.21,1274.71,1948.36 -Croatia,,,,,,,,,,0.3,0.3,0.3,4.0,19.0,33.0,47.8,55.8,60.0,67.7,84.8,108.5,138.3,182.3 -Czechia,0.1,0.1,0.2,0.3,0.4,0.59,0.84,3.96,39.5,464.6,1727.0,1913.0,2022.0,2063.5,2067.4,2074.9,2067.9,2075.44,2081.05,2110.67,2171.96,2246.09,2627.09 -Denmark,1.0,1.0,2.0,2.0,2.0,3.0,3.0,3.0,3.0,5.0,7.0,17.0,402.0,571.0,607.0,782.11,850.95,906.35,998.0,1080.0,1304.29,1704.04,3122.04 -Estonia,,,,,,,,,,0.1,0.1,0.2,0.38,1.5,3.34,6.5,10.0,15.0,31.9,120.6,207.67,394.77,534.77 -Finland,2.0,3.0,3.0,3.0,4.0,4.0,5.0,5.0,6.0,6.0,7.0,7.0,8.0,9.0,11.0,17.0,39.0,82.0,140.0,222.0,318.0,425.0,590.6 -France,7.0,7.0,8.0,9.0,11.0,13.0,15.0,26.0,80.0,277.0,1044.0,3003.57,4358.75,5277.29,6034.42,7137.52,7702.08,8610.44,9638.88,10738.39,11812.2,14436.97,17036.97 
-Germany,114.0,195.0,260.0,435.0,1105.0,2056.0,2899.0,4170.0,6120.0,10564.0,18004.0,25914.0,34075.0,36708.0,37898.0,39222.0,40677.0,42291.0,45156.0,48912.0,53669.0,59371.0,66662.0 -Greece,,1.0,1.0,1.0,1.0,1.0,5.0,9.0,12.0,46.0,202.0,612.0,1536.0,2579.0,2596.0,2604.0,2604.0,2605.53,2651.57,2833.79,3287.72,4277.42,5557.42 -Hungary,,,,,,,,0.4,1.0,1.0,2.0,4.0,12.0,35.0,89.0,172.0,235.0,344.0,728.0,1400.0,2131.0,2968.0,2988.0 -Ireland,,,,,,,,,,,,,,,,,,,,,,, -Italy,19.0,20.0,22.0,26.0,31.0,34.0,45.0,110.0,483.0,1264.0,3592.0,13131.0,16785.0,18185.0,18594.0,18901.0,19283.0,19682.29,20107.59,20865.28,21650.04,22594.26,25076.56 -Latvia,,,,,,,,,,,,,,,,,0.69,0.69,1.96,3.3,5.1,7.16,56.16 -Lithuania,,,,,,,,,0.1,0.1,0.1,0.3,7.0,68.0,69.0,69.0,70.0,70.08,72.0,73.0,80.0,84.0,397.0 -Luxembourg,,0.16,1.59,14.17,23.56,23.58,23.7,23.93,24.56,26.36,29.45,40.67,74.65,95.02,109.93,116.27,121.9,128.1,130.62,159.74,186.64,277.16,319.16 -Montenegro,,,,,,,,,,,,,,,,,,,,,2.57,2.57,22.2 -Netherlands,13.0,21.0,26.0,46.0,50.0,51.0,53.0,54.0,59.0,69.0,90.0,149.0,287.0,650.0,1007.0,1526.26,2135.02,2910.89,4608.0,7226.0,11108.43,14910.69,18848.69 -North Macedonia,,,,,,,,,,,,2.0,4.0,7.0,15.0,17.0,16.7,16.7,16.7,16.71,84.93,84.93,84.93 -Norway,6.0,6.0,6.0,7.0,7.0,7.0,8.0,8.0,8.3,8.7,9.1,9.5,10.0,11.0,13.0,15.0,26.7,44.9,53.11,102.53,141.53,186.53,302.53 -Poland,,,,,,,,,,,,1.11,1.3,2.39,27.15,107.78,187.25,287.09,561.98,1539.26,3954.96,7415.52,11166.52 -Portugal,1.0,1.0,1.0,2.0,2.0,2.0,3.0,24.0,59.0,115.0,134.0,169.6,235.6,293.6,412.6,441.75,493.05,539.42,617.85,832.74,1010.07,1474.78,2364.78 -Romania,,,,,,,,,0.1,0.1,0.1,1.0,41.0,761.0,1293.0,1326.0,1372.0,1374.13,1385.82,1397.71,1382.54,1393.92,1413.92 -Serbia,,,,,,0.1,0.2,0.4,0.9,1.2,1.3,1.5,3.1,4.7,6.0,9.0,11.0,10.0,11.0,11.0,11.5,11.94,11.94 -Slovakia,,,,,,,,,,,19.0,496.0,513.0,533.0,533.0,533.0,533.0,528.0,472.0,590.0,535.0,537.0,537.0 -Slovenia,1.0,1.0,,,,0.05,0.19,0.59,1.0,4.0,12.0,57.0,142.0,187.0,223.0,238.0,233.0,246.8,246.8,277.88,369.78,461.16,632.16 -Spain,1.0,3.0,6.0,10.0,19.0,37.0,113.0,476.0,3365.0,3403.0,3851.0,4260.0,4545.0,4665.0,4672.0,4677.0,4687.0,4696.0,4730.7,8772.02,10100.42,13678.4,18176.73 -Sweden,3.0,3.0,3.0,4.0,4.0,4.0,5.0,6.0,8.0,9.0,11.0,12.0,24.0,43.0,60.0,104.0,153.0,231.0,411.0,698.0,1090.0,1587.0,2587.0 -Switzerland,16.0,18.0,20.0,22.0,24.0,28.0,30.0,37.0,49.0,79.0,125.0,223.0,437.0,756.0,1061.0,1394.0,1664.0,1906.0,2173.0,2498.0,2973.0,3655.0,4339.92 -UK,2.0,3.0,4.0,6.0,8.0,11.0,14.0,18.0,23.0,27.0,95.0,1000.0,1753.0,2937.0,5528.0,9601.0,11914.0,12760.0,13059.0,13345.0,13579.0,13965.0,14660.0 diff --git a/data/gr-e-11.03.02.01.01-cc.csv b/data/gr-e-11.03.02.01.01-cc.csv new file mode 100644 index 00000000..0ba695de --- /dev/null +++ b/data/gr-e-11.03.02.01.01-cc.csv @@ -0,0 +1,45 @@ +year,passenger cars,passenger vehicles,goods vehicles,agricultural vehicles,industrial vehicles,motorcycles,mopeds (incl. 
fast e-bikes)¹ +1980,2246752,11087,169402,137685,0,137340,671473 +1981,2394455,11122,167846,151238,0,152508,687517 +1982,2473318,11341,178313,156631,0,178398,656102 +1983,2520610,11255,189920,165332,0,187090,674710 +1984,2552132,10853,192708,164078,0,199302,647391 +1985,2617164,10771,200537,175161,0,217974,644175 +1986,2678911,10800,207014,183689,0,225676,627523 +1987,2732720,11027,217750,189984,0,240102,613093 +1988,2819548,26869,236649,152693,43519,219987,581270 +1989,2895842,29270,241488,157867,44326,261715,551808 +1990,2985397,31180,252136,162932,45920,299264,464609 +1991,3057798,32968,257646,165571,46938,319779,418251 +1992,3091228,34136,256611,169277,47281,336448,381236 +1993,3109523,34852,253461,171414,47229,348159,358732 +1994,3165042,35676,256285,172300,47373,357252,336367 +1995,3229176,36517,262352,174026,47693,370700,317783 +1996,3268093,37662,263020,174247,47622,381986,301009 +1997,3323455,38508,264200,175689,47743,410750,280467 +1998,3383307,39012,267380,176712,47754,435042,265422 +1999,3467311,39692,273954,177148,48265,464357,246018 +2000,3545247,40260,278518,177963,48949,493781,218932 +2001,3629713,41342,285246,179321,49549,521390,199033 +2002,3700951,42401,290142,180063,50227,545132,186811 +2003,3753890,43629,292329,180295,50795,567358,173486 +2004,3811351,44784,298193,180898,50957,583010,165000 +2005,3863807,45785,307264,182093,51860,592194,156095 +2006,3899917,46445,314020,185450,53437,608648,150563 +2007,3955787,48026,324153,184062,55149,619166,144704 +2008,3989811,48536,326232,188218,55808,636540,141549 +2009,4009602,50675,327808,185902,56533,642777,139220 +2010,4075825,52751,335200,186485,58492,651202,139548 +2011,4163003,55422,348553,187130,60324,665870,142834 +2012,4254725,58278,361926,188358,62219,679822,145984 +2013,4320885,60151,371361,189305,63950,687990,147247 +2014,4384490,62436,382281,190095,65563,699219,152962 +2015,4458069,65720,393598,191132,67101,710022,161292 +2016,4524029,69676,405566,192139,68721,720381,176030 +2017,4570823,73814,416501,192858,70113,729149,188053 +2018,4602688,77985,428808,193283,71683,739344,201423 +2019,4623952,83054,440795,193834,74085,744542,211480 +2020,4658335,88293,452186,195082,75659,771586,229421 +2021,4709366,97805,466857,196530,77672,791323,244572 +2022,4721280,105158,475714,196942,79691,789794,257753 +2023,4760948,114299,485303,197678,81241,805653, diff --git a/data/heat_load_profile_DK_AdamJensen.csv b/data/heat_load_profile_DK_AdamJensen.csv deleted file mode 100644 index cb417bde..00000000 --- a/data/heat_load_profile_DK_AdamJensen.csv +++ /dev/null @@ -1,25 +0,0 @@ -hour,weekday,weekend -0,0.9181438689,0.9421512708 -1,0.9172359071,0.9400891069 -2,0.9269464481,0.9461062015 -3,0.9415047932,0.9535084941 -4,0.9656299507,0.9651094993 -5,1.0221166443,0.9834676747 -6,1.1553090493,1.0124171051 -7,1.2093411031,1.0446615927 -8,1.1470295942,1.088203419 -9,1.0877191341,1.1110334576 -10,1.0418327372,1.0926752822 -11,1.0062977133,1.055488209 -12,0.9837030359,1.0251266112 -13,0.9667570278,0.9990015154 -14,0.9548320932,0.9782897278 -15,0.9509232061,0.9698167237 -16,0.9636973319,0.974288587 -17,0.9799372563,0.9886456216 -18,1.0046501848,1.0084159643 -19,1.0079452419,1.0171243296 -20,0.9860566481,0.9994722379 -21,0.9705228074,0.982761591 -22,0.9586485819,0.9698167237 -23,0.9335023778,0.9515079292 diff --git a/data/hydrogen_salt_cavern_potentials.csv b/data/hydrogen_salt_cavern_potentials.csv deleted file mode 100644 index c1168266..00000000 --- a/data/hydrogen_salt_cavern_potentials.csv +++ /dev/null @@ -1,31 +0,0 @@ -ct,TWh -AT, -BA, 
-BE, -BG, -CH, -CZ, -DE,4500 -DK,700 -EE, -ES,350 -FI, -FR, -GB,1050 -GR,120 -HR, -HU, -IE, -IT, -LT, -LU, -LV, -NL,150 -NO, -PL,120 -PT,400 -RO, -RS, -SE, -SI, -SK, diff --git a/data/links_tyndp.csv b/data/links_tyndp.csv index a0603120..43030be5 100644 --- a/data/links_tyndp.csv +++ b/data/links_tyndp.csv @@ -26,3 +26,16 @@ NordBalt,Klaipeda (LT),Nybro (SE),450,,700,built,,https://en.wikipedia.org/wiki/ Estlink 1,Harku (EE),Espoo (FI),105,,350,built,,https://en.wikipedia.org/wiki/Estlink,24.560278,59.384722,24.551667,60.203889 Greenlink,Waterford (IE),Pembroke (UK),,180,500,under construction,,https://tyndp2022-project-platform.azurewebsites.net/projectsheets/transmission/286,-6.987,52.260,-4.986,51.686 Celtic Interconnector,Aghada (IE),La Martyre (FR),,572,700,under consideration,,https://tyndp2022-project-platform.azurewebsites.net/projectsheets/transmission/107,-8.16642,51.91413,-4.184,48.459 +GiLA,Bordeaux (FR),Nantes (FR),,312,640,under consideration,,https://eepublicdownloads.blob.core.windows.net/public-cdn-container/tyndp-documents/TYNDP2024/240220_TYNDP2024_project_portfolio.xlsx,-1.209,46.901,-0.576,44.960 +HG North Tyrrhenian Corridor,Milan (IT),Viterbo (IT),,500,2000,in permitting,,https://eepublicdownloads.blob.core.windows.net/public-cdn-container/tyndp-documents/TYNDP2024/240220_TYNDP2024_project_portfolio.xlsx,9.409,45.553,12.015,42.244 +HG Adriatic Corridor,Ferrara (IT),Foggia (IT),,582,2000,in permitting,,https://eepublicdownloads.blob.core.windows.net/public-cdn-container/tyndp-documents/TYNDP2024/240220_TYNDP2024_project_portfolio.xlsx,11.661,44.855,15.550,41.513 +SAPEI 2,Fioumesanto (IT),Montalto (IT),,390,1000,in permitting,,https://eepublicdownloads.blob.core.windows.net/public-cdn-container/tyndp-documents/TYNDP2024/240220_TYNDP2024_project_portfolio.xlsx,8.283,40.790,11.602,42.331 +HG Ionian-Tyrrhenian Corridor,Rossano (IT),Latina (IT),,496,2000,in permitting,,https://eepublicdownloads.blob.core.windows.net/public-cdn-container/tyndp-documents/TYNDP2024/240220_TYNDP2024_project_portfolio.xlsx,16.629,39.568,12.779,41.430 +HG Ionian-Tyrrhenian Corridor 2,Rossano (IT),Catania (IT),,330,2000,in permitting,,https://eepublicdownloads.blob.core.windows.net/public-cdn-container/tyndp-documents/TYNDP2024/240220_TYNDP2024_project_portfolio.xlsx,16.629,39.568,15.049,37.408 +Germany-UK Hybrid Interconnector,Fetteresso (UK),Emden (DE),800,,2000,under consideration,,https://eepublicdownloads.blob.core.windows.net/public-cdn-container/tyndp-documents/TYNDP2024/240220_TYNDP2024_project_portfolio.xlsx,-2.383,56.991,7.207,53.376 +NU-Link Interconnector,Hornsea (UK),Moerdijk (NL),,460,1200,under consideration,,https://eepublicdownloads.blob.core.windows.net/public-cdn-container/tyndp-documents/TYNDP2024/240220_TYNDP2024_project_portfolio.xlsx,-0.261,53.655,4.586,51.661 +APOLLO-LINK,La Farga (ES),La Spezia (IT),,725,2091,under consideration,,https://eepublicdownloads.blob.core.windows.net/public-cdn-container/tyndp-documents/TYNDP2024/240220_TYNDP2024_project_portfolio.xlsx,2.883,42.062,9.884,44.107 +Baltic WindConnector (BWC),Lubmin (DE),Lihula (EE),,960,2000,under consideration,,https://eepublicdownloads.blob.core.windows.net/public-cdn-container/tyndp-documents/TYNDP2024/240220_TYNDP2024_project_portfolio.xlsx,13.686,54.139,23.818,58.675 +High-Voltage Direct Current Interconnector Project Romania-Hungary,Constanta (RO),Albertirsa (HU),,930,2500,under 
consideration,,https://eepublicdownloads.blob.core.windows.net/public-cdn-container/tyndp-documents/TYNDP2024/240220_TYNDP2024_project_portfolio.xlsx,28.588,44.201,19.584,47.224 +Rhine-Main-Link,Ovelgönne (DE),Marxheim (DE),,433,4000,in permitting,,https://eepublicdownloads.blob.core.windows.net/public-cdn-container/tyndp-documents/TYNDP2024/240220_TYNDP2024_project_portfolio.xlsx,8.379,53.315,8.435,50.078 +Green Aegean Interconnector,Arachthos (GR),Ottenhofen (DE),,600,3000,under consideration,,https://eepublicdownloads.blob.core.windows.net/public-cdn-container/tyndp-documents/TYNDP2024/240220_TYNDP2024_project_portfolio.xlsx,20.967,39.185,11.868,48.207 diff --git a/data/switzerland-new_format-all_years.csv b/data/switzerland-new_format-all_years.csv new file mode 100644 index 00000000..d083e8a8 --- /dev/null +++ b/data/switzerland-new_format-all_years.csv @@ -0,0 +1,25 @@ +country,item,2010,2011,2012,2013,2014,2015,2016,2017,2018,2019,2020,2021,2022 +CH,total residential,268.2,223.4,243.4,261.3,214.2,229.1,241.2,236.5,223.7,226.5,219.1,241.2,211.3 +CH,total residential space,192.2,149,168.1,185.5,139.7,154.4,167.3,161.5,147.2,150.4,140.2,166.2,131.9 +CH,total residential water,32.2,31.6,31.9,32.2,31.7,31.9,31.8,31.8,31.8,31.7,33.3,32.5,32.5 +CH,total residential cooking,9.3,9.3,9.3,9.4,9.5,9.6,9.9,10,10.1,10.2,10.5,10.3,10.3 +CH,electricity residential,67.9,63.7,65.7,67.6,63,64.4,69.7,69.2,67.7,68.1,68.7,70.8,66.8 +CH,electricity residential space,15.9,12.8,14.3,15.8,12.3,13.5,15.8,15.6,14.7,15.3,14.8,17.8,14.8 +CH,electricity residential water,8.8,8.5,8.5,8.6,8.5,8.6,8.9,9,9.2,9.3,9.7,9.5,9.5 +CH,electricity residential cooking,4.9,4.9,4.9,4.9,5,5,5,5.1,5.1,5.1,5.4,5.2,5.3 +CH,total services,145.9,127.4,136.7,144,124.5,132.5,150.5,147.7,141.5,143.1,129.7,144.2,122.5 +CH,total services space,80,62.2,70.8,77.4,58.3,64.3,77,74.4,68.2,69.8,64.3,75.7,58.7 +CH,total services water,10.1,10,10.1,10.1,10,10,11.4,11.3,11.2,11.1,9.7,10.4,12 +CH,total services cooking,2.5,2.4,2.3,2.3,2.4,2.3,3.1,3.1,3.2,3.3,2.1,2.6,3.2 +CH,electricity services,60.5,59.2,60.3,61.4,60.3,62.6,65.9,65.7,65.5,65.6,58.8,61.6,61.6 +CH,electricity services space,4,3.2,3.8,4.2,3.3,3.6,2.7,2.5,2.3,2.3,2.2,2.5,2.5 +CH,electricity services water,0.7,0.7,0.7,0.7,0.7,0.7,1.2,1.1,1.1,1.1,0.9,1,1 +CH,electricity services cooking,2.5,2.4,2.3,2.3,2.4,2.3,3.1,3.1,3.1,3.2,3.3,2.1,3.2 +CH,total rail,11.5,11.1,11.2,11.4,11.1,11.4,11.6,11.4,11.2,11,10.2,10.6,10.8 +CH,total road,199.4,200.4,200.4,201.2,202,203.1,203.9,203.7,202.6,200.5,182.6,188.3,193.3 +CH,electricity road,0,0,0,0,0,0,0.1,0.2,0.3,0.4,0.5,0.8,1.3 +CH,electricity rail,11.5,11.1,11.2,11.4,11.1,11.4,11.5,11.3,11.1,11,10.1,10.6,10.7 +CH,total domestic aviation,3.3,3.2,3.4,3.4,3.5,3.5,3.6,3.1,3.1,2.9,2.5,2.8,3 +CH,total international aviation,58,62,63.5,64.2,64.5,66.8,70.6,72.8,77.2,78.2,28.2,31.2,56.8 +CH,total domestic navigation,1.6,1.6,1.6,1.6,1.6,1.6,1.4,1.4,1.4,1.4,1.4,1.4,1.4 +CH,total international navigation,0,0,0,0,0,0,0,0,0,0,0,0,0 diff --git a/doc/Makefile b/doc/Makefile index a2ae2428..9eea4532 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT diff --git a/doc/conf.py b/doc/conf.py index fe577ac7..739c7663 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -34,10 +34,10 @@ sys.path.insert(0, os.path.abspath("../scripts")) # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. 
extensions = [ - #'sphinx.ext.autodoc', + "sphinx.ext.autodoc", #'sphinx.ext.autosummary', "myst_parser", - "sphinx.ext.autosectionlabel", + # "sphinx.ext.autosectionlabel", "sphinx.ext.intersphinx", "sphinx.ext.todo", "sphinx.ext.mathjax", @@ -50,6 +50,19 @@ extensions = [ "sphinx.ext.imgconverter", # for SVG conversion ] +autodoc_mock_imports = [ + "atlite", + "snakemake", + "pycountry", + "rioxarray", + "country_converter", + "tabula", + "memory_profiler", + "powerplantmatching", + "rasterio", + "dask.distributed", +] + autodoc_default_flags = ["members"] autosummary_generate = True @@ -72,7 +85,7 @@ master_doc = "index" # General information about the project. project = "PyPSA-Eur" -copyright = "2017-2023 Tom Brown (KIT, TUB, FIAS), Jonas Hoersch (KIT, FIAS), Fabian Hofmann (TUB, FIAS), Fabian Neumann (TUB, KIT), Marta Victoria (Aarhus University), Lisa Zeyen (KIT, TUB)" +copyright = "2017-2024 Tom Brown (KIT, TUB, FIAS), Jonas Hoersch (KIT, FIAS), Fabian Hofmann (TUB, FIAS), Fabian Neumann (TUB, KIT), Marta Victoria (Aarhus University), Lisa Zeyen (KIT, TUB)" author = "Tom Brown (KIT, TUB, FIAS), Jonas Hoersch (KIT, FIAS), Fabian Hofmann (TUB, FIAS), Fabian Neumann (TUB, KIT), Marta Victoria (Aarhus University), Lisa Zeyen (KIT, TUB)" # The version info for the project you're documenting, acts as replacement for @@ -80,9 +93,9 @@ author = "Tom Brown (KIT, TUB, FIAS), Jonas Hoersch (KIT, FIAS), Fabian Hofmann # built documents. # # The short X.Y version. -version = "0.9" +version = "0.10" # The full version, including alpha/beta/rc tags. -release = "0.9.0" +release = "0.10.0" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/doc/configtables/adjustments.csv b/doc/configtables/adjustments.csv new file mode 100644 index 00000000..52617352 --- /dev/null +++ b/doc/configtables/adjustments.csv @@ -0,0 +1,8 @@ +,Unit,Values,Description +adjustments,,, +-- electricity,bool or dict,,"Parameter adjustments for capital cost, marginal cost, and maximum capacities of carriers. Applied in :mod:`prepare_network.`" +-- -- {attr},,,"Attribute can be ``e_nom_opt``, ``p_nom_opt``, ``marginal_cost`` or ``capital_cost``" +-- -- -- {carrier},float,per-unit,"Any carrier of the network to which parameter adjustment factor should be applied." +-- sector,bool or dict,,"Parameter adjustments for capital cost, marginal cost, and maximum capacities of carriers. Applied in :mod:`prepare_sector_network.`" +-- -- {attr},,,"Attribute can be ``e_nom_opt``, ``p_nom_opt``, ``marginal_cost`` or ``capital_cost``" +-- -- -- {carrier},float,per-unit,"Any carrier of the network to which parameter adjustment factor should be applied." diff --git a/doc/configtables/clustering.csv b/doc/configtables/clustering.csv index e831ca84..65411738 100644 --- a/doc/configtables/clustering.csv +++ b/doc/configtables/clustering.csv @@ -17,3 +17,6 @@ aggregation_strategies,,, -- -- {key},str,"{key} can be any of the component of the generator (str). It’s value can be any that can be converted to pandas.Series using getattr(). For example one of {min, max, sum}.","Aggregates the component according to the given strategy. For example, if sum, then all values within each cluster are summed to represent the new generator." -- buses,,, -- -- {key},str,"{key} can be any of the component of the bus (str). It’s value can be any that can be converted to pandas.Series using getattr(). 
For example one of {min, max, sum}.","Aggregates the component according to the given strategy. For example, if sum, then all values within each cluster are summed to represent the new bus." +temporal,,,Options for temporal resolution +-- resolution_elec,--,"{false,``nH``; i.e. ``2H``-``6H``}","Resample the time-resolution by averaging over every ``n`` snapshots in :mod:`prepare_network`. **Warning:** This option should currently only be used with electricity-only networks, not for sector-coupled networks." +-- resolution_sector,--,"{false,``nH``; i.e. ``2H``-``6H``}","Resample the time-resolution by averaging over every ``n`` snapshots in :mod:`prepare_sector_network`." diff --git a/doc/configtables/costs.csv b/doc/configtables/costs.csv index b69c0bf9..03933c18 100644 --- a/doc/configtables/costs.csv +++ b/doc/configtables/costs.csv @@ -1,6 +1,6 @@ ,Unit,Values,Description year,--,YYYY; e.g. '2030',Year for which to retrieve cost assumptions of ``resources/costs.csv``. -version,--,vX.X.X; e.g. 'v0.5.0',Version of ``technology-data`` repository to use. +version,--,vX.X.X or //vX.X.X; e.g. 'v0.5.0',Version of ``technology-data`` repository to use. If this string is of the form // then costs are instead retrieved from ``github.com//`` at the tag. rooftop_share,--,float,Share of rooftop PV when calculating capital cost of solar (joint rooftop and utility-scale PV). social_discountrate,p.u.,float,Social discount rate to compare costs in different investment periods. 0.02 corresponds to a social discount rate of 2%. fill_values,--,float,Default values if not specified for a technology in ``resources/costs.csv``. diff --git a/doc/configtables/enable.csv b/doc/configtables/enable.csv index 8dd476cb..06397fdf 100644 --- a/doc/configtables/enable.csv +++ b/doc/configtables/enable.csv @@ -5,8 +5,8 @@ retrieve_databundle,bool,"{true, false}","Switch to retrieve databundle from zen retrieve_sector_databundle,bool,"{true, false}","Switch to retrieve sector databundle from zenodo via the rule :mod:`retrieve_sector_databundle` or whether to keep a custom databundle located in the corresponding folder." retrieve_cost_data,bool,"{true, false}","Switch to retrieve technology cost data from `technology-data repository `_." build_cutout,bool,"{true, false}","Switch to enable the building of cutouts via the rule :mod:`build_cutout`." -retrieve_irena,bool,"{true, false}",Switch to enable the retrieval of ``existing_capacities`` from IRENASTAT with :mod:`retrieve_irena`. retrieve_cutout,bool,"{true, false}","Switch to enable the retrieval of cutouts from zenodo with :mod:`retrieve_cutout`." build_natura_raster,bool,"{true, false}","Switch to enable the creation of the raster ``natura.tiff`` via the rule :mod:`build_natura_raster`." retrieve_natura_raster,bool,"{true, false}","Switch to enable the retrieval of ``natura.tiff`` from zenodo with :mod:`retrieve_natura_raster`." custom_busmap,bool,"{true, false}","Switch to enable the use of custom busmaps in rule :mod:`cluster_network`. If activated the rule looks for provided busmaps at ``data/custom_busmap_elec_s{simpl}_{clusters}.csv`` which should have the same format as ``resources/busmap_elec_s{simpl}_{clusters}.csv``, i.e. the index should contain the buses of ``networks/elec_s{simpl}.nc``." 
+drop_leap_day,bool,"{true, false}","Switch to drop February 29 from all time-dependent data in leap years" diff --git a/doc/configtables/energy.csv b/doc/configtables/energy.csv index 8718d75e..3d13b9c3 100644 --- a/doc/configtables/energy.csv +++ b/doc/configtables/energy.csv @@ -1,7 +1,4 @@ ,Unit,Values,Description energy_totals_year ,--,"{1990,1995,2000,2005,2010,2011,…} ",The year for the sector energy use. The year must be avaliable in the Eurostat report base_emissions_year ,--,"YYYY; e.g. 1990","The base year for the sector emissions. See `European Environment Agency (EEA) `_." - -eurostat_report_year ,--,"{2016,2017,2018}","The publication year of the Eurostat report. 2016 includes Bosnia and Herzegovina, 2017 does not" - emissions ,--,"{CO2, All greenhouse gases - (CO2 equivalent)}","Specify which sectoral emissions are taken into account. Data derived from EEA. Currently only CO2 is implemented." diff --git a/doc/configtables/existing_capacities.csv b/doc/configtables/existing_capacities.csv index 87519193..eacae35b 100644 --- a/doc/configtables/existing_capacities.csv +++ b/doc/configtables/existing_capacities.csv @@ -3,4 +3,5 @@ grouping_years_power ,--,A list of years,Intervals to group existing capacities grouping_years_heat ,--,A list of years below 2020,Intervals to group existing capacities for heat threshold_capacity ,MW,float,Capacities generators and links of below threshold are removed during add_existing_capacities +default_heating_lifetime ,years,int,Default lifetime for heating technologies conventional_carriers ,--,"Any subset of {uranium, coal, lignite, oil} ",List of conventional power plants to include in the sectoral network diff --git a/doc/configtables/hydro.csv b/doc/configtables/hydro.csv index 4544d110..790029d1 100644 --- a/doc/configtables/hydro.csv +++ b/doc/configtables/hydro.csv @@ -1,8 +1,11 @@ -,Unit,Values,Description -cutout,--,Must be 'europe-2013-era5',Specifies the directory where the relevant weather data ist stored. -carriers,--,"Any subset of {'ror', 'PHS', 'hydro'}","Specifies the types of hydro power plants to build per-unit availability time series for. 'ror' stands for run-of-river plants, 'PHS' represents pumped-hydro storage, and 'hydro' stands for hydroelectric dams." -PHS_max_hours,h,float,Maximum state of charge capacity of the pumped-hydro storage (PHS) in terms of hours at full output capacity ``p_nom``. Cf. `PyPSA documentation `_. -hydro_max_hours,h,"Any of {float, 'energy_capacity_totals_by_country', 'estimate_by_large_installations'}",Maximum state of charge capacity of the pumped-hydro storage (PHS) in terms of hours at full output capacity ``p_nom`` or heuristically determined. Cf. `PyPSA documentation `_. -flatten_dispatch,bool,"{true, false}",Consider an upper limit for the hydro dispatch. The limit is given by the average capacity factor plus the buffer given in ``flatten_dispatch_buffer`` -flatten_dispatch_buffer,--,float,"If ``flatten_dispatch`` is true, specify the value added above the average capacity factor." -clip_min_inflow,MW,float,"To avoid too small values in the inflow time series, values below this threshold are set to zero." +,Unit,Values,Description +cutout,--,Must be 'europe-2013-era5',Specifies the directory where the relevant weather data ist stored. +carriers,--,"Any subset of {'ror', 'PHS', 'hydro'}","Specifies the types of hydro power plants to build per-unit availability time series for. 
'ror' stands for run-of-river plants, 'PHS' represents pumped-hydro storage, and 'hydro' stands for hydroelectric dams." +PHS_max_hours,h,float,Maximum state of charge capacity of the pumped-hydro storage (PHS) in terms of hours at full output capacity ``p_nom``. Cf. `PyPSA documentation `_. +hydro_max_hours,h,"Any of {float, 'energy_capacity_totals_by_country', 'estimate_by_large_installations'}",Maximum state of charge capacity of the pumped-hydro storage (PHS) in terms of hours at full output capacity ``p_nom`` or heuristically determined. Cf. `PyPSA documentation `_. +flatten_dispatch,bool,"{true, false}",Consider an upper limit for the hydro dispatch. The limit is given by the average capacity factor plus the buffer given in ``flatten_dispatch_buffer`` +flatten_dispatch_buffer,--,float,"If ``flatten_dispatch`` is true, specify the value added above the average capacity factor." +clip_min_inflow,MW,float,"To avoid too small values in the inflow time series, values below this threshold are set to zero." +eia_norm_year,--,"Year in EIA hydro generation dataset; or False to disable","To specify a specific year by which hydro inflow is normed that deviates from the snapshots' year" +eia_correct_by_capacity,--,boolean,"Correct EIA annual hydro generation data by installed capacity." +eia_approximate_missing,--,boolean,"Approximate hydro generation data for years not included in EIA dataset through a regression based on annual runoff." diff --git a/doc/configtables/industry.csv b/doc/configtables/industry.csv index fc1b3f0f..d1b560ed 100644 --- a/doc/configtables/industry.csv +++ b/doc/configtables/industry.csv @@ -17,6 +17,8 @@ HVC_primary_fraction,--,float,The fraction of high value chemicals (HVC) produce HVC_mechanical_recycling _fraction,--,float,The fraction of high value chemicals (HVC) produced using mechanical recycling HVC_chemical_recycling _fraction,--,float,The fraction of high value chemicals (HVC) produced using chemical recycling ,,, +sector_ratios_fraction_future,--,Dictionary with planning horizons as keys.,The fraction of total progress in fuel and process switching achieved in the industry sector. +basic_chemicals_without_NH3_production_today,Mt/a,float,"The amount of basic chemicals produced without ammonia (= 86 Mtethylene-equiv - 17 MtNH3)." HVC_production_today,MtHVC/a,float,"The amount of high value chemicals (HVC) produced. This includes ethylene, propylene and BTX. From `DECHEMA (2017) `_, Figure 16, page 107" Mwh_elec_per_tHVC _mechanical_recycling,MWh/tHVC,float,"The energy amount of electricity needed to produce a ton of high value chemical (HVC) using mechanical recycling. From SI of `Meys et al (2020) `_, Table S5, for HDPE, PP, PS, PET. LDPE would be 0.756." Mwh_elec_per_tHVC _chemical_recycling,MWh/tHVC,float,"The energy amount of electricity needed to produce a ton of high value chemical (HVC) using chemical recycling. The default value is based on pyrolysis and electric steam cracking. 
From `Material Economics (2019) `_, page 125" diff --git a/doc/configtables/licenses-sector.csv b/doc/configtables/licenses-sector.csv index d65d3b36..a44e0a5d 100644 --- a/doc/configtables/licenses-sector.csv +++ b/doc/configtables/licenses-sector.csv @@ -1,17 +1,14 @@ description,file/folder,licence,source JRC IDEES database,jrc-idees-2015/,CC BY 4.0,https://ec.europa.eu/jrc/en/potencia/jrc-idees urban/rural fraction,urban_percent.csv,unknown,unknown -JRC biomass potentials,biomass/,unknown,https://doi.org/10.2790/39014 JRC ENSPRESO biomass potentials,remote,CC BY 4.0,https://data.jrc.ec.europa.eu/dataset/74ed5a04-7d74-4807-9eab-b94774309d9f EEA emission statistics,eea/UNFCCC_v23.csv,EEA standard re-use policy,https://www.eea.europa.eu/data-and-maps/data/national-emissions-reported-to-the-unfccc-and-to-the-eu-greenhouse-gas-monitoring-mechanism-16 Eurostat Energy Balances,eurostat-energy_balances-*/,Eurostat,https://ec.europa.eu/eurostat/web/energy/data/energy-balances Swiss energy statistics from Swiss Federal Office of Energy,switzerland-sfoe/,unknown,http://www.bfe.admin.ch/themen/00526/00541/00542/02167/index.html?dossier_id=02169 BASt emobility statistics,emobility/,unknown,http://www.bast.de/DE/Verkehrstechnik/Fachthemen/v2-verkehrszaehlung/Stundenwerte.html?nn=626916 BDEW heating profile,heat_load_profile_BDEW.csv,unknown,https://github.com/oemof/demandlib -heating profiles for Aarhus,heat_load_profile_DK_AdamJensen.csv,unknown,Adam Jensen MA thesis at Aarhus University -George Lavidas wind/wave costs,WindWaveWEC_GLTB.xlsx,unknown,George Lavidas co2 budgets,co2_budget.csv,CC BY 4.0,https://arxiv.org/abs/2004.11009 -existing heating potentials,existing_infrastructure/existing_heating_raw.csv,unknown,https://ec.europa.eu/energy/studies/mapping-and-analyses-current-and-future-2020-2030-heatingcooling-fuel-deployment_en?redir=1 +existing heating potentials,existing_infrastructure/existing_heating_raw.csv,unknown,https://energy.ec.europa.eu/publications/mapping-and-analyses-current-and-future-2020-2030-heatingcooling-fuel-deployment-fossilrenewables-1_en IRENA existing VRE capacities,existing_infrastructure/{solar|onwind|offwind}_capcity_IRENA.csv,unknown,https://www.irena.org/Statistics/Download-Data USGS ammonia production,myb1-2017-nitro.xls,unknown,https://www.usgs.gov/centers/nmic/nitrogen-statistics-and-information hydrogen salt cavern potentials,h2_salt_caverns_GWh_per_sqkm.geojson,CC BY 4.0,https://doi.org/10.1016/j.ijhydene.2019.12.161 https://doi.org/10.20944/preprints201910.0187.v1 diff --git a/doc/configtables/load.csv b/doc/configtables/load.csv index 6e98f881..34d73dc5 100644 --- a/doc/configtables/load.csv +++ b/doc/configtables/load.csv @@ -1,6 +1,7 @@ ,Unit,Values,Description -power_statistics,bool,"{true, false}",Whether to load the electricity consumption data of the ENTSOE power statistics (only for files from 2019 and before) or from the ENTSOE transparency data (only has load data from 2015 onwards). interpolate_limit,hours,integer,"Maximum gap size (consecutive nans) which interpolated linearly." time_shift_for_large_gaps,string,string,"Periods which are used for copying time-slices in order to fill large gaps of nans. Have to be valid ``pandas`` period strings." manual_adjustments,bool,"{true, false}","Whether to adjust the load data manually according to the function in :func:`manual_adjustment`." scaling_factor,--,float,"Global correction factor for the load time series." 
+fixed_year,--,Year or False,"To specify a fixed year for the load time series that deviates from the snapshots' year" +supplement_synthetic,bool,"{true, false}","Whether missing data for the selected time period should be supplemented by synthetic data from https://zenodo.org/record/10820928." diff --git a/doc/configtables/offwind-ac.csv index 9dc0614c..b2533f04 100644 --- a/doc/configtables/offwind-ac.csv +++ b/doc/configtables/offwind-ac.csv @@ -2,7 +2,7 @@ cutout,--,"Should be a folder listed in the configuration ``atlite: cutouts:`` (e.g. 'europe-2013-era5') or reference an existing folder in the directory ``cutouts``. Source module must be ERA5.","Specifies the directory where the relevant weather data ist stored." resource,,, -- method,--,"Must be 'wind'","A superordinate technology type." --- turbine,--,"One of turbine types included in `atlite `_","Specifies the turbine type and its characteristic power curve." +-- turbine,--,"One of turbine types included in `atlite `_. Can be a string or a dictionary with years as keys which denote the year another turbine model becomes available.","Specifies the turbine type and its characteristic power curve." capacity_per_sqkm,:math:`MW/km^2`,float,"Allowable density of wind turbine placement." correction_factor,--,float,"Correction factor for capacity factor time series." excluder_resolution,m,float,"Resolution on which to perform geographical elibility analysis." diff --git a/doc/configtables/offwind-dc.csv index c947f358..7c537543 100644 --- a/doc/configtables/offwind-dc.csv +++ b/doc/configtables/offwind-dc.csv @@ -2,7 +2,7 @@ cutout,--,"Should be a folder listed in the configuration ``atlite: cutouts:`` (e.g. 'europe-2013-era5') or reference an existing folder in the directory ``cutouts``. Source module must be ERA5.","Specifies the directory where the relevant weather data ist stored." resource,,, -- method,--,"Must be 'wind'","A superordinate technology type." --- turbine,--,"One of turbine types included in `atlite `__","Specifies the turbine type and its characteristic power curve." +-- turbine,--,"One of turbine types included in `atlite `_. Can be a string or a dictionary with years as keys which denote the year another turbine model becomes available.","Specifies the turbine type and its characteristic power curve." capacity_per_sqkm,:math:`MW/km^2`,float,"Allowable density of wind turbine placement." correction_factor,--,float,"Correction factor for capacity factor time series." excluder_resolution,m,float,"Resolution on which to perform geographical elibility analysis." diff --git a/doc/configtables/onwind.csv index f6b36e5d..3b09214b 100644 --- a/doc/configtables/onwind.csv +++ b/doc/configtables/onwind.csv @@ -2,7 +2,7 @@ cutout,--,"Should be a folder listed in the configuration ``atlite: cutouts:`` (e.g. 'europe-2013-era5') or reference an existing folder in the directory ``cutouts``. Source module must be ERA5.","Specifies the directory where the relevant weather data ist stored." resource,,, -- method,--,"Must be 'wind'","A superordinate technology type." --- turbine,--,"One of turbine types included in `atlite `__","Specifies the turbine type and its characteristic power curve." +-- turbine,--,"One of turbine types included in `atlite `_. Can be a string or a dictionary with years as keys which denote the year another turbine model becomes available.","Specifies the turbine type and its characteristic power curve."
capacity_per_sqkm,:math:`MW/km^2`,float,"Allowable density of wind turbine placement." corine,,, -- grid_codes,--,"Any subset of the `CORINE Land Cover code list `_","Specifies areas according to CORINE Land Cover codes which are generally eligible for wind turbine placement." diff --git a/doc/configtables/plotting.csv index ed5d9c9f..82fc203c 100644 --- a/doc/configtables/plotting.csv +++ b/doc/configtables/plotting.csv @@ -1,6 +1,9 @@ ,Unit,Values,Description map,,, -- boundaries,°,"[x1,x2,y1,y2]",Boundaries of the map plots in degrees latitude (y) and longitude (x) +projection,,, +-- name,--,"Valid Cartopy projection name","See https://scitools.org.uk/cartopy/docs/latest/reference/projections.html for list of available projections." +-- args,--,--,"Other entries under 'projection' are passed as keyword arguments to the projection constructor, e.g. ``central_longitude: 10.``." costs_max,bn Euro,float,Upper y-axis limit in cost bar plots. costs_threshold,bn Euro,float,Threshold below which technologies will not be shown in cost bar plots. energy_max,TWh,float,Upper y-axis limit in energy bar plots. diff --git a/doc/configtables/run.csv index 90cf65ad..44f06165 100644 --- a/doc/configtables/run.csv +++ b/doc/configtables/run.csv @@ -1,5 +1,9 @@ ,Unit,Values,Description -name,--,"any string","Specify a name for your run. Results will be stored under this name." -disable_progrssbar,bool,"{true, false}","Switch to select whether progressbar should be disabled." -shared_resources,bool,"{true, false}","Switch to select whether resources should be shared across runs." +name,--,str/list,"Specify a name for your run. Results will be stored under this name. If ``scenario: enable:`` is set to ``true``, the name must contain a subset of scenario names defined in ``scenario: file:``. If the name is 'all', all defined scenarios will be run." +prefix,--,str,"Prefix for the run name which is used as a top-layer directory name in the results and resources folders." +scenarios,,, +-- enable,bool,"{true, false}","Switch to select whether the workflow should generate scenarios based on ``file``." +-- file,str,,"Path to the scenario yaml file. The scenario file contains config overrides for each scenario. In order to be taken into account, ``run: scenarios`` has to be set to ``true`` and ``run: name`` has to be a subset of top level keys given in the scenario file. In order to automatically create a `scenario.yaml` file based on a combination of settings, alter and use the ``config/create_scenarios.py`` script in the ``config`` directory." +disable_progressbar,bool,"{true, false}","Switch to select whether progressbar should be disabled." +shared_resources,bool/str,,"Switch to select whether resources should be shared across runs. If a string is passed, this is used as a subdirectory name for shared resources. If set to 'base', only resources before creating the elec.nc file are shared." shared_cutouts,bool,"{true, false}","Switch to select whether cutouts should be shared across runs."
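The scenario machinery described in the ``run`` table above expects a YAML file of per-scenario config overrides. As a minimal sketch of what a generator script in the spirit of ``config/create_scenarios.py`` could look like (this is not the actual script; the scenario names and override keys below are purely illustrative):

.. code:: python

    # Illustrative only: write a scenario file of config overrides that can be
    # referenced via ``run: scenarios: file:`` and selected with ``run: name:``.
    import itertools
    import yaml

    scenarios = {
        f"co2-{budget}-h2store-{store}": {
            "co2_budget": budget,  # hypothetical override keys for illustration
            "sector": {"hydrogen_underground_storage": store},
        }
        for budget, store in itertools.product([0.05, 0.0], [True, False])
    }

    with open("config/scenarios.yaml", "w") as f:
        yaml.safe_dump(scenarios, f)

With ``run: scenarios: enable: true``, setting ``run: name:`` to one of the generated top-level keys (or to 'all') would then pick up the corresponding overrides.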
diff --git a/doc/configtables/sector-opts.csv b/doc/configtables/sector-opts.csv index ea39c3b0..fc9e8c10 100644 --- a/doc/configtables/sector-opts.csv +++ b/doc/configtables/sector-opts.csv @@ -7,5 +7,5 @@ Trigger, Description, Definition, Status ``B``,Add biomass,,In active use ``I``,Add industry sector,,In active use ``A``,Add agriculture sector,,In active use -``dist``+``n``,Add distribution grid with investment costs of ``n`` times costs in ``data/costs_{cost_year}.csv``,,In active use +``dist``+``n``,Add distribution grid with investment costs of ``n`` times costs in ``resources/costs_{cost_year}.csv``,,In active use ``seq``+``n``,Sets the CO2 sequestration potential to ``n`` Mt CO2 per year,,In active use diff --git a/doc/configtables/sector.csv b/doc/configtables/sector.csv index 338cf34e..58ccd9bf 100644 --- a/doc/configtables/sector.csv +++ b/doc/configtables/sector.csv @@ -1,4 +1,9 @@ ,Unit,Values,Description +transport,--,"{true, false}",Flag to include transport sector. +heating,--,"{true, false}",Flag to include heating sector. +biomass,--,"{true, false}",Flag to include biomass sector. +industry,--,"{true, false}",Flag to include industry sector. +agriculture,--,"{true, false}",Flag to include agriculture sector. district_heating,--,,`prepare_sector_network.py `_ -- potential,--,float,maximum fraction of urban demand which can be supplied by district heating -- progress,--,Dictionary with planning horizons as keys., Increase of today's district heating demand to potential maximum district heating share. Progress = 0 means today's district heating share. Progress = 1 means maximum fraction of urban demand is supplied by district heating @@ -66,6 +71,7 @@ boilers,--,"{true, false}",Add option for transforming gas into heat using gas b resistive_heaters,--,"{true, false}",Add option for transforming electricity into heat using resistive heaters (independently from gas boilers) oil_boilers,--,"{true, false}",Add option for transforming oil into heat using boilers biomass_boiler,--,"{true, false}",Add option for transforming biomass into heat using boilers +overdimension_individual_heating,--,"float",Add option for overdimensioning individual heating systems by a certain factor. This allows them to cover heat demand peaks e.g. 10% higher than those in the data with a setting of 1.1. chp,--,"{true, false}",Add option for using Combined Heat and Power (CHP) micro_chp,--,"{true, false}",Add option for using Combined Heat and Power (CHP) for decentral areas. solar_thermal,--,"{true, false}",Add option for using solar thermal to generate heat. @@ -84,7 +90,7 @@ regional_methanol_demand,--,"{true, false}",Spatially resolve methanol demand. S regional_oil_demand,--,"{true, false}",Spatially resolve oil demand. Set to true if regional CO2 constraints needed. regional_co2 _sequestration_potential,,, -- enable,--,"{true, false}",Add option for regionally-resolved geological carbon dioxide sequestration potentials based on `CO2StoP `_. --- attribute,--,string,Name of the attribute for the sequestration potential +-- attribute,--,string or list,Name (or list of names) of the attribute(s) for the sequestration potential -- include_onshore,--,"{true, false}",Add options for including onshore sequestration potentials -- min_size,Gt ,float,Any sites with lower potential than this value will be excluded -- max_size,Gt ,float,The maximum sequestration potential for any one site. 
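The ``dist``+``n`` and ``seq``+``n`` triggers listed in the sector-opts table above pack numeric factors straight into the ``sector_opts`` wildcard. A simplified sketch of how such a wildcard string can be split into its triggers (illustrative only, not the project's actual parsing code; the dictionary keys are made up):

.. code:: python

    # Split a sector_opts wildcard such as "730H-T-H-B-I-A-dist1-seq200" into
    # its triggers and pull out the numeric dist/seq factors (illustrative).
    def parse_sector_opts(sector_opts):
        opts = sector_opts.split("-")
        settings = {}
        for o in opts:
            if o.startswith("dist"):
                settings["distribution_grid_cost_factor"] = float(o[len("dist"):])
            elif o.startswith("seq"):
                settings["co2_sequestration_potential"] = float(o[len("seq"):])
        return opts, settings

    opts, settings = parse_sector_opts("730H-T-H-B-I-A-dist1-seq200")
    print(settings)
    # {'distribution_grid_cost_factor': 1.0, 'co2_sequestration_potential': 200.0}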
@@ -108,6 +114,7 @@ min_part_load _methanolisation,per unit of p_nom ,float,The minimum unit dispatc use_fischer_tropsch _waste_heat,--,"{true, false}",Add option for using waste heat of Fischer Tropsch in district heating networks use_fuel_cell_waste_heat,--,"{true, false}",Add option for using waste heat of fuel cells in district heating networks use_electrolysis_waste _heat,--,"{true, false}",Add option for using waste heat of electrolysis in district heating networks +electricity_transmission _grid,--,"{true, false}",Switch for enabling/disabling the electricity transmission grid. electricity_distribution _grid,--,"{true, false}",Add a simplified representation of the exchange capacity between transmission and distribution grid level through a link. electricity_distribution _grid_cost_factor,,,Multiplies the investment cost of the electricity distribution grid ,,, @@ -136,5 +143,5 @@ limit_max_growth,,, -- factor,p.u.,float,The maximum growth factor of a carrier (e.g. 1.3 allows 30% larger than max historic growth) -- max_growth,,, -- -- {carrier},GW,float,The historic maximum growth of a carrier --- max_relative_growth, +-- max_relative_growth,,, -- -- {carrier},p.u.,float,The historic maximum relative growth of a carrier diff --git a/doc/configtables/snapshots.csv index 4a3e1212..4be0439b 100644 --- a/doc/configtables/snapshots.csv +++ b/doc/configtables/snapshots.csv @@ -2,5 +2,3 @@ start,--,str or datetime-like; e.g. YYYY-MM-DD,Left bound of date range end,--,str or datetime-like; e.g. YYYY-MM-DD,Right bound of date range inclusive,--,"One of {'neither', 'both', ‘left’, ‘right’}","Make the time interval closed to the ``left``, ``right``, or both sides ``both`` or neither side ``None``." -resolution ,--,"{false,``nH``; i.e. ``2H``-``6H``}",Resample the time-resolution by averaging over every ``n`` snapshots -segmentation,--,"{false,``n``; e.g. ``4380``}","Apply time series segmentation with `tsam `_ package to ``n`` adjacent snapshots of varying lengths based on capacity factors of varying renewables, hydro inflow and load." diff --git a/doc/configtables/solar.csv index 8328d342..18587694 100644 --- a/doc/configtables/solar.csv +++ b/doc/configtables/solar.csv @@ -2,7 +2,7 @@ cutout,--,"Should be a folder listed in the configuration ``atlite: cutouts:`` (e.g. 'europe-2013-era5') or reference an existing folder in the directory ``cutouts``. Source module can be ERA5 or SARAH-2.","Specifies the directory where the relevant weather data ist stored that is specified at ``atlite/cutouts`` configuration. Both ``sarah`` and ``era5`` work." resource,,, -- method,--,"Must be 'pv'","A superordinate technology type." --- panel,--,"One of {'Csi', 'CdTe', 'KANENA'} as defined in `atlite `__","Specifies the solar panel technology and its characteristic attributes." +-- panel,--,"One of {'Csi', 'CdTe', 'KANENA'} as defined in `atlite `_. Can be a string or a dictionary with years as keys which denote the year another panel type becomes available.","Specifies the solar panel technology and its characteristic attributes." -- orientation,,, -- -- slope,°,"Realistically any angle in [0., 90.]","Specifies the tilt angle (or slope) of the solar panel. A slope of zero corresponds to the face of the panel aiming directly overhead. A positive tilt angle steers the panel towards the equator." -- -- azimuth,°,"Any angle in [0., 360.]","Specifies the `azimuth `_ orientation of the solar panel. South corresponds to 180.°."
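Since the ``turbine`` and ``panel`` entries may now be either a plain string or a dictionary keyed by the year from which a model becomes available, resolving the entry for a given planning year amounts to picking the latest key that is not in the future. A small sketch of this semantic (not the actual PyPSA-Eur implementation; the second model name is invented):

.. code:: python

    # Resolve a turbine/panel config entry that is either a string or a
    # dict {year: model} mapping availability years to model names.
    def resolve_model(entry, year):
        if isinstance(entry, str):
            return entry
        available = [y for y in entry if int(y) <= year]
        if not available:
            raise ValueError(f"no model defined for years up to {year}")
        return entry[max(available, key=int)]

    entry = {2010: "Vestas_V112_3MW", 2030: "Hypothetical_15MW_Turbine"}
    print(resolve_model(entry, 2025))  # -> Vestas_V112_3MW
    print(resolve_model(entry, 2035))  # -> Hypothetical_15MW_Turbine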
diff --git a/doc/configtables/solving.csv b/doc/configtables/solving.csv index 6eff10ae..4d245195 100644 --- a/doc/configtables/solving.csv +++ b/doc/configtables/solving.csv @@ -7,6 +7,7 @@ options,,, -- rolling_horizon,bool,"{'true','false'}","Whether to optimize the network in a rolling horizon manner, where the snapshot range is split into slices of size `horizon` which are solved consecutively." -- seed,--,int,Random seed for increased deterministic behaviour. -- custom_extra_functionality,--,str,Path to a Python file with custom extra functionality code to be injected into the solving rules of the workflow relative to ``rules`` directory. +-- io_api,string,"{'lp','mps','direct'}",Passed to linopy and determines the API used to communicate with the solver. With the ``'lp'`` and ``'mps'`` options linopy passes a file to the solver; with the ``'direct'`` option (only supported for HIGHS and Gurobi) linopy uses an in-memory python API resulting in better performance. -- track_iterations,bool,"{'true','false'}",Flag whether to store the intermediate branch capacities and objective function values are recorded for each iteration in ``network.lines['s_nom_opt_X']`` (where ``X`` labels the iteration) -- min_iterations,--,int,Minimum number of solving iterations in between which resistance and reactence (``x/r``) are updated for branches according to ``s_nom_opt`` of the previous run. -- max_iterations,--,int,Maximum number of solving iterations in between which resistance and reactence (``x/r``) are updated for branches according to ``s_nom_opt`` of the previous run. @@ -19,7 +20,7 @@ constraints ,,, -- BAU,bool,"{'true','false'}",Add a per-``carrier`` minimal overall capacity; i.e. at least ``40GW`` of ``OCGT`` in Europe; configured in ``electricity: BAU_mincapacities`` -- SAFE,bool,"{'true','false'}",Add a capacity reserve margin of a certain fraction above the peak demand to which renewable generators and storage do *not* contribute. Ignores network. solver,,, --- name,--,"One of {'gurobi', 'cplex', 'cbc', 'glpk', 'ipopt'}; potentially more possible",Solver to use for optimisation problems in the workflow; e.g. clustering and linear optimal power flow. +-- name,--,"One of {'gurobi', 'cplex', 'highs', 'cbc', 'glpk'}; potentially more possible",Solver to use for optimisation problems in the workflow; e.g. clustering and linear optimal power flow. -- options,--,Key listed under ``solver_options``.,Link to specific parameter settings. solver_options,,dict,Dictionaries with solver-specific parameter settings. mem,MB,int,Estimated maximum memory requirement for solving networks. diff --git a/doc/configtables/toplevel.csv b/doc/configtables/toplevel.csv index 67954389..418acbab 100644 --- a/doc/configtables/toplevel.csv +++ b/doc/configtables/toplevel.csv @@ -1,12 +1,12 @@ -,Unit,Values,Description -version,--,0.x.x,Version of PyPSA-Eur. Descriptive only. -tutorial,bool,"{true, false}",Switch to retrieve the tutorial data set instead of the full data set. -logging,,, --- level,--,"Any of {'INFO', 'WARNING', 'ERROR'}","Restrict console outputs to all infos, warning or errors only" --- format,--,,Custom format for log messages. See `LogRecord `_ attributes. -private,,, --- keys,,, --- -- entsoe_api,--,,Optionally specify the ENTSO-E API key. See the guidelines to get `ENTSO-E API key `_ -remote,,, --- ssh,--,,Optionally specify the SSH of a remote cluster to be synchronized. --- path,--,,Optionally specify the file path within the remote cluster to be synchronized. 
+,Unit,Values,Description +version,--,0.x.x,"Version of PyPSA-Eur. Descriptive only." +tutorial,bool,"{true, false}","Switch to retrieve the tutorial data set instead of the full data set." +logging,,, +-- level,--,"Any of {'INFO', 'WARNING', 'ERROR'}","Restrict console outputs to all infos, warning or errors only" +-- format,--,"","Custom format for log messages. See `LogRecord `_ attributes." +private,,, +-- keys,,, +-- -- entsoe_api,--,,Optionally specify the ENTSO-E API key. See the guidelines to get `ENTSO-E API key `_ +remote,,, +-- ssh,--,,Optionally specify the SSH of a remote cluster to be synchronized. +-- path,--,,Optionally specify the file path within the remote cluster to be synchronized. diff --git a/doc/configuration.rst b/doc/configuration.rst index c90a88ba..a6917eca 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -9,7 +9,7 @@ Configuration ########################################## -PyPSA-Eur has several configuration options which are documented in this section and are collected in a ``config/config.yaml`` file located in the root directory. Users should copy the provided default configuration (``config/config.default.yaml``) and amend their own modifications and assumptions in the user-specific configuration file (``config/config.yaml``); confer installation instructions at :ref:`defaultconfig`. +PyPSA-Eur has several configuration options which are documented in this section and are collected in a ``config/config.yaml`` file. This file defines deviations from the default configuration (``config/config.default.yaml``); confer installation instructions at :ref:`defaultconfig`. .. _toplevel_cf: @@ -31,7 +31,7 @@ Top-level configuration .. _run_cf: ``run`` -======= +============= It is common conduct to analyse energy system optimisation models for **multiple scenarios** for a variety of reasons, e.g. assessing their sensitivity towards changing the temporal and/or geographical resolution or investigating how @@ -90,9 +90,9 @@ For each wildcard, a **list of values** is provided. The rule ``results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc`` for **all combinations** of the provided wildcard values as defined by Python's `itertools.product(...) -`_ function +`__ function that snakemake's `expand(...) function -`_ +`__ uses. An exemplary dependency graph (starting from the simplification rules) then looks like this: @@ -129,7 +129,7 @@ An exemplary dependency graph (starting from the simplification rules) then look ``snapshots`` ============= -Specifies the temporal range to build an energy system model for as arguments to `pandas.date_range `_ +Specifies the temporal range to build an energy system model for as arguments to `pandas.date_range `__ .. literalinclude:: ../config/config.default.yaml :language: yaml @@ -197,7 +197,7 @@ Switches for some rules and optional features. ``atlite`` ========== -Define and specify the ``atlite.Cutout`` used for calculating renewable potentials and time-series. All options except for ``features`` are directly used as `cutout parameters `_. +Define and specify the ``atlite.Cutout`` used for calculating renewable potentials and time-series. All options except for ``features`` are directly used as `cutout parameters `__. .. literalinclude:: ../config/config.default.yaml :language: yaml @@ -443,7 +443,7 @@ overwrite the existing values. 
:widths: 22,7,22,33 :file: configtables/biomass.csv -The list of available biomass is given by the category in `ENSPRESO_BIOMASS `_, namely: +The list of available biomass is given by the category in `ENSPRESO_BIOMASS `__, namely: - Agricultural waste - Manure solid, liquid @@ -577,6 +577,21 @@ The list of available biomass is given by the category in `ENSPRESO_BIOMASS `_. To ask and answer general usage questions, join the `PyPSA mailing list `_. + +Contributing to the documentation +==================================== + +We strive to keep documentation useful and up to date for all PyPSA users. If you encounter an area where documentation is not available or insufficient, we very much welcome your contribution. Here is How To: + +#. Install the conda environment for documentation from the `PyPSA repository `_. + (Here is `how to install a conda environment `_.) +#. Make your changes in the corresponding .rst file under ``pypsa-eur/doc``. +#. Compile your changes by running the following command in your terminal in the ``doc`` folder: ``make html`` + You may encounter some warnings, but end up with a message such as ``build succeeded, XX warnings.``. html files to review your changes can then be found under ``doc/_build/html``. +#. Contribute your documentation in a pull request (`here is a guide `_). diff --git a/doc/costs.rst b/doc/costs.rst index 5ddbb360..2a946b00 100644 --- a/doc/costs.rst +++ b/doc/costs.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -8,8 +8,8 @@ Techno-Economic Assumptions ############################ The database of cost assumptions is retrieved from the repository -`PyPSA/technology-data `_ and then -saved to a file ``data/costs_{year}.csv``. The ``config/config.yaml`` provides options +`PyPSA/technology-data `__ and then +saved to a file ``resources/costs_{year}.csv``. The ``config/config.yaml`` provides options to choose a reference year and use a specific version of the repository. .. literalinclude:: ../config/config.default.yaml @@ -30,7 +30,7 @@ years compiled from various sources, namely for - carbon-dioxide intensity. Many values are taken from a database published by the Danish Energy Agency (`DEA -`_). +`__). The given overnight capital costs are annualised to net present costs @@ -50,7 +50,7 @@ Modifying Assumptions Some cost assumptions (e.g. marginal cost and capital cost) can be directly set in the ``config/config.yaml`` (cf. Section :ref:`costs_cf` in :ref:`config`). To change cost assumptions in more detail, make a copy of -``data/costs_{year}.csv`` and reference the new cost file in the ``Snakefile``: +``resources/costs_{year}.csv`` and reference the new cost file in the ``Snakefile``: .. literalinclude:: ../Snakefile :start-at: COSTS diff --git a/doc/foresight.rst b/doc/foresight.rst index f8ea6108..400f67ce 100644 --- a/doc/foresight.rst +++ b/doc/foresight.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2021-2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2021-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -166,13 +166,13 @@ Options The total carbon budget for the entire transition path can be indicated in the `sector_opts -`_ +`__ in ``config/config.yaml``. The carbon budget can be split among the ``planning_horizons`` following an exponential or beta decay. E.g. 
``'cb40ex0'`` splits a carbon budget equal to 40 Gt :math:`_{CO_2}` following an exponential decay whose initial linear growth rate r is zero. They can also follow some user-specified path, if defined `here -`_. +`__. The paper `Speed of technological transformations required in Europe to achieve different climate goals (2022) `__ defines CO_2 budgets corresponding to global temperature increases (1.5C – 2C) diff --git a/doc/img/intro-workflow.png b/doc/img/intro-workflow.png index 27b5a389..a273ab39 100644 Binary files a/doc/img/intro-workflow.png and b/doc/img/intro-workflow.png differ diff --git a/doc/img/workflow.png b/doc/img/workflow.png new file mode 100644 index 00000000..96ce7f37 Binary files /dev/null and b/doc/img/workflow.png differ diff --git a/doc/index.rst b/doc/index.rst index 7eaffa01..8a523ea0 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -81,16 +81,16 @@ them: .. note:: You can find showcases of the model's capabilities in the Supplementary Materials of the Joule paper `The potential role of a hydrogen network in Europe - `_, the Supplementary Materials of another `paper in Joule with a + `__, the Supplementary Materials of another `paper in Joule with a description of the industry sector - `_, or in `a 2021 presentation - at EMP-E `_. + `__, or in `a 2021 presentation + at EMP-E `__. The sector-coupled extension of PyPSA-Eur was initially described in the paper `Synergies of sector coupling and transmission reinforcement in a cost-optimised, highly renewable European energy system - `_ (2018) but it differs by being based on the + `__ (2018) but it differs by being based on the higher resolution electricity transmission model `PyPSA-Eur - `_ rather than a one-node-per-country model, + `__ rather than a one-node-per-country model, and by including biomass, industry, industrial feedstocks, aviation, shipping, better carbon management, carbon capture and usage/sequestration, and gas networks. @@ -99,8 +99,8 @@ About ===== PyPSA-Eur is designed to be imported into the open energy system modelling -framework `PyPSA `_ for which `documentation -`_ is available as well. However, since the +framework `PyPSA `__ for which `documentation +`__ is available as well. However, since the workflow is modular, it should be easy to adapt the data workflow to other modelling frameworks. @@ -114,28 +114,28 @@ of the individual parts. PyPSA-Eur is under active development and has several :doc:`limitations` which you should understand before using the model. The Github repository - `issues `_ collect known + `issues `__ collect known topics we are working on. Please feel free to help or make suggestions. This project is currently maintained by the `Department of Digital -Transformation in Energy Systems `_ at the -`Technische Universität Berlin `_. Previous versions were -developed within the `IAI `_ at the `Karlsruhe Institute -of Technology (KIT) `_ which was funded by -the `Helmholtz Association `_, and by the +Transformation in Energy Systems `__ at the +`Technische Universität Berlin `__. 
Previous versions were +developed within the `IAI `__ at the `Karlsruhe Institute +of Technology (KIT) `__ which was funded by +the `Helmholtz Association `__, and by the `Renewable Energy Group -`_ -at `FIAS `_ to carry out simulations for the -`CoNDyNet project `_, financed by the `German Federal -Ministry for Education and Research (BMBF) `_ +`__ +at `FIAS `__ to carry out simulations for the +`CoNDyNet project `__, financed by the `German Federal +Ministry for Education and Research (BMBF) `__ as part of the `Stromnetze Research Initiative -`_. +`__. Workflow ======== -.. image:: ../graphics/workflow.png +.. image:: img/workflow.png :class: full-width :align: center @@ -153,10 +153,10 @@ to reading this documentation. - Documentation of `PyPSA `__, the package for modelling energy systems which PyPSA-Eur uses under the hood. -- Course on `Energy Systems `_ given at - Technical University of Berlin by `Prof. Dr. Tom Brown `_. -- Course on `Data Science for Energy System Modelling `_ - given at Technical University of Berlin by `Dr. Fabian Neumann `_. +- Course on `Energy Systems `__ given at + Technical University of Berlin by `Prof. Dr. Tom Brown `__. +- Course on `Data Science for Energy System Modelling `__ + given at Technical University of Berlin by `Dr. Fabian Neumann `__. Citing PyPSA-Eur diff --git a/doc/installation.rst b/doc/installation.rst index 01fdafeb..45404e1f 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -15,7 +15,7 @@ directory in which the commands following the ``%`` should be entered. Clone the Repository ==================== -First of all, clone the `PyPSA-Eur repository `_ using the version control system ``git`` in the command line. +First of all, clone the `PyPSA-Eur repository `__ using the version control system ``git`` in the command line. .. code:: bash @@ -30,11 +30,11 @@ Install Python Dependencies =============================== PyPSA-Eur relies on a set of other Python packages to function. -We recommend using the package manager `mamba `_ to install them and manage your environments. -For instructions for your operating system follow the ``mamba`` `installation guide `_. +We recommend using the package manager `mamba `__ to install them and manage your environments. +For instructions for your operating system follow the ``mamba`` `installation guide `__. You can also use ``conda`` equivalently. -The package requirements are curated in the `envs/environment.yaml `_ file. +The package requirements are curated in the `envs/environment.yaml `__ file. The environment can be installed and activated using .. code:: bash @@ -59,16 +59,16 @@ Install a Solver PyPSA passes the PyPSA-Eur network model to an external solver for performing the optimisation. PyPSA is known to work with the free software -- `HiGHS `_ -- `Cbc `_ -- `GLPK `_ (`WinGLKP `_) -- `Ipopt `_ +- `HiGHS `__ +- `Cbc `__ +- `GLPK `__ (`WinGLKP `__) +- `SCIP `__ and the non-free, commercial software (for some of which free academic licenses are available) -- `Gurobi `_ -- `CPLEX `_ -- `FICO Xpress Solver `_ +- `Gurobi `__ +- `CPLEX `__ +- `FICO Xpress Solver `__ For installation instructions of these solvers for your operating system, follow the links above. 
Commercial solvers such as Gurobi and CPLEX currently significantly outperform open-source solvers for large-scale problems, and @@ -76,41 +76,19 @@ it might be the case that you can only retrieve solutions by using a commercial Nevertheless, you can still use open-source solvers for smaller problems. .. seealso:: - `Instructions how to install a solver in the documentation of PyPSA `_ + `Instructions how to install a solver in the documentation of PyPSA `__ .. note:: - The rules :mod:`cluster_network` and :mod:`simplify_network` solve a quadratic optimisation problem for clustering. - The open-source solvers Cbc and GlPK cannot handle this. A fallback to Ipopt is implemented in this case, but requires - it to be installed. For an open-source solver setup install in your ``conda`` environment on OSX/Linux - - .. code:: bash - - mamba activate pypsa-eur - mamba install -c conda-forge ipopt coincbc - - and on Windows - - .. code:: bash - - mamba activate pypsa-eur - mamba install -c conda-forge ipopt glpk - - For HiGHS, run - - .. code:: bash - - mamba activate pypsa-eur - mamba install -c conda-forge ipopt - pip install highspy - - For Gurobi, run + The rules :mod:`cluster_network` and :mod:`simplify_network` solve a mixed-integer quadratic optimisation problem for clustering. + The open-source solvers HiGHS, Cbc and GLPK cannot handle this. A fallback to SCIP is implemented in this case, which is included in the standard environment specifications. + No additional installation is required for an open-source solver setup. To install the default solver Gurobi, run .. code:: bash mamba activate pypsa-eur mamba install -c gurobi gurobi - Additionally, you need to setup your `Gurobi license `_. + Additionally, you need to setup your `Gurobi license `__. .. _defaultconfig: Handling Configuration Files ============================ -PyPSA-Eur has several configuration options that must be specified in a -``config/config.yaml`` file located in the root directory. An example configuration -``config/config.default.yaml`` is maintained in the repository, which will be used to -automatically create your customisable ``config/config.yaml`` on first use. More -details on the configuration options are in :ref:`config`. +PyPSA-Eur has several configuration options that users can specify in a +``config/config.yaml`` file. The default configuration +``config/config.default.yaml`` is maintained in the repository. More details on +the configuration options are in :ref:`config`. You can also use ``snakemake`` to specify another file, e.g. ``config/config.mymodifications.yaml``, to update the settings of the ``config/config.yaml``. .. code:: bash .../pypsa-eur % snakemake -call --configfile config/config.mymodifications.yaml - -.. warning:: - Users are advised to regularly check their own ``config/config.yaml`` against changes - in the ``config/config.default.yaml`` when pulling a new version from the remote - repository. diff --git a/doc/introduction.rst index 413db9d1..7cfa0e43 100644 --- a/doc/introduction.rst +++ b/doc/introduction.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -14,7 +14,7 @@ .. note:: - Find the introductory slides `here `_.
+ Find the introductory slides `here `__. .. warning:: The video only introduces the electricity-only part of PyPSA-Eur. @@ -23,7 +23,7 @@ Workflow ========= The generation of the model is controlled by the open workflow management system -`Snakemake `_. In a nutshell, the ``Snakefile`` +`Snakemake `__. In a nutshell, the ``Snakefile`` declares for each script in the ``scripts`` directory a rule which describes which files the scripts consume and produce (their corresponding input and output files). The ``snakemake`` tool then runs the scripts in the correct order @@ -54,9 +54,9 @@ preceding rules which another rule takes as input data. For the use of ``snakemake``, it makes sense to familiarize yourself quickly with the `basic tutorial -`_ and then +`__ and then read carefully through the documentation of the `command line interface -`_, noting the +`__, noting the arguments ``-j``, ``-c``, ``-f``, ``-F``, ``-n``, ``-r``, ``--dag`` and ``-t`` in particular. @@ -64,17 +64,17 @@ Scenarios, Configuration and Modification ========================================= It is easy to run PyPSA-Eur for multiple scenarios using the `wildcards feature -`_ +`__ of ``snakemake``. Wildcards allow to generalise a rule to produce all files that follow a `regular expression -`_ pattern, which defines +`__ pattern, which defines a particular scenario. One can think of a wildcard as a parameter that shows up in the input/output file names and thereby determines which rules to run, what data to retrieve and what files to produce. Details are explained in :ref:`wildcards` and :ref:`scenario`. The model also has several further configuration options collected in the -``config/config.yaml`` file located in the root directory, which that are not part of +``config/config.default.yaml`` file located in the root directory, which that are not part of the scenarios. Options are explained in :ref:`config`. Folder Structure @@ -97,5 +97,5 @@ System Requirements Building the model with the scripts in this repository runs on a regular computer. But optimising for investment and operation decisions across many scenarios requires a strong interior-point solver -like `Gurobi `_ or `CPLEX `_ with more memory. +like `Gurobi `__ or `CPLEX `__ with more memory. Open-source solvers like `HiGHS ` can also be used for smaller problems. diff --git a/doc/licenses.rst b/doc/licenses.rst index beb6f5b8..af531660 100644 --- a/doc/licenses.rst +++ b/doc/licenses.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2023-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -10,12 +10,12 @@ Licenses PyPSA-Eur is released under multiple licenses: -* All original source code is licensed as free software under `MIT `_. -* The documentation is licensed under `CC-BY-4.0 `_. -* Configuration files are mostly licensed under `CC0-1.0 `_. -* Data files are licensed under `CC-BY-4.0 `_. +* All original source code is licensed as free software under `MIT `__. +* The documentation is licensed under `CC-BY-4.0 `__. +* Configuration files are mostly licensed under `CC0-1.0 `__. +* Data files are licensed under `CC-BY-4.0 `__. -See the individual files and the `dep5 <.reuse/dep5>`_ file for license details. +See the individual files and the `dep5 <.reuse/dep5>`__ file for license details. Additionally, different licenses and terms of use also apply to the various input data for both electricity-only and sector-coupled modelling exercises, @@ -26,7 +26,7 @@ Electricity Systems Databundle .. 
note:: More details are included in `the description of the - data bundles on zenodo `_. + data bundles on zenodo `__. .. csv-table:: :header-rows: 1 diff --git a/doc/limitations.rst b/doc/limitations.rst index a67fad0c..aeec5da6 100644 --- a/doc/limitations.rst +++ b/doc/limitations.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -19,7 +19,7 @@ improving the approximations. This list of limitations is incomplete and will be added to over time. .. seealso:: - See also the `GitHub repository issues `_. + See also the `GitHub repository issues `__. - **Electricity transmission network topology:** The grid data is based on a map of the ENTSO-E area that is known diff --git a/doc/make.bat b/doc/make.bat index 3037f934..d64ffdc8 100644 --- a/doc/make.bat +++ b/doc/make.bat @@ -1,4 +1,4 @@ -REM SPDX-FileCopyrightText: 2019-2023 The PyPSA-Eur Authors +REM SPDX-FileCopyrightText: 2019-2024 The PyPSA-Eur Authors REM SPDX-License-Identifier: MIT @ECHO OFF diff --git a/doc/plotting.rst b/doc/plotting.rst index 895eab3b..a5229d8d 100644 --- a/doc/plotting.rst +++ b/doc/plotting.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -22,7 +22,22 @@ Rule ``plot_summary`` .. _map_plot: -Rule ``plot_network`` -======================== +Rule ``plot_power_network`` +=========================== -.. automodule:: plot_network +.. automodule:: plot_power_network + +Rule ``plot_power_network_perfect`` +=================================== + +.. automodule:: plot_power_network_perfect + +Rule ``plot_hydrogen_network`` +============================== + +.. automodule:: plot_hydrogen_network + +Rule ``plot_gas_network`` +========================= + +.. automodule:: plot_gas_network diff --git a/doc/preparation.rst b/doc/preparation.rst index d8f76839..feb10c60 100644 --- a/doc/preparation.rst +++ b/doc/preparation.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -15,18 +15,17 @@ Instead we provide separate data bundles which can be obtained using the ``retrieve*`` rules (:ref:`data`). Having downloaded the necessary data, -- :mod:`build_shapes` generates GeoJSON files with shapes of the countries, exclusive economic zones and `NUTS3 `_ areas. -- :mod:`build_cutout` prepares smaller weather data portions from `ERA5 `_ for cutout ``europe-2013-era5`` and SARAH for cutout ``europe-2013-sarah``. +- :mod:`build_shapes` generates GeoJSON files with shapes of the countries, exclusive economic zones and `NUTS3 `__ areas. +- :mod:`build_cutout` prepares smaller weather data portions from `ERA5 `__ for cutout ``europe-2013-era5`` and SARAH for cutout ``europe-2013-sarah``. With these and the externally extracted ENTSO-E online map topology (``data/entsoegridkit``), it can build a base PyPSA network with the following rules: -- :mod:`base_network` builds and stores the base network with all buses, HVAC lines and HVDC links, while -- :mod:`build_bus_regions` determines `Voronoi cells `_ for all substations. +- :mod:`base_network` builds and stores the base network with all buses, HVAC lines and HVDC links, and determines `Voronoi cells `__ for all substations. 
Then the process continues by calculating conventional power plant capacities, potentials, and per-unit availability time series for variable renewable energy carriers and hydro power plants with the following rules: -- :mod:`build_powerplants` for today's thermal power plant capacities using `powerplantmatching `_ allocating these to the closest substation for each powerplant, +- :mod:`build_powerplants` for today's thermal power plant capacities using `powerplantmatching `__ allocating these to the closest substation for each powerplant, - :mod:`build_natura_raster` for rasterising NATURA2000 natural protection areas, - :mod:`build_ship_raster` for building shipping traffic density, - :mod:`build_renewable_profiles` for the hourly capacity factors and installation potentials constrained by land-use in each substation's Voronoi cell for PV, onshore and offshore wind, and @@ -35,13 +34,6 @@ Then the process continues by calculating conventional power plant capacities, p The central rule :mod:`add_electricity` then ties all the different data inputs together into a detailed PyPSA network stored in ``networks/elec.nc``. -.. _busregions: - -Rule ``build_bus_regions`` -============================= - -.. automodule:: build_bus_regions - .. _cutout: Rule ``build_cutout`` diff --git a/doc/publications.bib b/doc/publications.bib index 4be6676a..5e1ee364 100644 --- a/doc/publications.bib +++ b/doc/publications.bib @@ -1,5 +1,5 @@ @Comment{ -SPDX-FileCopyrightText: 2023 The PyPSA-Eur Authors +SPDX-FileCopyrightText: 2023-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC0-1.0 } diff --git a/doc/publications.rst b/doc/publications.rst index c824873e..f6d7986b 100644 --- a/doc/publications.rst +++ b/doc/publications.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2023-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 diff --git a/doc/release_notes.rst b/doc/release_notes.rst index 8d2ea10c..b73b56f8 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -9,10 +9,462 @@ Release Notes Upcoming Release ================ +* Group existing capacities to the earlier grouping_year for consistency with optimized capacities. -* Updated Global Energy Monitor LNG terminal data to March 2023 version. +* bugfix: installed heating capacities were 5% lower than existing heating capacities -* For industry distribution, use EPRTR as fallback if ETS data is not available. +* Include gas and oil fields and saline aquifers in estimation of CO2 sequestration potential. + +* bugfix: convert Strings to pathlib.Path objects as input to ConfigSettings + +* Allow the use of more solvers in clustering (Xpress, COPT, Gurobi, CPLEX, SCIP, MOSEK). + +* Enhanced support for choosing different weather years + (https://github.com/PyPSA/pypsa-eur/pull/204): + + - Processed energy statistics from eurostat (1990-2021) and IDEES (2000-2015) + are now initially stored for all available years and filtered by the year + given in ``energy: energy_totals_year:``. + + - Added option to supplement electricity load data with synthetic time series + for years not contained in OPSD (from https://zenodo.org/records/10820928, + ``load: supplement_synthetic:``). 
+ + - The total annual heat demand for years not contained in the energy + statistics by eurostat (1990-2021) or IDEES (2000-2015) is scaled based on + a regression between the total number of heating degree days and the total + annual heat demand between the years 2007-2021, assuming a similar building + stock. + + - Added option to scale annual hydro-electricity generation data for years not + contained in the EIA data (1980-2021) based on a regression between annual + generation and total runoff per country for the years 1980-2021 + (``renewable: hydro: eia_approximate_missing:``). + + - Added option to normalize annual hydro generation data by the associated + installed capacity reported by EIA (1980-2021) in order to eliminate changes + in generation due to newly built capacity (``renewable: hydro: + eia_approximate_missing: eia_correct_by_capacity:``). + + - Added option to make hydro generation data independent of the weather year + (``renewable: hydro: eia_approximate_missing: eia_norm_year:``). + + - Added option to drop leap days (``enable: drop_leap_day:``). + + - Added option to make electric load data independent of the weather year + (``load: fixed_year:``). + + - Include a time series of the number of Swiss passenger vehicles from the `Swiss + Federal Statistical Office + `__. + + - Updated hydro-electricity generation and capacity data from EIA. + + - The easiest way to sweep over multiple weather years is to use the new + scenario management. An example of the necessary `create_scenarios.py` + script can be found in this `Github gist + `__. + +* Removed rule ``copy_config``. Instead, a config file is created for each + network output of the ``solve_*`` rules, with the same content as ``n.meta``. + +* Added new HVDC transmission projects from `TYNDP 2024 draft projects + `__. + +* Upgrade to Snakemake v8.5+. This version is the new minimum version required. + To upgrade an existing environment, run ``conda install -c bioconda + snakemake-minimal">=8.5"`` and ``pip install snakemake-storage-plugin-http`` + (https://github.com/PyPSA/pypsa-eur/pull/825). + +* Corrected a bug leading to power plants operating after their DateOut + (https://github.com/PyPSA/pypsa-eur/pull/958). Added additional grouping years + before 1980. + +* Add decommissioning of existing renewables assets in `add_existing_baseyear`. + +* The Eurostat data was updated to the 2023 version in :mod:`build_energy_totals`. + +* The latest `Swiss energy totals + `__ + have been updated to the 2023 version. + +* The JRC-IDEES data is only available until 2015. For energy totals years (``energy: energy_totals_year``) after + 2015, the data is scaled using the ratio of Eurostat data reported for the energy + totals year and 2015. + +* The default energy totals year (``energy: energy_totals_year``) was updated to 2019. + +* Upgrade default techno-economic assumptions to ``technology-data`` v0.8.1. + +* Add possibility to download cost data from a custom fork of ``technology-data``. + +* Linearly interpolate missing investment periods in year-dependent + configuration options. + +* Added new scenario management that supports the simultaneous execution of + multiple scenarios with a single ``snakemake`` call. For this purpose, a + ``scenarios.yaml`` file is introduced which contains customizable scenario + names with configuration overrides. To enable it, set ``run: scenarios: + enable: true`` and define the list of scenario names to run under ``run: name:`` in + the configuration file. An illustrative YAML sketch of this setup is given further below.
The latter must be a subset of toplevel keys in the + scenario file. + + - To get started, a scenarios template file ``config/scenarios.template.yaml`` + is included in the repository, which is copied to ``config/scenarios.yaml`` + on first use. + + - The scenario file can be changed via ``run: scenarios: file:``. + + - If scenario management is activated with ``run: scenarios: enable: true``, a + new wildcard ``{run}`` is introduced. This means that the configuration + settings may depend on the new ``{run}`` wildcard. Therefore, a new + ``config_provider()`` function is used in the ``Snakefile`` and ``.smk`` + files, which takes wildcard values into account. The calls to the ``config`` + object have been reduced in ``.smk`` files since there is no awareness of + wildcard values outside rule definitions. + + - The scenario files can also be programmatically created using the template + script ``config/create_scenarios.py``. This script can be run with + ``snakemake -j1 create_scenarios`` and creates the scenarios file referenced + under ``run: scenarios: file:``. + + - The setting ``run: name: all`` will run all scenarios in + ``config/scenarios.yaml``. Otherwise, it will run those passed as list in + ``run: name:`` as long as ``run: scenarios: enable: true``. + + - The setting ``run: shared_resources:`` indicates via a boolean whether the + resources should be encapsulated by the ``run: name:``. The special setting + ``run: shared_resources: base`` shares resources until ``add_electricity`` + that do not contain wildcards other than ``{"technology", "year", + "scope"}``. + + - Added new configuration options for all ``{opts}`` and ``{sector_opts}`` + wildcard values to create a unique configuration file (``config.yaml``) per + PyPSA network file. This is done with the help of a new function + ``update_config_from_wildcards()`` which parses configuration settings from + wildcards and updates the ``snakemake.config`` object. These updated + configuration settings are used in the scripts rather than directly parsed + values from ``snakemake.wildcards``. + + - The cost data was moved from ``data/costs_{year}.csv`` to + ``resources/costs_{year}.csv`` since it depends on configuration settings. + The ``retrieve_cost_data`` rule was changed to calling a Python script. + + - Moved time clustering settings to ``clustering: temporal:`` from + ``snapshots:`` so that the latter is only used to define the + ``pandas.DatetimeIndex`` which simplifies the scenario management. + + - Collection rules get a new wildcard ``run=config["run"]["name"]`` so they + can collect outputs across different scenarios. + + - It is further possible to encapsulate your scenarios in a directory using + the setting ``run: prefix:``. + + - **Warning:** One caveat remains for the scenario management with myopic or + perfect foresight pathway optimisation. The first investment period must be + shared across all scenarios. The reason is that the ``wildcard_constraints`` + defined for the rule ``add_existing_baseyear`` do not accept wildcard-aware + input functions (cf. + `https://github.com/snakemake/snakemake/issues/2703`_). + +* The outputs of the rule ``retrieve_gas_infrastructure_data`` no longer + marked as ``protected()`` as the download size is small. + +* Bugfix: allow modelling sector-coupled landlocked regions. (Fixed handling of offshore wind.) + +* Bugfix: approximation of hydro power generation if Portugal or Spain are not included works now. 
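As a rough illustration of the scenario management described above, a minimal setup could look like the following sketch. The key names (``run: name:``, ``run: scenarios: enable:``, ``run: scenarios: file:``) follow the release note; the scenario names, the ``electricity: co2limit:`` override and all values are assumptions and may differ from the shipped ``config/config.default.yaml`` and ``config/scenarios.template.yaml``.

.. code:: yaml

   # config/config.yaml (sketch)
   run:
     name:
       - low-co2    # must be a subset of the top-level keys in the scenario file
       - high-co2   # or "all" to run every scenario in the file
     scenarios:
       enable: true
       file: config/scenarios.yaml

   # config/scenarios.yaml (sketch): top-level keys are scenario names,
   # their contents are configuration overrides
   low-co2:
     electricity:
       co2limit: 1.0e+8
   high-co2:
     electricity:
       co2limit: 2.0e+8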
+ +* Bugfix: copy_timeslice no longer copies if the country is not present in the load data. + +* Adapt the disabling of transmission expansion in myopic foresight optimisations, when the limit is already reached, to also handle cost limits. + +* Fix duplicated years and grouping years reference in `add_land_use_constraint_m`. + +* Fix type error with `m` option in `cluster_network`. + +* Fix error with `symbol` of `buses` in `simplify_network`. + +* Fix index of existing capacities in `add_power_capacities_installed_before_baseyear` with `m` option. + +* Fix custom busmap read in `cluster_network`. + +* Data on existing renewable capacities is now consistently taken from powerplantmatching (instead of being retrieved separately); the dataset has also been updated to include 2023 values. + +* Added shapes to the .nc file for different stages of the network object in `base_network`, `simplify_network`, and `cluster_network`; the `build_bus_regions` rule is now integrated into the `base_network` rule. + +* Fix p_nom_min of renewables generators for myopic approach and add check of existing capacities in `add_land_use_constraint_m`. + +* Add documentation section on how to contribute documentation. + + +PyPSA-Eur 0.10.0 (19th February 2024) +===================================== + +**New Features** + +* Improved representation of industry transition pathways. A new script was + added to interpolate industry sector ratios from today's status quo to future + systems (i.e. specific emissions and demands for energy and feedstocks). For + each country we gradually switch industry processes from today's specific + energy carrier usage per ton material output to the best-in-class energy + consumption of tomorrow. The ratio of + today to tomorrow's energy consumption is set with the ``industry: + sector_ratios_fraction_future:`` parameter + (https://github.com/PyPSA/pypsa-eur/pull/929). + +* Add new default to overdimension heating in individual buildings. This allows + them to cover heat demand peaks e.g. 10% higher than those in the data. The + disadvantage of manipulating the costs is that the capacity is then not quite + right. This way at least the costs are right + (https://github.com/PyPSA/pypsa-eur/pull/918). + +* Allow industrial coal demand to be regional so its emissions can be included + in regional emission limits (https://github.com/PyPSA/pypsa-eur/pull/923). + +* Add option to set a default heating lifetime for existing heating + (``existing_capacities: default_heating_lifetime:``) + (https://github.com/PyPSA/pypsa-eur/pull/918). + +* Added option to specify turbine and solar panel models for specific years as a + dictionary (e.g. ``renewable: onwind: resource: turbine:``). The years will be + interpreted as the years from when the corresponding turbine model substitutes + the previous model for new installations. This will only have an effect on + workflows with foresight ``"myopic"`` and still needs to be added for the foresight + option ``"perfect"`` (https://github.com/PyPSA/pypsa-eur/pull/912). An illustrative YAML sketch is given further below. + +* New configuration option ``everywhere_powerplants`` to build conventional + powerplants everywhere, irrespective of existing power plant locations, in the + network (https://github.com/PyPSA/pypsa-eur/pull/850). + +* Add the option to customise the map projection in the plotting config under + ``plotting: projection: name`` (https://github.com/PyPSA/pypsa-eur/pull/898). + +* Add support for the linopy ``io_api`` option under ``solving: options: + io_api:``.
Set to ``"direct"`` to increase model reading and writing + performance for the highs and gurobi solvers on slow file systems + (https://github.com/PyPSA/pypsa-eur/pull/892). + +* It is now possible to determine the directory for shared resources by setting + `shared_resources` to a string (https://github.com/PyPSA/pypsa-eur/pull/906). + +* Improve ``mock_snakemake()`` for usage in Snakemake modules + (https://github.com/PyPSA/pypsa-eur/pull/869). + +**Breaking Changes** + +* Remove long-deprecated function ``attach_extendable_generators`` in + :mod:`add_electricity`. + +* Remove option for wave energy as technology data is not maintained. + +* The order of buses (bus0, bus1, ...) for DAC components has changed to meet + the convention of the other components. Therefore, `bus0` refers to the + electricity bus (input), `bus1` to the heat bus (input), 'bus2' to the CO2 + atmosphere bus (input), and `bus3` to the CO2 storage bus (output) + (https://github.com/PyPSA/pypsa-eur/pull/901). + +**Changes** + +* Upgrade default techno-economic assumptions to ``technology-data`` v0.8.0. + +* Update hydrogen pipeline losses to latest data from Danish Energy Agency + (https://github.com/PyPSA/pypsa-eur/pull/933). + +* Move building of daily heat profile to its own rule + :mod:`build_hourly_heat_demand` from :mod:`prepare_sector_network` + (https://github.com/PyPSA/pypsa-eur/pull/884). + +* In :mod:`build_energy_totals`, district heating shares are now reported in a + separate file (https://github.com/PyPSA/pypsa-eur/pull/884). + +* Move calculation of district heating share to its own rule + :mod:`build_district_heat_share` + (https://github.com/PyPSA/pypsa-eur/pull/884). + +* Move building of distribution of existing heating to own rule + :mod:`build_existing_heating_distribution`. This makes the distribution of + existing heating to urban/rural, residential/services and spatially more + transparent (https://github.com/PyPSA/pypsa-eur/pull/884). + +* Default settings for recycling rates and primary product shares of high-value + chemicals have been set in accordance with the values used in `Neumann et al. + (2023) `__ linearly interpolated + between 2020 and 2050. The recycling rates are based on data from `Agora + Energiewende (2021) + `__. + +* Air-sourced heat pumps can now also be built in rural areas. Previously, only + ground-sourced heat pumps were considered for this category + (https://github.com/PyPSA/pypsa-eur/pull/890). + +* The default configuration ``config/config.default.yaml`` is now automatically + used as a base configuration file. The file ``config/config.yaml`` can now be + used to only define deviations from the default configuration. The + ``config/config.default.yaml`` is still copied into ``config/config.yaml`` on + first usage (https://github.com/PyPSA/pypsa-eur/pull/925). + +* Regions are assigned to all buses with unique coordinates in the network with + a preference given to substations. Previously, only substations had assigned + regions, but this could lead to issues when a high spatial resolution was + applied (https://github.com/PyPSA/pypsa-eur/pull/922). + +* Define global constraint for CO2 emissions on the final state of charge of the + CO2 atmosphere store. This gives a more sparse constraint that should improve + the performance of the solving process + (https://github.com/PyPSA/pypsa-eur/pull/862). + +* Switched the energy totals year from 2011 to 2013 to comply with the assumed + default weather year (https://github.com/PyPSA/pypsa-eur/pull/934). 
+ +* Cluster residential and services heat buses by default. Can be disabled with + ``cluster_heat_buses: false`` (https://github.com/PyPSA/pypsa-eur/pull/877). + +* The rule ``plot_network`` has been split into separate rules for plotting + electricity, hydrogen and gas networks + (https://github.com/PyPSA/pypsa-eur/pull/900). + +* To determine the optimal topology to meet the number of clusters, the workflow + previously used pyomo in combination with ``ipopt`` or ``gurobi``. This dependency has + been replaced by using ``linopy`` in combination with ``scipopt`` or + ``gurobi``. The environment file has been updated accordingly + (https://github.com/PyPSA/pypsa-eur/pull/903). + +* The ``highs`` solver was added to the default environment file. + +* New default solver settings for the COPT solver + (https://github.com/PyPSA/pypsa-eur/pull/882). + +* Data retrieval rules now use their own minimal conda environment. This can + avoid unnecessary reruns of the workflow + (https://github.com/PyPSA/pypsa-eur/pull/888). + +* Merged two OPSD time series data versions into one so that the option ``load: + power_statistics:`` becomes superfluous and was hence removed + (https://github.com/PyPSA/pypsa-eur/pull/924). + +* The filtering of power plants in the ``config.default.yaml`` has been updated + regarding phased-out power plants in 2023. + +* Include all countries in the ammonia production resource. This is so that the full + EU28 ammonia demand can be correctly subtracted in the rule + :mod:`build_industry_sector_ratios` + (https://github.com/PyPSA/pypsa-eur/pull/931). + +* Correctly source the existing heating technologies for buildings since the + source URL has changed. It represents the year 2012 and is only for buildings, + not district heating (https://github.com/PyPSA/pypsa-eur/pull/918). + +* Add warning when the BEV availability weekly profile has negative values in + `build_transport_demand` (https://github.com/PyPSA/pypsa-eur/pull/858). + +* Time series clipping for very small values was added for Links + (https://github.com/PyPSA/pypsa-eur/pull/870). + +* A ``test.sh`` script was added to the repository to run the tests locally. + +* The CI now tests additionally against ``master`` versions of PyPSA, atlite and + powerplantmatching (https://github.com/PyPSA/pypsa-eur/pull/904). + +* A function ``sanitize_locations()`` was added to improve the coverage of the + ``location`` attribute of network components. + +**Bugs and Compatibility** + +* Bugfix: Do not reduce the district heat share when building population-weighted + energy statistics. Previously the district heating share was being multiplied + by the population weighting, reducing the DH share with multiple nodes + (https://github.com/PyPSA/pypsa-eur/pull/884). + +* Bugfix: The coal emissions for industry were not properly tracked + (https://github.com/PyPSA/pypsa-eur/pull/923). + +* Bugfix: Correct units of subtracted chlorine and methanol demand in + :mod:`build_industry_sector_ratios` + (https://github.com/PyPSA/pypsa-eur/pull/930). + +* Various minor bugfixes to the perfect foresight workflow, though perfect + foresight must still be considered experimental + (https://github.com/PyPSA/pypsa-eur/pull/910). + +* Fix plotting of retrofitted hydrogen pipelines with myopic pathway + optimisation (https://github.com/PyPSA/pypsa-eur/pull/937). + +* Bugfix: Correct technology keys for the electricity production plotting to + work out of the box.
+ +* Bugfix: Ensure that the code block which corrects the Norwegian heat demand is entered + (https://github.com/PyPSA/pypsa-eur/pull/870). + +* Stacktrace of uncaught exceptions should now be correctly included inside log + files (via `configure_logging(..)`) + (https://github.com/PyPSA/pypsa-eur/pull/875). + +* Bugfix: Correctly read out the number of solver threads from the configuration file + (https://github.com/PyPSA/pypsa-eur/pull/889). + +* Made copying the default config file compatible with the snakemake module + (https://github.com/PyPSA/pypsa-eur/pull/894). + +* Compatibility with ``pandas=2.2`` + (https://github.com/PyPSA/pypsa-eur/pull/861). + +Special thanks for this release to Koen van Greevenbroek (`@koen-vg +`__) for various new features, bugfixes and taking +care of deprecations. + + +PyPSA-Eur 0.9.0 (5th January 2024) +================================== + +**New Features** + +* Add option to specify losses for bidirectional links, e.g. pipelines or HVDC + links, in the configuration file under ``sector: transmission_efficiency:``. Users + can specify static or length-dependent values as well as a length-dependent + electricity demand for compression, which is implemented as a multi-link to + the local electricity buses. The bidirectional links will then be split into + two unidirectional links with linked capacities (https://github.com/PyPSA/pypsa-eur/pull/739). An illustrative YAML sketch is given a few items further below. + +* Merged option to extend the geographical scope to Ukraine and Moldova. These + countries are excluded by default and are currently limited to the power-sector-only + parts of the workflow. A special config file + `config/config.entsoe-all.yaml` was added as an example to run the workflow + with all ENTSO-E member countries (including observer members like Ukraine and + Moldova). Moldova can currently only be included in conjunction with Ukraine + due to the absence of demand data. The Crimean power system is manually + reconnected to the main Ukrainian grid with the configuration option + `reconnect_crimea` (https://github.com/PyPSA/pypsa-eur/pull/321). + +* New experimental support for multi-decade optimisation with perfect foresight + (``foresight: perfect``). This includes maximum growth rates for carriers, global carbon + budget constraints and emission constraints for particular investment periods. + +* Add option to reference an additional source file where users can specify + custom ``extra_functionality`` constraints in the configuration file. The + default setting points to an empty placeholder file at + ``data/custom_extra_functionality.py`` (https://github.com/PyPSA/pypsa-eur/pull/824). + +* Add locations, capacities and costs of existing gas storage using Global + Energy Monitor's `Europe Gas Tracker + `__ + (https://github.com/PyPSA/pypsa-eur/pull/835). + +* Add option to use the `LUISA Base Map + `__ 50m land + coverage dataset for land eligibility analysis in + :mod:`build_renewable_profiles`. Settings are analogous to the CORINE dataset + but with the key ``luisa:`` in the configuration file. To leverage the + dataset's full advantages, set the excluder resolution to 50m + (``excluder_resolution: 50``). For land category codes, see `Annex 1 of the + technical documentation + `__ + (https://github.com/PyPSA/pypsa-eur/pull/842). + +* Add option to capture CO2 contained in biogas when upgrading (``sector: + biogas_to_gas_cc``) (https://github.com/PyPSA/pypsa-eur/pull/615). + +* If load shedding is activated, it is now applied to all carriers, not only + electricity (https://github.com/PyPSA/pypsa-eur/pull/784).
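As an illustration of the ``sector: transmission_efficiency:`` option introduced at the top of this feature list, a minimal sketch might look as follows. The carrier keys and the numbers are assumptions for illustration, not the shipped defaults.

.. code:: yaml

   sector:
     transmission_efficiency:
       DC:
         efficiency_static: 0.98        # static losses per link
         efficiency_per_1000km: 0.977   # length-dependent losses
       H2 pipeline:
         efficiency_per_1000km: 1.0
         compression_per_1000km: 0.018  # electricity demand for compression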
* Add option for heat vents in district heating (``sector: central_heat_vent:``). The combination of must-run conditions for some @@ -248,7 +700,7 @@ PyPSA-Eur 0.8.1 (27th July 2023) * Add option to consider dynamic line rating based on wind speeds and temperature according to `Glaum and Hofmann (2022) - `_. See configuration section ``lines: + `__. See configuration section ``lines: dynamic_line_rating:`` for more details. (https://github.com/PyPSA/pypsa-eur/pull/675) * Add option to include a piecewise linear approximation of transmission losses, @@ -267,7 +719,7 @@ PyPSA-Eur 0.8.1 (27th July 2023) * A ``param:`` section in the snakemake rule definitions was added to track changed settings in ``config.yaml``. The goal is to automatically re-execute rules where parameters have changed. See `Non-file parameters for rules - `_ + `__ in the snakemake documentation. (https://github.com/PyPSA/pypsa-eur/pull/663) * A new function named ``sanitize_carrier`` ensures that all unique carrier @@ -376,7 +828,7 @@ PyPSA-Eur 0.8.0 (18th March 2023) * The :mod:`solve_network` script now uses the ``linopy`` backend of PyPSA and is applied for both electricity-only and sector-coupled models. This requires an adjustment of custom ``extra_functionality``. - See the `migration guide `_ in the PyPSA documentation. + See the `migration guide `__ in the PyPSA documentation. * The configuration file ``config.default.yaml`` now also includes settings for sector-coupled models, which will be ignored when the user runs @@ -481,7 +933,7 @@ PyPSA-Eur 0.7.0 (16th February 2023) inclusive:`` to address the upstream deprecation with ``pandas=1.4``. The previous setting ``None`` is no longer supported and replaced by ``both``, see the `pandas documentation - `_. + `__. Minimum version is now ``pandas>=1.4``. * The configuration setting ``summary_dir`` was removed. @@ -535,7 +987,7 @@ PyPSA-Eur 0.6.1 (20th September 2022) * Individual commits are now tested against pre-commit hooks. This includes black style formatting, sorting of package imports, Snakefile formatting and others. Installation instructions can for the pre-commit can be found `here - `_. + `__. * Pre-commit CI is now part of the repository's CI. @@ -558,7 +1010,7 @@ PyPSA-Eur 0.6.0 (10th September 2022) * Functionality to consider shipping routes when calculating the available area for offshore technologies were added. Data for the shipping density comes from the `Global Shipping Traffic Density dataset - `_. + `__. * When transforming all transmission lines to a unified voltage level of 380kV, the workflow now preserves the transmission capacity rather than electrical @@ -595,7 +1047,7 @@ PyPSA-Eur 0.5.0 (27th July 2022) ``from_opsd`` to ``True``. * Add operational reserve margin constraint analogous to `GenX implementation - `_. Can be activated + `__. Can be activated with config setting ``electricity: operational_reserve:``. * Implement country-specific Energy Availability Factors (EAFs) for nuclear @@ -621,12 +1073,12 @@ PyPSA-Eur 0.5.0 (27th July 2022) * Techno-economic parameters of technologies (e.g. costs and efficiencies) will now be retrieved from a separate repository `PyPSA/technology-data - `_ that collects assumptions from a + `__ that collects assumptions from a variety of sources. It is activated by default with ``enable: retrieve_cost_data: true`` and controlled with ``costs: year:`` and ``costs: version:``. The location of this data changed from ``data/costs.csv`` to ``resources/costs.csv`` [`#184 - `_]. + `__]. 
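A minimal sketch of the cost-data options named in the previous item is given below. The keys are taken from the text above; the year and version values are placeholders and must correspond to an existing ``technology-data`` release.

.. code:: yaml

   enable:
     retrieve_cost_data: true   # fetch resources/costs.csv from technology-data
   costs:
     year: 2030                 # projection year of the cost assumptions
     version: v0.4.0            # technology-data release to use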
* A new section ``conventional`` was added to the config file. This section contains configurations for conventional carriers. @@ -641,18 +1093,18 @@ PyPSA-Eur 0.5.0 (27th July 2022) * Add an efficiency factor of 88.55% to offshore wind capacity factors as a proxy for wake losses. More rigorous modelling is `planned - `_ [`#277 - `_]. + `__ [`#277 + `__]. * Following discussion in `#285 - `_ we have disabled the + `__ we have disabled the correction factor for solar PV capacity factors by default while satellite data is used. A correction factor of 0.854337 is recommended if reanalysis data like ERA5 is used. * The default deployment density of AC- and DC-connected offshore wind capacity is reduced from 3 MW/sqkm to a more conservative estimate of 2 MW/sqkm [`#280 - `_]. + `__]. * The inclusion of renewable carriers is now specified in the config entry ``renewable_carriers``. Before this was done by commenting/uncommenting @@ -681,12 +1133,12 @@ PyPSA-Eur 0.5.0 (27th July 2022) * Resource definitions for memory usage now follow `Snakemake standard resource definition - `_ + `__ ``mem_mb`` rather than ``mem``. * The powerplants that have been shut down by 2021 are filtered out. -* Updated historical `EIA hydro generation data `_. +* Updated historical `EIA hydro generation data `__. * Network building is made deterministic by supplying a fixed random state to network clustering routines. @@ -735,24 +1187,24 @@ Synchronisation Release - Ukraine and Moldova (17th March 2022) --------------------------------------------------------------- On March 16, 2022, the transmission networks of Ukraine and Moldova have -successfully been `synchronised with the continental European grid `_. We have taken +successfully been `synchronised with the continental European grid `__. We have taken this as an opportunity to add the power systems of Ukraine and Moldova to PyPSA-Eur. This includes: .. image:: img/synchronisation.png :width: 500 -* the transmission network topology from the `ENTSO-E interactive map `_. +* the transmission network topology from the `ENTSO-E interactive map `__. -* existing power plants (incl. nuclear, coal, gas and hydro) from the `powerplantmatching `_ tool +* existing power plants (incl. nuclear, coal, gas and hydro) from the `powerplantmatching `__ tool -* country-level load time series from ENTSO-E through the `OPSD platform `_, which are then distributed heuristically to substations by GDP and population density. +* country-level load time series from ENTSO-E through the `OPSD platform `__, which are then distributed heuristically to substations by GDP and population density. * wind and solar profiles based on ERA5 and SARAH-2 weather data -* hydro profiles based on historical `EIA generation data `_ +* hydro profiles based on historical `EIA generation data `__ -* a simplified calculation of wind and solar potentials based on the `Copernicus Land Cover dataset `_. +* a simplified calculation of wind and solar potentials based on the `Copernicus Land Cover dataset `__. * electrical characteristics of 750 kV transmission lines @@ -773,18 +1225,18 @@ PyPSA-Eur 0.4.0 (22th September 2021) * With this release, we change the license from copyleft GPLv3 to the more liberal MIT license with the consent of all contributors - [`#276 `_]. + [`#276 `__]. * Switch to the new major ``atlite`` release v0.2. The version upgrade comes along with significant speed up for the rule ``build_renewable_profiles.py`` (~factor 2). 
A lot of the code which calculated the land-use availability is now outsourced and does not rely on ``glaes``, ``geokit`` anymore. This facilitates the environment building and version compatibility of ``gdal``, ``libgdal`` with - other packages [`#224 `_]. + other packages [`#224 `__]. * Implemented changes to ``n.snapshot_weightings`` in new PyPSA version v0.18 - (cf. `PyPSA/PyPSA/#227 `_) - [`#259 `_]. + (cf. `PyPSA/PyPSA/#227 `__) + [`#259 `__]. * Add option to pre-aggregate nodes without power injections (positive or negative, i.e. generation or demand) to electrically closest nodes or neighbors @@ -793,18 +1245,18 @@ PyPSA-Eur 0.4.0 (22th September 2021) * In :mod:`simplify_network`, bus columns with no longer correct entries are removed (symbol, tags, under_construction, substation_lv, substation_off) - [`#219 `_] + [`#219 `__] * Add option to include marginal costs of links representing fuel cells, electrolysis, and battery inverters - [`#232 `_]. + [`#232 `__]. * The rule and script ``build_country_flh`` are removed as they are no longer used or maintained. * The connection cost of generators in :mod:`simplify_network` are now reported in ``resources/connection_costs_s{simpl}.csv`` - [`#261 `_]. + [`#261 `__]. * The tutorial cutout was renamed from ``cutouts/europe-2013-era5.nc`` to ``cutouts/be-03-2013-era5.nc`` to accommodate tutorial and productive @@ -814,72 +1266,72 @@ PyPSA-Eur 0.4.0 (22th September 2021) potentials was deprecated and now defaults to ``True``. * Update dependencies in ``envs/environment.yaml`` - [`#257 `_] + [`#257 `__] * Continuous integration testing switches to Github Actions from Travis CI - [`#252 `_]. + [`#252 `__]. * Documentation on readthedocs.io is now built with ``pip`` only and no longer - requires ``conda`` [`#267 `_]. + requires ``conda`` [`#267 `__]. -* Use ``Citation.cff`` [`#273 `_]. +* Use ``Citation.cff`` [`#273 `__]. **Bugs and Compatibility** -* Support for PyPSA v0.18 [`#268 `_]. +* Support for PyPSA v0.18 [`#268 `__]. * Minimum Python version set to ``3.8``. -* Removed ``six`` dependency [`#245 `_]. +* Removed ``six`` dependency [`#245 `__]. * Update :mod:`plot_network` and :mod:`make_summary` rules to latest PyPSA - versions [`#270 `_]. + versions [`#270 `__]. * Keep converter links to store components when using the ``ATK`` - wildcard and only remove DC links [`#214 `_]. + wildcard and only remove DC links [`#214 `__]. * Value for ``co2base`` in ``config.yaml`` adjusted to 1.487e9 t CO2-eq (from 3.1e9 t CO2-eq). The new value represents emissions related to the electricity sector for EU+UK+Balkan. The old value was too high and used when the emissions wildcard in ``{opts}`` was used - [`#233 `_]. + [`#233 `__]. * Add escape in :mod:`base_network` if all TYNDP links are already contained in the network - [`#246 `_]. + [`#246 `__]. * In :mod:`solve_operations_network` the optimised capacities are now fixed for all extendable links, not only HVDC links - [`#244 `_]. + [`#244 `__]. * The ``focus_weights`` are now also considered when pre-clustering in the :mod:`simplify_network` rule - [`#241 `_]. + [`#241 `__]. * in :mod:`build_renewable_profile` where offshore wind profiles could - no longer be created [`#249 `_]. + no longer be created [`#249 `__]. * Lower expansion limit of extendable carriers is now set to the existing capacity, i.e. ``p_nom_min = p_nom`` (0 before). 
Simultaneously, the upper limit (``p_nom_max``) is now the maximum of the installed capacity (``p_nom``) and the previous estimate based on land availability (``p_nom_max``) - [`#260 `_]. + [`#260 `__]. * Solving an operations network now includes optimized store capacities as well. Before only lines, links, generators and storage units were considered - [`#269 `_]. + [`#269 `__]. * With ``load_shedding: true`` in the solving options of ``config.yaml`` load shedding generators are only added at the AC buses, excluding buses for H2 - and battery stores [`#269 `_]. + and battery stores [`#269 `__]. * Delete duplicated capital costs at battery discharge link - [`#240 `_]. + [`#240 `__]. * Propagate the solver log file name to the solver. Previously, the PyPSA network solving functions were not told about the solver logfile specified - in the Snakemake file [`#247 `_] + in the Snakemake file [`#247 `__] PyPSA-Eur 0.3.0 (7th December 2020) ----------------------------------- @@ -892,29 +1344,29 @@ Using the ``{opts}`` wildcard for scenario: For example ``EQ0.5c`` set in the ``{opts}`` wildcard requires each country to produce on average at least 50% of its consumption. Additionally, the option ``ATK`` requires autarky at each node and removes all means of power transmission through lines and links. ``ATKc`` only removes cross-border transfer capacities. - [`#166 `_]. + [`#166 `__]. * Added an option to alter the capital cost (``c``) or installable potentials (``p``) of carriers by a factor via ``carrier+{c,p}factor`` in the ``{opts}`` wildcard. This can be useful for exploring uncertain cost parameters. Example: ``solar+c0.5`` reduces the capital cost of solar to 50% of original values - [`#167 `_, `#207 `_]. + [`#167 `__, `#207 `__]. * Added an option to the ``{opts}`` wildcard that applies a time series segmentation algorithm based on renewables, hydro inflow and load time series to produce a given total number of adjacent snapshots of varying lengths. This feature is an alternative to downsampling the temporal resolution by simply averaging and - uses the `tsam `_ package - [`#186 `_]. + uses the `tsam `__ package + [`#186 `__]. More OPSD integration: -* Add renewable power plants from `OPSD `_ to the network for specified technologies. +* Add renewable power plants from `OPSD `__ to the network for specified technologies. This will overwrite the capacities calculated from the heuristic approach in :func:`estimate_renewable_capacities()` - [`#212 `_]. + [`#212 `__]. -* Electricity consumption data is now retrieved directly from the `OPSD website `_ using the rule :mod:`build_electricity_demand`. +* Electricity consumption data is now retrieved directly from the `OPSD website `__ using the rule :mod:`build_electricity_demand`. The user can decide whether to take the ENTSO-E power statistics data (default) or the ENTSO-E transparency data - [`#211 `_]. + [`#211 `__]. Other: @@ -922,93 +1374,93 @@ Other: Then, the rule looks for custom busmaps at ``data/custom_busmap_elec_s{simpl}_{clusters}.csv``, which should have the same format as ``resources/busmap_elec_s{simpl}_{clusters}.csv``. i.e. the index should contain the buses of ``networks/elec_s{simpl}.nc`` - [`#193 `_]. + [`#193 `__]. * Line and link capacities can be capped in the ``config.yaml`` at ``lines: s_nom_max:`` and ``links: p_nom_max``: - [`#166 `_]. + [`#166 `__]. * Added Google Cloud Platform tutorial (for Windows users) - [`#177 `_]. + [`#177 `__]. 
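As a sketch of the line and link capacity caps mentioned a few items above (``lines: s_nom_max:`` and ``links: p_nom_max:``), the snippet below shows the assumed layout; the numbers are placeholders and the defaults are typically unbounded.

.. code:: yaml

   lines:
     s_nom_max: 20000   # MVA, upper bound on line capacity after expansion
   links:
     p_nom_max: 10000   # MW, upper bound on link capacity after expansion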
**Changes** * Don't remove capital costs from lines and links, when imposing a line volume limit (``lv``) or a line cost limit (``lc``). Previously, these were removed to move the expansion in direction of the limit - [`#183 `_]. + [`#183 `__]. * The mappings for clustered lines and buses produced by the :mod:`simplify_network` and :mod:`cluster_network` rules changed from Hierarchical Data Format (``.h5``) to Comma-Separated Values format (``.csv``) for ease of use. - [`#198 `_] + [`#198 `__] * The N-1 security margin for transmission lines is now fixed to a provided value in ``config.yaml``, removing an undocumented linear interpolation between 0.5 and 0.7 in the range between 37 and 200 nodes. - [`#199 `_]. + [`#199 `__]. * Modelling hydrogen and battery storage with Store and Link components is now the default, rather than using StorageUnit components with fixed power-to-energy ratio - [`#205 `_]. + [`#205 `__]. * Use ``mamba`` (https://github.com/mamba-org/mamba) for faster Travis CI builds - [`#196 `_]. + [`#196 `__]. * Multiple smaller changes: Removed unused ``{network}`` wildcard, moved environment files to dedicated ``envs`` folder, removed sector-coupling components from configuration files, updated documentation colors, minor refactoring and code cleaning - [`#190 `_]. + [`#190 `__]. **Bugs and Compatibility** * Add compatibility for pyomo 5.7.0 in :mod:`cluster_network` and :mod:`simplify_network` - [`#172 `_]. + [`#172 `__]. * Fixed a bug for storage units such that individual store and dispatch efficiencies are correctly taken account of rather than only their round-trip efficiencies. In the cost database (``data/costs.csv``) the efficiency of battery inverters should be stated as per discharge/charge rather than per roundtrip - [`#202 `_]. + [`#202 `__]. * Corrected exogenous emission price setting (in ``config: cost: emission price:``), which now correctly accounts for the efficiency and effective emission of the generators - [`#171 `_]. + [`#171 `__]. * Corrected HVDC link connections (a) between Norway and Denmark and (b) mainland Italy, Corsica (FR) and Sardinia (IT) as well as for East-Western and Anglo-Scottish interconnectors - [`#181 `_, `#206 `_]. + [`#181 `__, `#206 `__]. * Fix bug of clustering ``offwind-{ac,dc}`` generators in the option of high-resolution generators for renewables. Now, there are more sites for ``offwind-{ac,dc}`` available than network nodes. Before, they were clustered to the resolution of the network (``elec_s1024_37m.nc``: 37 network nodes, 1024 generators) - [`#191 `_]. + [`#191 `__]. * Raise a warning if ``tech_colors`` in the config are not defined for all carriers - [`#178 `_]. + [`#178 `__]. PyPSA-Eur 0.2.0 (8th June 2020) ------------------------------- -* The optimization is now performed using the ``pyomo=False`` setting in the :func:`pypsa.lopf.network_lopf`. This speeds up the solving process significantly and consumes much less memory. The inclusion of additional constraints were adjusted to the new implementation. They are all passed to the :func:`network_lopf` function via the ``extra_functionality`` argument. The rule ``trace_solve_network`` was integrated into the rule :mod:`solve_network` and can be activated via configuration with ``solving: options: track_iterations: true``. The charging and discharging capacities of batteries modelled as store-link combination are now coupled [`#116 `_]. +* The optimization is now performed using the ``pyomo=False`` setting in the :func:`pypsa.lopf.network_lopf`. 
This speeds up the solving process significantly and consumes much less memory. The inclusion of additional constraints were adjusted to the new implementation. They are all passed to the :func:`network_lopf` function via the ``extra_functionality`` argument. The rule ``trace_solve_network`` was integrated into the rule :mod:`solve_network` and can be activated via configuration with ``solving: options: track_iterations: true``. The charging and discharging capacities of batteries modelled as store-link combination are now coupled [`#116 `__]. -* An updated extract of the `ENTSO-E Transmission System Map `_ (including Malta) was added to the repository using the `GridKit `_ tool. This tool has been updated to retrieve up-to-date map extracts using a single `script `_. The update extract features 5322 buses, 6574 lines, 46 links. [`#118 `_]. +* An updated extract of the `ENTSO-E Transmission System Map `__ (including Malta) was added to the repository using the `GridKit `__ tool. This tool has been updated to retrieve up-to-date map extracts using a single `script `__. The update extract features 5322 buses, 6574 lines, 46 links. [`#118 `__]. -* Added `FSFE REUSE `_ compliant license information. Documentation now licensed under CC-BY-4.0 [`#160 `_]. +* Added `FSFE REUSE `__ compliant license information. Documentation now licensed under CC-BY-4.0 [`#160 `__]. -* Added a 30 minute `video introduction `_ and a 20 minute `video tutorial `_ +* Added a 30 minute `video introduction `__ and a 20 minute `video tutorial `__ * Networks now store a color and a nicely formatted name for each carrier, accessible via ``n.carrier['color']`` and ``n.carrier['nice_name'] ``(networks after ``elec.nc``). * Added an option to skip iterative solving usually performed to update the line impedances of expanded lines at ``solving: options: skip_iterations:``. -* ``snakemake`` rules for retrieving cutouts and the natura raster can now be disabled independently from their respective rules to build them; via ``config.*yaml`` [`#136 `_]. +* ``snakemake`` rules for retrieving cutouts and the natura raster can now be disabled independently from their respective rules to build them; via ``config.*yaml`` [`#136 `__]. -* Removed the ``id`` column for custom power plants in ``data/custom_powerplants.csv`` to avoid custom power plants with conflicting ids getting attached to the wrong bus [`#131 `_]. +* Removed the ``id`` column for custom power plants in ``data/custom_powerplants.csv`` to avoid custom power plants with conflicting ids getting attached to the wrong bus [`#131 `__]. -* Add option ``renewables: {carrier}: keep_all_available_areas:`` to use all available weather cells for renewable profile and potential generation. The default ignores weather cells where only less than 1 MW can be installed [`#150 `_]. +* Add option ``renewables: {carrier}: keep_all_available_areas:`` to use all available weather cells for renewable profile and potential generation. The default ignores weather cells where only less than 1 MW can be installed [`#150 `__]. -* Added a function ``_helpers.load_network()`` which loads a network with overridden components specified in ``snakemake.config['override_components']`` [`#128 `_]. +* Added a function ``_helpers.load_network()`` which loads a network with overridden components specified in ``snakemake.config['override_components']`` [`#128 `__]. -* Bugfix in :mod:`base_network` which now finds all closest links, not only the first entry [`#143 `_]. 
+* Bugfix in :mod:`base_network` which now finds all closest links, not only the first entry [`#143 `__]. -* Bugfix in :mod:`cluster_network` which now skips recalculation of link parameters if there are no links [`#149 `_]. +* Bugfix in :mod:`cluster_network` which now skips recalculation of link parameters if there are no links [`#149 `__]. -* Added information on pull requests to contribution guidelines [`#151 `_]. +* Added information on pull requests to contribution guidelines [`#151 `__]. * Improved documentation on open-source solver setup and added usage warnings. @@ -1019,31 +1471,31 @@ PyPSA-Eur 0.1.0 (9th January 2020) This is the first release of PyPSA-Eur, a model of the European power system at the transmission network level. Recent changes include: -* Documentation on installation, workflows and configuration settings is now available online at `pypsa-eur.readthedocs.io `_ [`#65 `_]. +* Documentation on installation, workflows and configuration settings is now available online at `pypsa-eur.readthedocs.io `__ [`#65 `__]. -* The ``conda`` environment files were updated and extended [`#81 `_]. +* The ``conda`` environment files were updated and extended [`#81 `__]. -* The power plant database was updated with extensive filtering options via ``pandas.query`` functionality [`#84 `_ and `#94 `_]. +* The power plant database was updated with extensive filtering options via ``pandas.query`` functionality [`#84 `__ and `#94 `__]. -* Continuous integration testing with `Travis CI `_ is now included for Linux, Mac and Windows [`#82 `_]. +* Continuous integration testing with `Travis CI `__ is now included for Linux, Mac and Windows [`#82 `__]. -* Data dependencies were moved to `zenodo `_ and are now versioned [`#60 `_]. +* Data dependencies were moved to `zenodo `__ and are now versioned [`#60 `__]. -* Data dependencies are now retrieved directly from within the snakemake workflow [`#86 `_]. +* Data dependencies are now retrieved directly from within the snakemake workflow [`#86 `__]. -* Emission prices can be added to marginal costs of generators through the keywords ``Ep`` in the ``{opts}`` wildcard [`#100 `_]. +* Emission prices can be added to marginal costs of generators through the keywords ``Ep`` in the ``{opts}`` wildcard [`#100 `__]. -* An option is introduced to add extendable nuclear power plants to the network [`#98 `_]. +* An option is introduced to add extendable nuclear power plants to the network [`#98 `__]. -* Focus weights can now be specified for particular countries for the network clustering, which allows to set a proportion of the total number of clusters for particular countries [`#87 `_]. +* Focus weights can now be specified for particular countries for the network clustering, which allows to set a proportion of the total number of clusters for particular countries [`#87 `__]. -* A new rule :mod:`add_extra_components` allows to add additional components to the network only after clustering. It is thereby possible to model storage units (e.g. battery and hydrogen) in more detail via a combination of ``Store``, ``Link`` and ``Bus`` elements [`#97 `_]. +* A new rule :mod:`add_extra_components` allows to add additional components to the network only after clustering. It is thereby possible to model storage units (e.g. battery and hydrogen) in more detail via a combination of ``Store``, ``Link`` and ``Bus`` elements [`#97 `__]. 
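A hedged sketch of how such extra storage components are typically requested via the ``electricity: extendable_carriers:`` setting referred to in the surrounding items; the carrier lists are examples, not defaults.

.. code:: yaml

   electricity:
     extendable_carriers:
       Generator: [solar, onwind, offwind-ac, offwind-dc]
       StorageUnit: []
       Store: [battery, H2]
       Link: [H2 pipeline]   # see the following item on hydrogen pipelines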
-* Hydrogen pipelines (including cost assumptions) can now be added alongside clustered network connections in the rule :mod:`add_extra_components` . Set ``electricity: extendable_carriers: Link: [H2 pipeline]`` and ensure hydrogen storage is modelled as a ``Store``. This is a first simplified stage [`#108 `_]. +* Hydrogen pipelines (including cost assumptions) can now be added alongside clustered network connections in the rule :mod:`add_extra_components` . Set ``electricity: extendable_carriers: Link: [H2 pipeline]`` and ensure hydrogen storage is modelled as a ``Store``. This is a first simplified stage [`#108 `__]. -* Logfiles for all rules of the ``snakemake`` workflow are now written in the folder ``log/`` [`#102 `_]. +* Logfiles for all rules of the ``snakemake`` workflow are now written in the folder ``log/`` [`#102 `__]. -* The new function ``_helpers.mock_snakemake`` creates a ``snakemake`` object which mimics the actual ``snakemake`` object produced by workflow by parsing the ``Snakefile`` and setting all paths for inputs, outputs, and logs. This allows running all scripts within a (I)python terminal (or just by calling ``python ``) and thereby facilitates developing and debugging scripts significantly [`#107 `_]. +* The new function ``_helpers.mock_snakemake`` creates a ``snakemake`` object which mimics the actual ``snakemake`` object produced by workflow by parsing the ``Snakefile`` and setting all paths for inputs, outputs, and logs. This allows running all scripts within a (I)python terminal (or just by calling ``python ``) and thereby facilitates developing and debugging scripts significantly [`#107 `__]. PyPSA-Eur-Sec Releases (pre-merge) @@ -1060,13 +1512,13 @@ biomass, and explicit modelling of methanol and ammonia as separate energy carriers. This release is known to work with `PyPSA-Eur -`_ Version 0.7.0 and `Technology Data -`_ Version 0.5.0. +`__ Version 0.7.0 and `Technology Data +`__ Version 0.5.0. **Gas Transmission Network** * New rule ``retrieve_gas_infrastructure_data`` that downloads and extracts the - SciGRID_gas `IGGIELGN `_ dataset from + SciGRID_gas `IGGIELGN `__ dataset from zenodo. It includes data on the transmission routes, pipe diameters, capacities, pressure, and whether the pipeline is bidirectional and carries H-Gas or L-Gas. @@ -1077,7 +1529,7 @@ This release is known to work with `PyPSA-Eur * New rule ``build_gas_input_locations`` compiles the LNG import capacities (from the Global Energy Monitor's `Europe Gas Tracker - `_, pipeline + `__, pipeline entry capacities and local production capacities for each region of the model. These are the regions where fossil gas can eventually enter the model. @@ -1090,7 +1542,7 @@ This release is known to work with `PyPSA-Eur * With the option ``sector: gas_network:``, the existing gas network is added with a lossless transport model. A length-weighted `k-edge augmentation algorithm - `_ + `__ can be run to add new candidate gas pipelines such that all regions of the model can be connected to the gas network. The number of candidates can be controlled via the setting ``sector: gas_network_connectivity_upgrade:``. When @@ -1121,7 +1573,7 @@ This release is known to work with `PyPSA-Eur * Add option for regionally-resolved geological carbon dioxide sequestration potentials through new rule ``build_sequestration_potentials`` based on - `CO2StoP `_. This + `CO2StoP `__. This can be controlled in the section ``regional_co2_sequestration_potential`` of the ``config.yaml``. 
It includes options to select the level of conservatism, whether onshore potentials should be included, the respective upper and lower @@ -1133,7 +1585,7 @@ This release is known to work with `PyPSA-Eur ``seq200`` in the ``{sector_opts}`` wildcard (for limit of 200 Mt CO2). * Add option to include `Allam cycle gas power plants - `_ (``allam_cycle``). + `__ (``allam_cycle``). * Add option for planning a new carbon dioxide network (``co2network``). @@ -1152,7 +1604,7 @@ This release is known to work with `PyPSA-Eur * Add regionalised hydrogen salt cavern storage potentials from `Technical Potential of Salt Caverns for Hydrogen Storage in Europe - `_. This data is compiled in + `__. This data is compiled in a new rule ``build_salt_cavern_potentials``. * Add option to resolve ammonia as separate energy carrier with Haber-Bosch @@ -1169,11 +1621,11 @@ This release is known to work with `PyPSA-Eur * Demand for liquid hydrogen in international shipping is now geographically distributed by port trade volumes in a new rule ``build_shipping_demand`` using data from the `World Bank Data Catalogue - `_. + `__. Domestic shipping remains distributed by population. * Add option to aggregate network temporally using representative snapshots or - segments (with `tsam `_). + segments (with `tsam `__). * Add option for minimum part load for Fischer-Tropsch plants (default: 90%) and methanolisation plants (default: 50%). @@ -1226,7 +1678,7 @@ This release is known to work with `PyPSA-Eur PyPSA network. * Updated `data bundle - `_ + `__ that includes the hydrogan salt cavern storage potentials. * Updated and extended documentation in @@ -1254,7 +1706,7 @@ This release is known to work with `PyPSA-Eur expansion of previous iteration as minimum capacity for next iteration. * Further rather minor bugfixes for myopic optimisation code (see `#256 - `_). + `__). Many thanks to all who contributed to this release! @@ -1273,9 +1725,9 @@ more options in setting exogenous transition paths, besides many performance improvements. This release is known to work with `PyPSA-Eur -`_ Version 0.4.0, `Technology Data -`_ Version 0.3.0 and -`PyPSA `_ Version 0.18.0. +`__ Version 0.4.0, `Technology Data +`__ Version 0.3.0 and +`PyPSA `__ Version 0.18.0. Please note that the data bundle has also been updated. @@ -1292,11 +1744,11 @@ Please note that the data bundle has also been updated. battery costs. * Separate basic chemicals into HVC (high-value chemicals), chlorine, methanol and ammonia - [`#166 `_]. + [`#166 `__]. * Add option to specify reuse, primary production, and mechanical and chemical recycling fraction of platics - [`#166 `_]. + [`#166 `__]. * Include energy demands and CO2 emissions for the agriculture, forestry and fishing sector. It is included by default through the option ``A`` in the ``sector_opts`` wildcard. @@ -1310,11 +1762,11 @@ Please note that the data bundle has also been updated. Heat demand is assigned at "services rural heat" buses. Electricity demands are added to low-voltage buses. Time series for demands are constant and distributed inside countries by population - [`#147 `_]. + [`#147 `__]. * Include today's district heating shares in myopic optimisation and add option to specify exogenous path for district heating share increase under ``sector: - district_heating:`` [`#149 `_]. + district_heating:`` [`#149 `__]. * Added option for hydrogen liquefaction costs for hydrogen demand in shipping. This introduces a new ``H2 liquid`` bus at each location. 
It is activated via @@ -1334,16 +1786,16 @@ Please note that the data bundle has also been updated. factor 2. In this example, ``e_nom_max`` represents the CO2 sequestration potential in Europe. -* Use `JRC ENSPRESO database `_ to +* Use `JRC ENSPRESO database `__ to spatially disaggregate biomass potentials to PyPSA-Eur regions based on overlaps with NUTS2 regions from ENSPRESO (proportional to area) (`#151 - `_). + `__). * Add option to regionally disaggregate biomass potential to individual nodes (previously given per country, then distributed by population density within) and allow the transport of solid biomass. The transport costs are determined based on the `JRC-EU-Times Bioenergy report - `_ in the new optional rule + `__ in the new optional rule ``build_biomass_transport_costs``. Biomass transport can be activated with the setting ``sector: biomass_transport: true``. @@ -1361,7 +1813,7 @@ Please note that the data bundle has also been updated. * The myopic option can now be used together with different clustering for the generators and the network. The existing renewable capacities are split evenly - among the regions in every country [`#144 `_]. + among the regions in every country [`#144 `__]. * Add optional function to use ``geopy`` to locate entries of the Hotmaps database of industrial sites with missing location based on city and country, @@ -1427,7 +1879,7 @@ Please note that the data bundle has also been updated. * Consistent use of ``__main__`` block and further unspecific code cleaning. -* Updated data bundle and moved data bundle to zenodo.org (`10.5281/zenodo.5546517 `_). +* Updated data bundle and moved data bundle to zenodo.org (`10.5281/zenodo.5546517 `__). **Bugfixes and Compatibility** @@ -1451,7 +1903,7 @@ PyPSA-Eur-Sec 0.5.0 (21st May 2021) This release includes improvements to the cost database for building retrofits, carbon budget management and wildcard settings, as well as an important bugfix for the emissions from land transport. -This release is known to work with `PyPSA-Eur `_ Version 0.3.0 and `Technology Data `_ Version 0.2.0. +This release is known to work with `PyPSA-Eur `__ Version 0.3.0 and `Technology Data `__ Version 0.2.0. Please note that the data bundle has also been updated. @@ -1472,15 +1924,15 @@ PyPSA-Eur-Sec 0.4.0 (11th December 2020) This release includes a more accurate nodal disaggregation of industry demand within each country, fixes to CHP and CCS representations, as well as changes to some configuration settings. -It has been released to coincide with `PyPSA-Eur `_ Version 0.3.0 and `Technology Data `_ Version 0.2.0, and is known to work with these releases. +It has been released to coincide with `PyPSA-Eur `__ Version 0.3.0 and `Technology Data `__ Version 0.2.0, and is known to work with these releases. New features: -* The `Hotmaps Industrial Database `_ is used to disaggregate the industrial demand spatially to the nodes inside each country (previously it was distributed by population density). +* The `Hotmaps Industrial Database `__ is used to disaggregate the industrial demand spatially to the nodes inside each country (previously it was distributed by population density). * Electricity demand from industry is now separated from the regular electricity demand and distributed according to the industry demand. Only the remaining regular electricity demand for households and services is distributed according to GDP and population. 
-* A cost database for the retrofitting of the thermal envelope of residential and services buildings has been integrated, as well as endogenous optimisation of the level of retrofitting. This is described in the paper `Mitigating heat demand peaks in buildings in a highly renewable European energy system `_. Retrofitting can be activated both exogenously and endogenously from the ``config.yaml``. -* The biomass and gas combined heat and power (CHP) parameters ``c_v`` and ``c_b`` were read in assuming they were extraction plants rather than back pressure plants. The data is now corrected in `Technology Data `_ Version 0.2.0 to the correct DEA back pressure assumptions and they are now implemented as single links with a fixed ratio of electricity to heat output (even as extraction plants, they were always sitting on the backpressure line in simulations, so there was no point in modelling the full heat-electricity feasibility polygon). The old assumptions underestimated the heat output. -* The Danish Energy Agency released `new assumptions for carbon capture `_ in October 2020, which have now been incorporated in PyPSA-Eur-Sec, including direct air capture (DAC) and post-combustion capture on CHPs, cement kilns and other industrial facilities. The electricity and heat demand for DAC is modelled for each node (with heat coming from district heating), but currently the electricity and heat demand for industrial capture is not modelled very cleanly (for process heat, 10% of the energy is assumed to go to carbon capture) - a new issue will be opened on this. +* A cost database for the retrofitting of the thermal envelope of residential and services buildings has been integrated, as well as endogenous optimisation of the level of retrofitting. This is described in the paper `Mitigating heat demand peaks in buildings in a highly renewable European energy system `__. Retrofitting can be activated both exogenously and endogenously from the ``config.yaml``. +* The biomass and gas combined heat and power (CHP) parameters ``c_v`` and ``c_b`` were read in assuming they were extraction plants rather than back pressure plants. The data is now corrected in `Technology Data `__ Version 0.2.0 to the correct DEA back pressure assumptions and they are now implemented as single links with a fixed ratio of electricity to heat output (even as extraction plants, they were always sitting on the backpressure line in simulations, so there was no point in modelling the full heat-electricity feasibility polygon). The old assumptions underestimated the heat output. +* The Danish Energy Agency released `new assumptions for carbon capture `__ in October 2020, which have now been incorporated in PyPSA-Eur-Sec, including direct air capture (DAC) and post-combustion capture on CHPs, cement kilns and other industrial facilities. The electricity and heat demand for DAC is modelled for each node (with heat coming from district heating), but currently the electricity and heat demand for industrial capture is not modelled very cleanly (for process heat, 10% of the energy is assumed to go to carbon capture) - a new issue will be opened on this. * Land transport is separated by energy carrier (fossil, hydrogen fuel cell electric vehicle, and electric vehicle), but still needs to be separated into heavy and light vehicles (the data is there, just not the code yet). * For assumptions that change with the investment year, there is a new time-dependent format in the ``config.yaml`` using a dictionary with keys for each year. 
Implemented examples include the CO2 budget, exogenous retrofitting share and land transport energy carrier; more parameters will be dynamised like this in future. * Some assumptions have been moved out of the code and into the ``config.yaml``, including the carbon sequestration potential and cost, the heat pump sink temperature, reductions in demand for high value chemicals, and some BEV DSM parameters and transport efficiencies. @@ -1503,7 +1955,7 @@ New features: * The script ``build_industrial_production_per_country_tomorrow.py`` determines the future industrial production of materials based on today's levels as well as assumed recycling and demand change measures. * The energy demand for each industry sector and each location in 2015 is also calculated, so that it can be later incorporated in the pathway optimization. * Ammonia production data is taken from the USGS and deducted from JRC-IDEES's "basic chemicals" so that it ammonia can be handled separately from the others (olefins, aromatics and chlorine). -* Solid biomass is no longer allowed to be used for process heat in cement and basic chemicals, since the wastes and residues cannot be guaranteed to reach the high temperatures required. Instead, solid biomass is used in the paper and pulp as well as food, beverages and tobacco industries, where required temperatures are lower (see `DOI:10.1002/er.3436 `_ and `DOI:10.1007/s12053-017-9571-y `_). +* Solid biomass is no longer allowed to be used for process heat in cement and basic chemicals, since the wastes and residues cannot be guaranteed to reach the high temperatures required. Instead, solid biomass is used in the paper and pulp as well as food, beverages and tobacco industries, where required temperatures are lower (see `DOI:10.1002/er.3436 `__ and `DOI:10.1007/s12053-017-9571-y `__). * National installable potentials for salt caverns are now applied. * When electricity distribution grids are activated, new industry electricity demand, resistive heaters and micro-CHPs are now connected to the lower voltage levels. * Gas distribution grid costs are included for gas boilers and micro-CHPs. @@ -1515,15 +1967,15 @@ New features: PyPSA-Eur-Sec 0.2.0 (21st August 2020) -------------------------------------- -This release introduces pathway optimization over many years (e.g. 2020, 2030, 2040, 2050) with myopic foresight, as well as outsourcing the technology assumptions to the `technology-data `_ repository. +This release introduces pathway optimization over many years (e.g. 2020, 2030, 2040, 2050) with myopic foresight, as well as outsourcing the technology assumptions to the `technology-data `__ repository. It is known to work with PyPSA-Eur v0.1.0 (commit bb3477cd69), PyPSA v0.17.1 and technology-data v0.1.0. New features: -* Option for pathway optimization with myopic foresight, based on the paper `Early decarbonisation of the European Energy system pays off (2020) `_. Investments are optimized sequentially for multiple years (e.g. 2020, 2030, 2040, 2050) taking account of existing assets built in previous years and their lifetimes. The script uses data on the existing assets for electricity and building heating technologies, but there are no assumptions yet for existing transport and industry (if you include these, the model will greenfield them). There are also some `outstanding issues `_ on e.g. the distribution of existing wind, solar and heating technologies within each country. 
To use myopic foresight, set ``foresight : 'myopic'`` in the ``config.yaml`` instead of the default ``foresight : 'overnight'``. An example configuration can be found in ``config.myopic.yaml``. More details on the implementation can be found in :doc:`myopic`. +* Option for pathway optimization with myopic foresight, based on the paper `Early decarbonisation of the European Energy system pays off (2020) `__. Investments are optimized sequentially for multiple years (e.g. 2020, 2030, 2040, 2050) taking account of existing assets built in previous years and their lifetimes. The script uses data on the existing assets for electricity and building heating technologies, but there are no assumptions yet for existing transport and industry (if you include these, the model will greenfield them). There are also some `outstanding issues `__ on e.g. the distribution of existing wind, solar and heating technologies within each country. To use myopic foresight, set ``foresight : 'myopic'`` in the ``config.yaml`` instead of the default ``foresight : 'overnight'``. An example configuration can be found in ``config.myopic.yaml``. More details on the implementation can be found in :doc:`myopic`. -* Technology assumptions (costs, efficiencies, etc.) are no longer stored in the repository. Instead, you have to install the `technology-data `_ database in a parallel directory. These assumptions are largely based on the `Danish Energy Agency Technology Data `_. More details on the installation can be found in :doc:`installation`. +* Technology assumptions (costs, efficiencies, etc.) are no longer stored in the repository. Instead, you have to install the `technology-data `__ database in a parallel directory. These assumptions are largely based on the `Danish Energy Agency Technology Data `__. More details on the installation can be found in :doc:`installation`. * Logs and benchmarks are now stored with the other model outputs in ``results/run-name/``. @@ -1546,7 +1998,7 @@ It is known to work with PyPSA-Eur v0.1.0 (commit bb3477cd69) and PyPSA v0.17.0. We are making this release since in version 0.2.0 we will introduce changes to allow myopic investment planning that will require minor changes for users of the overnight investment planning. PyPSA-Eur-Sec builds on the electricity generation and transmission -model `PyPSA-Eur `_ to add demand +model `PyPSA-Eur `__ to add demand and supply for the following sectors: transport, space and water heating, biomass, industry and industrial feedstocks. This completes the energy system and includes all greenhouse gas emitters except @@ -1555,17 +2007,17 @@ waste management, agriculture, forestry and land use. PyPSA-Eur-Sec was initially based on the model PyPSA-Eur-Sec-30 (Version 0.0.1 below) described in the paper `Synergies of sector coupling and transmission reinforcement in a cost-optimised, highly renewable European energy -system `_ (2018) but it differs by +system `__ (2018) but it differs by being based on the higher resolution electricity transmission model -`PyPSA-Eur `_ rather than a +`PyPSA-Eur `__ rather than a one-node-per-country model, and by including biomass, industry, industrial feedstocks, aviation, shipping, better carbon management, carbon capture and usage/sequestration, and gas networks. PyPSA-Eur-Sec includes PyPSA-Eur as a -`snakemake `_ -`subworkflow `_. PyPSA-Eur-Sec +`snakemake `__ +`subworkflow `__. 
PyPSA-Eur-Sec uses PyPSA-Eur to build the clustered transmission model along with wind, solar PV and hydroelectricity potentials and time series. Then PyPSA-Eur-Sec adds other conventional generators, storage units and @@ -1580,13 +2032,13 @@ PyPSA-Eur-Sec 0.0.2 (4th September 2020) This version, also called PyPSA-Eur-Sec-30-Path, built on PyPSA-Eur-Sec 0.0.1 (also called PyPSA-Eur-Sec-30) to include myopic pathway optimisation for the paper `Early decarbonisation of the -European energy system pays off `_ +European energy system pays off `__ (2020). The myopic pathway optimisation was then merged into the main PyPSA-Eur-Sec codebase in Version 0.2.0 above. This model has `its own github repository -`_ and is `archived -on Zenodo `_. +`__ and is `archived +on Zenodo `__. @@ -1597,12 +2049,12 @@ This is the first published version of PyPSA-Eur-Sec, also called PyPSA-Eur-Sec-30. It was first used in the research paper `Synergies of sector coupling and transmission reinforcement in a cost-optimised, highly renewable European energy system -`_ (2018). The model covers 30 +`__ (2018). The model covers 30 European countries with one node per country. It includes demand and supply for electricity, space and water heating in buildings, and land transport. -It is `archived on Zenodo `_. +It is `archived on Zenodo `__. Release Process @@ -1625,6 +2077,6 @@ Release Process * Tag a release on Github via ``git tag v0.x.x``, ``git push``, ``git push --tags``. Include release notes in the tag message. -* Make a `GitHub release `_, which automatically triggers archiving to the `zenodo code repository `_ with `MIT license `_. +* Make a `GitHub release `__, which automatically triggers archiving to the `zenodo code repository `__ with `MIT license `__. -* Send announcement on the `PyPSA mailing list `_. +* Send announcement on the `PyPSA mailing list `__. diff --git a/doc/requirements.txt b/doc/requirements.txt index 3e760c81..a1cd0a5c 100644 --- a/doc/requirements.txt +++ b/doc/requirements.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2019-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2019-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 diff --git a/doc/retrieve.rst b/doc/retrieve.rst index 06a07441..8d9c08da 100644 --- a/doc/retrieve.rst +++ b/doc/retrieve.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -25,12 +25,12 @@ Rule ``retrieve_cutout`` .. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.6382570.svg :target: https://doi.org/10.5281/zenodo.6382570 -Cutouts are spatio-temporal subsets of the European weather data from the `ECMWF ERA5 `_ reanalysis dataset and the `CMSAF SARAH-2 `_ solar surface radiation dataset for the year 2013. -They have been prepared by and are for use with the `atlite `_ tool. You can either generate them yourself using the ``build_cutouts`` rule or retrieve them directly from `zenodo `__ through the rule ``retrieve_cutout``. +Cutouts are spatio-temporal subsets of the European weather data from the `ECMWF ERA5 `__ reanalysis dataset and the `CMSAF SARAH-2 `__ solar surface radiation dataset for the year 2013. +They have been prepared by and are for use with the `atlite `__ tool. You can either generate them yourself using the ``build_cutouts`` rule or retrieve them directly from `zenodo `__ through the rule ``retrieve_cutout``. 
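If you choose to generate a cutout yourself instead of retrieving it, a minimal sketch of doing so directly with atlite is shown below. The file name, bounding box and weather year are illustrative assumptions only; in the workflow the ``build_cutout`` rule derives these settings from ``config/config.yaml``, and ERA5 downloads additionally require the CDS API credentials mentioned in the note below.

.. code-block:: python

    # Illustrative sketch only: build an ERA5 cutout with atlite by hand.
    # Path, spatial extent and year are placeholder values, not the workflow
    # defaults, which come from config/config.yaml.
    import atlite

    cutout = atlite.Cutout(
        path="cutouts/europe-2013-era5.nc",  # hypothetical target file
        module="era5",                       # needs ~/.cdsapirc credentials
        x=slice(-12.0, 35.0),                # longitude range (illustrative)
        y=slice(33.0, 72.0),                 # latitude range (illustrative)
        time="2013",
    )
    cutout.prepare()  # downloads the weather data and writes the netCDF file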
The :ref:`tutorial` uses a smaller cutout than required for the full model (30 MB), which is also automatically downloaded. .. note:: - To download cutouts yourself from the `ECMWF ERA5 `_ you need to `set up the CDS API `_. + To download cutouts yourself from the `ECMWF ERA5 `__ you need to `set up the CDS API `__. **Relevant Settings** @@ -47,10 +47,10 @@ The :ref:`tutorial` uses a smaller cutout than required for the full model (30 M **Outputs** -- ``cutouts/{cutout}``: weather data from either the `ERA5 `_ reanalysis weather dataset or `SARAH-2 `_ satellite-based historic weather data. +- ``cutouts/{cutout}``: weather data from either the `ERA5 `__ reanalysis weather dataset or `SARAH-2 `__ satellite-based historic weather data. .. seealso:: - For details see :mod:`build_cutout` and read the `atlite documentation `_. + For details see :mod:`build_cutout` and read the `atlite documentation `__. Rule ``retrieve_natura_raster`` @@ -59,7 +59,7 @@ Rule ``retrieve_natura_raster`` .. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.4706686.svg :target: https://doi.org/10.5281/zenodo.4706686 -This rule, as a substitute for :mod:`build_natura_raster`, downloads an already rasterized version (`natura.tiff `_) of `Natura 2000 `_ natural protection areas to reduce computation times. The file is placed into the ``resources`` sub-directory. +This rule, as a substitute for :mod:`build_natura_raster`, downloads an already rasterized version (`natura.tiff `__) of `Natura 2000 `__ natural protection areas to reduce computation times. The file is placed into the ``resources`` sub-directory. **Relevant Settings** @@ -74,7 +74,7 @@ This rule, as a substitute for :mod:`build_natura_raster`, downloads an already **Outputs** -- ``resources/natura.tiff``: Rasterized version of `Natura 2000 `_ natural protection areas to reduce computation times. +- ``resources/natura.tiff``: Rasterized version of `Natura 2000 `__ natural protection areas to reduce computation times. .. seealso:: For details see :mod:`build_natura_raster`. @@ -83,7 +83,7 @@ This rule, as a substitute for :mod:`build_natura_raster`, downloads an already Rule ``retrieve_electricity_demand`` ==================================== -This rule downloads hourly electric load data for each country from the `OPSD platform `_. +This rule downloads hourly electric load data for each country from the `OPSD platform `__. **Relevant Settings** @@ -91,13 +91,13 @@ None. **Outputs** -- ``resources/load_raw.csv`` +- ``data/electricity_demand_raw.csv`` Rule ``retrieve_cost_data`` ================================ -This rule downloads techno-economic assumptions from the `technology-data repository `_. +This rule downloads techno-economic assumptions from the `technology-data repository `__. **Relevant Settings** @@ -118,15 +118,10 @@ This rule downloads techno-economic assumptions from the `technology-data reposi - ``resources/costs.csv`` -Rule ``retrieve_irena`` -================================ - -.. automodule:: retrieve_irena - Rule ``retrieve_ship_raster`` ================================ -This rule downloads data on global shipping traffic density from the `World Bank Data Catalogue `_. +This rule downloads data on global shipping traffic density from the `World Bank Data Catalogue `__. **Relevant Settings** diff --git a/doc/sector.rst b/doc/sector.rst index 303e7ed2..bdfc5386 100644 --- a/doc/sector.rst +++ b/doc/sector.rst @@ -1,5 +1,5 @@ .. 
- SPDX-FileCopyrightText: 2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2023-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -7,8 +7,15 @@ Building Sector-Coupled Networks ########################################## -.. warning:: - This part of the documentation is under development. +The preparation process of the sector-coupled version of the PyPSA-Eur energy system model consists of a group of ``snakemake`` rules which are briefly outlined and explained in detail in the sections below. + +Not all data dependencies are shipped with the git repository. +Instead we provide separate data bundles which can be obtained +using the ``retrieve*`` rules (:ref:`data`). +Having downloaded the necessary data, the individual rules outlined below can be run to build the sector-coupled networks. + Rule ``add_brownfield`` ============================================================================== @@ -20,6 +27,12 @@ Rule ``add_existing_baseyear`` .. automodule:: add_existing_baseyear +Rule ``build_existing_heating_distribution`` +============================================================================== + +.. automodule:: build_existing_heating_distribution + + Rule ``build_ammonia_production`` ============================================================================== @@ -50,6 +63,11 @@ Rule ``build_energy_totals`` .. automodule:: build_energy_totals +Rule ``build_heat_totals`` +============================================================================== + +.. automodule:: build_heat_totals + Rule ``build_gas_input_locations`` ============================================================================== @@ -60,10 +78,20 @@ Rule ``build_gas_network`` .. automodule:: build_gas_network -Rule ``build_heat_demand`` +Rule ``build_daily_heat_demand`` ============================================================================== -.. automodule:: build_heat_demand +.. automodule:: build_daily_heat_demand + +Rule ``build_hourly_heat_demand`` +============================================================================== + +.. automodule:: build_hourly_heat_demand + +Rule ``build_district_heat_share`` +============================================================================== + +.. automodule:: build_district_heat_share Rule ``build_industrial_distribution_key`` ============================================================================== @@ -155,11 +183,6 @@ Rule ``cluster_gas_network`` .. automodule:: cluster_gas_network -Rule ``copy_config`` -============================================================================== - -.. automodule:: copy_config - Rule ``prepare_sector_network`` ============================================================================== diff --git a/doc/simplification.rst b/doc/simplification.rst index 21f61de7..cb048461 100644 --- a/doc/simplification.rst +++ b/doc/simplification.rst @@ -1,7 +1,7 @@ .. - SPDX-FileCopyrightText: 2019-2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -12,11 +12,11 @@ Simplifying Electricity Networks The simplification ``snakemake`` rules prepare **approximations** of the full model, for which it is computationally viable to co-optimize generation, storage and transmission capacities.
- :mod:`simplify_network` transforms the transmission grid to a 380 kV only equivalent network, while -- :mod:`cluster_network` uses a `k-means `_ based clustering technique to partition the network into a given number of zones and then reduce the network to a representation with one bus per zone. +- :mod:`cluster_network` uses a `k-means `__ based clustering technique to partition the network into a given number of zones and then reduce the network to a representation with one bus per zone. The simplification and clustering steps are described in detail in the paper -- Jonas Hörsch and Tom Brown. `The role of spatial scale in joint optimisations of generation and transmission for European highly renewable scenarios `_), *14th International Conference on the European Energy Market*, 2017. `arXiv:1705.07617 `_, `doi:10.1109/EEM.2017.7982024 `_. +- Jonas Hörsch and Tom Brown. `The role of spatial scale in joint optimisations of generation and transmission for European highly renewable scenarios `__), *14th International Conference on the European Energy Market*, 2017. `arXiv:1705.07617 `__, `doi:10.1109/EEM.2017.7982024 `__. After simplification and clustering of the network, additional components may be appended in the rule :mod:`add_extra_components` and the network is prepared for solving in :mod:`prepare_network`. diff --git a/doc/solving.rst b/doc/solving.rst index 21cc5c25..a8020d29 100644 --- a/doc/solving.rst +++ b/doc/solving.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 diff --git a/doc/spatial_resolution.rst b/doc/spatial_resolution.rst index c6e9c3de..20158ab6 100644 --- a/doc/spatial_resolution.rst +++ b/doc/spatial_resolution.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2021-2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2021-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -9,7 +9,7 @@ Spatial resolution ########################################## -The default nodal resolution of the model follows the electricity generation and transmission model `PyPSA-Eur `_, which clusters down the electricity transmission substations in each European country based on the k-means algorithm (See `cluster_network `_ for a complete explanation). This gives nodes which correspond to major load and generation centres (typically cities). +The default nodal resolution of the model follows the electricity generation and transmission model `PyPSA-Eur `__, which clusters down the electricity transmission substations in each European country based on the k-means algorithm (See `cluster_network `__ for a complete explanation). This gives nodes which correspond to major load and generation centres (typically cities). The total number of nodes for Europe is set in the ``config/config.yaml`` file under ``clusters``. The number of nodes can vary between 37, the number of independent countries / synchronous areas, and several hundred. With 200-300 nodes the model needs 100-150 GB RAM to solve with a commercial solver like Gurobi. @@ -21,7 +21,7 @@ Exemplary unsolved network clustered to 37 nodes: .. image:: ../graphics/elec_s_37.png -The total number of nodes for Europe is set in the ``config/config.yaml`` file under `clusters `_. The number of nodes can vary between 37, the number of independent countries/synchronous areas, and several hundred. With 200-300 nodes, the model needs 100-150 GB RAM to solve with a commercial solver like Gurobi. 
+The total number of nodes for Europe is set in the ``config/config.yaml`` file under `clusters `__. The number of nodes can vary between 37, the number of independent countries/synchronous areas, and several hundred. With 200-300 nodes, the model needs 100-150 GB RAM to solve with a commercial solver like Gurobi. Not all of the sectors are at the full nodal resolution, and some demand for some sectors is distributed to nodes using heuristics that need to be corrected. Some networks are copper-plated to reduce computational times. Here are some examples of how spatial resolution is set for different sectors in PyPSA-Eur-Sec: @@ -37,18 +37,18 @@ Here are some examples of how spatial resolution is set for different sectors in • Electricity demand in industry: Modeled as nodal, based on the location of industrial facilities from HotMaps database. • Industry demand (heat, chemicals, etc.) : Modeled as nodal, distributed in each country based on locations of industry from HotMaps database. -• Hydrogen network: Modeled as nodal (if activated in the `config `_ file). +• Hydrogen network: Modeled as nodal (if activated in the `config `__ file). -• Methane network: It can be modeled as a single node for Europe or it can be nodally resolved if activated in the `config `_. One node can be considered reasonable since future demand is expected to be low and no bottlenecks are expected. Also, the nodally resolved methane grid is based on SciGRID_gas data. +• Methane network: It can be modeled as a single node for Europe or it can be nodally resolved if activated in the `config `__. One node can be considered reasonable since future demand is expected to be low and no bottlenecks are expected. Also, the nodally resolved methane grid is based on SciGRID_gas data. -• Solid biomass: It can be modeled as a single node for Europe or it can be nodally resolved if activated in the `config `_. Nodal modeling includes modeling biomass potential per country (given per country, then distributed by population density within) and the transport of solid biomass between countries. +• Solid biomass: It can be modeled as a single node for Europe or it can be nodally resolved if activated in the `config `__. Nodal modeling includes modeling biomass potential per country (given per country, then distributed by population density within) and the transport of solid biomass between countries. -• CO2: It can be modeled as a single node for Europe or it can be nodally resolved with CO2 transport pipelines if activated in the `config `_. It should be mentioned that in single node mode a transport and storage cost is added for sequestered CO2, the cost of which can be adjusted in the `config `_. +• CO2: It can be modeled as a single node for Europe or it can be nodally resolved with CO2 transport pipelines if activated in the `config `__. It should be mentioned that in single node mode a transport and storage cost is added for sequestered CO2, the cost of which can be adjusted in the `config `__. • Carbonaceous fuels: Modeled as a single node for Europe by default, since transport costs for liquids are low and no bottlenecks are expected. Can be regionally resolved in configuration. **Electricity distribution network** -Contrary to the transmission grid, the grid topology at the distribution level (at and below 110 kV) is not included due to the very high computational burden.
However, a link per node can be used (if activated in the `Config `_ file) to represent energy transferred between distribution and transmission levels at every node. In essence, the total energy capacity connecting the transmission grid and the low-voltage level is optimized. The cost assumptions for this link can be adjusted in Config file `options `_, and the cost is currently assumed to be 500 Eur/kW. +Contrary to the transmission grid, the grid topology at the distribution level (at and below 110 kV) is not included due to the very high computational burden. However, a link per node can be used (if activated in the `Config `__ file) to represent energy transferred between distribution and transmission levels at every node. In essence, the total energy capacity connecting the transmission grid and the low-voltage level is optimized. The cost assumptions for this link can be adjusted in Config file `options `__, and the cost is currently assumed to be 500 Eur/kW. Rooftop PV, heat pumps, resistive heaters, home battery chargers for passenger EVs, as well as individual heating technologies (heat pumps and resistive heaters) are connected to low-voltage level. All the remaining generation and storage technologies are connected to the transmission grid. In practice, this means that the distribution grid capacity is only extended if it is necessary to balance the mismatch between local generation and demand. diff --git a/doc/supply_demand.rst b/doc/supply_demand.rst index b043268b..cc598aaf 100644 --- a/doc/supply_demand.rst +++ b/doc/supply_demand.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2021-2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2021-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -11,7 +11,7 @@ An initial orientation to the supply and demand options in the model PyPSA-Eur-Sec can be found in the description of the model PyPSA-Eur-Sec-30 in the paper `Synergies of sector coupling and transmission reinforcement in a cost-optimised, highly renewable -European energy system `_ (2018). +European energy system `__ (2018). The latest version of PyPSA-Eur-Sec differs by including biomass, industry, industrial feedstocks, aviation, shipping, better carbon management, carbon capture and usage/sequestration, and gas networks. @@ -26,13 +26,13 @@ Electricity supply and demand ============================= Electricity supply and demand follows the electricity generation and -transmission model `PyPSA-Eur `_, +transmission model `PyPSA-Eur `__, except that hydrogen storage is integrated into the hydrogen supply, demand and network, and PyPSA-Eur-Sec includes CHPs. Unlike PyPSA-Eur, PyPSA-Eur-Sec does not distribute electricity demand for industry according to population and GDP, but uses the geographical data from the `Hotmaps Industrial Database -`_. +`__. Also unlike PyPSA-Eur, PyPSA-Eur-Sec subtracts existing electrified heating from the existing electricity demand, so that power-to-heat can be optimised separately. @@ -44,7 +44,7 @@ Heat demand =========== Building heating in residential and services sectors is resolved regionally, both for individual buildings and district heating systems, which include different supply options (see :ref:`heat-supply`.) -Annual heat demands per country are retrieved from `JRC-IDEES `_ and split into space and water heating.
For space heating, the annual demands are converted to daily values based on the population-weighted Heating Degree Day (HDD) using the `atlite tool `_, where space heat demand is proportional to the difference between the daily average ambient temperature (read from `ERA5 `_) and a threshold temperature above which space heat demand is zero. A threshold temperature of 15 °C is assumed by default. The daily space heat demand is distributed to the hours of the day following heat demand profiles from `BDEW `_. These differ for weekdays and weekends/holidays and between residential and services demand. +Annual heat demands per country are retrieved from `JRC-IDEES `__ and split into space and water heating. For space heating, the annual demands are converted to daily values based on the population-weighted Heating Degree Day (HDD) using the `atlite tool `__, where space heat demand is proportional to the difference between the daily average ambient temperature (read from `ERA5 `__) and a threshold temperature above which space heat demand is zero. A threshold temperature of 15 °C is assumed by default. The daily space heat demand is distributed to the hours of the day following heat demand profiles from `BDEW `__. These differ for weekdays and weekends/holidays and between residential and services demand. *Space heating* @@ -54,11 +54,11 @@ The space heating demand can be exogenously reduced by retrofitting measures tha :language: yaml :lines: 205 -Co-optimising building renovation is also possible, if it is activated in the `config file `_. +Co-optimising building renovation is also possible, if it is activated in the `config file `__. Renovation of the thermal envelope reduces the space heating demand and is optimised at each node for every heat bus. Renovation measures through additional insulation material and replacement of energy inefficient windows are considered. -In a first step, costs per energy savings are estimated in `build_retro_cost.py `_. They depend on the insulation condition of the building stock and costs for renovation of the building elements. In a second step, for those cost per energy savings two possible renovation strengths are determined: a moderate renovation with lower costs and a lower maximum possible space heat savings, and an ambitious renovation with associated higher costs and higher efficiency gains. They are added by step-wise linearisation in the form of two additional generators in `prepare_sector_network.py `_. +In a first step, costs per energy savings are estimated in `build_retro_cost.py `__. They depend on the insulation condition of the building stock and costs for renovation of the building elements. In a second step, for those cost per energy savings two possible renovation strengths are determined: a moderate renovation with lower costs and a lower maximum possible space heat savings, and an ambitious renovation with associated higher costs and higher efficiency gains. They are added by step-wise linearisation in the form of two additional generators in `prepare_sector_network.py `__. Further information is given in the publication: - `Mitigating heat demand peaks in buildings in a highly renewable European energy system, (2021) `_. +`Mitigating heat demand peaks in buildings in a highly renewable European energy system, (2021) `__. *Water heating* Hot water demand is assumed to be constant throughout the year. *Urban and rural heating* For every country, heat demand is split between low and high population density areas.
These country-level totals are then distributed to each region in proportion to their rural and urban populations respectively. Urban areas with dense heat demand can be supplied with large-scale district heating systems. The percentage of urban heat demand that can be supplied by district heating networks as well as lump-sum losses in district heating systems is exogenously determined in the `config file `_. +For every country, heat demand is split between low and high population density areas. These country-level totals are then distributed to each region in proportion to their rural and urban populations respectively. Urban areas with dense heat demand can be supplied with large-scale district heating systems. The percentage of urban heat demand that can be supplied by district heating networks as well as lump-sum losses in district heating systems is exogenously determined in the `config file `__. *Cooling demand* @@ -96,41 +96,41 @@ Different supply options are available depending on whether demand is met centra **Urban central heat** -For large-scale district heating systems the following options are available: combined heat and power (CHP) plants consuming gas or biomass from waste and residues with and without carbon capture (CC), large-scale air-sourced heat pumps, gas and oil boilers, resistive heaters, and fuel cell CHPs. Additionally, waste heat from the `Fischer-Tropsch `_ and `Sabatier `_ processes for the production of synthetic hydrocarbons can supply district heating systems. For more detailed explanation of these processes, see :ref:`Oil-based products supply` and :ref:`Methane supply`. +For large-scale district heating systems the following options are available: combined heat and power (CHP) plants consuming gas or biomass from waste and residues with and without carbon capture (CC), large-scale air-sourced heat pumps, gas and oil boilers, resistive heaters, and fuel cell CHPs. Additionally, waste heat from the `Fischer-Tropsch `__ and `Sabatier `__ processes for the production of synthetic hydrocarbons can supply district heating systems. For more detailed explanation of these processes, see :ref:`Oil-based products supply` and :ref:`Methane supply`. **Residential and Urban decentral heat** Supply options in individual buildings include gas and oil boilers, air- and ground-sourced heat pumps, resistive heaters, and solar thermal collectors. -Ground-source heat pumps are only allowed in rural areas because of space constraints. Thus, only air- source heat pumps are allowed in urban areas. This is a conservative assumption, since there are many possible sources of low-temperature heat that could be tapped in cities (e.g. waste water, ground water, or natural bodies of water). Costs, lifetimes and efficiencies for these technologies are retrieved from the `technology-data repository `_. +Ground-source heat pumps are only allowed in rural areas because of space constraints. Thus, only air- source heat pumps are allowed in urban areas. This is a conservative assumption, since there are many possible sources of low-temperature heat that could be tapped in cities (e.g. waste water, ground water, or natural bodies of water). Costs, lifetimes and efficiencies for these technologies are retrieved from the `technology-data repository `__. -Below are more detailed explanations for each heating supply component, all of which are modelled as `links `_ in PyPSA-Eur-Sec. 
+Below are more detailed explanations for each heating supply component, all of which are modelled as `links `__ in PyPSA-Eur-Sec. .. _Large-scale CHP: **Large-scale CHP** -Large Combined Heat and Power plants are included in the model if it is specified in the `config file `_. +Large Combined Heat and Power plants are included in the model if it is specified in the `config file `__. -CHPs are based on back pressure plants operating with a fixed ratio of electricity to heat output. The efficiencies of each are given on the back pressure line, where the back pressure coefficient cb is the electricity output divided by the heat output. (For a more complete explanation of the operation of CHPs refer to the study by Dahl et al. : `Cost sensitivity of optimal sector-coupled district heating production systems `_. +CHPs are based on back pressure plants operating with a fixed ratio of electricity to heat output. The efficiencies of each are given on the back pressure line, where the back pressure coefficient cb is the electricity output divided by the heat output. (For a more complete explanation of the operation of CHPs refer to the study by Dahl et al. : `Cost sensitivity of optimal sector-coupled district heating production systems `__. PyPSA-Eur-Sec includes CHP plants fueled by methane and solid biomass from waste and residues. Hydrogen fuel cells also produce both electricity and heat. -The methane CHP is modeled on the Danish Energy Agency (DEA) “Gas turbine simple cycle (large)” while the solid biomass CHP is based on the DEA’s “09b Wood Pellets Medium”. For biomass CHP, cb = `0.46 `_ , whereas for gas CHP, cb = `1 `_. +The methane CHP is modeled on the Danish Energy Agency (DEA) “Gas turbine simple cycle (large)” while the solid biomass CHP is based on the DEA’s “09b Wood Pellets Medium”. For biomass CHP, cb = `0.46 `__ , whereas for gas CHP, cb = `1 `__. -NB: The old PyPSA-Eur-Sec-30 model assumed an extraction plant (like the DEA coal CHP) for gas which has flexible production of heat and electricity within the feasibility diagram of Figure 4 in the study by `Brown et al. `_ We have switched to the DEA back pressure plants since these are more common for smaller plants for biomass, and because the extraction plants were on the back pressure line for 99.5% of the time anyway. The plants were all changed to back pressure in PyPSA-Eur-Sec v0.4.0. +NB: The old PyPSA-Eur-Sec-30 model assumed an extraction plant (like the DEA coal CHP) for gas which has flexible production of heat and electricity within the feasibility diagram of Figure 4 in the study by `Brown et al. `__ We have switched to the DEA back pressure plants since these are more common for smaller plants for biomass, and because the extraction plants were on the back pressure line for 99.5% of the time anyway. The plants were all changed to back pressure in PyPSA-Eur-Sec v0.4.0. **Micro-CHP** -PyPSA-Eur-Sec allows individual buildings to make use of `micro gas CHPs `_ that are assumed to be installed at the distribution grid level. +PyPSA-Eur-Sec allows individual buildings to make use of `micro gas CHPs `__ that are assumed to be installed at the distribution grid level. **Heat pumps** -The coefficient of performance (COP) of air- and ground-sourced heat pumps depends on the ambient or soil temperature respectively. Hence, the COP is a time-varying parameter (refer to `Config `_ file). Generally, the COP will be lower during winter when temperatures are low. 
Because the ambient temperature is more volatile than the soil temperature, the COP of ground-sourced heat pumps is less variable. Moreover, the COP depends on the difference between the source and sink temperatures: +The coefficient of performance (COP) of air- and ground-sourced heat pumps depends on the ambient or soil temperature respectively. Hence, the COP is a time-varying parameter (refer to `Config `__ file). Generally, the COP will be lower during winter when temperatures are low. Because the ambient temperature is more volatile than the soil temperature, the COP of ground-sourced heat pumps is less variable. Moreover, the COP depends on the difference between the source and sink temperatures: .. math:: \Delta T = T_{sink} - T_{source} -For the sink water temperature Tsink we assume 55 °C [`Config `_ file]. For the time- and location-dependent source temperatures Tsource, we rely on the `ERA5 `_ reanalysis weather data. The temperature differences are converted into COP time series using results from a regression analysis performed in the study by `Staffell et al. `_. For air-sourced heat pumps (ASHP), we use the function: +For the sink water temperature Tsink we assume 55 °C [`Config `__ file]. For the time- and location-dependent source temperatures Tsource, we rely on the `ERA5 `__ reanalysis weather data. The temperature differences are converted into COP time series using results from a regression analysis performed in the study by `Staffell et al. `__. For air-sourced heat pumps (ASHP), we use the function: .. math:: COP (\Delta T) = 6.81 - 0.121\Delta T + 0.000630\Delta T^2 @@ -142,44 +142,44 @@ for ground-sourced heat pumps (GSHP), we use the function: **Resistive heaters** -Can be activated in Config from the `boilers `_ option. -Resistive heaters produce heat with a fixed conversion efficiency (refer to `Technology-data repository `_ ). +Can be activated in Config from the `boilers `__ option. +Resistive heaters produce heat with a fixed conversion efficiency (refer to `Technology-data repository `__ ). **Gas, oil, and biomass boilers** -Can be activated in Config from the `boilers `_ , `oil boilers `_ , and `biomass boiler `_ option. +Can be activated in Config from the `boilers `__ , `oil boilers `__ , and `biomass boiler `__ option. Similar to resistive heaters, boilers have a fixed efficiency and produce heat using gas, oil or biomass. **Solar thermal collectors** -Can be activated in the config file from the `solar_thermal `_ option. -Solar thermal profiles are built based on weather data and also have the `options `_ for setting the sky model and the orientation of the panel in the config file, which are then used by the atlite tool to calculate the solar resource time series. +Can be activated in the config file from the `solar_thermal `__ option. +Solar thermal profiles are built based on weather data and also have the `options `__ for setting the sky model and the orientation of the panel in the config file, which are then used by the atlite tool to calculate the solar resource time series. **Waste heat from Fuel Cells, Methanation and Fischer-Tropsch plants** -Waste heat from `fuel cells `_ in addition to processes like `Fischer-Tropsch `_, methanation, and Direct Air Capture (DAC) is dumped into district heating networks. +Waste heat from `fuel cells `__ in addition to processes like `Fischer-Tropsch `__, methanation, and Direct Air Capture (DAC) is dumped into district heating networks.
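To make the heat pump COP conversion described above (under **Heat pumps**) more concrete, the following sketch evaluates the quoted ASHP regression for a few example source temperatures. Only the 55 °C sink temperature and the regression coefficients are taken from the text; the chosen ambient temperatures and the use of plain numpy here are purely illustrative, not the workflow's own implementation.

.. code-block:: python

    # Illustrative only: evaluate the air-sourced heat pump COP regression
    # quoted above for a handful of hypothetical ambient temperatures.
    import numpy as np

    t_sink = 55.0                                # sink temperature in °C (from the text)
    t_source = np.array([-5.0, 0.0, 5.0, 10.0])  # example ambient temperatures in °C
    delta_t = t_sink - t_source                  # ΔT = T_sink - T_source

    cop_ashp = 6.81 - 0.121 * delta_t + 0.000630 * delta_t**2
    print(cop_ashp.round(2))  # COP drops as the source gets colder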
**Existing heating capacities and decommissioning** -For the myopic transition paths, capacities already existing for technologies supplying heat are retrieved from `“Mapping and analyses of the current and future (2020 - 2030)” `_ . For the sake of simplicity, coal, oil and gas boiler capacities are assimilated to gas boilers. Besides that, existing capacities for heat resistors, air-sourced and ground-sourced heat pumps are included in the model. For heating capacities, 25% of existing capacities in 2015 are assumed to be decommissioned in every 5-year time step after 2020. +For the myopic transition paths, capacities already existing for technologies supplying heat are retrieved from `“Mapping and analyses of the current and future (2020 - 2030)” `__ . For the sake of simplicity, coal, oil and gas boiler capacities are assimilated to gas boilers. Besides that, existing capacities for heat resistors, air-sourced and ground-sourced heat pumps are included in the model. For heating capacities, 25% of existing capacities in 2015 are assumed to be decommissioned in every 5-year time step after 2020. **Thermal Energy Storage** -Activated in Config from the `tes `_ option. +Activated in Config from the `tes `__ option. -Thermal energy can be stored in large water pits associated with district heating systems and individual thermal energy storage (TES), i.e., small water tanks. Water tanks are modelled as `stores `_. -A thermal energy density of 46.8 kWh :math:`_{th}`/m3 is assumed, corresponding to a temperature difference of 40 K. The decay of thermal energy in the stores: 1- :math:`e^{-1/24τ}` is assumed to have a time constant of  τ=180 days for central TES and  τ=3 days for individual TES, both modifiable through `tes_tau `_ in config file. Charging and discharging efficiencies are 90% due to pipe losses. +Thermal energy can be stored in large water pits associated with district heating systems and individual thermal energy storage (TES), i.e., small water tanks. Water tanks are modelled as `stores `__. +A thermal energy density of 46.8 kWh :math:`_{th}`/m3 is assumed, corresponding to a temperature difference of 40 K. The decay of thermal energy in the stores: 1- :math:`e^{-1/24τ}` is assumed to have a time constant of  τ=180 days for central TES and  τ=3 days for individual TES, both modifiable through `tes_tau `__ in config file. Charging and discharging efficiencies are 90% due to pipe losses. **Retrofitting of the thermal envelope of buildings** -Co-optimising building renovation is only enabled if in the `config `_ file. To reduce the computational burden, +Co-optimising building renovation is only enabled if in the `config `__ file. To reduce the computational burden, default setting is set as false. Renovation of the thermal envelope reduces the space heating demand and is optimised at each node for every heat bus. Renovation measures through additional insulation material and replacement of energy inefficient windows are considered. -In a first step, costs per energy savings are estimated in the `build_retro_cost.py `_ script. +In a first step, costs per energy savings are estimated in the `build_retro_cost.py `__ script. They depend on the insulation condition of the building stock and costs for renovation of the building elements. 
In a second step, for those cost per energy savings two possible renovation @@ -187,12 +187,12 @@ strengths are determined: a moderate renovation with lower costs and lower maximum possible space heat savings, and an ambitious renovation with associated higher costs and higher efficiency gains. They are added by step-wise linearisation in form of two additional generations in -the `prepare_sector_network.py `_ script. +the `prepare_sector_network.py `__ script. Settings in the ``config/config.yaml`` concerning the endogenously optimisation of building -renovation include `cost factor `_, `interest rate `_, `annualised cost `_, `tax weighting `_, and `construction index `_. +renovation include `cost factor `__, `interest rate `__, `annualised cost `__, `tax weighting `__, and `construction index `__. -Further information are given in the study by Zeyen et al. : `Mitigating heat demand peaks in buildings in a highly renewable European energy system, (2021) `_. +Further information are given in the study by Zeyen et al. : `Mitigating heat demand peaks in buildings in a highly renewable European energy system, (2021) `__. .. _Hydrogen demand: @@ -200,7 +200,7 @@ Hydrogen demand ============================= Hydrogen is consumed in the industry sector (see :ref:`Industry demand`) to produce ammonia (see :ref:`Chemicals Industry`) and direct reduced iron (DRI) (see :ref:`Iron and Steel`). Hydrogen is also consumed to produce synthetic methane (see :ref:`Methane supply`) and liquid hydrocarbons (see :ref:`Oil-based products supply`) which have multiple uses in industry and other sectors. -Hydrogen is also used for transport applications (see :ref:`Transportation`), where it is exogenously fixed. It is used in `heavy-duty land transport `_ and as liquified hydrogen in the shipping sector (see :ref:`Shipping`). Furthermore, stationary fuel cells may re-electrify hydrogen (with waste heat as a byproduct) to balance renewable fluctuations (see :ref:`Electricity supply and demand`). The waste heat from the stationary fuel cells can be used in `district-heating systems `_. +Hydrogen is also used for transport applications (see :ref:`Transportation`), where it is exogenously fixed. It is used in `heavy-duty land transport `__ and as liquified hydrogen in the shipping sector (see :ref:`Shipping`). Furthermore, stationary fuel cells may re-electrify hydrogen (with waste heat as a byproduct) to balance renewable fluctuations (see :ref:`Electricity supply and demand`). The waste heat from the stationary fuel cells can be used in `district-heating systems `__. .. _Hydrogen supply: @@ -220,7 +220,7 @@ combined with a water-gas shift reaction CO + H_2O \xrightarrow{} CO_2 + H_2 -SMR is included `here `_. +SMR is included `here `__. PyPSA-Eur-Sec allows this route of :math:`H_2` production with and without [carbon capture (CC)] (see :ref:`Carbon dioxide capture, usage and sequestration (CCU/S)`). These routes are often referred to as blue and grey hydrogen. Here, methane input can be both of fossil or synthetic origin. Green hydrogen can be produced by electrolysis to split water into hydrogen and oxygen @@ -234,12 +234,12 @@ For the electrolysis, alkaline electrolysers are chosen since they have lower co **Transport** -Hydrogen is transported by pipelines. :math:`H_2` pipelines are endogenously generated, either via a greenfield :math:`H_2` network, or by `retrofitting natural gas pipelines `_). 
Retrofitting is implemented in such a way that for every unit of decommissioned gas pipeline, a share (60% is used in the study by `Neumann et al. `_) of its nominal capacity (exogenously determined in the `config file `_.) is available for hydrogen transport. When the gas network is not resolved, this input denotes the potential for gas pipelines repurposed into hydrogen pipelines. +Hydrogen is transported by pipelines. :math:`H_2` pipelines are endogenously generated, either via a greenfield :math:`H_2` network, or by `retrofitting natural gas pipelines `__). Retrofitting is implemented in such a way that for every unit of decommissioned gas pipeline, a share (60% is used in the study by `Neumann et al. `__) of its nominal capacity (exogenously determined in the `config file `__.) is available for hydrogen transport. When the gas network is not resolved, this input denotes the potential for gas pipelines repurposed into hydrogen pipelines. New pipelines can be built additionally on all routes where there currently is a gas or electricity network connection. These new pipelines will be built where no sufficient retrofitting options are available. The capacities of new and repurposed pipelines are a result of the optimisation. **Storage** -Hydrogen can be stored in overground steel tanks or `underground salt caverns `_. For the latter, energy storage capacities in every country are limited to the potential estimation for onshore salt caverns within `50 km `_ of shore to avoid environmental issues associated with brine solution disposal. Underground storage potentials for hydrogen in European salt caverns is acquired from `Caglayan et al. `_ +Hydrogen can be stored in overground steel tanks or `underground salt caverns `__. For the latter, energy storage capacities in every country are limited to the potential estimation for onshore salt caverns within `50 km `__ of shore to avoid environmental issues associated with brine solution disposal. Underground storage potentials for hydrogen in European salt caverns is acquired from `Caglayan et al. `__ .. _Methane demand: @@ -253,7 +253,7 @@ Methane is used in individual and large-scale gas boilers, in CHP plants with an Methane supply =================================== -In addition to methane from fossil origins, the model also considers biogenic and synthetic sources. `The gas network can either be modelled, or it can be assumed that gas transport is not limited `_. If gas infrastructure is regionally resolved, fossil gas can enter the system only at existing and planned LNG terminals, pipeline entry-points, and intra- European gas extraction sites, which are retrieved from the SciGRID Gas IGGIELGN dataset and the GEM Wiki. +In addition to methane from fossil origins, the model also considers biogenic and synthetic sources. `The gas network can either be modelled, or it can be assumed that gas transport is not limited `__. If gas infrastructure is regionally resolved, fossil gas can enter the system only at existing and planned LNG terminals, pipeline entry-points, and intra- European gas extraction sites, which are retrieved from the SciGRID Gas IGGIELGN dataset and the GEM Wiki. Biogas can be upgraded to methane. 
Synthetic methane can be produced by processing hydrogen and captured :math:`CO_2` in the Sabatier reaction @@ -275,7 +275,7 @@ The following figure shows the unclustered European gas transmission network bas Biomass Supply ===================== -Biomass supply potentials for each European country are taken from the `JRC ENSPRESO database `_ where data is available for various years (2010, 2020, 2030, 2040 and 2050) and scenarios (low, medium, high). No biomass import from outside Europe is assumed. More information on the data set can be found `here `_. +Biomass supply potentials for each European country are taken from the `JRC ENSPRESO database `__ where data is available for various years (2010, 2020, 2030, 2040 and 2050) and scenarios (low, medium, high). No biomass import from outside Europe is assumed. More information on the data set can be found `here `__. .. _Biomass demand: @@ -283,19 +283,19 @@ Biomass demand ===================== -Biomass supply potentials for every NUTS2 region are taken from the `JRC ENSPRESO database `_ where data is available for various years (2010, 2020, 2030, 2040 and 2050) and different availability scenarios (low, medium, high). No biomass import from outside Europe is assumed. More information on the data set can be found `here `_. The data for NUTS2 regions is mapped to PyPSA-Eur-Sec model regions in proportion to the area overlap. +Biomass supply potentials for every NUTS2 region are taken from the `JRC ENSPRESO database `__ where data is available for various years (2010, 2020, 2030, 2040 and 2050) and different availability scenarios (low, medium, high). No biomass import from outside Europe is assumed. More information on the data set can be found `here `__. The data for NUTS2 regions is mapped to PyPSA-Eur-Sec model regions in proportion to the area overlap. -The desired scenario can be selected in the PyPSA-Eur-Sec `configuration `_. The script for building the biomass potentials from the JRC ENSPRESO data base is located `here `_. Consult the script to see the keywords that specify the scenario options. +The desired scenario can be selected in the PyPSA-Eur-Sec `configuration `__. The script for building the biomass potentials from the JRC ENSPRESO database is located `here `__. Consult the script to see the keywords that specify the scenario options. -The `configuration `_ also allows the user to define how the various types of biomass are used in the model by using the following categories: biogas, solid biomass, and not included. Feedstocks categorized as biogas, typically manure and sludge waste, are available to the model as biogas, which can be upgraded to biomethane. Feedstocks categorized as solid biomass, e.g. secondary forest residues or municipal waste, are available for combustion in combined-heat-and power (CHP) plants and for medium temperature heat (below 500 °C) applications in industry. It can also converted to gas or liquid fuels. +The `configuration `__ also allows the user to define how the various types of biomass are used in the model by using the following categories: biogas, solid biomass, and not included. Feedstocks categorized as biogas, typically manure and sludge waste, are available to the model as biogas, which can be upgraded to biomethane. Feedstocks categorized as solid biomass, e.g. secondary forest residues or municipal waste, are available for combustion in combined heat and power (CHP) plants and for medium temperature heat (below 500 °C) applications in industry.
It can also be converted to gas or liquid fuels. Feedstocks labeled as not included are ignored by the model. -A `typical use case for biomass `_ would be the medium availability scenario for 2030 where only residues from agriculture and forestry as well as biodegradable municipal waste are considered as energy feedstocks. Fuel crops are avoided because they compete with scarce land for food production, while primary wood, as well as wood chips and pellets, are avoided because of concerns about sustainability. See the supporting materials of the `paper `_ for more details. +A `typical use case for biomass `__ would be the medium availability scenario for 2030 where only residues from agriculture and forestry as well as biodegradable municipal waste are considered as energy feedstocks. Fuel crops are avoided because they compete with scarce land for food production, while primary wood, as well as wood chips and pellets, are avoided because of concerns about sustainability. See the supporting materials of the `paper `__ for more details. *Solid biomass conversion and use* @@ -303,19 +303,19 @@ Solid biomass can be used directly to provide process heat up to 500 °C in industry. It can also be burned in CHP plants and boilers associated with heating systems. These technologies are described elsewhere (see :ref:`Large-scale CHP` and :ref:`Industry demand`). -Solid biomass can be converted to syngas if the option is enabled in the `config file `_. In this case the model will enable the technology BioSNG both with and without the option for carbon capture (see `Technology-data repository `_). +Solid biomass can be converted to syngas if the option is enabled in the `config file `__. In this case, the model will enable the technology BioSNG both with and without the option for carbon capture (see `Technology-data repository `__). -Liquefaction of solid biomass `can be enabled `_ allowing the model to convert it into liquid hydrocarbons that can replace conventional oil products. This technology also comes with and without carbon capture (see `Technology-data repository `_). +Liquefaction of solid biomass `can be enabled `__, allowing the model to convert it into liquid hydrocarbons that can replace conventional oil products. This technology also comes with and without carbon capture (see `Technology-data repository `__). *Transport of solid biomass* -The transport of solid biomass can either be assumed unlimited between countries or it can be associated with a country specific cost per MWh/km. In the config file these options are toggled `here `_. If the option is off, use of solid biomass is transport. If it is turned on, a biomass transport network will be `created `_ between all nodes. +The transport of solid biomass can either be assumed unlimited between countries or it can be associated with a country-specific cost per MWh/km. In the config file these options are toggled `here `__. If the option is off, transport of solid biomass is assumed to be unlimited and free of cost. If it is turned on, a biomass transport network will be `created `__ between all nodes.
This network resembles road transport of biomass and the cost of transportation is a variable cost which is proportional to distance and to a country-specific cost per MWh/km. The latter is `estimated `__ from the country-specific costs per ton/km used in the publication `“The JRC-EU-TIMES model. Bioenergy potentials for EU and neighbouring countries” `__. *Biogas transport and use* -Biogas will be aggregated into a common European resources if a gas network is not modelled explicitly, i.e., the `gas_network `_ option is set to false. If, on the other hand, a gas network is included, the biogas potential will be associated with each node of origin. +Biogas will be aggregated into a common European resource if a gas network is not modelled explicitly, i.e., the `gas_network `__ option is set to false. If, on the other hand, a gas network is included, the biogas potential will be associated with each node of origin. The model can only use biogas by first upgrading it to natural gas quality (biomethane, see :ref:`Methane supply`), which is fed into the general gas network. .. _Oil-based products demand: ... Oil-based products can be either of fossil origin or synthetically produced by c nCO + (2n+1)H_2 \xrightarrow{} C_{n}H_{2n+2} + nH_2O -with costs as included from the `technology-data repository `_. The waste heat from the Fischer-Tropsch process is supplied to `district heating networks `_. The share of fossil and synthetic oil is an optimisation result depending on the techno-economic assumptions. +with costs taken from the `technology-data repository `__. The waste heat from the Fischer-Tropsch process is supplied to `district heating networks `__. The share of fossil and synthetic oil is an optimisation result depending on the techno-economic assumptions. *Oil-based transport* @@ -361,24 +361,24 @@ The Subsection overview below provides a general description of the modelling ap Greenhouse gas emissions associated with industry can be classified into energy-related and process-related emissions. Today, fossil fuels are used not only for process heat in the chemicals industry, but also as a non-energy feedstock for chemicals like ammonia ( :math:`NH_3`), ethylene ( :math:`C_2H_4`) and methanol ( :math:`CH_3OH`). Energy-related emissions can be curbed by using low-emission energy sources. The only option to reduce process-related emissions is by using an alternative manufacturing process or by assuming a certain rate of recycling so that a lower amount of virgin material is needed. -The overarching modelling procedure can be described as follows. First, the energy demands and process emissions for every unit of material output are estimated based on data from the `JRC-IDEES database `_ and the fuel and process switching described in the subsequent sections. Second, the 2050 energy demands and process emissions are calculated using the per-unit-of-material ratios based on the industry transformations and the `country-level material production in 2015 `_, assuming constant material demand. +The overarching modelling procedure can be described as follows. First, the energy demands and process emissions for every unit of material output are estimated based on data from the `JRC-IDEES database `__ and the fuel and process switching described in the subsequent sections.
Second, the 2050 energy demands and process emissions are calculated using the per-unit-of-material ratios based on the industry transformations and the `country-level material production in 2015 `__, assuming constant material demand. -Missing or too coarsely aggregated data in the JRC-IDEES database is supplemented with additional datasets: `Eurostat energy balances `_, `United States `_, `Geological Survey `_ for ammonia production, `DECHEMA `_ for methanol and chlorine, and `national statistics from Switzerland `_. +Missing or too coarsely aggregated data in the JRC-IDEES database is supplemented with additional datasets: `Eurostat energy balances `__, `United States `__ `Geological Survey `__ for ammonia production, `DECHEMA `__ for methanol and chlorine, and `national statistics from Switzerland `__. Where there are fossil and electrified alternatives for the same process (e.g. in glass manufacture or drying), we assume that the process is completely electrified. Current electricity demands (lighting, air compressors, motor drives, fans, pumps) will remain electric. Processes that require temperatures below 500 °C are supplied with solid biomass, since we assume that residues and wastes are not suitable for high-temperature applications. We see solid biomass use primarily in the pulp and paper industry, where it is already widespread, and in food, beverages and tobacco, where it replaces natural gas. Industries which require high temperatures (above 500 °C), such as metals, chemicals and non-metallic minerals, are either electrified where suitable processes already exist, or the heat is provided with synthetic methane. Hydrogen for high-temperature process heat is not part of the model currently. -Where process heat is required, our approach depends on the necessary temperature. For example, due to the high share of high-temperature process heat demand (see `Naegler et al. `_ and `Rehfeldt el al. `_), we disregard geothermal and solar thermal energy as sources for process heat since they cannot attain high-temperature heat. +Where process heat is required, our approach depends on the necessary temperature. For example, due to the high share of high-temperature process heat demand (see `Naegler et al. `__ and `Rehfeldt et al. `__), we disregard geothermal and solar thermal energy as sources for process heat since they cannot attain high-temperature heat. -The following figure shows the final consumption of energy and non-energy feedstocks in industry today in comparison to the scenario in 2050 assumed in `Neumann et al `_. +The following figure shows the final consumption of energy and non-energy feedstocks in industry today in comparison to the scenario in 2050 assumed in `Neumann et al. `__. .. image:: ../graphics/fec_industry_today_tomorrow.png The following figure shows the process emissions in industry today (top bar) and in 2050 without -carbon capture (bottom bar) assumed in `Neumann et al `_. +carbon capture (bottom bar) assumed in `Neumann et al. `__. @@ -386,7 +386,7 @@ carbon capture (bottom bar) assumed in `Neumann et al -Inside each country the industrial demand is then distributed using the `Hotmaps Industrial Database `_, which is illustrated in the figure below. This open database includes georeferenced industrial sites of energy-intensive industry sectors in EU28, including cement, basic chemicals, glass, iron and steel, non-ferrous metals, non-metallic minerals, paper, and refineries subsectors. The use of this spatial dataset enables the calculation of regional and process-specific energy demands.
This approach assumes that there will be no significant migration of energy-intensive industries. +Inside each country the industrial demand is then distributed using the `Hotmaps Industrial Database `__, which is illustrated in the figure below. This open database includes georeferenced industrial sites of energy-intensive industry sectors in EU28, including cement, basic chemicals, glass, iron and steel, non-ferrous metals, non-metallic minerals, paper, and refineries subsectors. The use of this spatial dataset enables the calculation of regional and process-specific energy demands. This approach assumes that there will be no significant migration of energy-intensive industries. .. image:: ../graphics/hotmaps.png @@ -395,7 +395,7 @@ Inside each country the industrial demand is then distributed using the `Hotmaps **Iron and Steel** -Two alternative routes are used today to manufacture steel in Europe. The primary route (integrated steelworks) represents 60% of steel production, while the secondary route (electric arc furnaces, EAF), represents the other 40% `(Lechtenböhmer et. al) `_. +Two alternative routes are used today to manufacture steel in Europe. The primary route (integrated steelworks) represents 60% of steel production, while the secondary route (electric arc furnaces, EAF) represents the other 40% `(Lechtenböhmer et al.) `__. The primary route uses blast furnaces in which coke is used to reduce iron ore into molten iron, which is then converted into steel: @@ -415,9 +415,9 @@ The primary route uses blast furnaces in which coke is used to reduce iron ore i FeO + CO \xrightarrow{} Fe + CO_2 -The primary route of steelmaking implies large process emissions of 0.22 t :math:`_{CO_2}` /t of steel, amounting to 7% of global greenhouse gas emissions `(Vogl et. al) `_. +The primary route of steelmaking implies large process emissions of 0.22 t :math:`_{CO_2}` /t of steel, amounting to 7% of global greenhouse gas emissions `(Vogl et al.) `__. -In the secondary route, electric arc furnaces are used to melt scrap metal. This limits the :math:`CO_2` emissions to the burning of graphite electrodes `(Friedrichsen et. al) `_, and reduces process emissions to 0.03 t :math:`_{CO_2}` /t of steel. +In the secondary route, electric arc furnaces are used to melt scrap metal. This limits the :math:`CO_2` emissions to the burning of graphite electrodes `(Friedrichsen et al.) `__, and reduces process emissions to 0.03 t :math:`_{CO_2}` /t of steel. We assume that the primary route can be replaced by a third route in 2050, using direct reduced iron (DRI) and subsequent processing in an EAF. @@ -433,10 +433,10 @@ We assume that the primary route can be replaced by a third route in 2050, using FeO + H_2 \xrightarrow{} Fe + H_2O -This circumvents the process emissions associated with the use of coke. For hydrogen- based DRI, we assume energy requirements of 1.7 MWh :math:`_{H_2}` /t steel `(Vogl et. al) `_ and 0.322 MWh :math:`_{el}`/t steel `(HYBRIT 2016) `_. +This circumvents the process emissions associated with the use of coke. For hydrogen-based DRI, we assume energy requirements of 1.7 MWh :math:`_{H_2}` /t steel `(Vogl et al.) `__ and 0.322 MWh :math:`_{el}`/t steel `(HYBRIT 2016) `__. -The share of steel produced via the primary route is exogenously set in the `config file `_. The share of steel obtained via hydrogen-based DRI plus EAF is also set exogenously in the `config file `_. The remaining share is manufactured through the secondary route using scrap metal in EAF.
Bioenergy as alternative to coke in blast furnaces is not considered in the model (`Mandova et.al `_, `Suopajärvi et.al `_). +The share of steel produced via the primary route is exogenously set in the `config file `__. The share of steel obtained via hydrogen-based DRI plus EAF is also set exogenously in the `config file `__. The remaining share is manufactured through the secondary route using scrap metal in EAF. Bioenergy as an alternative to coke in blast furnaces is not considered in the model (`Mandova et al. `__, `Suopajärvi et al. `__). For the remaining subprocesses in this sector, the following transformations are assumed. Methane is used as an energy source for the smelting process. Activities associated with furnaces, refining and rolling, and product finishing are electrified assuming the current efficiency values for these cases. These transformations result in changes in process emissions as outlined in the process emissions figure presented in the industry overview section (see :ref:`Overview`). @@ -446,28 +446,28 @@ For the remaining subprocesses in this sector, the following transformations are The chemicals industry includes a wide range of diverse industries, including the production of basic organic compounds (olefins, alcohols, aromatics), basic inorganic compounds (ammonia, chlorine), polymers (plastics), and end-user products (cosmetics, pharmaceutics). -The chemicals industry consumes large amounts of fossil-fuel based feedstocks (see `Levi et. al `_), which can also be produced from renewables as outlined for hydrogen (see :ref:`Hydrogen supply`), for methane (see :ref:`Methane supply`), and for oil-based products (see :ref:`Oil-based products supply`). The ratio between synthetic and fossil-based fuels used in the industry is an endogenous result of the optimisation. +The chemicals industry consumes large amounts of fossil-fuel based feedstocks (see `Levi et al. `__), which can also be produced from renewables as outlined for hydrogen (see :ref:`Hydrogen supply`), for methane (see :ref:`Methane supply`), and for oil-based products (see :ref:`Oil-based products supply`). The ratio between synthetic and fossil-based fuels used in the industry is an endogenous result of the optimisation. -The basic chemicals consumption data from the `JRC IDEES `_ database comprises high- value chemicals (ethylene, propylene and BTX), chlorine, methanol and ammonia. However, it is necessary to separate out these chemicals because their current and future production routes are different. +The basic chemicals consumption data from the `JRC IDEES `__ database comprises high-value chemicals (ethylene, propylene and BTX), chlorine, methanol and ammonia. However, it is necessary to separate out these chemicals because their current and future production routes are different. -Statistics for the production of ammonia, which is commonly used as a fertilizer, are taken from the `USGS `_ for every country. Ammonia can be made from hydrogen and nitrogen using the Haber-Bosch process. +Statistics for the production of ammonia, which is commonly used as a fertilizer, are taken from the `USGS `__ for every country. Ammonia can be made from hydrogen and nitrogen using the Haber-Bosch process. .. math:: N_2 + 3H_2 \xrightarrow{} 2NH_3 -The Haber-Bosch process is not explicitly represented in the model, such that demand for ammonia enters the model as a demand for hydrogen ( 6.5 MWh :math:`_{H_2}` / t :math:`_{NH_3}` ) and electricity ( 1.17 MWh :math:`_{el}` /t :math:`_{NH_3}` ) (see `Wang et. al `_).
Today, natural gas dominates in Europe as the source for the hydrogen used in the Haber-Bosch process, but the model can choose among the various hydrogen supply options described in the hydrogen section (see :ref:`Hydrogen supply`) +The Haber-Bosch process is not explicitly represented in the model, such that demand for ammonia enters the model as a demand for hydrogen ( 6.5 MWh :math:`_{H_2}` / t :math:`_{NH_3}` ) and electricity ( 1.17 MWh :math:`_{el}` /t :math:`_{NH_3}` ) (see `Wang et al. `__). Today, natural gas dominates in Europe as the source for the hydrogen used in the Haber-Bosch process, but the model can choose among the various hydrogen supply options described in the hydrogen section (see :ref:`Hydrogen supply`). -The total production and specific energy consumption of chlorine and methanol is taken from a `DECHEMA report `_. According to this source, the production of chlorine amounts to 9.58 MtCl/a, which is assumed to require electricity at 3.6 MWh :math:`_{el}`/t of chlorine and yield hydrogen at 0.937 MWh :math:`_{H_2}`/t of chlorine in the chloralkali process. The production of methanol adds up to 1.5 MtMeOH/a, requiring electricity at 0.167 MWh :math:`_{el}`/t of methanol and methane at 10.25 MWh :math:`_{CH_4}`/t of methanol. +The total production and specific energy consumption of chlorine and methanol are taken from a `DECHEMA report `__. According to this source, the production of chlorine amounts to 9.58 MtCl/a, which is assumed to require electricity at 3.6 MWh :math:`_{el}`/t of chlorine and yield hydrogen at 0.937 MWh :math:`_{H_2}`/t of chlorine in the chloralkali process. The production of methanol adds up to 1.5 MtMeOH/a, requiring electricity at 0.167 MWh :math:`_{el}`/t of methanol and methane at 10.25 MWh :math:`_{CH_4}`/t of methanol. -The production of ammonia, methanol, and chlorine production is deducted from the JRC IDEES basic chemicals, leaving the production totals of high-value chemicals. For this, we assume that the liquid hydrocarbon feedstock comes from synthetic or fossil- origin naphtha (14 MWh :math:`_{naphtha}`/t of HVC, similar to `Lechtenböhmer et al `_), ignoring the methanol-to-olefin route. Furthermore, we assume the following transformations of the energy-consuming processes in the production of plastics: the final energy consumption in steam processing is converted to methane since requires temperature above 500 °C (4.1 MWh :math:`_{CH_4}` /t of HVC, see `Rehfeldt et al. `_); and the remaining processes are electrified using the current efficiency of microwave for high-enthalpy heat processing, electric furnaces, electric process cooling and electric generic processes (2.85 MWh :math:`_{el}`/t of HVC). +The production of ammonia, methanol, and chlorine is deducted from the JRC IDEES basic chemicals, leaving the production totals of high-value chemicals. For this, we assume that the liquid hydrocarbon feedstock comes from synthetic or fossil-origin naphtha (14 MWh :math:`_{naphtha}`/t of HVC, similar to `Lechtenböhmer et al `__), ignoring the methanol-to-olefin route. Furthermore, we assume the following transformations of the energy-consuming processes in the production of plastics: the final energy consumption in steam processing is converted to methane since it requires temperatures above 500 °C (4.1 MWh :math:`_{CH_4}` /t of HVC, see `Rehfeldt et al.
`__); and the remaining processes are electrified using the current efficiency of microwave for high-enthalpy heat processing, electric furnaces, electric process cooling and electric generic processes (2.85 MWh :math:`_{el}`/t of HVC). The process emissions from feedstock in the chemical industry are as high as 0.369 t :math:`_{CO_2}`/t of ethylene equivalent. We consider process emissions for all the material output, which is a conservative approach since it assumes that all plastic-embedded :math:`CO_2` will eventually be released into the atmosphere. However, plastic disposal in landfilling will avoid, or at least delay, associated :math:`CO_2` emissions. -Circular economy practices drastically reduce the amount of primary feedstock needed for the production of plastics in the model (see `Kullmann et al. `_, `Meys et al. (2021) `_, `Meys et al. (2020) `_, `Gu et al. `_) and consequently, also the energy demands and level of process emission. The percentage of plastics that are assumed to be mechanically recycled can be selected in the `config file `_, as well as -the percentage that is chemically recycled, see `config file `_ The energy consumption for those recycling processes are respectively 0.547 MWh :math:`_{el}`/t of HVC (as indicated in the `config file `_) (`Meys et al. (2020) `_), and 6.9 MWh :math:`_{el}`/t of HVC (as indicated in the `config file `_) based on pyrolysis and electric steam cracking (see `Materials Economics `_ report). +Circular economy practices drastically reduce the amount of primary feedstock needed for the production of plastics in the model (see `Kullmann et al. `__, `Meys et al. (2021) `__, `Meys et al. (2020) `__, `Gu et al. `__) and, consequently, also the energy demands and the level of process emissions. The percentage of plastics that are assumed to be mechanically recycled can be selected in the `config file `__, as well as +the percentage that is chemically recycled, see `config file `__. The energy consumption for these recycling processes is, respectively, 0.547 MWh :math:`_{el}`/t of HVC (as indicated in the `config file `__) (`Meys et al. (2020) `__), and 6.9 MWh :math:`_{el}`/t of HVC (as indicated in the `config file `__) based on pyrolysis and electric steam cracking (see `Materials Economics `__ report). **Non-metallic Mineral Products** @@ -476,7 +476,7 @@ This subsector includes the manufacturing of cement, ceramics, and glass. *Cement* -Cement is used in construction to make concrete. The production of cement involves high energy consumption and large process emissions. The calcination of limestone to chemically reactive calcium oxide, also known as lime, involves process emissions of 0.54 t :math:`_{CO_2}` /t cement (see `Akhtar et al. `_. +Cement is used in construction to make concrete. The production of cement involves high energy consumption and large process emissions. The calcination of limestone to chemically reactive calcium oxide, also known as lime, involves process emissions of 0.54 t :math:`_{CO_2}` /t cement (see `Akhtar et al. `__). .. math:: @@ -487,16 +487,16 @@ Additionally, :math:`CO_2` is emitted from the combustion of fossil fuels to pro Cement process emissions can be captured assuming a capture rate of 90%. Whether emissions are captured is decided by the model taking into account the capital costs of carbon capture modules. The electricity and heat demand of process emission carbon capture is currently ignored. For net-zero emission scenarios, the remaining process emissions need to be compensated by negative emissions.
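To make the capture arithmetic above concrete, here is a minimal sketch (not the model's actual formulation, which is part of the optimisation) that applies the quoted 0.54 t :math:`_{CO_2}`/t cement process-emission intensity and the assumed 90% capture rate to an annual cement output; the production figure and all names are illustrative.

.. code:: python

    # Illustrative numbers taken from the text above; not read from the model.
    PROCESS_EMISSIONS = 0.54  # t CO2 per t cement from calcination only
    CAPTURE_RATE = 0.90       # example capture rate for cement process emissions

    def cement_process_co2(cement_mt_per_year: float, capture: bool) -> dict:
        """Split annual cement process emissions (Mt CO2/a) into captured and
        residual shares for a given annual cement production (Mt/a)."""
        total = PROCESS_EMISSIONS * cement_mt_per_year
        captured = CAPTURE_RATE * total if capture else 0.0
        return {"total": total, "captured": captured, "residual": total - captured}

    # Example with a hypothetical 180 Mt/a of cement and capture enabled:
    # roughly 97.2 Mt CO2/a in total, 87.5 Mt captured, 9.7 Mt residual.
    print(cement_process_co2(180, capture=True))

The residual share is what would have to be offset by negative emissions in a net-zero scenario, as noted above.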
-With the exception of electricity demand and biomass demand for low-temperature heat (0.06 MWh/t and 0.2 MWh/t), the final energy consumption of this subsector is assumed to be supplied by methane (0.52 MWh/t), which is capable of delivering the required high-temperature heat. This implies a switch from burning solid fuels to burning gas which will require adjustments of the `kilns <10.1109/CITCON.2013.6525276>`_. The share of fossil vs. synthetic methane consumed is a result of the optimisation +With the exception of electricity demand and biomass demand for low-temperature heat (0.06 MWh/t and 0.2 MWh/t), the final energy consumption of this subsector is assumed to be supplied by methane (0.52 MWh/t), which is capable of delivering the required high-temperature heat. This implies a switch from burning solid fuels to burning gas which will require adjustments of the `kilns <https://doi.org/10.1109/CITCON.2013.6525276>`__. The share of fossil vs. synthetic methane consumed is a result of the optimisation. *Ceramics* -The ceramics sector is assumed to be fully electrified based on the current efficiency of already electrified processes which include microwave drying and sintering of raw materials, electric kilns for primary production processes, electric furnaces for the `product finishing `_. In total, the final electricity consumption is 0.44 MWh/t of ceramic. The manufacturing of ceramics includes process emissions of 0.03 t :math:`_{CO_2}`/t of ceramic. For a detailed overview of the ceramics industry sector see `Furszyfer Del Rio et al `_. +The ceramics sector is assumed to be fully electrified based on the current efficiency of already electrified processes which include microwave drying and sintering of raw materials, electric kilns for primary production processes, electric furnaces for the `product finishing `__. In total, the final electricity consumption is 0.44 MWh/t of ceramic. The manufacturing of ceramics includes process emissions of 0.03 t :math:`_{CO_2}`/t of ceramic. For a detailed overview of the ceramics industry sector see `Furszyfer Del Rio et al `__. *Glass* -The production of glass is assumed to be fully electrified based on the current efficiency of electric melting tanks and electric annealing which adds up to an electricity demand of 2.07 MWh :math:`_{el}`/t of `glass `_. The manufacturing of glass incurs process emissions of 0.1 t :math:`_{CO_2}`/t of glass. Potential efficiency improvements, which according to `Lechtenböhmer et al `_ could reduce energy demands to 0.85 MW :math:`_{el}`/t of glass, have not been considered. For a detailed overview of the glass industry sector see `Furszyfer Del Rio et al `_. +The production of glass is assumed to be fully electrified based on the current efficiency of electric melting tanks and electric annealing, which adds up to an electricity demand of 2.07 MWh :math:`_{el}`/t of `glass `__. The manufacturing of glass incurs process emissions of 0.1 t :math:`_{CO_2}`/t of glass. Potential efficiency improvements, which according to `Lechtenböhmer et al `__ could reduce energy demands to 0.85 MWh :math:`_{el}`/t of glass, have not been considered. For a detailed overview of the glass industry sector see `Furszyfer Del Rio et al `__.
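The per-tonne figures quoted above for cement, ceramics and glass can be summarised in a small lookup, as in the illustrative sketch below; the model derives such values from JRC-IDEES and the industry scripts rather than from a hard-coded table, so the structure and names here are assumptions.

.. code:: python

    # Specific energy demands (MWh per tonne of product) and process emissions
    # (t CO2 per tonne) as quoted in the text above. Purely illustrative; this
    # is not the data structure used by the model.
    INTENSITIES = {
        "cement":   {"elec": 0.06, "biomass": 0.2, "methane": 0.52, "process_co2": 0.54},
        "ceramics": {"elec": 0.44, "biomass": 0.0, "methane": 0.0,  "process_co2": 0.03},
        "glass":    {"elec": 2.07, "biomass": 0.0, "methane": 0.0,  "process_co2": 0.1},
    }

    def subsector_totals(production_t: dict) -> dict:
        """Scale per-tonne intensities by production volumes (t) to obtain
        energy demand (MWh) and process emissions (t CO2) per carrier."""
        return {
            product: {carrier: value * tonnes for carrier, value in INTENSITIES[product].items()}
            for product, tonnes in production_t.items()
        }

    # Example with hypothetical production volumes of 1 Mt cement and 0.2 Mt glass.
    print(subsector_totals({"cement": 1e6, "glass": 2e5}))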
**Non-ferrous Metals** @@ -511,75 +511,75 @@ The primary route involves two energy-intensive processes: the production of alu 2Al_2O_3 +3C \xrightarrow{} 4Al+3CO_2 -The primary route requires high-enthalpy heat (2.3 MWh/t) to produce alumina which is supplied by methane and causes process emissions of 1.5 t :math:`_{CO_2}`/t aluminium. According to `Friedrichsen et al. `_, inert anodes might become commercially available by 2030 that would eliminate the process emissions, but they are not included in the model. Assuming all subprocesses are electrified, the primary route requires 15.4 MWh :math:`_{el}`/t of aluminium. +The primary route requires high-enthalpy heat (2.3 MWh/t) to produce alumina, which is supplied by methane and causes process emissions of 1.5 t :math:`_{CO_2}`/t aluminium. According to `Friedrichsen et al. `__, inert anodes, which would eliminate the process emissions, might become commercially available by 2030, but they are not included in the model. Assuming all subprocesses are electrified, the primary route requires 15.4 MWh :math:`_{el}`/t of aluminium. -In the secondary route, scrap aluminium is remelted. The energy demand for this process is only 10% of the primary route and there are no associated process emissions. Assuming all subprocesses are electrified, the secondary route requires 1.7 MWh/t of aluminium. The share of aliminum manufactured by the primary and secondary route can be selected in the `config file `_] +In the secondary route, scrap aluminium is remelted. The energy demand for this process is only 10% of the primary route and there are no associated process emissions. Assuming all subprocesses are electrified, the secondary route requires 1.7 MWh/t of aluminium. The share of aluminium manufactured by the primary and secondary route can be selected in the `config file `__. For the other non-ferrous metals, we assume the electrification of the entire manufacturing process with an average electricity demand of 3.2 MWh :math:`_{el}`/t lead equivalent. **Other Industry Subsectors** -The remaining industry subsectors include (a) pulp, paper, printing, (b) food, beverages, tobacco, (c) textiles and leather, (d) machinery equipment, (e) transport equipment, (f) wood and wood products, (g) others. Low- and mid-temperature process heat in these industries is assumed to be `supplied by biomass `_ while the remaining processes are electrified. None of the subsectors involve process emissions. +The remaining industry subsectors include (a) pulp, paper, printing, (b) food, beverages, tobacco, (c) textiles and leather, (d) machinery equipment, (e) transport equipment, (f) wood and wood products, (g) others. Low- and mid-temperature process heat in these industries is assumed to be `supplied by biomass `__ while the remaining processes are electrified. None of the subsectors involve process emissions. Agriculture demand ========================= -Energy demands for the agriculture, forestry and fishing sector per country are taken from the `JRC-IDEES database `_. Missing countries are filled with `Eurostat data `_. Agricultural energy demands are split into electricity (lighting, ventilation, specific electricity uses, electric pumping devices), heat (specific heat uses, low enthalpy heat), and machinery oil (motor drives, farming machine drives, diesel-fueled pumping devices). Heat demand is assigned at “services rural heat” buses. Time series for demands are assumed to be constant and distributed inside countries by population.
+Energy demands for the agriculture, forestry and fishing sector per country are taken from the `JRC-IDEES database `__. Missing countries are filled with `Eurostat data `__. Agricultural energy demands are split into electricity (lighting, ventilation, specific electricity uses, electric pumping devices), heat (specific heat uses, low enthalpy heat), and machinery oil (motor drives, farming machine drives, diesel-fueled pumping devices). Heat demand is assigned at “services rural heat” buses. Time series for demands are assumed to be constant and distributed inside countries by population. .. _Transportation: Transportation ========================= -Annual energy demands for land transport, aviation and shipping for every country are retrieved from `JRC-IDEES data set `_. Below, the details of how each of these categories are treated is explained. +Annual energy demands for land transport, aviation and shipping for every country are retrieved from the `JRC-IDEES data set `__. The details of how each of these categories is treated are explained below. .. _Land transport: **Land transport** -Both road and rail transport is combined as `land transport demand `_ although electrified rail transport is excluded because that demand is included in the current electricity demand. +Both road and rail transport are combined as `land transport demand `__, although electrified rail transport is excluded because that demand is already included in the current electricity demand. -The most important settings for land transport are the exogenously fixed fuel mix (an option enabling the endogeous optimization of transport electrification is planned but not yet implemented). In the `config file `_, the share of battery electric vehicles (BEV) and hydrogen fuel cell vehicles (FCEV) can be set. The remaining percentage will be treated as internal combustion engines (ICE) that consume oil products. +The most important setting for land transport is the exogenously fixed fuel mix (an option enabling the endogenous optimisation of transport electrification is planned but not yet implemented). In the `config file `__, the share of battery electric vehicles (BEV) and hydrogen fuel cell vehicles (FCEV) can be set. The remaining percentage will be treated as internal combustion engines (ICE) that consume oil products. *Battery Electric vehicles (BEV)* -For the electrified land transport, country-specific factors are computed by comparing the `current car final energy consumption per km in `_ (average for Europe 0.7 kWh/km) to the 0.18 kWh/km value assumed for battery-to-wheels efficiency in EVs. The characteristic `weekly profile `_ provided by the German Federal Highway Research Institute (BASt) is used to obtain hourly time series for European countries taking into account the corresponding local times. Furthermore, a temperature dependence is included in the time series to account for heating/cooling demand in transport. For temperatures `below `_/`above `_ certain threshold values, e.g. 15 °C/20 °C, `temperature coefficients `_ of typically 0.98%/°C and 0.63%/°C are assumed, based on the `paper `_. +For the electrified land transport, country-specific factors are computed by comparing the `current car final energy consumption per km in `__ (average for Europe 0.7 kWh/km) to the 0.18 kWh/km value assumed for battery-to-wheels efficiency in EVs.
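As a minimal illustration of the country-specific scaling just described (not the actual implementation in the transport demand scripts), the sketch below converts today's car final energy demand into an electricity demand for BEVs using the 0.7 kWh/km European average and the 0.18 kWh/km battery-to-wheels assumption; the function name and the example demand are hypothetical.

.. code:: python

    # Illustrative sketch of the scaling described above: today's final energy
    # per km for cars is converted to electric demand with the assumed
    # battery-to-wheels consumption of an EV.
    BEV_KWH_PER_KM = 0.18  # assumed battery-to-wheels consumption of an EV

    def electrified_transport_demand(ice_demand_mwh: float, kwh_per_km_today: float = 0.7) -> float:
        """Scale today's car final energy demand (MWh) by the ratio of EV to
        ICE consumption per km (European average 0.7 kWh/km in the text)."""
        return ice_demand_mwh * BEV_KWH_PER_KM / kwh_per_km_today

    # Example: 100 GWh of today's car fuel demand becomes roughly 25.7 GWh of electricity.
    print(electrified_transport_demand(100_000))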
The characteristic `weekly profile `__ provided by the German Federal Highway Research Institute (BASt) is used to obtain hourly time series for European countries, taking into account the corresponding local times. Furthermore, a temperature dependence is included in the time series to account for heating/cooling demand in transport. For temperatures `below `__/`above `__ certain threshold values, e.g. 15 °C/20 °C, `temperature coefficients `__ of typically 0.98%/°C and 0.63%/°C are assumed, based on the `paper `__. -For BEVs the user can define the `storage energy capacity `_, `charging power capacity `_, and `charging efficiency `_. +For BEVs the user can define the `storage energy capacity `__, `charging power capacity `__, and `charging efficiency `__. -For BEV, smart charging is an option. A `certain share `_ of the BEV fleet can shift their charging time. The BEV state of charge is forced to be higher than a `set percentage `_, e.g. 75%, every day at a `specified hour `_, e.g., 7 am, to ensure that the batteries are sufficiently charged for peak usage in the morning and they not behave as seasonal storage. They also have the option to participate in vehicle-to-grid (V2G) services to facilitate system operation if that `is enabled `_. +For BEV, smart charging is an option. A `certain share `__ of the BEV fleet can shift its charging time. The BEV state of charge is forced to be higher than a `set percentage `__, e.g. 75%, every day at a `specified hour `__, e.g., 7 am, to ensure that the batteries are sufficiently charged for peak usage in the morning and do not behave as seasonal storage. They also have the option to participate in vehicle-to-grid (V2G) services to facilitate system operation if that `is enabled `__. The battery cost of BEV is not included in the model since it is assumed that BEV owners buy them to primarily satisfy their mobility needs. *Hydrogen fuel cell vehicles (FCEV)* The share of all land transport that is specified to be FCEV will be converted to a demand for hydrogen (see :ref:`Hydrogen supply`) using the `FCEV efficiency -`_. +`__. FCEVs are typically used to simulate demand for transport that is hard to electrify directly, e.g. heavy construction machinery, but they may also be used to investigate a more widespread adoption of the technology. *Internal combustion engine vehicles (ICE)* All land transport that is not specified to be either BEV or FCEV will be treated as conventional ICEs. The transport demand is converted to a demand for oil products (see :ref:`Oil-based products supply`) using the `ICE efficiency -`_. +`__. .. _Aviation: **Aviation** -The `demand for aviation `_ includes international and domestic use. It is modelled as an oil demand since aviation consumes kerosene. This can be produced synthetically or have fossil-origin (see :ref:`Oil-based products supply`). +The `demand for aviation `__ includes international and domestic use. It is modelled as an oil demand since aviation consumes kerosene. This can be produced synthetically or be of fossil origin (see :ref:`Oil-based products supply`). .. _Shipping: **Shipping** -Shipping energy demand is covered by a combination of oil and hydrogen.
Other fuel options, like methanol or ammonia, are currently not included in PyPSA-Eur-Sec. The share of shipping that is assumed to be supplied by hydrogen can be selected in the `config file `__. -To estimate the `hydrogen demand `_, the average fuel efficiency of the fleet is used in combination with the efficiency of the fuel cell defined in the technology-data repository. The average fuel efficiency is set in the `config file `_. +To estimate the `hydrogen demand `__, the average fuel efficiency of the fleet is used in combination with the efficiency of the fuel cell defined in the technology-data repository. The average fuel efficiency is set in the `config file `__. The consumed hydrogen comes from the general hydrogen bus where it can be produced by SMR, SMR+CC or electrolysers (see :ref:`Hydrogen supply`). The fraction that is not converted into hydrogen uses oil products, i.e. it is connected to the general oil bus. -The energy demand for liquefaction of the hydrogen used for shipping can be `included `_. If this option is selected, liquifaction will happen at the `node where the shipping demand occurs `_. +The energy demand for liquefaction of the hydrogen used for shipping can be `included `__. If this option is selected, liquefaction will happen at the `node where the shipping demand occurs `__. .. _Carbon dioxide capture, usage and sequestration (CCU/S): @@ -600,12 +600,12 @@ For the following point source emissions, carbon capture is applicable: • CHP plants using biomass or methane -• `Coal power plants `_. +• `Coal power plants `__. -Point source emissions are captured assuming a capture rate, e.g. 90%, which can be specified in the `config file `_. The electricity and heat demand of process emission carbon capture +Point source emissions are captured assuming a capture rate, e.g. 90%, which can be specified in the `config file `__. The electricity and heat demand of process emission carbon capture is currently ignored. -DAC (if `included `_) includes the adsorption phase where electricity and heat consumptionsare required to assist the adsorption process and regenerate the adsorbent. It also includes the drying and compression of :math:`CO_2` prior to storage which consumes electricity and rejects heat. +DAC (if `included `__) includes the adsorption phase where electricity and heat consumption are required to assist the adsorption process and regenerate the adsorbent. It also includes the drying and compression of :math:`CO_2` prior to storage, which consumes electricity and rejects heat. *Carbon dioxide usage* @@ -614,8 +614,8 @@ naphtha). If captured carbon is used, the :math:`CO_2` emissions of the syntheti *Carbon dioxide sequestration* -Captured :math:`CO_2` can also be sequestered underground up to an annual sequestration limit of 200 Mt :math:`_{CO_2}`/a. This limit can be chosen in the `config file `_. As stored carbon dioxide is modelled as a single node for Europe, :math:`CO_2` transport constraints are neglected. Since :math:`CO_2` sequestration is an immature technology, the cost assumption is defined in the `config file `_. +Captured :math:`CO_2` can also be sequestered underground up to an annual sequestration limit of 200 Mt :math:`_{CO_2}`/a. This limit can be chosen in the `config file `__. As stored carbon dioxide is modelled as a single node for Europe, :math:`CO_2` transport constraints are neglected. Since :math:`CO_2` sequestration is an immature technology, the cost assumption is defined in the `config file `__.
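As a simple illustration of the capture and sequestration bookkeeping described above (the model treats capture, usage and sequestration endogenously in the optimisation), the sketch below applies the example 90% capture rate to a set of hypothetical point-source emissions and checks the captured amount against the 200 Mt :math:`_{CO_2}`/a sequestration limit quoted in the text; all names and emission figures are illustrative.

.. code:: python

    # Illustrative only: the capture rate and the annual sequestration limit are
    # config parameters in the model; the numbers below are the examples quoted
    # in the text (90% capture, 200 Mt CO2/a limit).
    CAPTURE_RATE = 0.9
    SEQUESTRATION_LIMIT = 200.0  # Mt CO2 per year

    def captured_co2(point_source_emissions_mt: dict) -> float:
        """Total captured CO2 (Mt/a) from a set of point sources (Mt/a each)."""
        return CAPTURE_RATE * sum(point_source_emissions_mt.values())

    emissions = {"SMR": 40.0, "cement process": 90.0, "biomass CHP": 60.0}  # hypothetical values
    captured = captured_co2(emissions)
    # Roughly 171 Mt CO2/a captured, which stays within the 200 Mt/a limit;
    # in the model, captured CO2 can alternatively be used for synthetic fuels.
    print(captured, captured <= SEQUESTRATION_LIMIT)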
*Carbon dioxide transport* -Carbon dioxide can be modelled as a single node for Europe (in this case, :math:`CO_2` transport constraints are neglected). A network for modelling the transport of :math:`CO_2` among the different nodes can also be created if selected in the `config file `_. +Carbon dioxide can be modelled as a single node for Europe (in this case, :math:`CO_2` transport constraints are neglected). A network for modelling the transport of :math:`CO_2` among the different nodes can also be created if selected in the `config file `__. diff --git a/doc/support.rst b/doc/support.rst index 1d512d59..fc86a998 100644 --- a/doc/support.rst +++ b/doc/support.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -7,8 +7,8 @@ Support ####################### -* In case of code-related **questions**, please post on `stack overflow `_. -* For non-programming related and more general questions please refer to the `mailing list `_. -* To **discuss** with other PyPSA users, organise projects, share news, and get in touch with the community you can use the `discord server `_. -* For **bugs and feature requests**, please use the `issue tracker `_. -* We strongly welcome anyone interested in providing **contributions** to this project. If you have any ideas, suggestions or encounter problems, feel invited to file issues or make pull requests on `Github `_. For further information on how to contribute, please refer to :ref:`contributing`. +* In case of code-related **questions**, please post on `Stack Overflow `__. +* For non-programming-related and more general questions, please refer to the `mailing list `__. +* To **discuss** with other PyPSA users, organise projects, share news, and get in touch with the community you can use the `Discord server `__. +* For **bugs and feature requests**, please use the `issue tracker `__. +* We strongly welcome anyone interested in providing **contributions** to this project. If you have any ideas, suggestions or encounter problems, feel invited to file issues or make pull requests on `GitHub `__. For further information on how to contribute, please refer to :ref:`contributing`. diff --git a/doc/tutorial.rst b/doc/tutorial.rst index e58ad123..4c10b573 100644 --- a/doc/tutorial.rst +++ b/doc/tutorial.rst @@ -1,5 +1,5 @@ ..
- SPDX-FileCopyrightText: 2019-2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -133,51 +133,50 @@ This triggers a workflow of multiple preceding jobs that depend on each rule's i graph[bgcolor=white, margin=0]; node[shape=box, style=rounded, fontname=sans, fontsize=10, penwidth=2]; edge[penwidth=2, color=grey]; - 0[label = "solve_network", color = "0.33 0.6 0.85", style="rounded"]; - 1[label = "prepare_network\nll: copt\nopts: Co2L-24H", color = "0.03 0.6 0.85", style="rounded"]; - 2[label = "add_extra_components", color = "0.45 0.6 0.85", style="rounded"]; - 3[label = "cluster_network\nclusters: 6", color = "0.46 0.6 0.85", style="rounded"]; - 4[label = "simplify_network\nsimpl: ", color = "0.52 0.6 0.85", style="rounded"]; - 5[label = "add_electricity", color = "0.55 0.6 0.85", style="rounded"]; - 6[label = "build_renewable_profiles\ntechnology: solar", color = "0.15 0.6 0.85", style="rounded"]; - 7[label = "base_network", color = "0.37 0.6 0.85", style="rounded,dashed"]; - 8[label = "build_shapes", color = "0.07 0.6 0.85", style="rounded,dashed"]; - 9[label = "retrieve_databundle", color = "0.60 0.6 0.85", style="rounded"]; - 10[label = "retrieve_natura_raster", color = "0.42 0.6 0.85", style="rounded"]; - 11[label = "build_bus_regions", color = "0.09 0.6 0.85", style="rounded,dashed"]; - 12[label = "build_renewable_profiles\ntechnology: onwind", color = "0.15 0.6 0.85", style="rounded"]; - 13[label = "build_renewable_profiles\ntechnology: offwind-ac", color = "0.15 0.6 0.85", style="rounded"]; - 14[label = "build_ship_raster", color = "0.02 0.6 0.85", style="rounded"]; - 15[label = "retrieve_ship_raster", color = "0.40 0.6 0.85", style="rounded"]; - 16[label = "build_renewable_profiles\ntechnology: offwind-dc", color = "0.15 0.6 0.85", style="rounded"]; - 17[label = "build_line_rating", color = "0.32 0.6 0.85", style="rounded"]; - 18[label = "retrieve_cost_data\nyear: 2030", color = "0.50 0.6 0.85", style="rounded"]; - 19[label = "build_powerplants", color = "0.64 0.6 0.85", style="rounded,dashed"]; - 20[label = "build_electricity_demand", color = "0.13 0.6 0.85", style="rounded,dashed"]; - 21[label = "retrieve_electricity_demand", color = "0.31 0.6 0.85", style="rounded"]; - 22[label = "copy_config", color = "0.23 0.6 0.85", style="rounded"]; + 0[label = "solve_network", color = "0.24 0.6 0.85", style="rounded"]; + 1[label = "prepare_network\nll: vopt\nopts: Co2L-3H", color = "0.10 0.6 0.85", style="rounded"]; + 2[label = "add_extra_components", color = "0.33 0.6 0.85", style="rounded"]; + 3[label = "cluster_network\nclusters: 128", color = "0.59 0.6 0.85", style="rounded"]; + 4[label = "simplify_network\nsimpl: ", color = "0.18 0.6 0.85", style="rounded"]; + 5[label = "add_electricity", color = "0.48 0.6 0.85", style="rounded"]; + 6[label = "build_renewable_profiles\ntechnology: solar", color = "0.29 0.6 0.85", style="rounded"]; + 7[label = "base_network", color = "0.30 0.6 0.85", style="rounded"]; + 8[label = "build_shapes", color = "0.61 0.6 0.85", style="rounded"]; + 9[label = "retrieve_databundle", color = "0.06 0.6 0.85", style="rounded"]; + 10[label = "retrieve_natura_raster", color = "0.03 0.6 0.85", style="rounded"]; + 11[label = "retrieve_cutout\ncutout: europe-2013-sarah", color = "0.50 0.6 0.85", style="rounded"]; + 12[label = "build_renewable_profiles\ntechnology: onwind", color = "0.29 0.6 0.85", style="rounded"]; + 13[label = "retrieve_cutout\ncutout: europe-2013-era5", color = "0.50 0.6 0.85", 
style="rounded"]; + 14[label = "build_renewable_profiles\ntechnology: offwind-ac", color = "0.29 0.6 0.85", style="rounded"]; + 15[label = "build_ship_raster", color = "0.16 0.6 0.85", style="rounded"]; + 16[label = "retrieve_ship_raster", color = "0.53 0.6 0.85", style="rounded"]; + 17[label = "build_renewable_profiles\ntechnology: offwind-dc", color = "0.29 0.6 0.85", style="rounded"]; + 18[label = "build_hydro_profile", color = "0.47 0.6 0.85", style="rounded"]; + 19[label = "retrieve_cost_data\nyear: 2030", color = "0.21 0.6 0.85", style="rounded"]; + 20[label = "build_powerplants", color = "0.56 0.6 0.85", style="rounded"]; + 21[label = "build_electricity_demand", color = "0.54 0.6 0.85", style="rounded"]; + 22[label = "retrieve_electricity_demand", color = "0.34 0.6 0.85", style="rounded"]; + 23[label = "retrieve_synthetic_electricity_demand", color = "0.65 0.6 0.85", style="rounded"]; 1 -> 0 - 22 -> 0 2 -> 1 - 18 -> 1 + 19 -> 1 3 -> 2 - 18 -> 2 + 19 -> 2 4 -> 3 - 18 -> 3 + 19 -> 3 5 -> 4 - 18 -> 4 - 11 -> 4 + 19 -> 4 + 7 -> 4 6 -> 5 12 -> 5 - 13 -> 5 - 16 -> 5 - 7 -> 5 + 14 -> 5 17 -> 5 18 -> 5 - 11 -> 5 + 7 -> 5 19 -> 5 - 9 -> 5 20 -> 5 + 9 -> 5 + 21 -> 5 8 -> 5 7 -> 6 9 -> 6 @@ -186,29 +185,30 @@ This triggers a workflow of multiple preceding jobs that depend on each rule's i 11 -> 6 8 -> 7 9 -> 8 - 8 -> 11 - 7 -> 11 7 -> 12 9 -> 12 10 -> 12 8 -> 12 - 11 -> 12 - 7 -> 13 - 9 -> 13 - 10 -> 13 - 14 -> 13 - 8 -> 13 - 11 -> 13 + 13 -> 12 + 7 -> 14 + 9 -> 14 + 10 -> 14 15 -> 14 - 7 -> 16 - 9 -> 16 - 10 -> 16 - 14 -> 16 - 8 -> 16 - 11 -> 16 + 8 -> 14 + 13 -> 14 + 16 -> 15 + 13 -> 15 7 -> 17 - 7 -> 19 - 21 -> 20 + 9 -> 17 + 10 -> 17 + 15 -> 17 + 8 -> 17 + 13 -> 17 + 8 -> 18 + 13 -> 18 + 7 -> 20 + 22 -> 21 + 23 -> 21 } | @@ -218,26 +218,26 @@ In the terminal, this will show up as a list of jobs to be run: .. code:: bash Building DAG of jobs... - job count min threads max threads - ------------------------ ------- ------------- ------------- - add_electricity 1 1 1 - add_extra_components 1 1 1 - base_network 1 1 1 - build_bus_regions 1 1 1 - build_hydro_profile 1 1 1 - build_electricity_demand 1 1 1 - build_powerplants 1 1 1 - build_renewable_profiles 4 1 1 - build_shapes 1 1 1 - build_ship_raster 1 1 1 - cluster_network 1 1 1 - prepare_network 1 1 1 - retrieve_cost_data 1 1 1 - retrieve_databundle 1 1 1 - retrieve_natura_raster 1 1 1 - simplify_network 1 1 1 - solve_network 1 1 1 - total 20 1 1 + Job stats: + job count + ------------------------------------- ------- + add_electricity 1 + add_extra_components 1 + build_line_rating 1 + build_renewable_profiles 4 + build_ship_raster 1 + cluster_network 1 + prepare_network 1 + retrieve_cost_data 1 + retrieve_cutout 1 + retrieve_databundle 1 + retrieve_electricity_demand 1 + retrieve_natura_raster 1 + retrieve_ship_raster 1 + retrieve_synthetic_electricity_demand 1 + simplify_network 1 + solve_network 1 + total 19 ``snakemake`` then runs these jobs in the correct order. @@ -246,16 +246,16 @@ A job (here ``simplify_network``) will display its attributes and normally some .. 
code:: bash - [Mon Jan 1 00:00:00 2023] + [Mon Feb 19 17:06:17 2024] rule simplify_network: - input: networks/elec.nc, resources/costs.csv, resources/regions_onshore.geojson, resources/regions_offshore.geojson - output: networks/elec_s.nc, resources/regions_onshore_elec_s.geojson, resources/regions_offshore_elec_s.geojson, resources/busmap_elec_s.csv, resources/connection_costs_s.csv - log: logs/simplify_network/elec_s.log + input: resources/test/networks/elec.nc, data/costs_2030.csv, resources/test/regions_onshore.geojson, resources/test/regions_offshore.geojson + output: resources/test/networks/elec_s.nc, resources/test/regions_onshore_elec_s.geojson, resources/test/regions_offshore_elec_s.geojson, resources/test/busmap_elec_s.csv, resources/test/connection_costs_s.csv + log: logs/test-elec/simplify_network/elec_s.log jobid: 4 - benchmark: benchmarks/simplify_network/elec_s - reason: Missing output files: resources/busmap_elec_s.csv, resources/regions_onshore_elec_s.geojson, networks/elec_s.nc, resources/regions_offshore_elec_s.geojson; Input files updated by another job: resources/regions_offshore.geojson, resources/regions_onshore.geojson, resources/costs.csv, networks/elec.nc + benchmark: benchmarks/test-elec/simplify_network/elec_s + reason: Missing output files: resources/test/regions_offshore_elec_s.geojson, resources/test/busmap_elec_s.csv, resources/test/regions_onshore_elec_s.geojson, resources/test/networks/elec_s.nc; Input files updated by another job: resources/test/regions_offshore.geojson, resources/test/networks/elec.nc, resources/test/regions_onshore.geojson, data/costs_2030.csv wildcards: simpl= - resources: tmpdir=/tmp, mem_mb=4000, mem_mib=3815 + resources: tmpdir=/tmp, mem_mb=12000, mem_mib=11445 Once the whole workflow is finished, it should state so in the terminal. @@ -313,4 +313,4 @@ Jupyter Notebooks). n = pypsa.Network("results/networks/elec_s_6_ec_lcopt_Co2L-24H.nc") -For inspiration, read the `examples section in the PyPSA documentation `_. +For inspiration, read the `examples section in the PyPSA documentation `__. diff --git a/doc/tutorial_sector.rst b/doc/tutorial_sector.rst index 53a60353..d6fafa69 100644 --- a/doc/tutorial_sector.rst +++ b/doc/tutorial_sector.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2023-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -61,46 +61,69 @@ To run an overnight / greenfield scenario with the specifications above, run snakemake -call all --configfile config/test/config.overnight.yaml -which will result in the following *additional* jobs ``snakemake`` wants to run -on top of those already included in the electricity-only tutorial: +which will result in the following jobs ``snakemake`` wants to run, some of +which were already included in the electricity-only tutorial: ..
code:: bash - job count min threads max threads - ------------------------------------------------ ------- ------------- ------------- - all 1 1 1 - build_ammonia_production 1 1 1 - build_biomass_potentials 1 1 1 - build_clustered_population_layouts 1 1 1 - build_cop_profiles 1 1 1 - build_gas_input_locations 1 1 1 - build_gas_network 1 1 1 - build_heat_demands 3 1 1 - build_industrial_distribution_key 1 1 1 - build_industrial_energy_demand_per_country_today 1 1 1 - build_industrial_energy_demand_per_node 1 1 1 - build_industrial_energy_demand_per_node_today 1 1 1 - build_industrial_production_per_country 1 1 1 - build_industrial_production_per_country_tomorrow 1 1 1 - build_industrial_production_per_node 1 1 1 - build_industry_sector_ratios 1 1 1 - build_population_weighted_energy_totals 1 1 1 - build_salt_cavern_potentials 1 1 1 - build_shipping_demand 1 1 1 - build_simplified_population_layouts 1 1 1 - build_solar_thermal_profiles 3 1 1 - build_temperature_profiles 3 1 1 - build_transport_demand 1 1 1 - cluster_gas_network 1 1 1 - cluster_network 1 1 1 - copy_config 1 1 1 - make_summary 1 1 1 - plot_network 1 1 1 - plot_summary 1 1 1 - prepare_sector_network 1 1 1 - retrieve_gas_infrastructure_data 1 1 1 - retrieve_sector_databundle 1 1 1 - solve_sector_network 1 1 1 + job count + ------------------------------------------------ ------- + add_electricity 1 + add_extra_components 1 + all 1 + build_ammonia_production 1 + build_biomass_potentials 1 + build_clustered_population_layouts 1 + build_cop_profiles 1 + build_daily_heat_demand 1 + build_district_heat_share 1 + build_energy_totals 1 + build_gas_input_locations 1 + build_gas_network 1 + build_heat_totals 1 + build_hourly_heat_demand 1 + build_industrial_distribution_key 1 + build_industrial_energy_demand_per_country_today 1 + build_industrial_energy_demand_per_node 1 + build_industrial_energy_demand_per_node_today 1 + build_industrial_production_per_country 1 + build_industrial_production_per_country_tomorrow 1 + build_industrial_production_per_node 1 + build_industry_sector_ratios 1 + build_industry_sector_ratios_intermediate 1 + build_population_layouts 1 + build_population_weighted_energy_totals 2 + build_renewable_profiles 4 + build_salt_cavern_potentials 1 + build_ship_raster 1 + build_shipping_demand 1 + build_simplified_population_layouts 1 + build_solar_thermal_profiles 3 + build_temperature_profiles 3 + build_transport_demand 1 + cluster_gas_network 1 + cluster_network 1 + make_summary 1 + plot_gas_network 1 + plot_hydrogen_network 1 + plot_power_network 1 + plot_power_network_clustered 1 + plot_summary 1 + prepare_network 1 + prepare_sector_network 1 + retrieve_cost_data 1 + retrieve_cutout 1 + retrieve_databundle 1 + retrieve_electricity_demand 1 + retrieve_eurostat_data 1 + retrieve_gas_infrastructure_data 1 + retrieve_natura_raster 1 + retrieve_sector_databundle 1 + retrieve_ship_raster 1 + retrieve_synthetic_electricity_demand 1 + simplify_network 1 + solve_sector_network 1 + total 63 This covers the retrieval of additional raw data from online resources and preprocessing data about the transport, industry, and heating sectors as well as @@ -119,161 +142,252 @@ successfully. 
graph[bgcolor=white, margin=0]; node[shape=box, style=rounded, fontname=sans, fontsize=10, penwidth=2]; edge[penwidth=2, color=grey]; - 0[label = "all", color = "0.51 0.6 0.85", style="rounded"]; - 1[label = "plot_summary", color = "0.54 0.6 0.85", style="rounded"]; - 2[label = "make_summary", color = "0.44 0.6 0.85", style="rounded"]; - 3[label = "solve_sector_network", color = "0.46 0.6 0.85", style="rounded"]; - 4[label = "prepare_sector_network", color = "0.09 0.6 0.85", style="rounded"]; - 5[label = "cluster_gas_network", color = "0.38 0.6 0.85", style="rounded"]; - 6[label = "build_gas_network", color = "0.00 0.6 0.85", style="rounded"]; - 7[label = "retrieve_gas_infrastructure_data", color = "0.33 0.6 0.85", style="rounded"]; - 8[label = "cluster_network", color = "0.26 0.6 0.85", style="rounded"]; - 9[label = "simplify_network", color = "0.03 0.6 0.85", style="rounded"]; - 10[label = "add_electricity", color = "0.25 0.6 0.85", style="rounded"]; - 11[label = "build_renewable_profiles", color = "0.07 0.6 0.85", style="rounded"]; - 12[label = "base_network", color = "0.16 0.6 0.85", style="rounded"]; - 13[label = "build_shapes", color = "0.65 0.6 0.85", style="rounded"]; - 14[label = "retrieve_databundle", color = "0.20 0.6 0.85", style="rounded"]; - 15[label = "retrieve_natura_raster", color = "0.10 0.6 0.85", style="rounded"]; - 16[label = "build_bus_regions", color = "0.11 0.6 0.85", style="rounded"]; - 17[label = "build_ship_raster", color = "0.56 0.6 0.85", style="rounded"]; - 18[label = "retrieve_ship_raster", color = "0.15 0.6 0.85", style="rounded"]; - 19[label = "retrieve_cost_data", color = "0.50 0.6 0.85", style="rounded"]; - 20[label = "build_powerplants", color = "0.49 0.6 0.85", style="rounded"]; - 21[label = "build_electricity_demand", color = "0.39 0.6 0.85", style="rounded"]; - 22[label = "retrieve_electricity_demand", color = "0.05 0.6 0.85", style="rounded"]; - 23[label = "build_gas_input_locations", color = "0.45 0.6 0.85", style="rounded"]; - 24[label = "prepare_network", color = "0.31 0.6 0.85", style="rounded"]; - 25[label = "add_extra_components", color = "0.23 0.6 0.85", style="rounded"]; - 26[label = "build_energy_totals", color = "0.19 0.6 0.85", style="rounded"]; - 27[label = "build_population_weighted_energy_totals", color = "0.27 0.6 0.85", style="rounded"]; - 28[label = "build_clustered_population_layouts", color = "0.64 0.6 0.85", style="rounded"]; - 29[label = "build_population_layouts", color = "0.43 0.6 0.85", style="rounded"]; - 30[label = "build_shipping_demand", color = "0.57 0.6 0.85", style="rounded"]; - 31[label = "build_transport_demand", color = "0.53 0.6 0.85", style="rounded"]; - 32[label = "build_temperature_profiles", color = "0.58 0.6 0.85", style="rounded"]; - 33[label = "build_biomass_potentials", color = "0.30 0.6 0.85", style="rounded"]; - 34[label = "build_salt_cavern_potentials", color = "0.47 0.6 0.85", style="rounded"]; - 35[label = "build_simplified_population_layouts", color = "0.32 0.6 0.85", style="rounded"]; - 36[label = "build_industrial_energy_demand_per_node", color = "0.14 0.6 0.85", style="rounded"]; - 37[label = "build_industry_sector_ratios", color = "0.18 0.6 0.85", style="rounded"]; - 38[label = "build_ammonia_production", color = "0.48 0.6 0.85", style="rounded"]; - 39[label = "build_industrial_production_per_node", color = "0.12 0.6 0.85", style="rounded"]; - 40[label = "build_industrial_distribution_key", color = "0.61 0.6 0.85", style="rounded"]; - 41[label = "build_industrial_production_per_country_tomorrow", 
color = "0.22 0.6 0.85", style="rounded"]; - 42[label = "build_industrial_production_per_country", color = "0.59 0.6 0.85", style="rounded"]; - 43[label = "build_industrial_energy_demand_per_node_today", color = "0.62 0.6 0.85", style="rounded"]; - 44[label = "build_industrial_energy_demand_per_country_today", color = "0.41 0.6 0.85", style="rounded"]; - 45[label = "build_heat_demands", color = "0.08 0.6 0.85", style="rounded"]; - 46[label = "build_cop_profiles", color = "0.52 0.6 0.85", style="rounded"]; - 47[label = "build_solar_thermal_profiles", color = "0.17 0.6 0.85", style="rounded"]; - 48[label = "copy_config", color = "0.40 0.6 0.85", style="rounded"]; - 49[label = "plot_network", color = "0.60 0.6 0.85", style="rounded"]; - 1 -> 0 - 2 -> 1 - 49 -> 2 - 19 -> 2 - 3 -> 2 - 48 -> 3 - 4 -> 3 - 19 -> 3 - 9 -> 4 - 11 -> 4 - 45 -> 4 - 36 -> 4 - 47 -> 4 - 26 -> 4 - 27 -> 4 - 8 -> 4 - 33 -> 4 - 24 -> 4 - 35 -> 4 - 5 -> 4 - 23 -> 4 - 34 -> 4 - 19 -> 4 - 31 -> 4 - 46 -> 4 - 30 -> 4 - 32 -> 4 - 28 -> 4 - 6 -> 5 - 8 -> 5 - 7 -> 6 - 19 -> 8 - 9 -> 8 - 19 -> 9 - 10 -> 9 - 16 -> 9 - 14 -> 10 - 21 -> 10 - 20 -> 10 - 19 -> 10 - 11 -> 10 - 16 -> 10 - 13 -> 10 - 12 -> 10 - 14 -> 11 - 17 -> 11 - 15 -> 11 - 16 -> 11 - 12 -> 11 - 13 -> 11 - 13 -> 12 - 14 -> 13 - 12 -> 16 - 13 -> 16 - 18 -> 17 - 12 -> 20 - 22 -> 21 - 8 -> 23 - 7 -> 23 - 25 -> 24 - 19 -> 24 - 19 -> 25 - 8 -> 25 - 13 -> 26 - 28 -> 27 - 26 -> 27 - 8 -> 28 - 29 -> 28 - 13 -> 29 - 13 -> 30 - 8 -> 30 - 26 -> 30 - 32 -> 31 - 28 -> 31 - 27 -> 31 - 26 -> 31 - 8 -> 32 - 29 -> 32 - 13 -> 33 - 14 -> 33 - 8 -> 33 - 8 -> 34 - 9 -> 35 - 29 -> 35 - 37 -> 36 - 39 -> 36 - 43 -> 36 - 38 -> 37 - 41 -> 39 - 40 -> 39 - 28 -> 40 - 8 -> 40 - 42 -> 41 - 38 -> 42 - 44 -> 43 - 40 -> 43 - 38 -> 44 - 42 -> 44 - 8 -> 45 - 29 -> 45 - 32 -> 46 - 8 -> 47 - 29 -> 47 - 8 -> 49 - 3 -> 49 + 0[label = "all", color = "0.66 0.6 0.85", style="rounded"]; + 1[label = "plot_summary", color = "0.20 0.6 0.85", style="rounded"]; + 2[label = "make_summary", color = "0.02 0.6 0.85", style="rounded"]; + 3[label = "solve_sector_network", color = "0.11 0.6 0.85", style="rounded"]; + 4[label = "prepare_sector_network\nsector_opts: CO2L0-24h-T-H-B-I-A-dist1", color = "0.22 0.6 0.85", style="rounded"]; + 5[label = "build_renewable_profiles\ntechnology: offwind-ac", color = "0.26 0.6 0.85", style="rounded"]; + 6[label = "base_network", color = "0.53 0.6 0.85", style="rounded"]; + 7[label = "build_shapes", color = "0.04 0.6 0.85", style="rounded"]; + 8[label = "retrieve_databundle", color = "0.49 0.6 0.85", style="rounded"]; + 9[label = "retrieve_natura_raster", color = "0.46 0.6 0.85", style="rounded"]; + 10[label = "build_ship_raster", color = "0.29 0.6 0.85", style="rounded"]; + 11[label = "retrieve_ship_raster", color = "0.42 0.6 0.85", style="rounded"]; + 12[label = "retrieve_cutout\ncutout: be-03-2013-era5", color = "0.27 0.6 0.85", style="rounded"]; + 13[label = "build_renewable_profiles\ntechnology: offwind-dc", color = "0.26 0.6 0.85", style="rounded"]; + 14[label = "cluster_gas_network", color = "0.48 0.6 0.85", style="rounded"]; + 15[label = "build_gas_network", color = "0.27 0.6 0.85", style="rounded"]; + 16[label = "retrieve_gas_infrastructure_data", color = "0.38 0.6 0.85", style="rounded"]; + 17[label = "cluster_network\nclusters: 5", color = "0.58 0.6 0.85", style="rounded"]; + 18[label = "simplify_network\nsimpl: ", color = "0.55 0.6 0.85", style="rounded"]; + 19[label = "add_electricity", color = "0.37 0.6 0.85", style="rounded"]; + 20[label = 
"build_renewable_profiles\ntechnology: solar", color = "0.26 0.6 0.85", style="rounded"]; + 21[label = "build_renewable_profiles\ntechnology: onwind", color = "0.26 0.6 0.85", style="rounded"]; + 22[label = "retrieve_cost_data\nyear: 2030", color = "0.14 0.6 0.85", style="rounded"]; + 23[label = "build_powerplants", color = "0.64 0.6 0.85", style="rounded"]; + 24[label = "build_electricity_demand", color = "0.61 0.6 0.85", style="rounded"]; + 25[label = "retrieve_electricity_demand", color = "0.08 0.6 0.85", style="rounded"]; + 26[label = "retrieve_synthetic_electricity_demand", color = "0.36 0.6 0.85", style="rounded"]; + 27[label = "build_gas_input_locations", color = "0.44 0.6 0.85", style="rounded"]; + 28[label = "prepare_network\nll: v1.5\nopts: ", color = "0.25 0.6 0.85", style="rounded"]; + 29[label = "add_extra_components", color = "0.39 0.6 0.85", style="rounded"]; + 30[label = "retrieve_eurostat_data", color = "0.20 0.6 0.85", style="rounded"]; + 31[label = "build_population_weighted_energy_totals\nkind: energy", color = "0.58 0.6 0.85", style="rounded"]; + 32[label = "build_energy_totals", color = "0.44 0.6 0.85", style="rounded"]; + 33[label = "retrieve_sector_databundle", color = "0.60 0.6 0.85", style="rounded"]; + 34[label = "build_clustered_population_layouts", color = "0.46 0.6 0.85", style="rounded"]; + 35[label = "build_population_layouts", color = "0.43 0.6 0.85", style="rounded"]; + 36[label = "build_population_weighted_energy_totals\nkind: heat", color = "0.58 0.6 0.85", style="rounded"]; + 37[label = "build_heat_totals", color = "0.11 0.6 0.85", style="rounded"]; + 38[label = "build_shipping_demand", color = "0.16 0.6 0.85", style="rounded"]; + 39[label = "build_transport_demand", color = "0.04 0.6 0.85", style="rounded"]; + 40[label = "build_temperature_profiles\nscope: total", color = "0.28 0.6 0.85", style="rounded"]; + 41[label = "build_biomass_potentials\nplanning_horizons: 2030", color = "0.07 0.6 0.85", style="rounded"]; + 42[label = "build_salt_cavern_potentials", color = "0.47 0.6 0.85", style="rounded"]; + 43[label = "build_simplified_population_layouts", color = "0.29 0.6 0.85", style="rounded"]; + 44[label = "build_industrial_energy_demand_per_node", color = "0.39 0.6 0.85", style="rounded"]; + 45[label = "build_industry_sector_ratios_intermediate\nplanning_horizons: 2030", color = "0.57 0.6 0.85", style="rounded"]; + 46[label = "build_industry_sector_ratios", color = "0.55 0.6 0.85", style="rounded"]; + 47[label = "build_ammonia_production", color = "0.00 0.6 0.85", style="rounded"]; + 48[label = "build_industrial_energy_demand_per_country_today", color = "0.52 0.6 0.85", style="rounded"]; + 49[label = "build_industrial_production_per_country", color = "0.19 0.6 0.85", style="rounded"]; + 50[label = "build_industrial_production_per_node", color = "0.21 0.6 0.85", style="rounded"]; + 51[label = "build_industrial_distribution_key", color = "0.10 0.6 0.85", style="rounded"]; + 52[label = "build_industrial_production_per_country_tomorrow\nplanning_horizons: 2030", color = "0.63 0.6 0.85", style="rounded"]; + 53[label = "build_industrial_energy_demand_per_node_today", color = "0.18 0.6 0.85", style="rounded"]; + 54[label = "build_hourly_heat_demand", color = "0.13 0.6 0.85", style="rounded"]; + 55[label = "build_daily_heat_demand\nscope: total", color = "0.22 0.6 0.85", style="rounded"]; + 56[label = "build_district_heat_share\nplanning_horizons: 2030", color = "0.34 0.6 0.85", style="rounded"]; + 57[label = "build_temperature_profiles\nscope: rural", color = 
"0.28 0.6 0.85", style="rounded"]; + 58[label = "build_temperature_profiles\nscope: urban", color = "0.28 0.6 0.85", style="rounded"]; + 59[label = "build_cop_profiles", color = "0.65 0.6 0.85", style="rounded"]; + 60[label = "build_solar_thermal_profiles\nscope: total", color = "0.54 0.6 0.85", style="rounded"]; + 61[label = "build_solar_thermal_profiles\nscope: urban", color = "0.54 0.6 0.85", style="rounded"]; + 62[label = "build_solar_thermal_profiles\nscope: rural", color = "0.54 0.6 0.85", style="rounded"]; + 63[label = "plot_power_network_clustered", color = "0.15 0.6 0.85", style="rounded"]; + 64[label = "plot_power_network", color = "0.56 0.6 0.85", style="rounded"]; + 65[label = "plot_hydrogen_network", color = "0.60 0.6 0.85", style="rounded"]; + 66[label = "plot_gas_network", color = "0.53 0.6 0.85", style="rounded"]; + 1 -> 0 + 2 -> 1 + 30 -> 1 + 33 -> 1 + 3 -> 2 + 22 -> 2 + 63 -> 2 + 64 -> 2 + 65 -> 2 + 66 -> 2 + 4 -> 3 + 5 -> 4 + 13 -> 4 + 14 -> 4 + 27 -> 4 + 28 -> 4 + 30 -> 4 + 31 -> 4 + 36 -> 4 + 38 -> 4 + 39 -> 4 + 32 -> 4 + 33 -> 4 + 41 -> 4 + 22 -> 4 + 42 -> 4 + 18 -> 4 + 17 -> 4 + 34 -> 4 + 43 -> 4 + 44 -> 4 + 54 -> 4 + 56 -> 4 + 40 -> 4 + 57 -> 4 + 58 -> 4 + 59 -> 4 + 60 -> 4 + 61 -> 4 + 62 -> 4 + 6 -> 5 + 8 -> 5 + 9 -> 5 + 10 -> 5 + 7 -> 5 + 12 -> 5 + 7 -> 6 + 8 -> 7 + 11 -> 10 + 12 -> 10 + 6 -> 13 + 8 -> 13 + 9 -> 13 + 10 -> 13 + 7 -> 13 + 12 -> 13 + 15 -> 14 + 17 -> 14 + 16 -> 15 + 18 -> 17 + 22 -> 17 + 19 -> 18 + 22 -> 18 + 6 -> 18 + 20 -> 19 + 21 -> 19 + 5 -> 19 + 13 -> 19 + 6 -> 19 + 22 -> 19 + 23 -> 19 + 8 -> 19 + 24 -> 19 + 7 -> 19 + 6 -> 20 + 8 -> 20 + 9 -> 20 + 7 -> 20 + 12 -> 20 + 6 -> 21 + 8 -> 21 + 9 -> 21 + 7 -> 21 + 12 -> 21 + 6 -> 23 + 25 -> 24 + 26 -> 24 + 16 -> 27 + 17 -> 27 + 29 -> 28 + 22 -> 28 + 17 -> 29 + 22 -> 29 + 32 -> 31 + 34 -> 31 + 7 -> 32 + 33 -> 32 + 30 -> 32 + 35 -> 34 + 17 -> 34 + 12 -> 34 + 7 -> 35 + 12 -> 35 + 37 -> 36 + 34 -> 36 + 32 -> 37 + 7 -> 38 + 17 -> 38 + 32 -> 38 + 34 -> 39 + 31 -> 39 + 32 -> 39 + 33 -> 39 + 40 -> 39 + 35 -> 40 + 17 -> 40 + 12 -> 40 + 33 -> 41 + 17 -> 41 + 8 -> 41 + 7 -> 41 + 33 -> 42 + 17 -> 42 + 35 -> 43 + 18 -> 43 + 12 -> 43 + 45 -> 44 + 50 -> 44 + 53 -> 44 + 46 -> 45 + 48 -> 45 + 49 -> 45 + 47 -> 46 + 33 -> 46 + 33 -> 47 + 33 -> 48 + 49 -> 48 + 47 -> 49 + 33 -> 49 + 30 -> 49 + 51 -> 50 + 52 -> 50 + 17 -> 51 + 34 -> 51 + 33 -> 51 + 49 -> 52 + 51 -> 53 + 48 -> 53 + 55 -> 54 + 35 -> 55 + 17 -> 55 + 12 -> 55 + 32 -> 56 + 34 -> 56 + 35 -> 57 + 17 -> 57 + 12 -> 57 + 35 -> 58 + 17 -> 58 + 12 -> 58 + 40 -> 59 + 57 -> 59 + 58 -> 59 + 35 -> 60 + 17 -> 60 + 12 -> 60 + 35 -> 61 + 17 -> 61 + 12 -> 61 + 35 -> 62 + 17 -> 62 + 12 -> 62 + 17 -> 63 + 3 -> 64 + 17 -> 64 + 3 -> 65 + 17 -> 65 + 3 -> 66 + 17 -> 66 } | @@ -320,23 +434,10 @@ To run a myopic foresight scenario with the specifications above, run snakemake -call all --configfile config/test/config.myopic.yaml -which will result in the following *additional* jobs ``snakemake`` wants to run: - -.. 
code:: bash - - job count min threads max threads - ------------------------------------------------ ------- ------------- ------------- - all 1 1 1 - add_brownfield 2 1 1 - add_existing_baseyear 1 1 1 - plot_network 3 1 1 - plot_summary 1 1 1 - prepare_sector_network 3 1 1 - solve_sector_network_myopic 3 1 1 - -which translates to the following workflow diagram which nicely outlines -how the sequential pathway optimisation with myopic foresight is -implemented in the workflow: +which will result in additional jobs ``snakemake`` wants to run, which +translates to the following workflow diagram which nicely outlines how the +sequential pathway optimisation with myopic foresight is implemented in the +workflow: .. graphviz:: :class: full-width @@ -346,164 +447,412 @@ implemented in the workflow: graph[bgcolor=white, margin=0]; node[shape=box, style=rounded, fontname=sans, fontsize=10, penwidth=2]; edge[penwidth=2, color=grey]; - 0[label = "all", color = "0.38 0.6 0.85", style="rounded"]; - 1[label = "plot_summary", color = "0.61 0.6 0.85", style="rounded"]; - 2[label = "make_summary", color = "0.51 0.6 0.85", style="rounded"]; - 3[label = "solve_sector_network_myopic", color = "0.32 0.6 0.85", style="rounded"]; - 4[label = "add_existing_baseyear", color = "0.20 0.6 0.85", style="rounded"]; - 5[label = "prepare_sector_network", color = "0.14 0.6 0.85", style="rounded"]; - 6[label = "prepare_network", color = "0.06 0.6 0.85", style="rounded"]; - 7[label = "add_extra_components", color = "0.00 0.6 0.85", style="rounded"]; - 8[label = "cluster_network", color = "0.18 0.6 0.85", style="rounded"]; - 9[label = "simplify_network", color = "0.30 0.6 0.85", style="rounded"]; - 10[label = "add_electricity", color = "0.24 0.6 0.85", style="rounded"]; - 11[label = "build_renewable_profiles", color = "0.40 0.6 0.85", style="rounded"]; - 12[label = "base_network", color = "0.11 0.6 0.85", style="rounded"]; - 13[label = "build_shapes", color = "0.29 0.6 0.85", style="rounded"]; - 14[label = "retrieve_databundle", color = "0.58 0.6 0.85", style="rounded"]; - 15[label = "retrieve_natura_raster", color = "0.39 0.6 0.85", style="rounded"]; - 16[label = "build_bus_regions", color = "0.60 0.6 0.85", style="rounded"]; - 17[label = "build_ship_raster", color = "0.65 0.6 0.85", style="rounded"]; - 18[label = "retrieve_ship_raster", color = "0.09 0.6 0.85", style="rounded"]; - 19[label = "retrieve_cost_data", color = "0.04 0.6 0.85", style="rounded"]; - 20[label = "build_powerplants", color = "0.28 0.6 0.85", style="rounded"]; - 21[label = "build_electricity_demand", color = "0.46 0.6 0.85", style="rounded"]; - 22[label = "retrieve_electricity_demand", color = "0.44 0.6 0.85", style="rounded"]; - 23[label = "build_energy_totals", color = "0.53 0.6 0.85", style="rounded"]; - 24[label = "build_population_weighted_energy_totals", color = "0.03 0.6 0.85", style="rounded"]; - 25[label = "build_clustered_population_layouts", color = "0.34 0.6 0.85", style="rounded"]; - 26[label = "build_population_layouts", color = "0.63 0.6 0.85", style="rounded"]; - 27[label = "build_shipping_demand", color = "0.05 0.6 0.85", style="rounded"]; - 28[label = "build_transport_demand", color = "0.52 0.6 0.85", style="rounded"]; - 29[label = "build_temperature_profiles", color = "0.16 0.6 0.85", style="rounded"]; - 30[label = "build_biomass_potentials", color = "0.47 0.6 0.85", style="rounded"]; - 31[label = "build_salt_cavern_potentials", color = "0.48 0.6 0.85", style="rounded"]; - 32[label = "build_simplified_population_layouts", color = 
"0.08 0.6 0.85", style="rounded"]; - 33[label = "build_industrial_energy_demand_per_node", color = "0.22 0.6 0.85", style="rounded"]; - 34[label = "build_industry_sector_ratios", color = "0.56 0.6 0.85", style="rounded"]; - 35[label = "build_ammonia_production", color = "0.57 0.6 0.85", style="rounded"]; - 36[label = "build_industrial_production_per_node", color = "0.66 0.6 0.85", style="rounded"]; - 37[label = "build_industrial_distribution_key", color = "0.41 0.6 0.85", style="rounded"]; - 38[label = "build_industrial_production_per_country_tomorrow", color = "0.54 0.6 0.85", style="rounded"]; - 39[label = "build_industrial_production_per_country", color = "0.10 0.6 0.85", style="rounded"]; - 40[label = "build_industrial_energy_demand_per_node_today", color = "0.55 0.6 0.85", style="rounded"]; - 41[label = "build_industrial_energy_demand_per_country_today", color = "0.35 0.6 0.85", style="rounded"]; - 42[label = "build_heat_demands", color = "0.49 0.6 0.85", style="rounded"]; - 43[label = "build_cop_profiles", color = "0.01 0.6 0.85", style="rounded"]; - 44[label = "build_solar_thermal_profiles", color = "0.45 0.6 0.85", style="rounded"]; - 45[label = "copy_config", color = "0.33 0.6 0.85", style="rounded"]; - 46[label = "add_brownfield", color = "0.59 0.6 0.85", style="rounded"]; - 47[label = "plot_network", color = "0.15 0.6 0.85", style="rounded"]; - 1 -> 0 - 2 -> 1 - 3 -> 2 - 19 -> 2 - 47 -> 2 - 46 -> 3 - 19 -> 3 - 4 -> 3 - 45 -> 3 - 43 -> 4 - 19 -> 4 - 20 -> 4 - 9 -> 4 - 5 -> 4 - 25 -> 4 - 8 -> 4 - 28 -> 5 - 23 -> 5 - 11 -> 5 - 33 -> 5 - 24 -> 5 - 43 -> 5 - 19 -> 5 - 27 -> 5 - 6 -> 5 - 31 -> 5 - 32 -> 5 - 44 -> 5 - 9 -> 5 - 30 -> 5 - 25 -> 5 - 29 -> 5 - 42 -> 5 - 8 -> 5 - 7 -> 6 - 19 -> 6 - 19 -> 7 - 8 -> 7 - 9 -> 8 - 19 -> 8 - 10 -> 9 - 19 -> 9 - 16 -> 9 - 11 -> 10 - 19 -> 10 - 14 -> 10 - 20 -> 10 - 12 -> 10 - 21 -> 10 - 16 -> 10 - 13 -> 10 - 15 -> 11 - 14 -> 11 - 13 -> 11 - 12 -> 11 - 16 -> 11 - 17 -> 11 - 13 -> 12 - 14 -> 13 - 13 -> 16 - 12 -> 16 - 18 -> 17 - 12 -> 20 - 22 -> 21 - 13 -> 23 - 25 -> 24 - 23 -> 24 - 8 -> 25 - 26 -> 25 - 13 -> 26 - 13 -> 27 - 23 -> 27 - 8 -> 27 - 24 -> 28 - 25 -> 28 - 29 -> 28 - 23 -> 28 - 8 -> 29 - 26 -> 29 - 13 -> 30 - 14 -> 30 - 8 -> 30 - 8 -> 31 - 9 -> 32 - 26 -> 32 - 34 -> 33 - 36 -> 33 - 40 -> 33 - 35 -> 34 - 37 -> 36 - 38 -> 36 - 25 -> 37 - 8 -> 37 - 39 -> 38 - 35 -> 39 - 41 -> 40 - 37 -> 40 - 39 -> 41 - 35 -> 41 - 8 -> 42 - 26 -> 42 - 29 -> 43 - 8 -> 44 - 26 -> 44 - 3 -> 46 - 19 -> 46 - 5 -> 46 - 43 -> 46 - 3 -> 47 - 8 -> 47 + 0[label = "all", color = "0.20 0.6 0.85", style="rounded"]; + 1[label = "plot_summary", color = "0.55 0.6 0.85", style="rounded"]; + 2[label = "make_summary", color = "0.21 0.6 0.85", style="rounded"]; + 3[label = "solve_sector_network_myopic", color = "0.50 0.6 0.85", style="rounded"]; + 4[label = "add_existing_baseyear", color = "0.38 0.6 0.85", style="rounded"]; + 5[label = "prepare_sector_network\nsector_opts: 24h-T-H-B-I-A-dist1", color = "0.53 0.6 0.85", style="rounded"]; + 6[label = "build_renewable_profiles\ntechnology: offwind-ac", color = "0.54 0.6 0.85", style="rounded"]; + 7[label = "base_network", color = "0.12 0.6 0.85", style="rounded"]; + 8[label = "build_shapes", color = "0.21 0.6 0.85", style="rounded"]; + 9[label = "retrieve_databundle", color = "0.41 0.6 0.85", style="rounded"]; + 10[label = "retrieve_natura_raster", color = "0.38 0.6 0.85", style="rounded"]; + 11[label = "build_ship_raster", color = "0.05 0.6 0.85", style="rounded"]; + 12[label = "retrieve_ship_raster", color = "0.25 0.6 0.85", 
style="rounded"]; + 13[label = "retrieve_cutout\ncutout: be-03-2013-era5", color = "0.23 0.6 0.85", style="rounded"]; + 14[label = "build_renewable_profiles\ntechnology: offwind-dc", color = "0.54 0.6 0.85", style="rounded"]; + 15[label = "cluster_gas_network", color = "0.16 0.6 0.85", style="rounded"]; + 16[label = "build_gas_network", color = "0.26 0.6 0.85", style="rounded"]; + 17[label = "retrieve_gas_infrastructure_data", color = "0.04 0.6 0.85", style="rounded"]; + 18[label = "cluster_network\nclusters: 5", color = "0.10 0.6 0.85", style="rounded"]; + 19[label = "simplify_network\nsimpl: ", color = "0.02 0.6 0.85", style="rounded"]; + 20[label = "add_electricity", color = "0.56 0.6 0.85", style="rounded"]; + 21[label = "build_renewable_profiles\ntechnology: solar", color = "0.54 0.6 0.85", style="rounded"]; + 22[label = "build_renewable_profiles\ntechnology: onwind", color = "0.54 0.6 0.85", style="rounded"]; + 23[label = "retrieve_cost_data\nyear: 2030", color = "0.16 0.6 0.85", style="rounded"]; + 24[label = "build_powerplants", color = "0.63 0.6 0.85", style="rounded"]; + 25[label = "build_electricity_demand", color = "0.57 0.6 0.85", style="rounded"]; + 26[label = "retrieve_electricity_demand", color = "0.27 0.6 0.85", style="rounded"]; + 27[label = "retrieve_synthetic_electricity_demand", color = "0.58 0.6 0.85", style="rounded"]; + 28[label = "build_gas_input_locations", color = "0.28 0.6 0.85", style="rounded"]; + 29[label = "prepare_network\nll: v1.5\nopts: ", color = "0.14 0.6 0.85", style="rounded"]; + 30[label = "add_extra_components", color = "0.14 0.6 0.85", style="rounded"]; + 31[label = "retrieve_eurostat_data", color = "0.58 0.6 0.85", style="rounded"]; + 32[label = "build_population_weighted_energy_totals\nkind: energy", color = "0.36 0.6 0.85", style="rounded"]; + 33[label = "build_energy_totals", color = "0.65 0.6 0.85", style="rounded"]; + 34[label = "retrieve_sector_databundle", color = "0.46 0.6 0.85", style="rounded"]; + 35[label = "build_clustered_population_layouts", color = "0.52 0.6 0.85", style="rounded"]; + 36[label = "build_population_layouts", color = "0.13 0.6 0.85", style="rounded"]; + 37[label = "build_population_weighted_energy_totals\nkind: heat", color = "0.36 0.6 0.85", style="rounded"]; + 38[label = "build_heat_totals", color = "0.31 0.6 0.85", style="rounded"]; + 39[label = "build_shipping_demand", color = "0.01 0.6 0.85", style="rounded"]; + 40[label = "build_transport_demand", color = "0.51 0.6 0.85", style="rounded"]; + 41[label = "build_temperature_profiles\nscope: total", color = "0.00 0.6 0.85", style="rounded"]; + 42[label = "build_biomass_potentials\nplanning_horizons: 2030", color = "0.18 0.6 0.85", style="rounded"]; + 43[label = "build_salt_cavern_potentials", color = "0.25 0.6 0.85", style="rounded"]; + 44[label = "build_simplified_population_layouts", color = "0.27 0.6 0.85", style="rounded"]; + 45[label = "build_industrial_energy_demand_per_node", color = "0.30 0.6 0.85", style="rounded"]; + 46[label = "build_industry_sector_ratios_intermediate\nplanning_horizons: 2030", color = "0.41 0.6 0.85", style="rounded"]; + 47[label = "build_industry_sector_ratios", color = "0.03 0.6 0.85", style="rounded"]; + 48[label = "build_ammonia_production", color = "0.37 0.6 0.85", style="rounded"]; + 49[label = "build_industrial_energy_demand_per_country_today", color = "0.10 0.6 0.85", style="rounded"]; + 50[label = "build_industrial_production_per_country", color = "0.03 0.6 0.85", style="rounded"]; + 51[label = 
"build_industrial_production_per_node", color = "0.63 0.6 0.85", style="rounded"]; + 52[label = "build_industrial_distribution_key", color = "0.17 0.6 0.85", style="rounded"]; + 53[label = "build_industrial_production_per_country_tomorrow\nplanning_horizons: 2030", color = "0.06 0.6 0.85", style="rounded"]; + 54[label = "build_industrial_energy_demand_per_node_today", color = "0.08 0.6 0.85", style="rounded"]; + 55[label = "build_hourly_heat_demand", color = "0.08 0.6 0.85", style="rounded"]; + 56[label = "build_daily_heat_demand\nscope: total", color = "0.60 0.6 0.85", style="rounded"]; + 57[label = "build_district_heat_share\nplanning_horizons: 2030", color = "0.32 0.6 0.85", style="rounded"]; + 58[label = "build_temperature_profiles\nscope: rural", color = "0.00 0.6 0.85", style="rounded"]; + 59[label = "build_temperature_profiles\nscope: urban", color = "0.00 0.6 0.85", style="rounded"]; + 60[label = "build_cop_profiles", color = "0.11 0.6 0.85", style="rounded"]; + 61[label = "build_solar_thermal_profiles\nscope: total", color = "0.01 0.6 0.85", style="rounded"]; + 62[label = "build_solar_thermal_profiles\nscope: urban", color = "0.01 0.6 0.85", style="rounded"]; + 63[label = "build_solar_thermal_profiles\nscope: rural", color = "0.01 0.6 0.85", style="rounded"]; + 64[label = "build_existing_heating_distribution", color = "0.40 0.6 0.85", style="rounded"]; + 65[label = "solve_sector_network_myopic", color = "0.50 0.6 0.85", style="rounded"]; + 66[label = "add_brownfield", color = "0.45 0.6 0.85", style="rounded"]; + 67[label = "prepare_sector_network\nsector_opts: 24h-T-H-B-I-A-dist1", color = "0.53 0.6 0.85", style="rounded"]; + 68[label = "build_biomass_potentials\nplanning_horizons: 2040", color = "0.18 0.6 0.85", style="rounded"]; + 69[label = "retrieve_cost_data\nyear: 2040", color = "0.16 0.6 0.85", style="rounded"]; + 70[label = "build_industrial_energy_demand_per_node", color = "0.30 0.6 0.85", style="rounded"]; + 71[label = "build_industry_sector_ratios_intermediate\nplanning_horizons: 2040", color = "0.41 0.6 0.85", style="rounded"]; + 72[label = "build_industrial_production_per_node", color = "0.63 0.6 0.85", style="rounded"]; + 73[label = "build_industrial_production_per_country_tomorrow\nplanning_horizons: 2040", color = "0.06 0.6 0.85", style="rounded"]; + 74[label = "build_district_heat_share\nplanning_horizons: 2040", color = "0.32 0.6 0.85", style="rounded"]; + 75[label = "solve_sector_network_myopic", color = "0.50 0.6 0.85", style="rounded"]; + 76[label = "add_brownfield", color = "0.45 0.6 0.85", style="rounded"]; + 77[label = "prepare_sector_network\nsector_opts: 24h-T-H-B-I-A-dist1", color = "0.53 0.6 0.85", style="rounded"]; + 78[label = "build_biomass_potentials\nplanning_horizons: 2050", color = "0.18 0.6 0.85", style="rounded"]; + 79[label = "retrieve_cost_data\nyear: 2050", color = "0.16 0.6 0.85", style="rounded"]; + 80[label = "build_industrial_energy_demand_per_node", color = "0.30 0.6 0.85", style="rounded"]; + 81[label = "build_industry_sector_ratios_intermediate\nplanning_horizons: 2050", color = "0.41 0.6 0.85", style="rounded"]; + 82[label = "build_industrial_production_per_node", color = "0.63 0.6 0.85", style="rounded"]; + 83[label = "build_industrial_production_per_country_tomorrow\nplanning_horizons: 2050", color = "0.06 0.6 0.85", style="rounded"]; + 84[label = "build_district_heat_share\nplanning_horizons: 2050", color = "0.32 0.6 0.85", style="rounded"]; + 85[label = "plot_power_network_clustered", color = "0.09 0.6 0.85", style="rounded"]; + 
86[label = "plot_power_network", color = "0.43 0.6 0.85", style="rounded"]; + 87[label = "plot_power_network", color = "0.43 0.6 0.85", style="rounded"]; + 88[label = "plot_power_network", color = "0.43 0.6 0.85", style="rounded"]; + 89[label = "plot_hydrogen_network", color = "0.33 0.6 0.85", style="rounded"]; + 90[label = "plot_hydrogen_network", color = "0.33 0.6 0.85", style="rounded"]; + 91[label = "plot_hydrogen_network", color = "0.33 0.6 0.85", style="rounded"]; + 1 -> 0 + 2 -> 1 + 31 -> 1 + 34 -> 1 + 3 -> 2 + 65 -> 2 + 75 -> 2 + 23 -> 2 + 85 -> 2 + 86 -> 2 + 87 -> 2 + 88 -> 2 + 89 -> 2 + 90 -> 2 + 91 -> 2 + 4 -> 3 + 23 -> 3 + 5 -> 4 + 24 -> 4 + 19 -> 4 + 18 -> 4 + 35 -> 4 + 23 -> 4 + 60 -> 4 + 64 -> 4 + 6 -> 5 + 14 -> 5 + 15 -> 5 + 28 -> 5 + 29 -> 5 + 31 -> 5 + 32 -> 5 + 37 -> 5 + 39 -> 5 + 40 -> 5 + 33 -> 5 + 34 -> 5 + 42 -> 5 + 23 -> 5 + 43 -> 5 + 19 -> 5 + 18 -> 5 + 35 -> 5 + 44 -> 5 + 45 -> 5 + 55 -> 5 + 57 -> 5 + 41 -> 5 + 58 -> 5 + 59 -> 5 + 60 -> 5 + 61 -> 5 + 62 -> 5 + 63 -> 5 + 7 -> 6 + 9 -> 6 + 10 -> 6 + 11 -> 6 + 8 -> 6 + 13 -> 6 + 8 -> 7 + 9 -> 8 + 12 -> 11 + 13 -> 11 + 7 -> 14 + 9 -> 14 + 10 -> 14 + 11 -> 14 + 8 -> 14 + 13 -> 14 + 16 -> 15 + 18 -> 15 + 17 -> 16 + 19 -> 18 + 23 -> 18 + 20 -> 19 + 23 -> 19 + 7 -> 19 + 21 -> 20 + 22 -> 20 + 6 -> 20 + 14 -> 20 + 7 -> 20 + 23 -> 20 + 24 -> 20 + 9 -> 20 + 25 -> 20 + 8 -> 20 + 7 -> 21 + 9 -> 21 + 10 -> 21 + 8 -> 21 + 13 -> 21 + 7 -> 22 + 9 -> 22 + 10 -> 22 + 8 -> 22 + 13 -> 22 + 7 -> 24 + 26 -> 25 + 27 -> 25 + 17 -> 28 + 18 -> 28 + 30 -> 29 + 23 -> 29 + 18 -> 30 + 23 -> 30 + 33 -> 32 + 35 -> 32 + 8 -> 33 + 34 -> 33 + 31 -> 33 + 36 -> 35 + 18 -> 35 + 13 -> 35 + 8 -> 36 + 13 -> 36 + 38 -> 37 + 35 -> 37 + 33 -> 38 + 8 -> 39 + 18 -> 39 + 33 -> 39 + 35 -> 40 + 32 -> 40 + 33 -> 40 + 34 -> 40 + 41 -> 40 + 36 -> 41 + 18 -> 41 + 13 -> 41 + 34 -> 42 + 18 -> 42 + 9 -> 42 + 8 -> 42 + 34 -> 43 + 18 -> 43 + 36 -> 44 + 19 -> 44 + 13 -> 44 + 46 -> 45 + 51 -> 45 + 54 -> 45 + 47 -> 46 + 49 -> 46 + 50 -> 46 + 48 -> 47 + 34 -> 47 + 34 -> 48 + 34 -> 49 + 50 -> 49 + 48 -> 50 + 34 -> 50 + 31 -> 50 + 52 -> 51 + 53 -> 51 + 18 -> 52 + 35 -> 52 + 34 -> 52 + 50 -> 53 + 52 -> 54 + 49 -> 54 + 56 -> 55 + 36 -> 56 + 18 -> 56 + 13 -> 56 + 33 -> 57 + 35 -> 57 + 36 -> 58 + 18 -> 58 + 13 -> 58 + 36 -> 59 + 18 -> 59 + 13 -> 59 + 41 -> 60 + 58 -> 60 + 59 -> 60 + 36 -> 61 + 18 -> 61 + 13 -> 61 + 36 -> 62 + 18 -> 62 + 13 -> 62 + 36 -> 63 + 18 -> 63 + 13 -> 63 + 35 -> 64 + 32 -> 64 + 57 -> 64 + 66 -> 65 + 69 -> 65 + 21 -> 66 + 22 -> 66 + 6 -> 66 + 14 -> 66 + 19 -> 66 + 18 -> 66 + 67 -> 66 + 3 -> 66 + 69 -> 66 + 60 -> 66 + 6 -> 67 + 14 -> 67 + 15 -> 67 + 28 -> 67 + 29 -> 67 + 31 -> 67 + 32 -> 67 + 37 -> 67 + 39 -> 67 + 40 -> 67 + 33 -> 67 + 34 -> 67 + 68 -> 67 + 69 -> 67 + 43 -> 67 + 19 -> 67 + 18 -> 67 + 35 -> 67 + 44 -> 67 + 70 -> 67 + 55 -> 67 + 74 -> 67 + 41 -> 67 + 58 -> 67 + 59 -> 67 + 60 -> 67 + 61 -> 67 + 62 -> 67 + 63 -> 67 + 34 -> 68 + 18 -> 68 + 9 -> 68 + 8 -> 68 + 71 -> 70 + 72 -> 70 + 54 -> 70 + 47 -> 71 + 49 -> 71 + 50 -> 71 + 52 -> 72 + 73 -> 72 + 50 -> 73 + 33 -> 74 + 35 -> 74 + 76 -> 75 + 79 -> 75 + 21 -> 76 + 22 -> 76 + 6 -> 76 + 14 -> 76 + 19 -> 76 + 18 -> 76 + 77 -> 76 + 65 -> 76 + 79 -> 76 + 60 -> 76 + 6 -> 77 + 14 -> 77 + 15 -> 77 + 28 -> 77 + 29 -> 77 + 31 -> 77 + 32 -> 77 + 37 -> 77 + 39 -> 77 + 40 -> 77 + 33 -> 77 + 34 -> 77 + 78 -> 77 + 79 -> 77 + 43 -> 77 + 19 -> 77 + 18 -> 77 + 35 -> 77 + 44 -> 77 + 80 -> 77 + 55 -> 77 + 84 -> 77 + 41 -> 77 + 58 -> 77 + 59 -> 77 + 60 -> 77 + 61 -> 77 + 62 -> 77 + 63 -> 77 + 34 -> 78 + 18 -> 78 + 9 -> 78 + 
8 -> 78 + 81 -> 80 + 82 -> 80 + 54 -> 80 + 47 -> 81 + 49 -> 81 + 50 -> 81 + 52 -> 82 + 83 -> 82 + 50 -> 83 + 33 -> 84 + 35 -> 84 + 18 -> 85 + 3 -> 86 + 18 -> 86 + 65 -> 87 + 18 -> 87 + 75 -> 88 + 18 -> 88 + 3 -> 89 + 18 -> 89 + 65 -> 90 + 18 -> 90 + 75 -> 91 + 18 -> 91 } | diff --git a/doc/validation.rst b/doc/validation.rst index 7049e3de..afe7a7f3 100644 --- a/doc/validation.rst +++ b/doc/validation.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -9,7 +9,7 @@ Validation The PyPSA-Eur model workflow provides a built-in mechanism for validation. This allows users to contrast the outcomes of network optimization against the historical behaviour of the European power system. The snakemake rule ``validate_elec_networks`` enables this by generating comparative figures that encapsulate key data points such as dispatch carrier, cross-border flows, and market prices per price zone. -These comparisons utilize data from the 2019 ENTSO-E Transparency Platform. To enable this, an ENTSO-E API key must be inserted into the ``config.yaml`` file. Detailed steps for this process can be found in the user guide `here `_. +These comparisons utilize data from the 2019 ENTSO-E Transparency Platform. To enable this, an ENTSO-E API key must be inserted into the ``config.yaml`` file. Detailed steps for this process can be found in the user guide `here `__. Once the API key is set, the validation workflow can be triggered by running the following command: diff --git a/doc/wildcards.rst b/doc/wildcards.rst index 167a2ded..16681e3d 100644 --- a/doc/wildcards.rst +++ b/doc/wildcards.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -17,7 +17,7 @@ what data to retrieve and what files to produce. .. note:: Detailed explanations of how wildcards work in ``snakemake`` can be found in the - `relevant section of the documentation `_. + `relevant section of the documentation `__. .. 
_cutout_wc: diff --git a/envs/environment.fixed.yaml b/envs/environment.fixed.yaml index 31a58835..260c17fd 100644 --- a/envs/environment.fixed.yaml +++ b/envs/environment.fixed.yaml @@ -5,492 +5,468 @@ name: pypsa-eur channels: - bioconda -- gurobi - http://conda.anaconda.org/gurobi - conda-forge -- defaults dependencies: -- _libgcc_mutex=0.1 -- _openmp_mutex=4.5 -- affine=2.4.0 -- alsa-lib=1.2.10 -- ampl-mp=3.1.0 -- amply=0.1.6 -- anyio=4.2.0 -- appdirs=1.4.4 -- argon2-cffi=23.1.0 -- argon2-cffi-bindings=21.2.0 -- arrow=1.3.0 -- asttokens=2.4.1 -- async-lru=2.0.4 -- atk-1.0=2.38.0 -- atlite=0.2.12 -- attr=2.5.1 -- attrs=23.2.0 -- aws-c-auth=0.7.8 -- aws-c-cal=0.6.9 -- aws-c-common=0.9.10 -- aws-c-compression=0.2.17 -- aws-c-event-stream=0.3.2 -- aws-c-http=0.7.15 -- aws-c-io=0.13.36 -- aws-c-mqtt=0.10.0 -- aws-c-s3=0.4.7 -- aws-c-sdkutils=0.1.13 -- aws-checksums=0.1.17 -- aws-crt-cpp=0.25.1 -- aws-sdk-cpp=1.11.210 -- babel=2.14.0 -- beautifulsoup4=4.12.2 -- bleach=6.1.0 -- blosc=1.21.5 -- bokeh=3.3.2 -- bottleneck=1.3.7 -- branca=0.7.0 -- brotli=1.1.0 -- brotli-bin=1.1.0 -- brotli-python=1.1.0 -- bzip2=1.0.8 -- c-ares=1.24.0 -- c-blosc2=2.12.0 -- ca-certificates=2023.11.17 -- cached-property=1.5.2 -- cached_property=1.5.2 -- cairo=1.18.0 -- cartopy=0.22.0 -- cdsapi=0.6.1 -- certifi=2023.11.17 -- cffi=1.16.0 -- cfitsio=4.3.1 -- cftime=1.6.3 -- charset-normalizer=3.3.2 -- click=8.1.7 -- click-plugins=1.1.1 -- cligj=0.7.2 -- cloudpickle=3.0.0 -- coin-or-cbc=2.10.10 -- coin-or-cgl=0.60.7 -- coin-or-clp=1.17.8 -- coin-or-osi=0.108.8 -- coin-or-utils=2.11.9 -- coincbc=2.10.10 -- colorama=0.4.6 -- comm=0.1.4 -- configargparse=1.7 -- connection_pool=0.0.3 -- contourpy=1.2.0 -- country_converter=1.2 -- cycler=0.12.1 -- cytoolz=0.12.2 -- dask=2023.12.1 -- dask-core=2023.12.1 -- datrie=0.8.2 -- dbus=1.13.6 -- debugpy=1.8.0 -- decorator=5.1.1 -- defusedxml=0.7.1 -- deprecation=2.1.0 -- descartes=1.1.0 -- distributed=2023.12.1 -- distro=1.8.0 -- docutils=0.20.1 -- dpath=2.1.6 -- entrypoints=0.4 -- entsoe-py=0.6.1 -- et_xmlfile=1.1.0 -- exceptiongroup=1.2.0 -- executing=2.0.1 -- expat=2.5.0 -- fiona=1.9.5 -- folium=0.15.1 -- font-ttf-dejavu-sans-mono=2.37 -- font-ttf-inconsolata=3.000 -- font-ttf-source-code-pro=2.038 -- font-ttf-ubuntu=0.83 -- fontconfig=2.14.2 -- fonts-conda-ecosystem=1 -- fonts-conda-forge=1 -- fonttools=4.47.0 -- fqdn=1.5.1 -- freetype=2.12.1 -- freexl=2.0.0 -- fribidi=1.0.10 -- fsspec=2023.12.2 -- gdal=3.7.3 -- gdk-pixbuf=2.42.10 -- geographiclib=1.52 -- geojson-rewind=1.1.0 -- geopandas=0.14.1 -- geopandas-base=0.14.1 -- geopy=2.4.1 -- geos=3.12.1 -- geotiff=1.7.1 -- gettext=0.21.1 -- gflags=2.2.2 -- giflib=5.2.1 -- gitdb=4.0.11 -- gitpython=3.1.40 -- glib=2.78.3 -- glib-tools=2.78.3 -- glog=0.6.0 -- glpk=5.0 -- gmp=6.3.0 -- graphite2=1.3.13 -- graphviz=9.0.0 -- gst-plugins-base=1.22.8 -- gstreamer=1.22.8 -- gtk2=2.24.33 -- gts=0.7.6 -- gurobi=11.0.0 -- harfbuzz=8.3.0 -- hdf4=4.2.15 -- hdf5=1.14.3 -- humanfriendly=10.0 -- icu=73.2 -- idna=3.6 -- importlib-metadata=7.0.1 -- importlib_metadata=7.0.1 -- importlib_resources=6.1.1 -- iniconfig=2.0.0 -- ipopt=3.14.13 -- ipykernel=6.28.0 -- ipython=8.19.0 -- ipywidgets=8.1.1 -- isoduration=20.11.0 -- jedi=0.19.1 -- jinja2=3.1.2 -- joblib=1.3.2 -- json-c=0.17 -- json5=0.9.14 -- jsonpointer=2.4 -- jsonschema=4.20.0 -- jsonschema-specifications=2023.12.1 -- jsonschema-with-format-nongpl=4.20.0 -- jupyter=1.0.0 -- jupyter-lsp=2.2.1 -- jupyter_client=8.6.0 -- jupyter_console=6.6.3 -- jupyter_core=5.6.1 -- jupyter_events=0.9.0 -- 
jupyter_server=2.12.1 -- jupyter_server_terminals=0.5.1 -- jupyterlab=4.0.10 -- jupyterlab_pygments=0.3.0 -- jupyterlab_server=2.25.2 -- jupyterlab_widgets=3.0.9 -- kealib=1.5.3 -- keyutils=1.6.1 -- kiwisolver=1.4.5 -- krb5=1.21.2 -- lame=3.100 -- lcms2=2.16 -- ld_impl_linux-64=2.40 -- lerc=4.0.0 -- libabseil=20230802.1 -- libaec=1.1.2 -- libarchive=3.7.2 -- libarrow=14.0.2 -- libarrow-acero=14.0.2 -- libarrow-dataset=14.0.2 -- libarrow-flight=14.0.2 -- libarrow-flight-sql=14.0.2 -- libarrow-gandiva=14.0.2 -- libarrow-substrait=14.0.2 -- libblas=3.9.0 -- libboost-headers=1.84.0 -- libbrotlicommon=1.1.0 -- libbrotlidec=1.1.0 -- libbrotlienc=1.1.0 -- libcap=2.69 -- libcblas=3.9.0 -- libclang=15.0.7 -- libclang13=15.0.7 -- libcrc32c=1.1.2 -- libcups=2.3.3 -- libcurl=8.5.0 -- libdeflate=1.19 -- libedit=3.1.20191231 -- libev=4.33 -- libevent=2.1.12 -- libexpat=2.5.0 -- libffi=3.4.2 -- libflac=1.4.3 -- libgcc-ng=13.2.0 -- libgcrypt=1.10.3 -- libgd=2.3.3 -- libgdal=3.7.3 -- libgfortran-ng=13.2.0 -- libgfortran5=13.2.0 -- libglib=2.78.3 -- libgomp=13.2.0 -- libgoogle-cloud=2.12.0 -- libgpg-error=1.47 -- libgrpc=1.59.3 -- libhwloc=2.9.1 -- libiconv=1.17 -- libjpeg-turbo=3.0.0 -- libkml=1.3.0 -- liblapack=3.9.0 -- liblapacke=3.9.0 -- libllvm15=15.0.7 -- libnetcdf=4.9.2 -- libnghttp2=1.58.0 -- libnl=3.9.0 -- libnsl=2.0.1 -- libnuma=2.0.16 -- libogg=1.3.4 -- libopenblas=0.3.25 -- libopus=1.3.1 -- libparquet=14.0.2 -- libpng=1.6.39 -- libpq=16.1 -- libprotobuf=4.24.4 -- libre2-11=2023.06.02 -- librsvg=2.56.3 -- librttopo=1.1.0 -- libsndfile=1.2.2 -- libsodium=1.0.18 -- libspatialindex=1.9.3 -- libspatialite=5.1.0 -- libspral=2023.08.02 -- libsqlite=3.44.2 -- libssh2=1.11.0 -- libstdcxx-ng=13.2.0 -- libsystemd0=255 -- libthrift=0.19.0 -- libtiff=4.6.0 -- libutf8proc=2.8.0 -- libuuid=2.38.1 -- libvorbis=1.3.7 -- libwebp=1.3.2 -- libwebp-base=1.3.2 -- libxcb=1.15 -- libxcrypt=4.4.36 -- libxkbcommon=1.6.0 -- libxml2=2.11.6 -- libxslt=1.1.37 -- libzip=1.10.1 -- libzlib=1.2.13 -- linopy=0.3.2 -- locket=1.0.0 -- lxml=4.9.3 -- lz4=4.3.2 -- lz4-c=1.9.4 -- lzo=2.10 -- mapclassify=2.6.1 -- markupsafe=2.1.3 -- matplotlib=3.8.2 -- matplotlib-base=3.8.2 -- matplotlib-inline=0.1.6 -- memory_profiler=0.61.0 -- metis=5.1.0 -- minizip=4.0.4 -- mistune=3.0.2 -- mpg123=1.32.3 -- msgpack-python=1.0.7 -- mumps-include=5.2.1 -- mumps-seq=5.2.1 -- munch=4.0.0 -- munkres=1.1.4 -- mysql-common=8.0.33 -- mysql-libs=8.0.33 -- nbclient=0.8.0 -- nbconvert=7.14.0 -- nbconvert-core=7.14.0 -- nbconvert-pandoc=7.14.0 -- nbformat=5.9.2 -- ncurses=6.4 -- nest-asyncio=1.5.8 -- netcdf4=1.6.5 -- networkx=3.2.1 -- nomkl=1.0 -- notebook=7.0.6 -- notebook-shim=0.2.3 -- nspr=4.35 -- nss=3.96 -- numexpr=2.8.8 -- numpy=1.26.2 -- openjdk=21.0.1 -- openjpeg=2.5.0 -- openpyxl=3.1.2 -- openssl=3.2.0 -- orc=1.9.2 -- overrides=7.4.0 -- packaging=23.2 -- pandas=2.1.4 -- pandoc=3.1.3 -- pandocfilters=1.5.0 -- pango=1.50.14 -- parso=0.8.3 -- partd=1.4.1 -- patsy=0.5.5 -- pcre2=10.42 -- pexpect=4.8.0 -- pickleshare=0.7.5 -- pillow=10.2.0 -- pip=23.3.2 -- pixman=0.42.2 -- pkgutil-resolve-name=1.3.10 -- plac=1.4.2 -- platformdirs=4.1.0 -- pluggy=1.3.0 -- ply=3.11 -- poppler=23.12.0 -- poppler-data=0.4.12 -- postgresql=16.1 -- powerplantmatching=0.5.8 -- progressbar2=4.3.2 -- proj=9.3.0 -- prometheus_client=0.19.0 -- prompt-toolkit=3.0.42 -- prompt_toolkit=3.0.42 -- psutil=5.9.7 -- pthread-stubs=0.4 -- ptyprocess=0.7.0 -- pulp=2.7.0 -- pulseaudio-client=16.1 -- pure_eval=0.2.2 -- py-cpuinfo=9.0.0 -- pyarrow=14.0.2 -- pyarrow-hotfix=0.6 -- pycountry=22.3.5 -- 
pycparser=2.21 -- pygments=2.17.2 -- pyomo=6.6.1 -- pyparsing=3.1.1 -- pyproj=3.6.1 -- pypsa=0.26.2 -- pyqt=5.15.9 -- pyqt5-sip=12.12.2 -- pyshp=2.3.1 -- pysocks=1.7.1 -- pytables=3.9.2 -- pytest=7.4.4 -- python=3.11.7 -- python-dateutil=2.8.2 -- python-fastjsonschema=2.19.1 -- python-json-logger=2.0.7 -- python-tzdata=2023.4 -- python-utils=3.8.1 -- python_abi=3.11 -- pytz=2023.3.post1 -- pyxlsb=1.0.10 -- pyyaml=6.0.1 -- pyzmq=25.1.2 -- qt-main=5.15.8 -- qtconsole-base=5.5.1 -- qtpy=2.4.1 -- rasterio=1.3.9 -- rdma-core=49.0 -- re2=2023.06.02 -- readline=8.2 -- referencing=0.32.0 -- requests=2.31.0 -- reretry=0.11.8 -- rfc3339-validator=0.1.4 -- rfc3986-validator=0.1.1 -- rioxarray=0.15.0 -- rpds-py=0.16.2 -- rtree=1.1.0 -- s2n=1.4.1 -- scikit-learn=1.3.2 -- scipy=1.11.4 -- scotch=6.0.9 -- seaborn=0.13.0 -- seaborn-base=0.13.0 -- send2trash=1.8.2 -- setuptools=69.0.3 -- setuptools-scm=8.0.4 -- setuptools_scm=8.0.4 -- shapely=2.0.2 -- sip=6.7.12 -- six=1.16.0 -- smart_open=6.4.0 -- smmap=5.0.0 -- snakemake-minimal=7.32.4 -- snappy=1.1.10 -- sniffio=1.3.0 -- snuggs=1.4.7 -- sortedcontainers=2.4.0 -- soupsieve=2.5 -- sqlite=3.44.2 -- stack_data=0.6.2 -- statsmodels=0.14.1 -- stopit=1.1.2 -- tabula-py=2.7.0 -- tabulate=0.9.0 -- tblib=3.0.0 -- terminado=0.18.0 -- threadpoolctl=3.2.0 -- throttler=1.2.2 -- tiledb=2.18.2 -- tinycss2=1.2.1 -- tk=8.6.13 -- toml=0.10.2 -- tomli=2.0.1 -- toolz=0.12.0 -- toposort=1.10 -- tornado=6.3.3 -- tqdm=4.66.1 -- traitlets=5.14.1 -- types-python-dateutil=2.8.19.14 -- typing-extensions=4.9.0 -- typing_extensions=4.9.0 -- typing_utils=0.1.0 -- tzcode=2023d -- tzdata=2023d -- ucx=1.15.0 -- unidecode=1.3.7 -- unixodbc=2.3.12 -- uri-template=1.3.0 -- uriparser=0.9.7 -- urllib3=2.1.0 -- validators=0.22.0 -- wcwidth=0.2.12 -- webcolors=1.13 -- webencodings=0.5.1 -- websocket-client=1.7.0 -- wheel=0.42.0 -- widgetsnbextension=4.0.9 -- wrapt=1.16.0 -- xarray=2023.12.0 -- xcb-util=0.4.0 -- xcb-util-image=0.4.0 -- xcb-util-keysyms=0.4.0 -- xcb-util-renderutil=0.3.9 -- xcb-util-wm=0.4.1 -- xerces-c=3.2.5 -- xkeyboard-config=2.40 -- xlrd=2.0.1 -- xorg-fixesproto=5.0 -- xorg-inputproto=2.3.2 -- xorg-kbproto=1.0.7 -- xorg-libice=1.1.1 -- xorg-libsm=1.2.4 -- xorg-libx11=1.8.7 -- xorg-libxau=1.0.11 -- xorg-libxdmcp=1.1.3 -- xorg-libxext=1.3.4 -- xorg-libxfixes=5.0.3 -- xorg-libxi=1.7.10 -- xorg-libxrender=0.9.11 -- xorg-libxt=1.3.0 -- xorg-libxtst=1.2.3 -- xorg-recordproto=1.14.2 -- xorg-renderproto=0.11.1 -- xorg-xextproto=7.3.0 -- xorg-xf86vidmodeproto=2.3.1 -- xorg-xproto=7.0.31 -- xyzservices=2023.10.1 -- xz=5.2.6 -- yaml=0.2.5 -- yte=1.5.4 -- zeromq=4.3.5 -- zict=3.0.0 -- zipp=3.17.0 -- zlib=1.2.13 -- zlib-ng=2.0.7 -- zstd=1.5.5 +- _libgcc_mutex=0.1=conda_forge +- _openmp_mutex=4.5=2_gnu +- affine=2.4.0=pyhd8ed1ab_0 +- alsa-lib=1.2.11=hd590300_1 +- ampl-mp=3.1.0=h2cc385e_1006 +- amply=0.1.6=pyhd8ed1ab_0 +- appdirs=1.4.4=pyh9f0ad1d_0 +- argparse-dataclass=2.0.0=pyhd8ed1ab_0 +- asttokens=2.4.1=pyhd8ed1ab_0 +- atk-1.0=2.38.0=h04ea711_2 +- atlite=0.2.12=pyhd8ed1ab_0 +- attr=2.5.1=h166bdaf_1 +- attrs=23.2.0=pyh71513ae_0 +- aws-c-auth=0.7.18=he0b1f16_0 +- aws-c-cal=0.6.11=heb1d5e4_0 +- aws-c-common=0.9.15=hd590300_0 +- aws-c-compression=0.2.18=hce8ee76_3 +- aws-c-event-stream=0.4.2=h01f5eca_8 +- aws-c-http=0.8.1=hdb68c23_10 +- aws-c-io=0.14.7=hbfbeace_6 +- aws-c-mqtt=0.10.4=h50844eb_0 +- aws-c-s3=0.5.7=h6be9164_2 +- aws-c-sdkutils=0.1.15=hce8ee76_3 +- aws-checksums=0.1.18=hce8ee76_3 +- aws-crt-cpp=0.26.8=h2150271_2 +- aws-sdk-cpp=1.11.267=hddb5a97_7 +- 
azure-core-cpp=1.11.1=h91d86a7_1 +- azure-identity-cpp=1.6.0=hf1915f5_1 +- azure-storage-blobs-cpp=12.10.0=h00ab1b0_1 +- azure-storage-common-cpp=12.5.0=h94269e2_4 +- beautifulsoup4=4.12.3=pyha770c72_0 +- blosc=1.21.5=hc2324a3_1 +- bokeh=3.4.1=pyhd8ed1ab_0 +- bottleneck=1.3.8=py311h1f0f07a_0 +- branca=0.7.2=pyhd8ed1ab_0 +- brotli=1.1.0=hd590300_1 +- brotli-bin=1.1.0=hd590300_1 +- brotli-python=1.1.0=py311hb755f60_1 +- bzip2=1.0.8=hd590300_5 +- c-ares=1.28.1=hd590300_0 +- c-blosc2=2.14.4=hb4ffafa_1 +- ca-certificates=2024.2.2=hbcca054_0 +- cads-api-client=1.0.0=pyhd8ed1ab_0 +- cairo=1.18.0=h3faef2a_0 +- cartopy=0.23.0=py311h320fe9a_0 +- cdsapi=0.7.0=pyhd8ed1ab_0 +- certifi=2024.2.2=pyhd8ed1ab_0 +- cffi=1.16.0=py311hb3a22ac_0 +- cfgv=3.3.1=pyhd8ed1ab_0 +- cfitsio=4.4.0=hbdc6101_1 +- cftime=1.6.3=py311h1f0f07a_0 +- charset-normalizer=3.3.2=pyhd8ed1ab_0 +- click=8.1.7=unix_pyh707e725_0 +- click-plugins=1.1.1=py_0 +- cligj=0.7.2=pyhd8ed1ab_1 +- cloudpickle=3.0.0=pyhd8ed1ab_0 +- coin-or-cbc=2.10.10=h9002f0b_0 +- coin-or-cgl=0.60.7=h516709c_0 +- coin-or-clp=1.17.8=h1ee7a9c_0 +- coin-or-osi=0.108.10=haf5fa05_0 +- coin-or-utils=2.11.11=hee58242_0 +- coincbc=2.10.10=0_metapackage +- colorama=0.4.6=pyhd8ed1ab_0 +- conda-inject=1.3.1=pyhd8ed1ab_0 +- configargparse=1.7=pyhd8ed1ab_0 +- connection_pool=0.0.3=pyhd3deb0d_0 +- contourpy=1.2.1=py311h9547e67_0 +- country_converter=1.2=pyhd8ed1ab_0 +- cppad=20240000.4=h59595ed_0 +- cycler=0.12.1=pyhd8ed1ab_0 +- cytoolz=0.12.3=py311h459d7ec_0 +- dask=2024.4.2=pyhd8ed1ab_0 +- dask-core=2024.4.2=pyhd8ed1ab_0 +- dask-expr=1.0.14=pyhd8ed1ab_0 +- datrie=0.8.2=py311h459d7ec_7 +- dbus=1.13.6=h5008d03_3 +- decorator=5.1.1=pyhd8ed1ab_0 +- deprecation=2.1.0=pyh9f0ad1d_0 +- descartes=1.1.0=py_4 +- distlib=0.3.8=pyhd8ed1ab_0 +- distributed=2024.4.2=pyhd8ed1ab_0 +- distro=1.9.0=pyhd8ed1ab_0 +- docutils=0.21.2=pyhd8ed1ab_0 +- dpath=2.1.6=pyha770c72_0 +- entsoe-py=0.6.7=pyhd8ed1ab_0 +- et_xmlfile=1.1.0=pyhd8ed1ab_0 +- exceptiongroup=1.2.0=pyhd8ed1ab_2 +- executing=2.0.1=pyhd8ed1ab_0 +- expat=2.6.2=h59595ed_0 +- filelock=3.14.0=pyhd8ed1ab_0 +- fiona=1.9.6=py311hf8e0aa6_0 +- fmt=10.2.1=h00ab1b0_0 +- folium=0.16.0=pyhd8ed1ab_0 +- font-ttf-dejavu-sans-mono=2.37=hab24e00_0 +- font-ttf-inconsolata=3.000=h77eed37_0 +- font-ttf-source-code-pro=2.038=h77eed37_0 +- font-ttf-ubuntu=0.83=h77eed37_2 +- fontconfig=2.14.2=h14ed4e7_0 +- fonts-conda-ecosystem=1=0 +- fonts-conda-forge=1=0 +- fonttools=4.51.0=py311h459d7ec_0 +- freetype=2.12.1=h267a509_2 +- freexl=2.0.0=h743c826_0 +- fribidi=1.0.10=h36c2ea0_0 +- fsspec=2024.3.1=pyhca7485f_0 +- gdal=3.8.5=py311hd032c08_2 +- gdk-pixbuf=2.42.11=hb9ae30d_0 +- geographiclib=2.0=pyhd8ed1ab_0 +- geojson-rewind=1.1.0=pyhd8ed1ab_0 +- geopandas=0.14.4=pyhd8ed1ab_0 +- geopandas-base=0.14.4=pyha770c72_0 +- geopy=2.4.1=pyhd8ed1ab_1 +- geos=3.12.1=h59595ed_0 +- geotiff=1.7.1=h6cf1f90_16 +- gettext=0.22.5=h59595ed_2 +- gettext-tools=0.22.5=h59595ed_2 +- gflags=2.2.2=he1b5a44_1004 +- giflib=5.2.2=hd590300_0 +- gitdb=4.0.11=pyhd8ed1ab_0 +- gitpython=3.1.43=pyhd8ed1ab_0 +- glib=2.80.0=hf2295e7_6 +- glib-tools=2.80.0=hde27a5a_6 +- glog=0.7.0=hed5481d_0 +- glpk=5.0=h445213a_0 +- gmp=6.3.0=h59595ed_1 +- graphite2=1.3.13=h59595ed_1003 +- graphviz=9.0.0=h78e8752_1 +- gst-plugins-base=1.24.1=hfa15dee_2 +- gstreamer=1.24.1=h98fc4e7_2 +- gtk2=2.24.33=h280cfa0_4 +- gts=0.7.6=h977cf35_4 +- harfbuzz=8.4.0=h3d44ed6_0 +- hdf4=4.2.15=h2a13503_7 +- hdf5=1.14.3=nompi_h4f84152_101 +- humanfriendly=10.0=pyhd8ed1ab_6 +- icu=73.2=h59595ed_0 +- identify=2.5.36=pyhd8ed1ab_0 +- 
idna=3.7=pyhd8ed1ab_0 +- immutables=0.20=py311h459d7ec_1 +- importlib-metadata=7.1.0=pyha770c72_0 +- importlib_metadata=7.1.0=hd8ed1ab_0 +- importlib_resources=6.4.0=pyhd8ed1ab_0 +- iniconfig=2.0.0=pyhd8ed1ab_0 +- ipopt=3.14.16=hf967516_0 +- ipython=8.22.2=pyh707e725_0 +- jedi=0.19.1=pyhd8ed1ab_0 +- jinja2=3.1.3=pyhd8ed1ab_0 +- joblib=1.4.2=pyhd8ed1ab_0 +- json-c=0.17=h7ab15ed_0 +- jsonschema=4.22.0=pyhd8ed1ab_0 +- jsonschema-specifications=2023.12.1=pyhd8ed1ab_0 +- jupyter_core=5.7.2=py311h38be061_0 +- kealib=1.5.3=h2f55d51_0 +- keyutils=1.6.1=h166bdaf_0 +- kiwisolver=1.4.5=py311h9547e67_1 +- krb5=1.21.2=h659d440_0 +- lame=3.100=h166bdaf_1003 +- lcms2=2.16=hb7c19ff_0 +- ld_impl_linux-64=2.40=h55db66e_0 +- lerc=4.0.0=h27087fc_0 +- libabseil=20240116.2=cxx17_h59595ed_0 +- libaec=1.1.3=h59595ed_0 +- libarchive=3.7.2=h2aa1ff5_1 +- libarrow=15.0.2=hefa796f_6_cpu +- libarrow-acero=15.0.2=hbabe93e_6_cpu +- libarrow-dataset=15.0.2=hbabe93e_6_cpu +- libarrow-flight=15.0.2=hc4f8a93_6_cpu +- libarrow-flight-sql=15.0.2=he4f5ca8_6_cpu +- libarrow-gandiva=15.0.2=hc1954e9_6_cpu +- libarrow-substrait=15.0.2=he4f5ca8_6_cpu +- libasprintf=0.22.5=h661eb56_2 +- libasprintf-devel=0.22.5=h661eb56_2 +- libblas=3.9.0=22_linux64_openblas +- libboost-headers=1.84.0=ha770c72_2 +- libbrotlicommon=1.1.0=hd590300_1 +- libbrotlidec=1.1.0=hd590300_1 +- libbrotlienc=1.1.0=hd590300_1 +- libcap=2.69=h0f662aa_0 +- libcblas=3.9.0=22_linux64_openblas +- libclang-cpp15=15.0.7=default_h127d8a8_5 +- libclang13=18.1.4=default_h5d6823c_0 +- libcrc32c=1.1.2=h9c3ff4c_0 +- libcups=2.3.3=h4637d8d_4 +- libcurl=8.7.1=hca28451_0 +- libdeflate=1.20=hd590300_0 +- libedit=3.1.20191231=he28a2e2_2 +- libev=4.33=hd590300_2 +- libevent=2.1.12=hf998b51_1 +- libexpat=2.6.2=h59595ed_0 +- libffi=3.4.2=h7f98852_5 +- libflac=1.4.3=h59595ed_0 +- libgcc-ng=13.2.0=h77fa898_6 +- libgcrypt=1.10.3=hd590300_0 +- libgd=2.3.3=h119a65a_9 +- libgdal=3.8.5=hf9625ee_2 +- libgettextpo=0.22.5=h59595ed_2 +- libgettextpo-devel=0.22.5=h59595ed_2 +- libgfortran-ng=13.2.0=h69a702a_6 +- libgfortran5=13.2.0=h43f5ff8_6 +- libglib=2.80.0=hf2295e7_6 +- libgomp=13.2.0=h77fa898_6 +- libgoogle-cloud=2.23.0=h9be4e54_1 +- libgoogle-cloud-storage=2.23.0=hc7a4891_1 +- libgpg-error=1.49=h4f305b6_0 +- libgrpc=1.62.2=h15f2491_0 +- libhwloc=2.9.3=default_h554bfaf_1009 +- libiconv=1.17=hd590300_2 +- libjpeg-turbo=3.0.0=hd590300_1 +- libkml=1.3.0=h01aab08_1018 +- liblapack=3.9.0=22_linux64_openblas +- liblapacke=3.9.0=22_linux64_openblas +- libllvm15=15.0.7=hb3ce162_4 +- libllvm16=16.0.6=hb3ce162_3 +- libllvm18=18.1.4=h2448989_0 +- libnetcdf=4.9.2=nompi_h9612171_113 +- libnghttp2=1.58.0=h47da74e_1 +- libnl=3.9.0=hd590300_0 +- libnsl=2.0.1=hd590300_0 +- libogg=1.3.4=h7f98852_1 +- libopenblas=0.3.27=pthreads_h413a1c8_0 +- libopus=1.3.1=h7f98852_1 +- libparquet=15.0.2=hacf5a1f_6_cpu +- libpng=1.6.43=h2797004_0 +- libpq=16.2=h33b98f1_1 +- libprotobuf=4.25.3=h08a7969_0 +- libre2-11=2023.09.01=h5a48ba9_2 +- librsvg=2.58.0=hadf69e7_1 +- librttopo=1.1.0=h8917695_15 +- libscotch=7.0.4=h91e35bf_1 +- libsndfile=1.2.2=hc60ed4a_1 +- libspatialindex=1.9.3=h9c3ff4c_4 +- libspatialite=5.1.0=h6f065fc_5 +- libspral=2024.01.18=h6aa6db2_0 +- libsqlite=3.45.3=h2797004_0 +- libssh2=1.11.0=h0841786_0 +- libstdcxx-ng=13.2.0=hc0a3c3a_6 +- libsystemd0=255=h3516f8a_1 +- libthrift=0.19.0=hb90f79a_1 +- libtiff=4.6.0=h1dd3fc0_3 +- libutf8proc=2.8.0=h166bdaf_0 +- libuuid=2.38.1=h0b41bf4_0 +- libvorbis=1.3.7=h9c3ff4c_0 +- libwebp=1.4.0=h2c329e2_0 +- libwebp-base=1.4.0=hd590300_0 +- libxcb=1.15=h0b41bf4_0 +- 
libxcrypt=4.4.36=hd590300_1 +- libxkbcommon=1.7.0=h662e7e4_0 +- libxml2=2.12.6=h232c23b_2 +- libxslt=1.1.39=h76b75d6_0 +- libzip=1.10.1=h2629f0a_3 +- libzlib=1.2.13=hd590300_5 +- linopy=0.3.8=pyhd8ed1ab_0 +- locket=1.0.0=pyhd8ed1ab_0 +- lxml=5.2.1=py311hc0a218f_0 +- lz4=4.3.3=py311h38e4bf4_0 +- lz4-c=1.9.4=hcb278e6_0 +- lzo=2.10=hd590300_1001 +- mapclassify=2.6.1=pyhd8ed1ab_0 +- markupsafe=2.1.5=py311h459d7ec_0 +- matplotlib=3.8.4=py311h38be061_0 +- matplotlib-base=3.8.4=py311h54ef318_0 +- matplotlib-inline=0.1.7=pyhd8ed1ab_0 +- memory_profiler=0.61.0=pyhd8ed1ab_0 +- metis=5.1.0=h59595ed_1007 +- minizip=4.0.5=h0ab5242_0 +- mpfr=4.2.1=h9458935_1 +- mpg123=1.32.6=h59595ed_0 +- msgpack-python=1.0.7=py311h9547e67_0 +- multiurl=0.3.1=pyhd8ed1ab_0 +- mumps-include=5.6.2=ha770c72_4 +- mumps-seq=5.6.2=hfef103a_4 +- munkres=1.1.4=pyh9f0ad1d_0 +- mysql-common=8.3.0=hf1915f5_4 +- mysql-libs=8.3.0=hca2cd23_4 +- nbformat=5.10.4=pyhd8ed1ab_0 +- ncurses=6.4.20240210=h59595ed_0 +- netcdf4=1.6.5=nompi_py311he8ad708_100 +- networkx=3.3=pyhd8ed1ab_1 +- nodeenv=1.8.0=pyhd8ed1ab_0 +- nomkl=1.0=h5ca1d4c_0 +- nspr=4.35=h27087fc_0 +- nss=3.98=h1d7d5a4_0 +- numexpr=2.9.0=py311h039bad6_100 +- numpy=1.26.4=py311h64a7726_0 +- openjdk=22.0.1=hb622114_0 +- openjpeg=2.5.2=h488ebb8_0 +- openpyxl=3.1.2=py311h459d7ec_1 +- openssl=3.3.0=hd590300_0 +- orc=2.0.0=h17fec99_1 +- packaging=24.0=pyhd8ed1ab_0 +- pandas=2.2.2=py311h320fe9a_0 +- pango=1.52.2=ha41ecd1_0 +- parso=0.8.4=pyhd8ed1ab_0 +- partd=1.4.1=pyhd8ed1ab_0 +- patsy=0.5.6=pyhd8ed1ab_0 +- pcre2=10.43=hcad00b1_0 +- pexpect=4.9.0=pyhd8ed1ab_0 +- pickleshare=0.7.5=py_1003 +- pillow=10.3.0=py311h18e6fac_0 +- pip=24.0=pyhd8ed1ab_0 +- pixman=0.43.2=h59595ed_0 +- pkgutil-resolve-name=1.3.10=pyhd8ed1ab_1 +- plac=1.4.3=pyhd8ed1ab_0 +- platformdirs=4.2.1=pyhd8ed1ab_0 +- pluggy=1.5.0=pyhd8ed1ab_0 +- ply=3.11=pyhd8ed1ab_2 +- poppler=24.04.0=hb6cd0d7_0 +- poppler-data=0.4.12=hd8ed1ab_0 +- postgresql=16.2=h82ecc9d_1 +- powerplantmatching=0.5.14=pyhd8ed1ab_0 +- pre-commit=3.7.0=pyha770c72_0 +- progressbar2=4.4.2=pyhd8ed1ab_0 +- proj=9.4.0=h1d62c97_1 +- prompt-toolkit=3.0.42=pyha770c72_0 +- psutil=5.9.8=py311h459d7ec_0 +- pthread-stubs=0.4=h36c2ea0_1001 +- ptyprocess=0.7.0=pyhd3deb0d_0 +- pulp=2.8.0=py311h38be061_0 +- pulseaudio-client=17.0=hb77b528_0 +- pure_eval=0.2.2=pyhd8ed1ab_0 +- py-cpuinfo=9.0.0=pyhd8ed1ab_0 +- pyarrow=15.0.2=py311hd5e4297_6_cpu +- pyarrow-hotfix=0.6=pyhd8ed1ab_0 +- pycountry=22.3.5=pyhd8ed1ab_0 +- pycparser=2.22=pyhd8ed1ab_0 +- pygments=2.17.2=pyhd8ed1ab_0 +- pyomo=6.6.1=py311hb755f60_0 +- pyparsing=3.1.2=pyhd8ed1ab_0 +- pyproj=3.6.1=py311hb3a3e68_6 +- pypsa=0.27.1=pyhd8ed1ab_0 +- pyqt=5.15.9=py311hf0fb5b6_5 +- pyqt5-sip=12.12.2=py311hb755f60_5 +- pyscipopt=5.0.1=py311hb755f60_0 +- pyshp=2.3.1=pyhd8ed1ab_0 +- pysocks=1.7.1=pyha2e5f31_6 +- pytables=3.9.2=py311h3e8b7c9_2 +- pytest=8.2.0=pyhd8ed1ab_0 +- python=3.11.9=hb806964_0_cpython +- python-dateutil=2.9.0=pyhd8ed1ab_0 +- python-fastjsonschema=2.19.1=pyhd8ed1ab_0 +- python-tzdata=2024.1=pyhd8ed1ab_0 +- python-utils=3.8.2=pyhd8ed1ab_0 +- python_abi=3.11=4_cp311 +- pytz=2024.1=pyhd8ed1ab_0 +- pyxlsb=1.0.10=pyhd8ed1ab_0 +- pyyaml=6.0.1=py311h459d7ec_1 +- qt-main=5.15.8=hc9dc06e_21 +- rasterio=1.3.10=py311h375a7ea_0 +- rdma-core=51.0=hd3aeb46_0 +- re2=2023.09.01=h7f4b329_2 +- readline=8.2=h8228510_1 +- referencing=0.35.1=pyhd8ed1ab_0 +- requests=2.31.0=pyhd8ed1ab_0 +- reretry=0.11.8=pyhd8ed1ab_0 +- rioxarray=0.15.5=pyhd8ed1ab_0 +- rpds-py=0.18.0=py311h46250e7_0 +- rtree=1.2.0=py311h3bb2b0f_0 +- 
s2n=1.4.12=h06160fa_0 +- scikit-learn=1.4.2=py311hc009520_0 +- scip=9.0.0=hded5f35_4 +- scipy=1.13.0=py311h64a7726_0 +- scotch=7.0.4=h23d43cc_1 +- seaborn=0.13.2=hd8ed1ab_2 +- seaborn-base=0.13.2=pyhd8ed1ab_2 +- setuptools=69.5.1=pyhd8ed1ab_0 +- setuptools-scm=8.0.4=pyhd8ed1ab_1 +- setuptools_scm=8.0.4=hd8ed1ab_1 +- shapely=2.0.4=py311h2032efe_0 +- sip=6.7.12=py311hb755f60_0 +- six=1.16.0=pyh6c4a22f_0 +- smart_open=7.0.4=pyhd8ed1ab_0 +- smmap=5.0.0=pyhd8ed1ab_0 +- snakemake-interface-common=1.17.2=pyhdfd78af_0 +- snakemake-interface-executor-plugins=9.1.1=pyhdfd78af_0 +- snakemake-interface-report-plugins=1.0.0=pyhdfd78af_0 +- snakemake-interface-storage-plugins=3.2.2=pyhdfd78af_0 +- snakemake-minimal=8.11.1=pyhdfd78af_0 +- snappy=1.2.0=hdb0a2a9_1 +- snuggs=1.4.7=py_0 +- sortedcontainers=2.4.0=pyhd8ed1ab_0 +- soupsieve=2.5=pyhd8ed1ab_1 +- spdlog=1.13.0=hd2e6256_0 +- sqlite=3.45.3=h2c6b66d_0 +- stack_data=0.6.2=pyhd8ed1ab_0 +- statsmodels=0.14.1=py311h1f0f07a_0 +- stopit=1.1.2=py_0 +- tabula-py=2.7.0=py311h38be061_1 +- tabulate=0.9.0=pyhd8ed1ab_1 +- tbb=2021.11.0=h00ab1b0_1 +- tblib=3.0.0=pyhd8ed1ab_0 +- threadpoolctl=3.5.0=pyhc1e730c_0 +- throttler=1.2.2=pyhd8ed1ab_0 +- tiledb=2.22.0=h27f064a_3 +- tk=8.6.13=noxft_h4845f30_101 +- toml=0.10.2=pyhd8ed1ab_0 +- tomli=2.0.1=pyhd8ed1ab_0 +- toolz=0.12.1=pyhd8ed1ab_0 +- toposort=1.10=pyhd8ed1ab_0 +- tornado=6.4=py311h459d7ec_0 +- tqdm=4.66.2=pyhd8ed1ab_0 +- traitlets=5.14.3=pyhd8ed1ab_0 +- typing-extensions=4.11.0=hd8ed1ab_0 +- typing_extensions=4.11.0=pyha770c72_0 +- tzcode=2024a=h3f72095_0 +- tzdata=2024a=h0c530f3_0 +- ucx=1.15.0=ha691c75_8 +- ukkonen=1.0.1=py311h9547e67_4 +- unidecode=1.3.8=pyhd8ed1ab_0 +- unixodbc=2.3.12=h661eb56_0 +- uriparser=0.9.7=h59595ed_1 +- urllib3=2.2.1=pyhd8ed1ab_0 +- validators=0.28.1=pyhd8ed1ab_0 +- virtualenv=20.26.1=pyhd8ed1ab_0 +- wcwidth=0.2.13=pyhd8ed1ab_0 +- wheel=0.43.0=pyhd8ed1ab_1 +- wrapt=1.16.0=py311h459d7ec_0 +- xarray=2024.3.0=pyhd8ed1ab_0 +- xcb-util=0.4.0=hd590300_1 +- xcb-util-image=0.4.0=h8ee46fc_1 +- xcb-util-keysyms=0.4.0=h8ee46fc_1 +- xcb-util-renderutil=0.3.9=hd590300_1 +- xcb-util-wm=0.4.1=h8ee46fc_1 +- xerces-c=3.2.5=hac6953d_0 +- xkeyboard-config=2.41=hd590300_0 +- xlrd=2.0.1=pyhd8ed1ab_3 +- xorg-fixesproto=5.0=h7f98852_1002 +- xorg-inputproto=2.3.2=h7f98852_1002 +- xorg-kbproto=1.0.7=h7f98852_1002 +- xorg-libice=1.1.1=hd590300_0 +- xorg-libsm=1.2.4=h7391055_0 +- xorg-libx11=1.8.9=h8ee46fc_0 +- xorg-libxau=1.0.11=hd590300_0 +- xorg-libxdmcp=1.1.3=h7f98852_0 +- xorg-libxext=1.3.4=h0b41bf4_2 +- xorg-libxfixes=5.0.3=h7f98852_1004 +- xorg-libxi=1.7.10=h7f98852_0 +- xorg-libxrender=0.9.11=hd590300_0 +- xorg-libxt=1.3.0=hd590300_1 +- xorg-libxtst=1.2.3=h7f98852_1002 +- xorg-recordproto=1.14.2=h7f98852_1002 +- xorg-renderproto=0.11.1=h7f98852_1002 +- xorg-xextproto=7.3.0=h0b41bf4_1003 +- xorg-xf86vidmodeproto=2.3.1=h7f98852_1002 +- xorg-xproto=7.0.31=h7f98852_1007 +- xyzservices=2024.4.0=pyhd8ed1ab_0 +- xz=5.2.6=h166bdaf_0 +- yaml=0.2.5=h7f98852_2 +- yte=1.5.4=pyha770c72_0 +- zict=3.0.0=pyhd8ed1ab_0 +- zipp=3.17.0=pyhd8ed1ab_0 +- zlib=1.2.13=hd590300_5 +- zlib-ng=2.0.7=h0b41bf4_0 +- zstd=1.5.5=hfc55251_0 - pip: - highspy==1.5.3 + - oauthlib==3.2.2 + - requests-oauthlib==1.3.1 + - snakemake-executor-plugin-cluster-generic==1.0.9 + - snakemake-executor-plugin-slurm==0.4.5 + - snakemake-executor-plugin-slurm-jobstep==0.2.1 + - snakemake-storage-plugin-http==0.2.3 - tsam==2.3.1 diff --git a/envs/environment.yaml b/envs/environment.yaml index 535acbdb..cbb1a364 100644 --- a/envs/environment.yaml +++ 
b/envs/environment.yaml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -20,13 +20,12 @@ dependencies: - openpyxl!=3.1.1 - pycountry - seaborn - # snakemake 8 introduced a number of breaking changes which the workflow has yet to be made compatible with -- snakemake-minimal>=7.7.0,<8.0.0 +- snakemake-minimal>=8.11 - memory_profiler - yaml - pytables - lxml -- powerplantmatching>=0.5.5 +- powerplantmatching>=0.5.13 - numpy - pandas>=2.1 - geopandas>=0.11.0 @@ -35,8 +34,9 @@ dependencies: - netcdf4 - networkx - scipy +- glpk - shapely>=2.0 -- pyomo +- pyscipopt - matplotlib - proj - fiona @@ -47,7 +47,7 @@ dependencies: - tabula-py - pyxlsb - graphviz -- ipopt +- pre-commit # Keep in conda environment when calling ipython - ipython @@ -60,3 +60,7 @@ dependencies: - pip: - tsam>=2.3.1 + - snakemake-storage-plugin-http + - snakemake-executor-plugin-slurm + - snakemake-executor-plugin-cluster-generic + - highspy diff --git a/envs/retrieve.yaml b/envs/retrieve.yaml new file mode 100644 index 00000000..a3c4828c --- /dev/null +++ b/envs/retrieve.yaml @@ -0,0 +1,18 @@ +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT + +name: pypsa-eur-retrieve +channels: +- conda-forge +- bioconda +dependencies: +- python>=3.8 +- pip +- snakemake-minimal>=8.5 +- pandas>=2.1 +- tqdm +- pip: + - snakemake-storage-plugin-http + - snakemake-executor-plugin-slurm + - snakemake-executor-plugin-cluster-generic diff --git a/graphics/workflow.png b/graphics/workflow.png deleted file mode 100644 index a43f240d..00000000 Binary files a/graphics/workflow.png and /dev/null differ diff --git a/matplotlibrc b/matplotlibrc index f00ed5cd..bf667fb1 100644 --- a/matplotlibrc +++ b/matplotlibrc @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 font.family: sans-serif diff --git a/rules/build_electricity.smk b/rules/build_electricity.smk index dcc3b96f..ac08b6b8 100644 --- a/rules/build_electricity.smk +++ b/rules/build_electricity.smk @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2023-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -8,7 +8,7 @@ if config["enable"].get("prepare_links_p_nom", False): output: "data/links_p_nom.csv", log: - LOGS + "prepare_links_p_nom.log", + logs("prepare_links_p_nom.log"), threads: 1 resources: mem_mb=1500, @@ -20,15 +20,21 @@ if config["enable"].get("prepare_links_p_nom", False): rule build_electricity_demand: params: - snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, - countries=config["countries"], - load=config["load"], + snapshots=config_provider("snapshots"), + drop_leap_day=config_provider("enable", "drop_leap_day"), + countries=config_provider("countries"), + load=config_provider("load"), input: - ancient(RESOURCES + "load_raw.csv"), + reported=ancient("data/electricity_demand_raw.csv"), + synthetic=lambda w: ( + ancient("data/load_synthetic_raw.csv") + if config_provider("load", "supplement_synthetic")(w) + else [] + ), output: - RESOURCES + "load.csv", + resources("electricity_demand.csv"), log: - LOGS + "build_electricity_demand.log", + logs("build_electricity_demand.log"), resources: mem_mb=5000, conda: @@ -39,17 +45,17 @@ rule build_electricity_demand: rule build_powerplants: params: - 
powerplants_filter=config["electricity"]["powerplants_filter"], - custom_powerplants=config["electricity"]["custom_powerplants"], - everywhere_powerplants=config["electricity"]["everywhere_powerplants"], - countries=config["countries"], + powerplants_filter=config_provider("electricity", "powerplants_filter"), + custom_powerplants=config_provider("electricity", "custom_powerplants"), + everywhere_powerplants=config_provider("electricity", "everywhere_powerplants"), + countries=config_provider("countries"), input: - base_network=RESOURCES + "networks/base.nc", + base_network=resources("networks/base.nc"), custom_powerplants="data/custom_powerplants.csv", output: - RESOURCES + "powerplants.csv", + resources("powerplants.csv"), log: - LOGS + "build_powerplants.log", + logs("build_powerplants.log"), threads: 1 resources: mem_mb=5000, @@ -61,11 +67,12 @@ rule build_powerplants: rule base_network: params: - countries=config["countries"], - snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, - lines=config["lines"], - links=config["links"], - transformers=config["transformers"], + countries=config_provider("countries"), + snapshots=config_provider("snapshots"), + drop_leap_day=config_provider("enable", "drop_leap_day"), + lines=config_provider("lines"), + links=config_provider("links"), + transformers=config_provider("transformers"), input: eg_buses="data/entsoegridkit/buses.csv", eg_lines="data/entsoegridkit/lines.csv", @@ -75,15 +82,17 @@ rule base_network: parameter_corrections="data/parameter_corrections.yaml", links_p_nom="data/links_p_nom.csv", links_tyndp="data/links_tyndp.csv", - country_shapes=RESOURCES + "country_shapes.geojson", - offshore_shapes=RESOURCES + "offshore_shapes.geojson", - europe_shape=RESOURCES + "europe_shape.geojson", + country_shapes=resources("country_shapes.geojson"), + offshore_shapes=resources("offshore_shapes.geojson"), + europe_shape=resources("europe_shape.geojson"), output: - RESOURCES + "networks/base.nc", + base_network=resources("networks/base.nc"), + regions_onshore=resources("regions_onshore.geojson"), + regions_offshore=resources("regions_offshore.geojson"), log: - LOGS + "base_network.log", + logs("base_network.log"), benchmark: - BENCHMARKS + "base_network" + benchmarks("base_network") threads: 1 resources: mem_mb=1500, @@ -95,7 +104,7 @@ rule base_network: rule build_shapes: params: - countries=config["countries"], + countries=config_provider("countries"), input: naturalearth=ancient("data/bundle/naturalearth/ne_10m_admin_0_countries.shp"), eez=ancient("data/bundle/eez/World_EEZ_v8_2014.shp"), @@ -105,12 +114,12 @@ rule build_shapes: ch_cantons=ancient("data/bundle/ch_cantons.csv"), ch_popgdp=ancient("data/bundle/je-e-21.03.02.xls"), output: - country_shapes=RESOURCES + "country_shapes.geojson", - offshore_shapes=RESOURCES + "offshore_shapes.geojson", - europe_shape=RESOURCES + "europe_shape.geojson", - nuts3_shapes=RESOURCES + "nuts3_shapes.geojson", + country_shapes=resources("country_shapes.geojson"), + offshore_shapes=resources("offshore_shapes.geojson"), + europe_shape=resources("europe_shape.geojson"), + nuts3_shapes=resources("nuts3_shapes.geojson"), log: - LOGS + "build_shapes.log", + logs("build_shapes.log"), threads: 1 resources: mem_mb=1500, @@ -120,45 +129,24 @@ rule build_shapes: "../scripts/build_shapes.py" -rule build_bus_regions: - params: - countries=config["countries"], - input: - country_shapes=RESOURCES + "country_shapes.geojson", - offshore_shapes=RESOURCES + "offshore_shapes.geojson", - 
base_network=RESOURCES + "networks/base.nc", - output: - regions_onshore=RESOURCES + "regions_onshore.geojson", - regions_offshore=RESOURCES + "regions_offshore.geojson", - log: - LOGS + "build_bus_regions.log", - threads: 1 - resources: - mem_mb=1000, - conda: - "../envs/environment.yaml" - script: - "../scripts/build_bus_regions.py" - - if config["enable"].get("build_cutout", False): rule build_cutout: params: - snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, - cutouts=config["atlite"]["cutouts"], + snapshots=config_provider("snapshots"), + cutouts=config_provider("atlite", "cutouts"), input: - regions_onshore=RESOURCES + "regions_onshore.geojson", - regions_offshore=RESOURCES + "regions_offshore.geojson", + regions_onshore=resources("regions_onshore.geojson"), + regions_offshore=resources("regions_offshore.geojson"), output: protected("cutouts/" + CDIR + "{cutout}.nc"), log: - "logs/" + CDIR + "build_cutout/{cutout}.log", + logs(CDIR + "build_cutout/{cutout}.log"), benchmark: "benchmarks/" + CDIR + "build_cutout_{cutout}" - threads: ATLITE_NPROCESSES + threads: config["atlite"].get("nprocesses", 4) resources: - mem_mb=ATLITE_NPROCESSES * 1000, + mem_mb=config["atlite"].get("nprocesses", 4) * 1000, conda: "../envs/environment.yaml" script: @@ -170,13 +158,16 @@ if config["enable"].get("build_natura_raster", False): rule build_natura_raster: input: natura=ancient("data/bundle/natura/Natura2000_end2015.shp"), - cutouts=expand("cutouts/" + CDIR + "{cutouts}.nc", **config["atlite"]), + cutout=lambda w: "cutouts/" + + CDIR + + config_provider("atlite", "default_cutout")(w) + + ".nc", output: - RESOURCES + "natura.tiff", + resources("natura.tiff"), resources: mem_mb=5000, log: - LOGS + "build_natura_raster.log", + logs("build_natura_raster.log"), conda: "../envs/environment.yaml" script: @@ -186,21 +177,18 @@ if config["enable"].get("build_natura_raster", False): rule build_ship_raster: input: ship_density="data/shipdensity_global.zip", - cutouts=expand( - "cutouts/" + CDIR + "{cutout}.nc", - cutout=[ - config["renewable"][k]["cutout"] - for k in config["electricity"]["renewable_carriers"] - ], - ), + cutout=lambda w: "cutouts/" + + CDIR + + config_provider("atlite", "default_cutout")(w) + + ".nc", output: - RESOURCES + "shipdensity_raster.tif", + resources("shipdensity_raster.tif"), log: - LOGS + "build_ship_raster.log", + logs("build_ship_raster.log"), resources: mem_mb=5000, benchmark: - BENCHMARKS + "build_ship_raster" + benchmarks("build_ship_raster") conda: "../envs/environment.yaml" script: @@ -214,33 +202,33 @@ rule determine_availability_matrix_MD_UA: wdpa_marine="data/WDPA_WDOECM_marine.gpkg", gebco=lambda w: ( "data/bundle/GEBCO_2014_2D.nc" - if "max_depth" in config["renewable"][w.technology].keys() + if config_provider("renewable", w.technology)(w).get("max_depth") else [] ), ship_density=lambda w: ( - RESOURCES + "shipdensity_raster.tif" - if "ship_threshold" in config["renewable"][w.technology].keys() + resources("shipdensity_raster.tif") + if "ship_threshold" in config_provider("renewable", w.technology)(w).keys() else [] ), - country_shapes=RESOURCES + "country_shapes.geojson", - offshore_shapes=RESOURCES + "offshore_shapes.geojson", + country_shapes=resources("country_shapes.geojson"), + offshore_shapes=resources("offshore_shapes.geojson"), regions=lambda w: ( - RESOURCES + "regions_onshore.geojson" + resources("regions_onshore.geojson") if w.technology in ("onwind", "solar") - else RESOURCES + "regions_offshore.geojson" + else 
resources("regions_offshore.geojson") ), cutout=lambda w: "cutouts/" + CDIR - + config["renewable"][w.technology]["cutout"] + + config_provider("renewable", w.technology, "cutout")(w) + ".nc", output: - availability_matrix=RESOURCES + "availability_matrix_MD-UA_{technology}.nc", - availability_map=RESOURCES + "availability_matrix_MD-UA_{technology}.png", + availability_matrix=resources("availability_matrix_MD-UA_{technology}.nc"), + availability_map=resources("availability_matrix_MD-UA_{technology}.png"), log: - LOGS + "determine_availability_matrix_MD_UA_{technology}.log", - threads: ATLITE_NPROCESSES + logs("determine_availability_matrix_MD_UA_{technology}.log"), + threads: config["atlite"].get("nprocesses", 4) resources: - mem_mb=ATLITE_NPROCESSES * 5000, + mem_mb=config["atlite"].get("nprocesses", 4) * 5000, conda: "../envs/environment.yaml" script: @@ -248,68 +236,71 @@ rule determine_availability_matrix_MD_UA: # Optional input when having Ukraine (UA) or Moldova (MD) in the countries list -if {"UA", "MD"}.intersection(set(config["countries"])): - opt = { - "availability_matrix_MD_UA": RESOURCES - + "availability_matrix_MD-UA_{technology}.nc" - } -else: - opt = {} +def input_ua_md_availability_matrix(w): + countries = set(config_provider("countries")(w)) + if {"UA", "MD"}.intersection(countries): + return { + "availability_matrix_MD_UA": resources( + "availability_matrix_MD-UA_{technology}.nc" + ) + } + return {} rule build_renewable_profiles: params: - snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, - renewable=config["renewable"], + snapshots=config_provider("snapshots"), + drop_leap_day=config_provider("enable", "drop_leap_day"), + renewable=config_provider("renewable"), input: - **opt, - base_network=RESOURCES + "networks/base.nc", + unpack(input_ua_md_availability_matrix), + base_network=resources("networks/base.nc"), corine=ancient("data/bundle/corine/g250_clc06_V18_5.tif"), natura=lambda w: ( - RESOURCES + "natura.tiff" - if config["renewable"][w.technology]["natura"] + resources("natura.tiff") + if config_provider("renewable", w.technology, "natura")(w) else [] ), luisa=lambda w: ( "data/LUISA_basemap_020321_50m.tif" - if config["renewable"][w.technology].get("luisa") + if config_provider("renewable", w.technology, "luisa")(w) else [] ), gebco=ancient( lambda w: ( "data/bundle/GEBCO_2014_2D.nc" if ( - config["renewable"][w.technology].get("max_depth") - or config["renewable"][w.technology].get("min_depth") + config_provider("renewable", w.technology)(w).get("max_depth") + or config_provider("renewable", w.technology)(w).get("min_depth") ) else [] ) ), ship_density=lambda w: ( - RESOURCES + "shipdensity_raster.tif" - if config["renewable"][w.technology].get("ship_threshold", False) + resources("shipdensity_raster.tif") + if "ship_threshold" in config_provider("renewable", w.technology)(w).keys() else [] ), - country_shapes=RESOURCES + "country_shapes.geojson", - offshore_shapes=RESOURCES + "offshore_shapes.geojson", + country_shapes=resources("country_shapes.geojson"), + offshore_shapes=resources("offshore_shapes.geojson"), regions=lambda w: ( - RESOURCES + "regions_onshore.geojson" + resources("regions_onshore.geojson") if w.technology in ("onwind", "solar") - else RESOURCES + "regions_offshore.geojson" + else resources("regions_offshore.geojson") ), cutout=lambda w: "cutouts/" + CDIR - + config["renewable"][w.technology]["cutout"] + + config_provider("renewable", w.technology, "cutout")(w) + ".nc", output: - profile=RESOURCES + 
"profile_{technology}.nc", + profile=resources("profile_{technology}.nc"), log: - LOGS + "build_renewable_profile_{technology}.log", + logs("build_renewable_profile_{technology}.log"), benchmark: - BENCHMARKS + "build_renewable_profiles_{technology}" - threads: ATLITE_NPROCESSES + benchmarks("build_renewable_profiles_{technology}") + threads: config["atlite"].get("nprocesses", 4) resources: - mem_mb=ATLITE_NPROCESSES * 5000, + mem_mb=config["atlite"].get("nprocesses", 4) * 5000, wildcard_constraints: technology="(?!hydro).*", # Any technology other than hydro conda: @@ -323,10 +314,10 @@ rule build_monthly_prices: co2_price_raw="data/validation/emission-spot-primary-market-auction-report-2019-data.xls", fuel_price_raw="data/validation/energy-price-trends-xlsx-5619002.xlsx", output: - co2_price=RESOURCES + "co2_price.csv", - fuel_price=RESOURCES + "monthly_fuel_price.csv", + co2_price=resources("co2_price.csv"), + fuel_price=resources("monthly_fuel_price.csv"), log: - LOGS + "build_monthly_prices.log", + logs("build_monthly_prices.log"), threads: 1 resources: mem_mb=5000, @@ -338,16 +329,23 @@ rule build_monthly_prices: rule build_hydro_profile: params: - hydro=config["renewable"]["hydro"], - countries=config["countries"], + hydro=config_provider("renewable", "hydro"), + countries=config_provider("countries"), + snapshots=config_provider("snapshots"), + drop_leap_day=config_provider("enable", "drop_leap_day"), input: - country_shapes=RESOURCES + "country_shapes.geojson", + country_shapes=resources("country_shapes.geojson"), eia_hydro_generation="data/eia_hydro_annual_generation.csv", - cutout=f"cutouts/" + CDIR + config["renewable"]["hydro"]["cutout"] + ".nc", + eia_hydro_capacity="data/eia_hydro_annual_capacity.csv", + era5_runoff="data/era5-annual-runoff-per-country.csv", + cutout=lambda w: f"cutouts/" + + CDIR + + config_provider("renewable", "hydro", "cutout")(w) + + ".nc", output: - RESOURCES + "profile_hydro.nc", + profile=resources("profile_hydro.nc"), log: - LOGS + "build_hydro_profile.log", + logs("build_hydro_profile.log"), resources: mem_mb=5000, conda: @@ -356,75 +354,90 @@ rule build_hydro_profile: "../scripts/build_hydro_profile.py" -if config["lines"]["dynamic_line_rating"]["activate"]: +rule build_line_rating: + params: + snapshots=config_provider("snapshots"), + drop_leap_day=config_provider("enable", "drop_leap_day"), + input: + base_network=resources("networks/base.nc"), + cutout=lambda w: "cutouts/" + + CDIR + + config_provider("lines", "dynamic_line_rating", "cutout")(w) + + ".nc", + output: + output=resources("networks/line_rating.nc"), + log: + logs("build_line_rating.log"), + benchmark: + benchmarks("build_line_rating") + threads: config["atlite"].get("nprocesses", 4) + resources: + mem_mb=config["atlite"].get("nprocesses", 4) * 1000, + conda: + "../envs/environment.yaml" + script: + "../scripts/build_line_rating.py" - rule build_line_rating: - params: - snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, - input: - base_network=RESOURCES + "networks/base.nc", - cutout="cutouts/" - + CDIR - + config["lines"]["dynamic_line_rating"]["cutout"] - + ".nc", - output: - output=RESOURCES + "networks/line_rating.nc", - log: - LOGS + "build_line_rating.log", - benchmark: - BENCHMARKS + "build_line_rating" - threads: ATLITE_NPROCESSES - resources: - mem_mb=ATLITE_NPROCESSES * 1000, - conda: - "../envs/environment.yaml" - script: - "../scripts/build_line_rating.py" + +def input_profile_tech(w): + return { + f"profile_{tech}": 
resources(f"profile_{tech}.nc") + for tech in config_provider("electricity", "renewable_carriers")(w) + } + + +def input_conventional(w): + return { + f"conventional_{carrier}_{attr}": fn + for carrier, d in config_provider("conventional", default={None: {}})(w).items() + if carrier in config_provider("electricity", "conventional_carriers")(w) + for attr, fn in d.items() + if str(fn).startswith("data/") + } rule add_electricity: params: - length_factor=config["lines"]["length_factor"], - scaling_factor=config["load"]["scaling_factor"], - countries=config["countries"], - renewable=config["renewable"], - electricity=config["electricity"], - conventional=config["conventional"], - costs=config["costs"], + length_factor=config_provider("lines", "length_factor"), + scaling_factor=config_provider("load", "scaling_factor"), + countries=config_provider("countries"), + snapshots=config_provider("snapshots"), + renewable=config_provider("renewable"), + electricity=config_provider("electricity"), + conventional=config_provider("conventional"), + costs=config_provider("costs"), + drop_leap_day=config_provider("enable", "drop_leap_day"), input: - **{ - f"profile_{tech}": RESOURCES + f"profile_{tech}.nc" - for tech in config["electricity"]["renewable_carriers"] - }, - **{ - f"conventional_{carrier}_{attr}": fn - for carrier, d in config.get("conventional", {None: {}}).items() - if carrier in config["electricity"]["conventional_carriers"] - for attr, fn in d.items() - if str(fn).startswith("data/") - }, - base_network=RESOURCES + "networks/base.nc", - line_rating=RESOURCES + "networks/line_rating.nc" - if config["lines"]["dynamic_line_rating"]["activate"] - else RESOURCES + "networks/base.nc", - tech_costs=COSTS, - regions=RESOURCES + "regions_onshore.geojson", - powerplants=RESOURCES + "powerplants.csv", + unpack(input_profile_tech), + unpack(input_conventional), + base_network=resources("networks/base.nc"), + line_rating=lambda w: ( + resources("networks/line_rating.nc") + if config_provider("lines", "dynamic_line_rating", "activate")(w) + else resources("networks/base.nc") + ), + tech_costs=lambda w: resources( + f"costs_{config_provider('costs', 'year') (w)}.csv" + ), + regions=resources("regions_onshore.geojson"), + powerplants=resources("powerplants.csv"), hydro_capacities=ancient("data/bundle/hydro_capacities.csv"), geth_hydro_capacities="data/geth2015_hydro_capacities.csv", unit_commitment="data/unit_commitment.csv", - fuel_price=RESOURCES + "monthly_fuel_price.csv" - if config["conventional"]["dynamic_fuel_price"] - else [], - load=RESOURCES + "load.csv", - nuts3_shapes=RESOURCES + "nuts3_shapes.geojson", + fuel_price=lambda w: ( + resources("monthly_fuel_price.csv") + if config_provider("conventional", "dynamic_fuel_price")(w) + else [] + ), + load=resources("electricity_demand.csv"), + nuts3_shapes=resources("nuts3_shapes.geojson"), ua_md_gdp="data/GDP_PPP_30arcsec_v3_mapped_default.csv", output: - RESOURCES + "networks/elec.nc", + resources("networks/elec.nc"), log: - LOGS + "add_electricity.log", + logs("add_electricity.log"), benchmark: - BENCHMARKS + "add_electricity" + benchmarks("add_electricity") threads: 1 resources: mem_mb=10000, @@ -436,31 +449,32 @@ rule add_electricity: rule simplify_network: params: - simplify_network=config["clustering"]["simplify_network"], - aggregation_strategies=config["clustering"].get("aggregation_strategies", {}), - focus_weights=config["clustering"].get( - "focus_weights", config.get("focus_weights") + simplify_network=config_provider("clustering", 
"simplify_network"), + aggregation_strategies=config_provider( + "clustering", "aggregation_strategies", default={} ), - renewable_carriers=config["electricity"]["renewable_carriers"], - max_hours=config["electricity"]["max_hours"], - length_factor=config["lines"]["length_factor"], - p_max_pu=config["links"].get("p_max_pu", 1.0), - costs=config["costs"], + focus_weights=config_provider("clustering", "focus_weights", default=None), + renewable_carriers=config_provider("electricity", "renewable_carriers"), + max_hours=config_provider("electricity", "max_hours"), + length_factor=config_provider("lines", "length_factor"), + p_max_pu=config_provider("links", "p_max_pu", default=1.0), + costs=config_provider("costs"), input: - network=RESOURCES + "networks/elec.nc", - tech_costs=COSTS, - regions_onshore=RESOURCES + "regions_onshore.geojson", - regions_offshore=RESOURCES + "regions_offshore.geojson", + network=resources("networks/elec.nc"), + tech_costs=lambda w: resources( + f"costs_{config_provider('costs', 'year') (w)}.csv" + ), + regions_onshore=resources("regions_onshore.geojson"), + regions_offshore=resources("regions_offshore.geojson"), output: - network=RESOURCES + "networks/elec_s{simpl}.nc", - regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}.geojson", - regions_offshore=RESOURCES + "regions_offshore_elec_s{simpl}.geojson", - busmap=RESOURCES + "busmap_elec_s{simpl}.csv", - connection_costs=RESOURCES + "connection_costs_s{simpl}.csv", + network=resources("networks/elec_s{simpl}.nc"), + regions_onshore=resources("regions_onshore_elec_s{simpl}.geojson"), + regions_offshore=resources("regions_offshore_elec_s{simpl}.geojson"), + busmap=resources("busmap_elec_s{simpl}.csv"), log: - LOGS + "simplify_network/elec_s{simpl}.log", + logs("simplify_network/elec_s{simpl}.log"), benchmark: - BENCHMARKS + "simplify_network/elec_s{simpl}" + benchmarks("simplify_network/elec_s{simpl}") threads: 1 resources: mem_mb=12000, @@ -472,38 +486,42 @@ rule simplify_network: rule cluster_network: params: - cluster_network=config["clustering"]["cluster_network"], - aggregation_strategies=config["clustering"].get("aggregation_strategies", {}), - custom_busmap=config["enable"].get("custom_busmap", False), - focus_weights=config["clustering"].get( - "focus_weights", config.get("focus_weights") + cluster_network=config_provider("clustering", "cluster_network"), + aggregation_strategies=config_provider( + "clustering", "aggregation_strategies", default={} ), - renewable_carriers=config["electricity"]["renewable_carriers"], - conventional_carriers=config["electricity"].get("conventional_carriers", []), - max_hours=config["electricity"]["max_hours"], - length_factor=config["lines"]["length_factor"], - costs=config["costs"], + custom_busmap=config_provider("enable", "custom_busmap", default=False), + focus_weights=config_provider("clustering", "focus_weights", default=None), + renewable_carriers=config_provider("electricity", "renewable_carriers"), + conventional_carriers=config_provider( + "electricity", "conventional_carriers", default=[] + ), + max_hours=config_provider("electricity", "max_hours"), + length_factor=config_provider("lines", "length_factor"), + costs=config_provider("costs"), input: - network=RESOURCES + "networks/elec_s{simpl}.nc", - regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}.geojson", - regions_offshore=RESOURCES + "regions_offshore_elec_s{simpl}.geojson", - busmap=ancient(RESOURCES + "busmap_elec_s{simpl}.csv"), - custom_busmap=( + 
network=resources("networks/elec_s{simpl}.nc"), + regions_onshore=resources("regions_onshore_elec_s{simpl}.geojson"), + regions_offshore=resources("regions_offshore_elec_s{simpl}.geojson"), + busmap=ancient(resources("busmap_elec_s{simpl}.csv")), + custom_busmap=lambda w: ( "data/custom_busmap_elec_s{simpl}_{clusters}.csv" - if config["enable"].get("custom_busmap", False) + if config_provider("enable", "custom_busmap", default=False)(w) else [] ), - tech_costs=COSTS, + tech_costs=lambda w: resources( + f"costs_{config_provider('costs', 'year') (w)}.csv" + ), output: - network=RESOURCES + "networks/elec_s{simpl}_{clusters}.nc", - regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", - regions_offshore=RESOURCES + "regions_offshore_elec_s{simpl}_{clusters}.geojson", - busmap=RESOURCES + "busmap_elec_s{simpl}_{clusters}.csv", - linemap=RESOURCES + "linemap_elec_s{simpl}_{clusters}.csv", + network=resources("networks/elec_s{simpl}_{clusters}.nc"), + regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), + regions_offshore=resources("regions_offshore_elec_s{simpl}_{clusters}.geojson"), + busmap=resources("busmap_elec_s{simpl}_{clusters}.csv"), + linemap=resources("linemap_elec_s{simpl}_{clusters}.csv"), log: - LOGS + "cluster_network/elec_s{simpl}_{clusters}.log", + logs("cluster_network/elec_s{simpl}_{clusters}.log"), benchmark: - BENCHMARKS + "cluster_network/elec_s{simpl}_{clusters}" + benchmarks("cluster_network/elec_s{simpl}_{clusters}") threads: 1 resources: mem_mb=10000, @@ -515,18 +533,20 @@ rule cluster_network: rule add_extra_components: params: - extendable_carriers=config["electricity"]["extendable_carriers"], - max_hours=config["electricity"]["max_hours"], - costs=config["costs"], + extendable_carriers=config_provider("electricity", "extendable_carriers"), + max_hours=config_provider("electricity", "max_hours"), + costs=config_provider("costs"), input: - network=RESOURCES + "networks/elec_s{simpl}_{clusters}.nc", - tech_costs=COSTS, + network=resources("networks/elec_s{simpl}_{clusters}.nc"), + tech_costs=lambda w: resources( + f"costs_{config_provider('costs', 'year') (w)}.csv" + ), output: - RESOURCES + "networks/elec_s{simpl}_{clusters}_ec.nc", + resources("networks/elec_s{simpl}_{clusters}_ec.nc"), log: - LOGS + "add_extra_components/elec_s{simpl}_{clusters}.log", + logs("add_extra_components/elec_s{simpl}_{clusters}.log"), benchmark: - BENCHMARKS + "add_extra_components/elec_s{simpl}_{clusters}_ec" + benchmarks("add_extra_components/elec_s{simpl}_{clusters}_ec") threads: 1 resources: mem_mb=4000, @@ -538,30 +558,31 @@ rule add_extra_components: rule prepare_network: params: - snapshots={ - "resolution": config["snapshots"].get("resolution", False), - "segmentation": config["snapshots"].get("segmentation", False), - }, - links=config["links"], - lines=config["lines"], - co2base=config["electricity"]["co2base"], - co2limit_enable=config["electricity"].get("co2limit_enable", False), - co2limit=config["electricity"]["co2limit"], - gaslimit_enable=config["electricity"].get("gaslimit_enable", False), - gaslimit=config["electricity"].get("gaslimit"), - max_hours=config["electricity"]["max_hours"], - costs=config["costs"], - autarky=config["electricity"].get("autarky", {}), + time_resolution=config_provider("clustering", "temporal", "resolution_elec"), + links=config_provider("links"), + lines=config_provider("lines"), + co2base=config_provider("electricity", "co2base"), + co2limit_enable=config_provider("electricity", 
"co2limit_enable", default=False), + co2limit=config_provider("electricity", "co2limit"), + gaslimit_enable=config_provider("electricity", "gaslimit_enable", default=False), + gaslimit=config_provider("electricity", "gaslimit"), + max_hours=config_provider("electricity", "max_hours"), + costs=config_provider("costs"), + adjustments=config_provider("adjustments", "electricity"), + autarky=config_provider("electricity", "autarky", default={}), + drop_leap_day=config_provider("enable", "drop_leap_day"), input: - RESOURCES + "networks/elec_s{simpl}_{clusters}_ec.nc", - tech_costs=COSTS, - co2_price=lambda w: RESOURCES + "co2_price.csv" if "Ept" in w.opts else [], + resources("networks/elec_s{simpl}_{clusters}_ec.nc"), + tech_costs=lambda w: resources( + f"costs_{config_provider('costs', 'year') (w)}.csv" + ), + co2_price=lambda w: resources("co2_price.csv") if "Ept" in w.opts else [], output: - RESOURCES + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", + resources("networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"), log: - LOGS + "prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.log", + logs("prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.log"), benchmark: - (BENCHMARKS + "prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}") + (benchmarks("prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}")) threads: 1 resources: mem_mb=4000, diff --git a/rules/build_sector.smk b/rules/build_sector.smk index 85d99ffa..a6ecb3df 100644 --- a/rules/build_sector.smk +++ b/rules/build_sector.smk @@ -1,23 +1,26 @@ -# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2023-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT rule build_population_layouts: input: - nuts3_shapes=RESOURCES + "nuts3_shapes.geojson", + nuts3_shapes=resources("nuts3_shapes.geojson"), urban_percent="data/urban_percent.csv", - cutout="cutouts/" + CDIR + config["atlite"]["default_cutout"] + ".nc", + cutout=lambda w: "cutouts/" + + CDIR + + config_provider("atlite", "default_cutout")(w) + + ".nc", output: - pop_layout_total=RESOURCES + "pop_layout_total.nc", - pop_layout_urban=RESOURCES + "pop_layout_urban.nc", - pop_layout_rural=RESOURCES + "pop_layout_rural.nc", + pop_layout_total=resources("pop_layout_total.nc"), + pop_layout_urban=resources("pop_layout_urban.nc"), + pop_layout_rural=resources("pop_layout_rural.nc"), log: - LOGS + "build_population_layouts.log", + logs("build_population_layouts.log"), resources: mem_mb=20000, benchmark: - BENCHMARKS + "build_population_layouts" + benchmarks("build_population_layouts") threads: 8 conda: "../envs/environment.yaml" @@ -27,19 +30,22 @@ rule build_population_layouts: rule build_clustered_population_layouts: input: - pop_layout_total=RESOURCES + "pop_layout_total.nc", - pop_layout_urban=RESOURCES + "pop_layout_urban.nc", - pop_layout_rural=RESOURCES + "pop_layout_rural.nc", - regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", - cutout="cutouts/" + CDIR + config["atlite"]["default_cutout"] + ".nc", + pop_layout_total=resources("pop_layout_total.nc"), + pop_layout_urban=resources("pop_layout_urban.nc"), + pop_layout_rural=resources("pop_layout_rural.nc"), + regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), + cutout=lambda w: "cutouts/" + + CDIR + + config_provider("atlite", "default_cutout")(w) + + ".nc", output: - clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv", + 
clustered_pop_layout=resources("pop_layout_elec_s{simpl}_{clusters}.csv"), log: - LOGS + "build_clustered_population_layouts_{simpl}_{clusters}.log", + logs("build_clustered_population_layouts_{simpl}_{clusters}.log"), resources: mem_mb=10000, benchmark: - BENCHMARKS + "build_clustered_population_layouts/s{simpl}_{clusters}" + benchmarks("build_clustered_population_layouts/s{simpl}_{clusters}") conda: "../envs/environment.yaml" script: @@ -48,19 +54,22 @@ rule build_clustered_population_layouts: rule build_simplified_population_layouts: input: - pop_layout_total=RESOURCES + "pop_layout_total.nc", - pop_layout_urban=RESOURCES + "pop_layout_urban.nc", - pop_layout_rural=RESOURCES + "pop_layout_rural.nc", - regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}.geojson", - cutout="cutouts/" + CDIR + config["atlite"]["default_cutout"] + ".nc", + pop_layout_total=resources("pop_layout_total.nc"), + pop_layout_urban=resources("pop_layout_urban.nc"), + pop_layout_rural=resources("pop_layout_rural.nc"), + regions_onshore=resources("regions_onshore_elec_s{simpl}.geojson"), + cutout=lambda w: "cutouts/" + + CDIR + + config_provider("atlite", "default_cutout")(w) + + ".nc", output: - clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}.csv", + clustered_pop_layout=resources("pop_layout_elec_s{simpl}.csv"), resources: mem_mb=10000, log: - LOGS + "build_simplified_population_layouts_{simpl}", + logs("build_simplified_population_layouts_{simpl}"), benchmark: - BENCHMARKS + "build_simplified_population_layouts/s{simpl}" + benchmarks("build_simplified_population_layouts/s{simpl}") conda: "../envs/environment.yaml" script: @@ -71,11 +80,11 @@ rule build_gas_network: input: gas_network="data/gas_network/scigrid-gas/data/IGGIELGN_PipeSegments.geojson", output: - cleaned_gas_network=RESOURCES + "gas_network.csv", + cleaned_gas_network=resources("gas_network.csv"), resources: mem_mb=4000, log: - LOGS + "build_gas_network.log", + logs("build_gas_network.log"), conda: "../envs/environment.yaml" script: @@ -84,22 +93,23 @@ rule build_gas_network: rule build_gas_input_locations: input: - gem=HTTP.remote( + gem=storage( "https://globalenergymonitor.org/wp-content/uploads/2023/07/Europe-Gas-Tracker-2023-03-v3.xlsx", keep_local=True, ), entry="data/gas_network/scigrid-gas/data/IGGIELGN_BorderPoints.geojson", storage="data/gas_network/scigrid-gas/data/IGGIELGN_Storages.geojson", - regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", - regions_offshore=RESOURCES + "regions_offshore_elec_s{simpl}_{clusters}.geojson", + regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), + regions_offshore=resources("regions_offshore_elec_s{simpl}_{clusters}.geojson"), output: - gas_input_nodes=RESOURCES + "gas_input_locations_s{simpl}_{clusters}.geojson", - gas_input_nodes_simplified=RESOURCES - + "gas_input_locations_s{simpl}_{clusters}_simplified.csv", + gas_input_nodes=resources("gas_input_locations_s{simpl}_{clusters}.geojson"), + gas_input_nodes_simplified=resources( + "gas_input_locations_s{simpl}_{clusters}_simplified.csv" + ), resources: mem_mb=2000, log: - LOGS + "build_gas_input_locations_s{simpl}_{clusters}.log", + logs("build_gas_input_locations_s{simpl}_{clusters}.log"), conda: "../envs/environment.yaml" script: @@ -108,60 +118,97 @@ rule build_gas_input_locations: rule cluster_gas_network: input: - cleaned_gas_network=RESOURCES + "gas_network.csv", - regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", - regions_offshore=RESOURCES + 
"regions_offshore_elec_s{simpl}_{clusters}.geojson", + cleaned_gas_network=resources("gas_network.csv"), + regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), + regions_offshore=resources("regions_offshore_elec_s{simpl}_{clusters}.geojson"), output: - clustered_gas_network=RESOURCES + "gas_network_elec_s{simpl}_{clusters}.csv", + clustered_gas_network=resources("gas_network_elec_s{simpl}_{clusters}.csv"), resources: mem_mb=4000, log: - LOGS + "cluster_gas_network_s{simpl}_{clusters}.log", + logs("cluster_gas_network_s{simpl}_{clusters}.log"), conda: "../envs/environment.yaml" script: "../scripts/cluster_gas_network.py" -rule build_heat_demands: +def heat_demand_cutout(wildcards): + c = config_provider("sector", "heat_demand_cutout")(wildcards) + if c == "default": + return ( + "cutouts/" + + CDIR + + config_provider("atlite", "default_cutout")(wildcards) + + ".nc" + ) + else: + return "cutouts/" + CDIR + c + ".nc" + + +rule build_daily_heat_demand: params: - snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, + snapshots=config_provider("snapshots"), + drop_leap_day=config_provider("enable", "drop_leap_day"), input: - pop_layout=RESOURCES + "pop_layout_{scope}.nc", - regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", - cutout="cutouts/" + CDIR + config["atlite"]["default_cutout"] + ".nc", + pop_layout=resources("pop_layout_{scope}.nc"), + regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), + cutout=heat_demand_cutout, output: - heat_demand=RESOURCES + "heat_demand_{scope}_elec_s{simpl}_{clusters}.nc", + heat_demand=resources("daily_heat_demand_{scope}_elec_s{simpl}_{clusters}.nc"), resources: mem_mb=20000, threads: 8 log: - LOGS + "build_heat_demands_{scope}_{simpl}_{clusters}.loc", + logs("build_daily_heat_demand_{scope}_{simpl}_{clusters}.loc"), benchmark: - BENCHMARKS + "build_heat_demands/{scope}_s{simpl}_{clusters}" + benchmarks("build_daily_heat_demand/{scope}_s{simpl}_{clusters}") conda: "../envs/environment.yaml" script: - "../scripts/build_heat_demand.py" + "../scripts/build_daily_heat_demand.py" + + +rule build_hourly_heat_demand: + params: + snapshots=config_provider("snapshots"), + drop_leap_day=config_provider("enable", "drop_leap_day"), + input: + heat_profile="data/heat_load_profile_BDEW.csv", + heat_demand=resources("daily_heat_demand_{scope}_elec_s{simpl}_{clusters}.nc"), + output: + heat_demand=resources("hourly_heat_demand_{scope}_elec_s{simpl}_{clusters}.nc"), + resources: + mem_mb=2000, + threads: 8 + log: + logs("build_hourly_heat_demand_{scope}_{simpl}_{clusters}.loc"), + benchmark: + benchmarks("build_hourly_heat_demand/{scope}_s{simpl}_{clusters}") + conda: + "../envs/environment.yaml" + script: + "../scripts/build_hourly_heat_demand.py" rule build_temperature_profiles: params: - snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, + snapshots=config_provider("snapshots"), + drop_leap_day=config_provider("enable", "drop_leap_day"), input: - pop_layout=RESOURCES + "pop_layout_{scope}.nc", - regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", - cutout="cutouts/" + CDIR + config["atlite"]["default_cutout"] + ".nc", + pop_layout=resources("pop_layout_{scope}.nc"), + regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), + cutout=heat_demand_cutout, output: - temp_soil=RESOURCES + "temp_soil_{scope}_elec_s{simpl}_{clusters}.nc", - temp_air=RESOURCES + 
"temp_air_{scope}_elec_s{simpl}_{clusters}.nc", + temp_soil=resources("temp_soil_{scope}_elec_s{simpl}_{clusters}.nc"), + temp_air=resources("temp_air_{scope}_elec_s{simpl}_{clusters}.nc"), resources: mem_mb=20000, threads: 8 log: - LOGS + "build_temperature_profiles_{scope}_{simpl}_{clusters}.log", + logs("build_temperature_profiles_{scope}_{simpl}_{clusters}.log"), benchmark: - BENCHMARKS + "build_temperature_profiles/{scope}_s{simpl}_{clusters}" + benchmarks("build_temperature_profiles/{scope}_s{simpl}_{clusters}") conda: "../envs/environment.yaml" script: @@ -170,50 +217,64 @@ rule build_temperature_profiles: rule build_cop_profiles: params: - heat_pump_sink_T=config["sector"]["heat_pump_sink_T"], + heat_pump_sink_T=config_provider("sector", "heat_pump_sink_T"), input: - temp_soil_total=RESOURCES + "temp_soil_total_elec_s{simpl}_{clusters}.nc", - temp_soil_rural=RESOURCES + "temp_soil_rural_elec_s{simpl}_{clusters}.nc", - temp_soil_urban=RESOURCES + "temp_soil_urban_elec_s{simpl}_{clusters}.nc", - temp_air_total=RESOURCES + "temp_air_total_elec_s{simpl}_{clusters}.nc", - temp_air_rural=RESOURCES + "temp_air_rural_elec_s{simpl}_{clusters}.nc", - temp_air_urban=RESOURCES + "temp_air_urban_elec_s{simpl}_{clusters}.nc", + temp_soil_total=resources("temp_soil_total_elec_s{simpl}_{clusters}.nc"), + temp_soil_rural=resources("temp_soil_rural_elec_s{simpl}_{clusters}.nc"), + temp_soil_urban=resources("temp_soil_urban_elec_s{simpl}_{clusters}.nc"), + temp_air_total=resources("temp_air_total_elec_s{simpl}_{clusters}.nc"), + temp_air_rural=resources("temp_air_rural_elec_s{simpl}_{clusters}.nc"), + temp_air_urban=resources("temp_air_urban_elec_s{simpl}_{clusters}.nc"), output: - cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc", - cop_soil_rural=RESOURCES + "cop_soil_rural_elec_s{simpl}_{clusters}.nc", - cop_soil_urban=RESOURCES + "cop_soil_urban_elec_s{simpl}_{clusters}.nc", - cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc", - cop_air_rural=RESOURCES + "cop_air_rural_elec_s{simpl}_{clusters}.nc", - cop_air_urban=RESOURCES + "cop_air_urban_elec_s{simpl}_{clusters}.nc", + cop_soil_total=resources("cop_soil_total_elec_s{simpl}_{clusters}.nc"), + cop_soil_rural=resources("cop_soil_rural_elec_s{simpl}_{clusters}.nc"), + cop_soil_urban=resources("cop_soil_urban_elec_s{simpl}_{clusters}.nc"), + cop_air_total=resources("cop_air_total_elec_s{simpl}_{clusters}.nc"), + cop_air_rural=resources("cop_air_rural_elec_s{simpl}_{clusters}.nc"), + cop_air_urban=resources("cop_air_urban_elec_s{simpl}_{clusters}.nc"), resources: mem_mb=20000, log: - LOGS + "build_cop_profiles_s{simpl}_{clusters}.log", + logs("build_cop_profiles_s{simpl}_{clusters}.log"), benchmark: - BENCHMARKS + "build_cop_profiles/s{simpl}_{clusters}" + benchmarks("build_cop_profiles/s{simpl}_{clusters}") conda: "../envs/environment.yaml" script: "../scripts/build_cop_profiles.py" +def solar_thermal_cutout(wildcards): + c = config_provider("solar_thermal", "cutout")(wildcards) + if c == "default": + return ( + "cutouts/" + + CDIR + + config_provider("atlite", "default_cutout")(wildcards) + + ".nc" + ) + else: + return "cutouts/" + CDIR + c + ".nc" + + rule build_solar_thermal_profiles: params: - snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, - solar_thermal=config["solar_thermal"], + snapshots=config_provider("snapshots"), + drop_leap_day=config_provider("enable", "drop_leap_day"), + solar_thermal=config_provider("solar_thermal"), input: - pop_layout=RESOURCES + 
"pop_layout_{scope}.nc", - regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", - cutout="cutouts/" + CDIR + config["atlite"]["default_cutout"] + ".nc", + pop_layout=resources("pop_layout_{scope}.nc"), + regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), + cutout=solar_thermal_cutout, output: - solar_thermal=RESOURCES + "solar_thermal_{scope}_elec_s{simpl}_{clusters}.nc", + solar_thermal=resources("solar_thermal_{scope}_elec_s{simpl}_{clusters}.nc"), resources: mem_mb=20000, threads: 16 log: - LOGS + "build_solar_thermal_profiles_{scope}_s{simpl}_{clusters}.log", + logs("build_solar_thermal_profiles_{scope}_s{simpl}_{clusters}.log"), benchmark: - BENCHMARKS + "build_solar_thermal_profiles/{scope}_s{simpl}_{clusters}" + benchmarks("build_solar_thermal_profiles/{scope}_s{simpl}_{clusters}") conda: "../envs/environment.yaml" script: @@ -222,147 +283,151 @@ rule build_solar_thermal_profiles: rule build_energy_totals: params: - countries=config["countries"], - energy=config["energy"], + countries=config_provider("countries"), + energy=config_provider("energy"), input: - nuts3_shapes=RESOURCES + "nuts3_shapes.geojson", + nuts3_shapes=resources("nuts3_shapes.geojson"), co2="data/bundle-sector/eea/UNFCCC_v23.csv", - swiss="data/bundle-sector/switzerland-sfoe/switzerland-new_format.csv", + swiss="data/switzerland-new_format-all_years.csv", + swiss_transport="data/gr-e-11.03.02.01.01-cc.csv", idees="data/bundle-sector/jrc-idees-2015", district_heat_share="data/district_heat_share.csv", - eurostat=input_eurostat, + eurostat="data/eurostat/eurostat-energy_balances-april_2023_edition", output: - energy_name=RESOURCES + "energy_totals.csv", - co2_name=RESOURCES + "co2_totals.csv", - transport_name=RESOURCES + "transport_data.csv", + energy_name=resources("energy_totals.csv"), + co2_name=resources("co2_totals.csv"), + transport_name=resources("transport_data.csv"), + district_heat_share=resources("district_heat_share.csv"), threads: 16 resources: mem_mb=10000, log: - LOGS + "build_energy_totals.log", + logs("build_energy_totals.log"), benchmark: - BENCHMARKS + "build_energy_totals" + benchmarks("build_energy_totals") conda: "../envs/environment.yaml" script: "../scripts/build_energy_totals.py" +rule build_heat_totals: + input: + hdd="data/era5-annual-HDD-per-country.csv", + energy_totals=resources("energy_totals.csv"), + output: + heat_totals=resources("heat_totals.csv"), + threads: 1 + resources: + mem_mb=2000, + log: + logs("build_heat_totals.log"), + benchmark: + benchmarks("build_heat_totals") + conda: + "../envs/environment.yaml" + script: + "../scripts/build_heat_totals.py" + + rule build_biomass_potentials: params: - biomass=config["biomass"], + biomass=config_provider("biomass"), input: - enspreso_biomass=HTTP.remote( + enspreso_biomass=storage( "https://zenodo.org/records/10356004/files/ENSPRESO_BIOMASS.xlsx", keep_local=True, ), nuts2="data/bundle-sector/nuts/NUTS_RG_10M_2013_4326_LEVL_2.geojson", # https://gisco-services.ec.europa.eu/distribution/v2/nuts/download/#nuts21 - regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", + regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), nuts3_population=ancient("data/bundle/nama_10r_3popgdp.tsv.gz"), swiss_cantons=ancient("data/bundle/ch_cantons.csv"), swiss_population=ancient("data/bundle/je-e-21.03.02.xls"), - country_shapes=RESOURCES + "country_shapes.geojson", + country_shapes=resources("country_shapes.geojson"), output: - 
biomass_potentials_all=RESOURCES - + "biomass_potentials_all_s{simpl}_{clusters}_{planning_horizons}.csv", - biomass_potentials=RESOURCES - + "biomass_potentials_s{simpl}_{clusters}_{planning_horizons}.csv", + biomass_potentials_all=resources( + "biomass_potentials_all_s{simpl}_{clusters}_{planning_horizons}.csv" + ), + biomass_potentials=resources( + "biomass_potentials_s{simpl}_{clusters}_{planning_horizons}.csv" + ), threads: 1 resources: mem_mb=1000, log: - LOGS + "build_biomass_potentials_s{simpl}_{clusters}_{planning_horizons}.log", + logs("build_biomass_potentials_s{simpl}_{clusters}_{planning_horizons}.log"), benchmark: - BENCHMARKS + "build_biomass_potentials_s{simpl}_{clusters}_{planning_horizons}" + benchmarks("build_biomass_potentials_s{simpl}_{clusters}_{planning_horizons}") conda: "../envs/environment.yaml" script: "../scripts/build_biomass_potentials.py" -if config["sector"]["biomass_transport"] or config["sector"]["biomass_spatial"]: - - rule build_biomass_transport_costs: - input: - transport_cost_data=HTTP.remote( - "publications.jrc.ec.europa.eu/repository/bitstream/JRC98626/biomass potentials in europe_web rev.pdf", - keep_local=True, - ), - output: - biomass_transport_costs=RESOURCES + "biomass_transport_costs.csv", - threads: 1 - resources: - mem_mb=1000, - log: - LOGS + "build_biomass_transport_costs.log", - benchmark: - BENCHMARKS + "build_biomass_transport_costs" - conda: - "../envs/environment.yaml" - script: - "../scripts/build_biomass_transport_costs.py" - - build_biomass_transport_costs_output = rules.build_biomass_transport_costs.output +rule build_biomass_transport_costs: + input: + transport_cost_data=storage( + "https://publications.jrc.ec.europa.eu/repository/bitstream/JRC98626/biomass potentials in europe_web rev.pdf", + keep_local=True, + ), + output: + biomass_transport_costs=resources("biomass_transport_costs.csv"), + threads: 1 + resources: + mem_mb=1000, + log: + logs("build_biomass_transport_costs.log"), + benchmark: + benchmarks("build_biomass_transport_costs") + conda: + "../envs/environment.yaml" + script: + "../scripts/build_biomass_transport_costs.py" -if not (config["sector"]["biomass_transport"] or config["sector"]["biomass_spatial"]): - # this is effecively an `else` statement which is however not liked by snakefmt - build_biomass_transport_costs_output = {} - - -if config["sector"]["regional_co2_sequestration_potential"]["enable"]: - - rule build_sequestration_potentials: - params: - sequestration_potential=config["sector"][ - "regional_co2_sequestration_potential" - ], - input: - sequestration_potential=HTTP.remote( - "https://raw.githubusercontent.com/ericzhou571/Co2Storage/main/resources/complete_map_2020_unit_Mt.geojson", - keep_local=True, - ), - regions_onshore=RESOURCES - + "regions_onshore_elec_s{simpl}_{clusters}.geojson", - regions_offshore=RESOURCES - + "regions_offshore_elec_s{simpl}_{clusters}.geojson", - output: - sequestration_potential=RESOURCES - + "co2_sequestration_potential_elec_s{simpl}_{clusters}.csv", - threads: 1 - resources: - mem_mb=4000, - log: - LOGS + "build_sequestration_potentials_s{simpl}_{clusters}.log", - benchmark: - BENCHMARKS + "build_sequestration_potentials_s{simpl}_{clusters}" - conda: - "../envs/environment.yaml" - script: - "../scripts/build_sequestration_potentials.py" - - build_sequestration_potentials_output = rules.build_sequestration_potentials.output - - -if not config["sector"]["regional_co2_sequestration_potential"]["enable"]: - # this is effecively an `else` statement which is however not 
liked by snakefmt - build_sequestration_potentials_output = {} +rule build_sequestration_potentials: + params: + sequestration_potential=config_provider( + "sector", "regional_co2_sequestration_potential" + ), + input: + sequestration_potential=storage( + "https://raw.githubusercontent.com/ericzhou571/Co2Storage/main/resources/complete_map_2020_unit_Mt.geojson", + keep_local=True, + ), + regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), + regions_offshore=resources("regions_offshore_elec_s{simpl}_{clusters}.geojson"), + output: + sequestration_potential=resources( + "co2_sequestration_potential_elec_s{simpl}_{clusters}.csv" + ), + threads: 1 + resources: + mem_mb=4000, + log: + logs("build_sequestration_potentials_s{simpl}_{clusters}.log"), + benchmark: + benchmarks("build_sequestration_potentials_s{simpl}_{clusters}") + conda: + "../envs/environment.yaml" + script: + "../scripts/build_sequestration_potentials.py" rule build_salt_cavern_potentials: input: salt_caverns="data/bundle-sector/h2_salt_caverns_GWh_per_sqkm.geojson", - regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", - regions_offshore=RESOURCES + "regions_offshore_elec_s{simpl}_{clusters}.geojson", + regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), + regions_offshore=resources("regions_offshore_elec_s{simpl}_{clusters}.geojson"), output: - h2_cavern_potential=RESOURCES + "salt_cavern_potentials_s{simpl}_{clusters}.csv", + h2_cavern_potential=resources("salt_cavern_potentials_s{simpl}_{clusters}.csv"), threads: 1 resources: mem_mb=2000, log: - LOGS + "build_salt_cavern_potentials_s{simpl}_{clusters}.log", + logs("build_salt_cavern_potentials_s{simpl}_{clusters}.log"), benchmark: - BENCHMARKS + "build_salt_cavern_potentials_s{simpl}_{clusters}" + benchmarks("build_salt_cavern_potentials_s{simpl}_{clusters}") conda: "../envs/environment.yaml" script: @@ -370,19 +435,17 @@ rule build_salt_cavern_potentials: rule build_ammonia_production: - params: - countries=config["countries"], input: usgs="data/bundle-sector/myb1-2017-nitro.xls", output: - ammonia_production=RESOURCES + "ammonia_production.csv", + ammonia_production=resources("ammonia_production.csv"), threads: 1 resources: mem_mb=1000, log: - LOGS + "build_ammonia_production.log", + logs("build_ammonia_production.log"), benchmark: - BENCHMARKS + "build_ammonia_production" + benchmarks("build_ammonia_production") conda: "../envs/environment.yaml" script: @@ -391,44 +454,73 @@ rule build_ammonia_production: rule build_industry_sector_ratios: params: - industry=config["industry"], - ammonia=config["sector"].get("ammonia", False), + industry=config_provider("industry"), + ammonia=config_provider("sector", "ammonia", default=False), input: - ammonia_production=RESOURCES + "ammonia_production.csv", + ammonia_production=resources("ammonia_production.csv"), idees="data/bundle-sector/jrc-idees-2015", output: - industry_sector_ratios=RESOURCES + "industry_sector_ratios.csv", + industry_sector_ratios=resources("industry_sector_ratios.csv"), threads: 1 resources: mem_mb=1000, log: - LOGS + "build_industry_sector_ratios.log", + logs("build_industry_sector_ratios.log"), benchmark: - BENCHMARKS + "build_industry_sector_ratios" + benchmarks("build_industry_sector_ratios") conda: "../envs/environment.yaml" script: "../scripts/build_industry_sector_ratios.py" +rule build_industry_sector_ratios_intermediate: + params: + industry=config_provider("industry"), + input: + 
industry_sector_ratios=resources("industry_sector_ratios.csv"), + industrial_energy_demand_per_country_today=resources( + "industrial_energy_demand_per_country_today.csv" + ), + industrial_production_per_country=resources( + "industrial_production_per_country.csv" + ), + output: + industry_sector_ratios=resources( + "industry_sector_ratios_{planning_horizons}.csv" + ), + threads: 1 + resources: + mem_mb=1000, + log: + logs("build_industry_sector_ratios_{planning_horizons}.log"), + benchmark: + benchmarks("build_industry_sector_ratios_{planning_horizons}") + conda: + "../envs/environment.yaml" + script: + "../scripts/build_industry_sector_ratios_intermediate.py" + + rule build_industrial_production_per_country: params: - industry=config["industry"], - countries=config["countries"], + industry=config_provider("industry"), + countries=config_provider("countries"), input: - ammonia_production=RESOURCES + "ammonia_production.csv", + ammonia_production=resources("ammonia_production.csv"), jrc="data/bundle-sector/jrc-idees-2015", - eurostat="data/bundle-sector/eurostat-energy_balances-may_2018_edition", + eurostat="data/eurostat/eurostat-energy_balances-april_2023_edition", output: - industrial_production_per_country=RESOURCES - + "industrial_production_per_country.csv", + industrial_production_per_country=resources( + "industrial_production_per_country.csv" + ), threads: 8 resources: mem_mb=1000, log: - LOGS + "build_industrial_production_per_country.log", + logs("build_industrial_production_per_country.log"), benchmark: - BENCHMARKS + "build_industrial_production_per_country" + benchmarks("build_industrial_production_per_country") conda: "../envs/environment.yaml" script: @@ -437,23 +529,25 @@ rule build_industrial_production_per_country: rule build_industrial_production_per_country_tomorrow: params: - industry=config["industry"], + industry=config_provider("industry"), input: - industrial_production_per_country=RESOURCES - + "industrial_production_per_country.csv", + industrial_production_per_country=resources( + "industrial_production_per_country.csv" + ), output: - industrial_production_per_country_tomorrow=RESOURCES - + "industrial_production_per_country_tomorrow_{planning_horizons}.csv", + industrial_production_per_country_tomorrow=resources( + "industrial_production_per_country_tomorrow_{planning_horizons}.csv" + ), threads: 1 resources: mem_mb=1000, log: - LOGS - + "build_industrial_production_per_country_tomorrow_{planning_horizons}.log", + logs("build_industrial_production_per_country_tomorrow_{planning_horizons}.log"), benchmark: ( - BENCHMARKS - + "build_industrial_production_per_country_tomorrow_{planning_horizons}" + benchmarks( + "build_industrial_production_per_country_tomorrow_{planning_horizons}" + ) ) conda: "../envs/environment.yaml" @@ -463,22 +557,25 @@ rule build_industrial_production_per_country_tomorrow: rule build_industrial_distribution_key: params: - hotmaps_locate_missing=config["industry"].get("hotmaps_locate_missing", False), - countries=config["countries"], + hotmaps_locate_missing=config_provider( + "industry", "hotmaps_locate_missing", default=False + ), + countries=config_provider("countries"), input: - regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", - clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv", + regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), + clustered_pop_layout=resources("pop_layout_elec_s{simpl}_{clusters}.csv"), 
hotmaps_industrial_database="data/bundle-sector/Industrial_Database.csv", output: - industrial_distribution_key=RESOURCES - + "industrial_distribution_key_elec_s{simpl}_{clusters}.csv", + industrial_distribution_key=resources( + "industrial_distribution_key_elec_s{simpl}_{clusters}.csv" + ), threads: 1 resources: mem_mb=1000, log: - LOGS + "build_industrial_distribution_key_s{simpl}_{clusters}.log", + logs("build_industrial_distribution_key_s{simpl}_{clusters}.log"), benchmark: - BENCHMARKS + "build_industrial_distribution_key/s{simpl}_{clusters}" + benchmarks("build_industrial_distribution_key/s{simpl}_{clusters}") conda: "../envs/environment.yaml" script: @@ -487,23 +584,28 @@ rule build_industrial_distribution_key: rule build_industrial_production_per_node: input: - industrial_distribution_key=RESOURCES - + "industrial_distribution_key_elec_s{simpl}_{clusters}.csv", - industrial_production_per_country_tomorrow=RESOURCES - + "industrial_production_per_country_tomorrow_{planning_horizons}.csv", + industrial_distribution_key=resources( + "industrial_distribution_key_elec_s{simpl}_{clusters}.csv" + ), + industrial_production_per_country_tomorrow=resources( + "industrial_production_per_country_tomorrow_{planning_horizons}.csv" + ), output: - industrial_production_per_node=RESOURCES - + "industrial_production_elec_s{simpl}_{clusters}_{planning_horizons}.csv", + industrial_production_per_node=resources( + "industrial_production_elec_s{simpl}_{clusters}_{planning_horizons}.csv" + ), threads: 1 resources: mem_mb=1000, log: - LOGS - + "build_industrial_production_per_node_s{simpl}_{clusters}_{planning_horizons}.log", + logs( + "build_industrial_production_per_node_s{simpl}_{clusters}_{planning_horizons}.log" + ), benchmark: ( - BENCHMARKS - + "build_industrial_production_per_node/s{simpl}_{clusters}_{planning_horizons}" + benchmarks( + "build_industrial_production_per_node/s{simpl}_{clusters}_{planning_horizons}" + ) ) conda: "../envs/environment.yaml" @@ -513,24 +615,31 @@ rule build_industrial_production_per_node: rule build_industrial_energy_demand_per_node: input: - industry_sector_ratios=RESOURCES + "industry_sector_ratios.csv", - industrial_production_per_node=RESOURCES - + "industrial_production_elec_s{simpl}_{clusters}_{planning_horizons}.csv", - industrial_energy_demand_per_node_today=RESOURCES - + "industrial_energy_demand_today_elec_s{simpl}_{clusters}.csv", + industry_sector_ratios=resources( + "industry_sector_ratios_{planning_horizons}.csv" + ), + industrial_production_per_node=resources( + "industrial_production_elec_s{simpl}_{clusters}_{planning_horizons}.csv" + ), + industrial_energy_demand_per_node_today=resources( + "industrial_energy_demand_today_elec_s{simpl}_{clusters}.csv" + ), output: - industrial_energy_demand_per_node=RESOURCES - + "industrial_energy_demand_elec_s{simpl}_{clusters}_{planning_horizons}.csv", + industrial_energy_demand_per_node=resources( + "industrial_energy_demand_elec_s{simpl}_{clusters}_{planning_horizons}.csv" + ), threads: 1 resources: mem_mb=1000, log: - LOGS - + "build_industrial_energy_demand_per_node_s{simpl}_{clusters}_{planning_horizons}.log", + logs( + "build_industrial_energy_demand_per_node_s{simpl}_{clusters}_{planning_horizons}.log" + ), benchmark: ( - BENCHMARKS - + "build_industrial_energy_demand_per_node/s{simpl}_{clusters}_{planning_horizons}" + benchmarks( + "build_industrial_energy_demand_per_node/s{simpl}_{clusters}_{planning_horizons}" + ) ) conda: "../envs/environment.yaml" @@ -540,23 +649,24 @@ rule 
build_industrial_energy_demand_per_node: rule build_industrial_energy_demand_per_country_today: params: - countries=config["countries"], - industry=config["industry"], + countries=config_provider("countries"), + industry=config_provider("industry"), input: jrc="data/bundle-sector/jrc-idees-2015", - ammonia_production=RESOURCES + "ammonia_production.csv", - industrial_production_per_country=RESOURCES - + "industrial_production_per_country.csv", + industrial_production_per_country=resources( + "industrial_production_per_country.csv" + ), output: - industrial_energy_demand_per_country_today=RESOURCES - + "industrial_energy_demand_per_country_today.csv", + industrial_energy_demand_per_country_today=resources( + "industrial_energy_demand_per_country_today.csv" + ), threads: 8 resources: mem_mb=1000, log: - LOGS + "build_industrial_energy_demand_per_country_today.log", + logs("build_industrial_energy_demand_per_country_today.log"), benchmark: - BENCHMARKS + "build_industrial_energy_demand_per_country_today" + benchmarks("build_industrial_energy_demand_per_country_today") conda: "../envs/environment.yaml" script: @@ -565,76 +675,72 @@ rule build_industrial_energy_demand_per_country_today: rule build_industrial_energy_demand_per_node_today: input: - industrial_distribution_key=RESOURCES - + "industrial_distribution_key_elec_s{simpl}_{clusters}.csv", - industrial_energy_demand_per_country_today=RESOURCES - + "industrial_energy_demand_per_country_today.csv", + industrial_distribution_key=resources( + "industrial_distribution_key_elec_s{simpl}_{clusters}.csv" + ), + industrial_energy_demand_per_country_today=resources( + "industrial_energy_demand_per_country_today.csv" + ), output: - industrial_energy_demand_per_node_today=RESOURCES - + "industrial_energy_demand_today_elec_s{simpl}_{clusters}.csv", + industrial_energy_demand_per_node_today=resources( + "industrial_energy_demand_today_elec_s{simpl}_{clusters}.csv" + ), threads: 1 resources: mem_mb=1000, log: - LOGS + "build_industrial_energy_demand_per_node_today_s{simpl}_{clusters}.log", + logs("build_industrial_energy_demand_per_node_today_s{simpl}_{clusters}.log"), benchmark: - BENCHMARKS + "build_industrial_energy_demand_per_node_today/s{simpl}_{clusters}" + benchmarks("build_industrial_energy_demand_per_node_today/s{simpl}_{clusters}") conda: "../envs/environment.yaml" script: "../scripts/build_industrial_energy_demand_per_node_today.py" -if config["sector"]["retrofitting"]["retro_endogen"]: - - rule build_retro_cost: - params: - retrofitting=config["sector"]["retrofitting"], - countries=config["countries"], - input: - building_stock="data/retro/data_building_stock.csv", - data_tabula="data/bundle-sector/retro/tabula-calculator-calcsetbuilding.csv", - air_temperature=RESOURCES + "temp_air_total_elec_s{simpl}_{clusters}.nc", - u_values_PL="data/retro/u_values_poland.csv", - tax_w="data/retro/electricity_taxes_eu.csv", - construction_index="data/retro/comparative_level_investment.csv", - floor_area_missing="data/retro/floor_area_missing.csv", - clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv", - cost_germany="data/retro/retro_cost_germany.csv", - window_assumptions="data/retro/window_assumptions.csv", - output: - retro_cost=RESOURCES + "retro_cost_elec_s{simpl}_{clusters}.csv", - floor_area=RESOURCES + "floor_area_elec_s{simpl}_{clusters}.csv", - resources: - mem_mb=1000, - log: - LOGS + "build_retro_cost_s{simpl}_{clusters}.log", - benchmark: - BENCHMARKS + "build_retro_cost/s{simpl}_{clusters}" - conda: - 
"../envs/environment.yaml" - script: - "../scripts/build_retro_cost.py" - - build_retro_cost_output = rules.build_retro_cost.output - - -if not config["sector"]["retrofitting"]["retro_endogen"]: - # this is effecively an `else` statement which is however not liked by snakefmt - build_retro_cost_output = {} +rule build_retro_cost: + params: + retrofitting=config_provider("sector", "retrofitting"), + countries=config_provider("countries"), + input: + building_stock="data/retro/data_building_stock.csv", + data_tabula="data/bundle-sector/retro/tabula-calculator-calcsetbuilding.csv", + air_temperature=resources("temp_air_total_elec_s{simpl}_{clusters}.nc"), + u_values_PL="data/retro/u_values_poland.csv", + tax_w="data/retro/electricity_taxes_eu.csv", + construction_index="data/retro/comparative_level_investment.csv", + floor_area_missing="data/retro/floor_area_missing.csv", + clustered_pop_layout=resources("pop_layout_elec_s{simpl}_{clusters}.csv"), + cost_germany="data/retro/retro_cost_germany.csv", + window_assumptions="data/retro/window_assumptions.csv", + output: + retro_cost=resources("retro_cost_elec_s{simpl}_{clusters}.csv"), + floor_area=resources("floor_area_elec_s{simpl}_{clusters}.csv"), + resources: + mem_mb=1000, + log: + logs("build_retro_cost_s{simpl}_{clusters}.log"), + benchmark: + benchmarks("build_retro_cost/s{simpl}_{clusters}") + conda: + "../envs/environment.yaml" + script: + "../scripts/build_retro_cost.py" rule build_population_weighted_energy_totals: + params: + snapshots=config_provider("snapshots"), input: - energy_totals=RESOURCES + "energy_totals.csv", - clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv", + energy_totals=resources("{kind}_totals.csv"), + clustered_pop_layout=resources("pop_layout_elec_s{simpl}_{clusters}.csv"), output: - RESOURCES + "pop_weighted_energy_totals_s{simpl}_{clusters}.csv", + resources("pop_weighted_{kind}_totals_s{simpl}_{clusters}.csv"), threads: 1 resources: mem_mb=2000, log: - LOGS + "build_population_weighted_energy_totals_s{simpl}_{clusters}.log", + logs("build_population_weighted_{kind}_totals_s{simpl}_{clusters}.log"), conda: "../envs/environment.yaml" script: @@ -644,16 +750,18 @@ rule build_population_weighted_energy_totals: rule build_shipping_demand: input: ports="data/attributed_ports.json", - scope=RESOURCES + "europe_shape.geojson", - regions=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", - demand=RESOURCES + "energy_totals.csv", + scope=resources("europe_shape.geojson"), + regions=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), + demand=resources("energy_totals.csv"), + params: + energy_totals_year=config_provider("energy", "energy_totals_year"), output: - RESOURCES + "shipping_demand_s{simpl}_{clusters}.csv", + resources("shipping_demand_s{simpl}_{clusters}.csv"), threads: 1 resources: mem_mb=2000, log: - LOGS + "build_shipping_demand_s{simpl}_{clusters}.log", + logs("build_shipping_demand_s{simpl}_{clusters}.log"), conda: "../envs/environment.yaml" script: @@ -662,112 +770,218 @@ rule build_shipping_demand: rule build_transport_demand: params: - snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, - sector=config["sector"], + snapshots=config_provider("snapshots"), + drop_leap_day=config_provider("enable", "drop_leap_day"), + sector=config_provider("sector"), + energy_totals_year=config_provider("energy", "energy_totals_year"), input: - clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv", - 
pop_weighted_energy_totals=RESOURCES - + "pop_weighted_energy_totals_s{simpl}_{clusters}.csv", - transport_data=RESOURCES + "transport_data.csv", + clustered_pop_layout=resources("pop_layout_elec_s{simpl}_{clusters}.csv"), + pop_weighted_energy_totals=resources( + "pop_weighted_energy_totals_s{simpl}_{clusters}.csv" + ), + transport_data=resources("transport_data.csv"), traffic_data_KFZ="data/bundle-sector/emobility/KFZ__count", traffic_data_Pkw="data/bundle-sector/emobility/Pkw__count", - temp_air_total=RESOURCES + "temp_air_total_elec_s{simpl}_{clusters}.nc", + temp_air_total=resources("temp_air_total_elec_s{simpl}_{clusters}.nc"), output: - transport_demand=RESOURCES + "transport_demand_s{simpl}_{clusters}.csv", - transport_data=RESOURCES + "transport_data_s{simpl}_{clusters}.csv", - avail_profile=RESOURCES + "avail_profile_s{simpl}_{clusters}.csv", - dsm_profile=RESOURCES + "dsm_profile_s{simpl}_{clusters}.csv", + transport_demand=resources("transport_demand_s{simpl}_{clusters}.csv"), + transport_data=resources("transport_data_s{simpl}_{clusters}.csv"), + avail_profile=resources("avail_profile_s{simpl}_{clusters}.csv"), + dsm_profile=resources("dsm_profile_s{simpl}_{clusters}.csv"), threads: 1 resources: mem_mb=2000, log: - LOGS + "build_transport_demand_s{simpl}_{clusters}.log", + logs("build_transport_demand_s{simpl}_{clusters}.log"), conda: "../envs/environment.yaml" script: "../scripts/build_transport_demand.py" +rule build_district_heat_share: + params: + sector=config_provider("sector"), + energy_totals_year=config_provider("energy", "energy_totals_year"), + input: + district_heat_share=resources("district_heat_share.csv"), + clustered_pop_layout=resources("pop_layout_elec_s{simpl}_{clusters}.csv"), + output: + district_heat_share=resources( + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv" + ), + threads: 1 + resources: + mem_mb=1000, + log: + logs("build_district_heat_share_s{simpl}_{clusters}_{planning_horizons}.log"), + conda: + "../envs/environment.yaml" + script: + "../scripts/build_district_heat_share.py" + + +rule build_existing_heating_distribution: + params: + baseyear=config_provider("scenario", "planning_horizons", 0), + sector=config_provider("sector"), + existing_capacities=config_provider("existing_capacities"), + input: + existing_heating="data/existing_infrastructure/existing_heating_raw.csv", + clustered_pop_layout=resources("pop_layout_elec_s{simpl}_{clusters}.csv"), + clustered_pop_energy_layout=resources( + "pop_weighted_energy_totals_s{simpl}_{clusters}.csv" + ), + district_heat_share=resources( + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv" + ), + output: + existing_heating_distribution=resources( + "existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv" + ), + threads: 1 + resources: + mem_mb=2000, + log: + logs( + "build_existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.log" + ), + benchmark: + benchmarks( + "build_existing_heating_distribution/elec_s{simpl}_{clusters}_{planning_horizons}" + ) + conda: + "../envs/environment.yaml" + script: + "../scripts/build_existing_heating_distribution.py" + + +def input_profile_offwind(w): + return { + f"profile_{tech}": resources(f"profile_{tech}.nc") + for tech in ["offwind-ac", "offwind-dc", "offwind-float"] + if (tech in config_provider("electricity", "renewable_carriers")(w)) + } + + rule prepare_sector_network: params: - co2_budget=config["co2_budget"], - 
conventional_carriers=config["existing_capacities"]["conventional_carriers"], - foresight=config["foresight"], - costs=config["costs"], - sector=config["sector"], - industry=config["industry"], - pypsa_eur=config["pypsa_eur"], - length_factor=config["lines"]["length_factor"], - planning_horizons=config["scenario"]["planning_horizons"], - countries=config["countries"], - emissions_scope=config["energy"]["emissions"], - eurostat_report_year=config["energy"]["eurostat_report_year"], + time_resolution=config_provider("clustering", "temporal", "resolution_sector"), + drop_leap_day=config_provider("enable", "drop_leap_day"), + co2_budget=config_provider("co2_budget"), + conventional_carriers=config_provider( + "existing_capacities", "conventional_carriers" + ), + foresight=config_provider("foresight"), + costs=config_provider("costs"), + sector=config_provider("sector"), + industry=config_provider("industry"), + lines=config_provider("lines"), + pypsa_eur=config_provider("pypsa_eur"), + length_factor=config_provider("lines", "length_factor"), + planning_horizons=config_provider("scenario", "planning_horizons"), + countries=config_provider("countries"), + adjustments=config_provider("adjustments", "sector"), + emissions_scope=config_provider("energy", "emissions"), RDIR=RDIR, input: - **build_retro_cost_output, - **build_biomass_transport_costs_output, + unpack(input_profile_offwind), **rules.cluster_gas_network.output, **rules.build_gas_input_locations.output, - **build_sequestration_potentials_output, - network=RESOURCES + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", - energy_totals_name=RESOURCES + "energy_totals.csv", - eurostat=input_eurostat, - pop_weighted_energy_totals=RESOURCES - + "pop_weighted_energy_totals_s{simpl}_{clusters}.csv", - shipping_demand=RESOURCES + "shipping_demand_s{simpl}_{clusters}.csv", - transport_demand=RESOURCES + "transport_demand_s{simpl}_{clusters}.csv", - transport_data=RESOURCES + "transport_data_s{simpl}_{clusters}.csv", - avail_profile=RESOURCES + "avail_profile_s{simpl}_{clusters}.csv", - dsm_profile=RESOURCES + "dsm_profile_s{simpl}_{clusters}.csv", - co2_totals_name=RESOURCES + "co2_totals.csv", + retro_cost=lambda w: ( + resources("retro_cost_elec_s{simpl}_{clusters}.csv") + if config_provider("sector", "retrofitting", "retro_endogen")(w) + else [] + ), + floor_area=lambda w: ( + resources("floor_area_elec_s{simpl}_{clusters}.csv") + if config_provider("sector", "retrofitting", "retro_endogen")(w) + else [] + ), + biomass_transport_costs=lambda w: ( + resources("biomass_transport_costs.csv") + if config_provider("sector", "biomass_transport")(w) + or config_provider("sector", "biomass_spatial")(w) + else [] + ), + sequestration_potential=lambda w: ( + resources("co2_sequestration_potential_elec_s{simpl}_{clusters}.csv") + if config_provider( + "sector", "regional_co2_sequestration_potential", "enable" + )(w) + else [] + ), + network=resources("networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"), + eurostat="data/eurostat/eurostat-energy_balances-april_2023_edition", + pop_weighted_energy_totals=resources( + "pop_weighted_energy_totals_s{simpl}_{clusters}.csv" + ), + pop_weighted_heat_totals=resources( + "pop_weighted_heat_totals_s{simpl}_{clusters}.csv" + ), + shipping_demand=resources("shipping_demand_s{simpl}_{clusters}.csv"), + transport_demand=resources("transport_demand_s{simpl}_{clusters}.csv"), + transport_data=resources("transport_data_s{simpl}_{clusters}.csv"), + avail_profile=resources("avail_profile_s{simpl}_{clusters}.csv"), + 
dsm_profile=resources("dsm_profile_s{simpl}_{clusters}.csv"), + co2_totals_name=resources("co2_totals.csv"), co2="data/bundle-sector/eea/UNFCCC_v23.csv", - biomass_potentials=RESOURCES - + "biomass_potentials_s{simpl}_{clusters}_" - + "{}.csv".format(config["biomass"]["year"]) - if config["foresight"] == "overnight" - else RESOURCES - + "biomass_potentials_s{simpl}_{clusters}_{planning_horizons}.csv", - heat_profile="data/heat_load_profile_BDEW.csv", - costs="data/costs_{}.csv".format(config["costs"]["year"]) - if config["foresight"] == "overnight" - else "data/costs_{planning_horizons}.csv", - profile_offwind_ac=RESOURCES + "profile_offwind-ac.nc", - profile_offwind_dc=RESOURCES + "profile_offwind-dc.nc", - profile_offwind_float=RESOURCES + "profile_offwind-float.nc", - h2_cavern=RESOURCES + "salt_cavern_potentials_s{simpl}_{clusters}.csv", - busmap_s=RESOURCES + "busmap_elec_s{simpl}.csv", - busmap=RESOURCES + "busmap_elec_s{simpl}_{clusters}.csv", - clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv", - simplified_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}.csv", - industrial_demand=RESOURCES - + "industrial_energy_demand_elec_s{simpl}_{clusters}_{planning_horizons}.csv", - heat_demand_urban=RESOURCES + "heat_demand_urban_elec_s{simpl}_{clusters}.nc", - heat_demand_rural=RESOURCES + "heat_demand_rural_elec_s{simpl}_{clusters}.nc", - heat_demand_total=RESOURCES + "heat_demand_total_elec_s{simpl}_{clusters}.nc", - temp_soil_total=RESOURCES + "temp_soil_total_elec_s{simpl}_{clusters}.nc", - temp_soil_rural=RESOURCES + "temp_soil_rural_elec_s{simpl}_{clusters}.nc", - temp_soil_urban=RESOURCES + "temp_soil_urban_elec_s{simpl}_{clusters}.nc", - temp_air_total=RESOURCES + "temp_air_total_elec_s{simpl}_{clusters}.nc", - temp_air_rural=RESOURCES + "temp_air_rural_elec_s{simpl}_{clusters}.nc", - temp_air_urban=RESOURCES + "temp_air_urban_elec_s{simpl}_{clusters}.nc", - cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc", - cop_soil_rural=RESOURCES + "cop_soil_rural_elec_s{simpl}_{clusters}.nc", - cop_soil_urban=RESOURCES + "cop_soil_urban_elec_s{simpl}_{clusters}.nc", - cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc", - cop_air_rural=RESOURCES + "cop_air_rural_elec_s{simpl}_{clusters}.nc", - cop_air_urban=RESOURCES + "cop_air_urban_elec_s{simpl}_{clusters}.nc", - solar_thermal_total=RESOURCES - + "solar_thermal_total_elec_s{simpl}_{clusters}.nc" - if config["sector"]["solar_thermal"] - else [], - solar_thermal_urban=RESOURCES - + "solar_thermal_urban_elec_s{simpl}_{clusters}.nc" - if config["sector"]["solar_thermal"] - else [], - solar_thermal_rural=RESOURCES - + "solar_thermal_rural_elec_s{simpl}_{clusters}.nc" - if config["sector"]["solar_thermal"] - else [], + biomass_potentials=lambda w: ( + resources( + "biomass_potentials_s{simpl}_{clusters}_" + + "{}.csv".format(config_provider("biomass", "year")(w)) + ) + if config_provider("foresight")(w) == "overnight" + else resources( + "biomass_potentials_s{simpl}_{clusters}_{planning_horizons}.csv" + ) + ), + costs=lambda w: ( + resources("costs_{}.csv".format(config_provider("costs", "year")(w))) + if config_provider("foresight")(w) == "overnight" + else resources("costs_{planning_horizons}.csv") + ), + h2_cavern=resources("salt_cavern_potentials_s{simpl}_{clusters}.csv"), + busmap_s=resources("busmap_elec_s{simpl}.csv"), + busmap=resources("busmap_elec_s{simpl}_{clusters}.csv"), + clustered_pop_layout=resources("pop_layout_elec_s{simpl}_{clusters}.csv"), + 
simplified_pop_layout=resources("pop_layout_elec_s{simpl}.csv"), + industrial_demand=resources( + "industrial_energy_demand_elec_s{simpl}_{clusters}_{planning_horizons}.csv" + ), + hourly_heat_demand_total=resources( + "hourly_heat_demand_total_elec_s{simpl}_{clusters}.nc" + ), + district_heat_share=resources( + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv" + ), + temp_soil_total=resources("temp_soil_total_elec_s{simpl}_{clusters}.nc"), + temp_soil_rural=resources("temp_soil_rural_elec_s{simpl}_{clusters}.nc"), + temp_soil_urban=resources("temp_soil_urban_elec_s{simpl}_{clusters}.nc"), + temp_air_total=resources("temp_air_total_elec_s{simpl}_{clusters}.nc"), + temp_air_rural=resources("temp_air_rural_elec_s{simpl}_{clusters}.nc"), + temp_air_urban=resources("temp_air_urban_elec_s{simpl}_{clusters}.nc"), + cop_soil_total=resources("cop_soil_total_elec_s{simpl}_{clusters}.nc"), + cop_soil_rural=resources("cop_soil_rural_elec_s{simpl}_{clusters}.nc"), + cop_soil_urban=resources("cop_soil_urban_elec_s{simpl}_{clusters}.nc"), + cop_air_total=resources("cop_air_total_elec_s{simpl}_{clusters}.nc"), + cop_air_rural=resources("cop_air_rural_elec_s{simpl}_{clusters}.nc"), + cop_air_urban=resources("cop_air_urban_elec_s{simpl}_{clusters}.nc"), + solar_thermal_total=lambda w: ( + resources("solar_thermal_total_elec_s{simpl}_{clusters}.nc") + if config_provider("sector", "solar_thermal")(w) + else [] + ), + solar_thermal_urban=lambda w: ( + resources("solar_thermal_urban_elec_s{simpl}_{clusters}.nc") + if config_provider("sector", "solar_thermal")(w) + else [] + ), + solar_thermal_rural=lambda w: ( + resources("solar_thermal_rural_elec_s{simpl}_{clusters}.nc") + if config_provider("sector", "solar_thermal")(w) + else [] + ), output: RESULTS + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", @@ -775,12 +989,12 @@ rule prepare_sector_network: resources: mem_mb=2000, log: - LOGS - + "prepare_sector_network_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log", + RESULTS + + "logs/prepare_sector_network_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log", benchmark: ( - BENCHMARKS - + "prepare_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" + RESULTS + + "benchmarks/prepare_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" ) conda: "../envs/environment.yaml" diff --git a/rules/collect.smk b/rules/collect.smk index c9bb10ea..214b8102 100644 --- a/rules/collect.smk +++ b/rules/collect.smk @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2023-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -11,26 +11,32 @@ localrules: prepare_sector_networks, solve_elec_networks, solve_sector_networks, - plot_networks, rule cluster_networks: input: - expand(RESOURCES + "networks/elec_s{simpl}_{clusters}.nc", **config["scenario"]), + expand( + resources("networks/elec_s{simpl}_{clusters}.nc"), + **config["scenario"], + run=config["run"]["name"], + ), rule extra_components_networks: input: expand( - RESOURCES + "networks/elec_s{simpl}_{clusters}_ec.nc", **config["scenario"] + resources("networks/elec_s{simpl}_{clusters}_ec.nc"), + **config["scenario"], + run=config["run"]["name"], ), rule prepare_elec_networks: input: expand( - RESOURCES + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", - **config["scenario"] + 
resources("networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"), + **config["scenario"], + run=config["run"]["name"], ), @@ -39,7 +45,8 @@ rule prepare_sector_networks: expand( RESULTS + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", - **config["scenario"] + **config["scenario"], + run=config["run"]["name"], ), @@ -47,7 +54,8 @@ rule solve_elec_networks: input: expand( RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", - **config["scenario"] + **config["scenario"], + run=config["run"]["name"], ), @@ -56,25 +64,18 @@ rule solve_sector_networks: expand( RESULTS + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", - **config["scenario"] + **config["scenario"], + run=config["run"]["name"], ), rule solve_sector_networks_perfect: - input: - expand( - RESULTS - + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc", - **config["scenario"] - ), - - -rule plot_networks: input: expand( RESULTS + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf", - **config["scenario"] + **config["scenario"], + run=config["run"]["name"], ), @@ -83,11 +84,13 @@ rule validate_elec_networks: expand( RESULTS + "figures/.statistics_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}", - **config["scenario"] + **config["scenario"], + run=config["run"]["name"], ), expand( RESULTS + "figures/.validation_{kind}_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}", **config["scenario"], - kind=["production", "prices", "cross_border"] + run=config["run"]["name"], + kind=["production", "prices", "cross_border"], ), diff --git a/rules/common.smk b/rules/common.smk index 2298ff91..2b8495e1 100644 --- a/rules/common.smk +++ b/rules/common.smk @@ -1,16 +1,88 @@ -# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2023-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT +import copy +from functools import partial, lru_cache + import os, sys, glob -helper_source_path = [match for match in glob.glob("**/_helpers.py", recursive=True)] +path = workflow.source_path("../scripts/_helpers.py") +sys.path.insert(0, os.path.dirname(path)) -for path in helper_source_path: - path = os.path.dirname(os.path.abspath(path)) - sys.path.insert(0, os.path.abspath(path)) +from _helpers import validate_checksum, update_config_from_wildcards +from snakemake.utils import update_config -from _helpers import validate_checksum + +def get_config(config, keys, default=None): + """Retrieve a nested value from a dictionary using a tuple of keys.""" + value = config + for key in keys: + if isinstance(value, list): + value = value[key] + else: + value = value.get(key, default) + if value == default: + return default + return value + + +def merge_configs(base_config, scenario_config): + """Merge base config with a specific scenario without modifying the original.""" + merged = copy.deepcopy(base_config) + update_config(merged, scenario_config) + return merged + + +@lru_cache +def scenario_config(scenario_name): + """Retrieve a scenario config based on the overrides from the scenario file.""" + return merge_configs(config, scenarios[scenario_name]) + + +def static_getter(wildcards, keys, default): + """Getter function for static config values.""" + config_with_wildcards = update_config_from_wildcards( + config, wildcards, inplace=False + ) + return get_config(config_with_wildcards, keys, default) + + +def dynamic_getter(wildcards, keys, default): + """Getter 
function for dynamic config values based on scenario.""" + if "run" not in wildcards.keys(): + return get_config(config, keys, default) + scenario_name = wildcards.run + if scenario_name not in scenarios: + raise ValueError( + f"Scenario {scenario_name} not found in file {config['run']['scenario']['file']}." + ) + config_with_scenario = scenario_config(scenario_name) + config_with_wildcards = update_config_from_wildcards( + config_with_scenario, wildcards, inplace=False + ) + return get_config(config_with_wildcards, keys, default) + + +def config_provider(*keys, default=None): + """Dynamically provide config values based on 'run' -> 'name'. + + Usage in Snakemake rules would look something like: + params: + my_param=config_provider("key1", "key2", default="some_default_value") + """ + # Using functools.partial to freeze certain arguments in our getter functions. + if config["run"].get("scenarios", {}).get("enable", False): + return partial(dynamic_getter, keys=keys, default=default) + else: + return partial(static_getter, keys=keys, default=default) + + +def solver_threads(w): + solver_options = config_provider("solving", "solver_options")(w) + option_set = config_provider("solving", "solver", "options")(w) + threads = solver_options[option_set].get("threads", 4) + return threads def memory(w): @@ -34,9 +106,11 @@ def memory(w): def input_custom_extra_functionality(w): - path = config["solving"]["options"].get("custom_extra_functionality", False) + path = config_provider( + "solving", "options", "custom_extra_functionality", default=False + )(w) if path: - return workflow.source_path(path) + return os.path.join(os.path.dirname(workflow.snakefile), path) return [] @@ -56,16 +130,11 @@ def has_internet_access(url="www.zenodo.org") -> bool: conn.close() -def input_eurostat(w): - # 2016 includes BA, 2017 does not - report_year = config["energy"]["eurostat_report_year"] - return f"data/bundle-sector/eurostat-energy_balances-june_{report_year}_edition" - - -def solved_previous_horizon(wildcards): - planning_horizons = config["scenario"]["planning_horizons"] - i = planning_horizons.index(int(wildcards.planning_horizons)) +def solved_previous_horizon(w): + planning_horizons = config_provider("scenario", "planning_horizons")(w) + i = planning_horizons.index(int(w.planning_horizons)) planning_horizon_p = str(planning_horizons[i - 1]) + return ( RESULTS + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_" diff --git a/rules/postprocess.smk b/rules/postprocess.smk index 9f4ac78e..e7df2e66 100644 --- a/rules/postprocess.smk +++ b/rules/postprocess.smk @@ -1,106 +1,192 @@ -# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2023-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT -localrules: - copy_config, - - if config["foresight"] != "perfect": - rule plot_network: + rule plot_power_network_clustered: params: - foresight=config["foresight"], - plotting=config["plotting"], + plotting=config_provider("plotting"), + input: + network=resources("networks/elec_s{simpl}_{clusters}.nc"), + regions_onshore=resources( + "regions_onshore_elec_s{simpl}_{clusters}.geojson" + ), + output: + map=resources("maps/power-network-s{simpl}-{clusters}.pdf"), + threads: 1 + resources: + mem_mb=4000, + benchmark: + benchmarks("plot_power_network_clustered/elec_s{simpl}_{clusters}") + conda: + "../envs/environment.yaml" + script: + "../scripts/plot_power_network_clustered.py" + + rule plot_power_network: + params: + plotting=config_provider("plotting"), input: 
network=RESULTS + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", - regions=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", + regions=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), output: map=RESULTS + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf", - today=RESULTS - + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}-today.pdf", threads: 2 resources: mem_mb=10000, + log: + RESULTS + + "logs/plot_power_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log", benchmark: ( - BENCHMARKS - + "plot_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" + RESULTS + + "benchmarks/plot_power_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" ) conda: "../envs/environment.yaml" script: - "../scripts/plot_network.py" + "../scripts/plot_power_network.py" + + rule plot_hydrogen_network: + params: + plotting=config_provider("plotting"), + foresight=config_provider("foresight"), + input: + network=RESULTS + + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", + regions=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), + output: + map=RESULTS + + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-h2_network_{planning_horizons}.pdf", + threads: 2 + resources: + mem_mb=10000, + log: + RESULTS + + "logs/plot_hydrogen_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log", + benchmark: + ( + RESULTS + + "benchmarks/plot_hydrogen_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" + ) + conda: + "../envs/environment.yaml" + script: + "../scripts/plot_hydrogen_network.py" + + rule plot_gas_network: + params: + plotting=config_provider("plotting"), + input: + network=RESULTS + + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", + regions=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), + output: + map=RESULTS + + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-ch4_network_{planning_horizons}.pdf", + threads: 2 + resources: + mem_mb=10000, + log: + RESULTS + + "logs/plot_gas_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log", + benchmark: + ( + RESULTS + + "benchmarks/plot_gas_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" + ) + conda: + "../envs/environment.yaml" + script: + "../scripts/plot_gas_network.py" if config["foresight"] == "perfect": - rule plot_network: + def output_map_year(w): + return { + f"map_{year}": RESULTS + + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_" + + f"{year}.pdf" + for year in config_provider("scenario", "planning_horizons")(w) + } + + rule plot_power_network_perfect: params: - foresight=config["foresight"], - plotting=config["plotting"], + plotting=config_provider("plotting"), input: network=RESULTS + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc", - regions=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", + regions=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), output: - **{ - f"map_{year}": RESULTS - + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_" - + f"{year}.pdf" - for year in config["scenario"]["planning_horizons"] - }, + unpack(output_map_year), threads: 2 resources: 
mem_mb=10000, - benchmark: - BENCHMARKS - +"postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_benchmark" conda: "../envs/environment.yaml" script: - "../scripts/plot_network.py" - - -rule copy_config: - params: - RDIR=RDIR, - output: - RESULTS + "config.yaml", - threads: 1 - resources: - mem_mb=1000, - benchmark: - BENCHMARKS + "copy_config" - conda: - "../envs/environment.yaml" - script: - "../scripts/copy_config.py" + "../scripts/plot_power_network_perfect.py" rule make_summary: params: - foresight=config["foresight"], - costs=config["costs"], - snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, - scenario=config["scenario"], + foresight=config_provider("foresight"), + costs=config_provider("costs"), + snapshots=config_provider("snapshots"), + drop_leap_day=config_provider("enable", "drop_leap_day"), + scenario=config_provider("scenario"), RDIR=RDIR, input: networks=expand( RESULTS + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", - **config["scenario"] + **config["scenario"], + allow_missing=True, ), - costs="data/costs_{}.csv".format(config["costs"]["year"]) - if config["foresight"] == "overnight" - else "data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0]), - plots=expand( + costs=lambda w: ( + resources("costs_{}.csv".format(config_provider("costs", "year")(w))) + if config_provider("foresight")(w) == "overnight" + else resources( + "costs_{}.csv".format( + config_provider("scenario", "planning_horizons", 0)(w) + ) + ) + ), + ac_plot=expand( + resources("maps/power-network-s{simpl}-{clusters}.pdf"), + **config["scenario"], + allow_missing=True, + ), + costs_plot=expand( RESULTS + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf", - **config["scenario"] + **config["scenario"], + allow_missing=True, + ), + h2_plot=lambda w: expand( + ( + RESULTS + + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-h2_network_{planning_horizons}.pdf" + if config_provider("sector", "H2_network")(w) + else [] + ), + **config["scenario"], + allow_missing=True, + ), + ch4_plot=lambda w: expand( + ( + RESULTS + + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-ch4_network_{planning_horizons}.pdf" + if config_provider("sector", "gas_network")(w) + else [] + ), + **config["scenario"], + allow_missing=True, ), output: nodal_costs=RESULTS + "csvs/nodal_costs.csv", @@ -122,9 +208,7 @@ rule make_summary: resources: mem_mb=10000, log: - LOGS + "make_summary.log", - benchmark: - BENCHMARKS + "make_summary" + RESULTS + "logs/make_summary.log", conda: "../envs/environment.yaml" script: @@ -133,18 +217,19 @@ rule make_summary: rule plot_summary: params: - countries=config["countries"], - planning_horizons=config["scenario"]["planning_horizons"], - sector_opts=config["scenario"]["sector_opts"], - emissions_scope=config["energy"]["emissions"], - eurostat_report_year=config["energy"]["eurostat_report_year"], - plotting=config["plotting"], + countries=config_provider("countries"), + planning_horizons=config_provider("scenario", "planning_horizons"), + emissions_scope=config_provider("energy", "emissions"), + plotting=config_provider("plotting"), + foresight=config_provider("foresight"), + co2_budget=config_provider("co2_budget"), + sector=config_provider("sector"), RDIR=RDIR, input: costs=RESULTS + "csvs/costs.csv", energy=RESULTS + "csvs/energy.csv", balances=RESULTS + "csvs/supply_energy.csv", - eurostat=input_eurostat, + 
eurostat="data/eurostat/eurostat-energy_balances-april_2023_edition", co2="data/bundle-sector/eea/UNFCCC_v23.csv", output: costs=RESULTS + "graphs/costs.pdf", @@ -154,9 +239,7 @@ rule plot_summary: resources: mem_mb=10000, log: - LOGS + "plot_summary.log", - benchmark: - BENCHMARKS + "plot_summary" + RESULTS + "logs/plot_summary.log", conda: "../envs/environment.yaml" script: @@ -178,7 +261,7 @@ STATISTICS_BARPLOTS = [ rule plot_elec_statistics: params: - plotting=config["plotting"], + plotting=config_provider("plotting"), barplots=STATISTICS_BARPLOTS, input: network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", diff --git a/rules/retrieve.smk b/rules/retrieve.smk index 7a180e22..d0345f36 100644 --- a/rules/retrieve.smk +++ b/rules/retrieve.smk @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2023-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -32,41 +32,22 @@ if config["enable"]["retrieve"] and config["enable"].get("retrieve_databundle", output: protected(expand("data/bundle/{file}", file=datafiles)), log: - LOGS + "retrieve_databundle.log", + "logs/retrieve_databundle.log", resources: mem_mb=1000, retries: 2 conda: - "../envs/environment.yaml" + "../envs/retrieve.yaml" script: "../scripts/retrieve_databundle.py" -if config["enable"].get("retrieve_irena"): - - rule retrieve_irena: - output: - offwind="data/existing_infrastructure/offwind_capacity_IRENA.csv", - onwind="data/existing_infrastructure/onwind_capacity_IRENA.csv", - solar="data/existing_infrastructure/solar_capacity_IRENA.csv", - log: - LOGS + "retrieve_irena.log", - resources: - mem_mb=1000, - retries: 2 - conda: - "../envs/environment.yaml" - script: - "../scripts/retrieve_irena.py" - - if config["enable"]["retrieve"] and config["enable"].get("retrieve_cutout", True): rule retrieve_cutout: input: - HTTP.remote( - "zenodo.org/record/6382570/files/{cutout}.nc", - static=True, + storage( + "https://zenodo.org/record/6382570/files/{cutout}.nc", ), output: protected("cutouts/" + CDIR + "{cutout}.nc"), @@ -83,23 +64,19 @@ if config["enable"]["retrieve"] and config["enable"].get("retrieve_cutout", True if config["enable"]["retrieve"] and config["enable"].get("retrieve_cost_data", True): rule retrieve_cost_data: - input: - HTTP.remote( - "raw.githubusercontent.com/PyPSA/technology-data/{}/outputs/".format( - config["costs"]["version"] - ) - + "costs_{year}.csv", - keep_local=True, - ), + params: + version=config_provider("costs", "version"), output: - "data/costs_{year}.csv", + resources("costs_{year}.csv"), log: - LOGS + "retrieve_cost_data_{year}.log", + logs("retrieve_cost_data_{year}.log"), resources: mem_mb=1000, retries: 2 - run: - move(input[0], output[0]) + conda: + "../envs/retrieve.yaml" + script: + "../scripts/retrieve_cost_data.py" if config["enable"]["retrieve"] and config["enable"].get( @@ -108,20 +85,19 @@ if config["enable"]["retrieve"] and config["enable"].get( rule retrieve_natura_raster: input: - HTTP.remote( - "zenodo.org/record/4706686/files/natura.tiff", + storage( + "https://zenodo.org/record/4706686/files/natura.tiff", keep_local=True, - static=True, ), output: - RESOURCES + "natura.tiff", + resources("natura.tiff"), log: - LOGS + "retrieve_natura_raster.log", + logs("retrieve_natura_raster.log"), resources: mem_mb=5000, retries: 2 run: - move(input[0], output[0]) + copyfile(input[0], output[0]) validate_checksum(output[0], input[0]) @@ -139,28 +115,27 @@ if config["enable"]["retrieve"] and config["enable"].get( 
"h2_salt_caverns_GWh_per_sqkm.geojson", ] - datafolders = [ - protected( - directory("data/bundle-sector/eurostat-energy_balances-june_2016_edition") - ), - protected( - directory("data/bundle-sector/eurostat-energy_balances-may_2018_edition") - ), - protected(directory("data/bundle-sector/jrc-idees-2015")), - ] - rule retrieve_sector_databundle: output: protected(expand("data/bundle-sector/{files}", files=datafiles)), - *datafolders, + protected(directory("data/bundle-sector/jrc-idees-2015")), log: - LOGS + "retrieve_sector_databundle.log", + "logs/retrieve_sector_databundle.log", retries: 2 conda: - "../envs/environment.yaml" + "../envs/retrieve.yaml" script: "../scripts/retrieve_sector_databundle.py" + rule retrieve_eurostat_data: + output: + directory("data/eurostat/eurostat-energy_balances-april_2023_edition"), + log: + "logs/retrieve_eurostat_data.log", + retries: 2 + script: + "../scripts/retrieve_eurostat_data.py" + if config["enable"]["retrieve"]: datafiles = [ @@ -173,14 +148,12 @@ if config["enable"]["retrieve"]: rule retrieve_gas_infrastructure_data: output: - protected( - expand("data/gas_network/scigrid-gas/data/{files}", files=datafiles) - ), + expand("data/gas_network/scigrid-gas/data/{files}", files=datafiles), log: - LOGS + "retrieve_gas_infrastructure_data.log", + "logs/retrieve_gas_infrastructure_data.log", retries: 2 conda: - "../envs/environment.yaml" + "../envs/retrieve.yaml" script: "../scripts/retrieve_gas_infrastructure_data.py" @@ -188,20 +161,32 @@ if config["enable"]["retrieve"]: if config["enable"]["retrieve"]: rule retrieve_electricity_demand: + params: + versions=["2019-06-05", "2020-10-06"], + output: + "data/electricity_demand_raw.csv", + log: + "logs/retrieve_electricity_demand.log", + resources: + mem_mb=5000, + retries: 2 + conda: + "../envs/retrieve.yaml" + script: + "../scripts/retrieve_electricity_demand.py" + + +if config["enable"]["retrieve"]: + + rule retrieve_synthetic_electricity_demand: input: - HTTP.remote( - "data.open-power-system-data.org/time_series/{version}/time_series_60min_singleindex.csv".format( - version="2019-06-05" - if config["snapshots"]["end"] < "2019" - else "2020-10-06" - ), - keep_local=True, - static=True, + storage( + "https://zenodo.org/records/10820928/files/demand_hourly.csv", ), output: - RESOURCES + "load_raw.csv", + "data/load_synthetic_raw.csv", log: - LOGS + "retrieve_electricity_demand.log", + "logs/retrieve_synthetic_electricity_demand.log", resources: mem_mb=5000, retries: 2 @@ -213,15 +198,14 @@ if config["enable"]["retrieve"]: rule retrieve_ship_raster: input: - HTTP.remote( + storage( "https://zenodo.org/record/6953563/files/shipdensity_global.zip", keep_local=True, - static=True, ), output: protected("data/shipdensity_global.zip"), log: - LOGS + "retrieve_ship_raster.log", + "logs/retrieve_ship_raster.log", resources: mem_mb=5000, retries: 2 @@ -236,9 +220,8 @@ if config["enable"]["retrieve"]: # Website: https://land.copernicus.eu/global/products/lc rule download_copernicus_land_cover: input: - HTTP.remote( - "zenodo.org/record/3939050/files/PROBAV_LC100_global_v3.0.1_2019-nrt_Discrete-Classification-map_EPSG-4326.tif", - static=True, + storage( + "https://zenodo.org/record/3939050/files/PROBAV_LC100_global_v3.0.1_2019-nrt_Discrete-Classification-map_EPSG-4326.tif", ), output: "data/Copernicus_LC100_global_v3.0.1_2019-nrt_Discrete-Classification-map_EPSG-4326.tif", @@ -253,9 +236,8 @@ if config["enable"]["retrieve"]: # Website: https://ec.europa.eu/jrc/en/luisa rule retrieve_luisa_land_cover: input: - 
HTTP.remote( - "jeodpp.jrc.ec.europa.eu/ftp/jrc-opendata/LUISA/EUROPE/Basemaps/LandUse/2018/LATEST/LUISA_basemap_020321_50m.tif", - static=True, + storage( + "https://jeodpp.jrc.ec.europa.eu/ftp/jrc-opendata/LUISA/EUROPE/Basemaps/LandUse/2018/LATEST/LUISA_basemap_020321_50m.tif", ), output: "data/LUISA_basemap_020321_50m.tif", @@ -298,11 +280,7 @@ if config["enable"]["retrieve"]: # Website: https://www.protectedplanet.net/en/thematic-areas/wdpa rule download_wdpa: input: - HTTP.remote( - url, - static=True, - keep_local=True, - ), + storage(url, keep_local=True), params: zip="data/WDPA_shp.zip", folder=directory("data/WDPA"), @@ -316,7 +294,7 @@ if config["enable"]["retrieve"]: layer_path = ( f"/vsizip/{params.folder}/WDPA_{bYYYY}_Public_shp_{i}.zip" ) - print(f"Adding layer {i+1} of 3 to combined output file.") + print(f"Adding layer {i + 1} of 3 to combined output file.") shell("ogr2ogr -f gpkg -update -append {output.gpkg} {layer_path}") rule download_wdpa_marine: @@ -324,9 +302,8 @@ if config["enable"]["retrieve"]: # extract the main zip and then merge the contained 3 zipped shapefiles # Website: https://www.protectedplanet.net/en/thematic-areas/marine-protected-areas input: - HTTP.remote( - f"d1gam3xoknrgr2.cloudfront.net/current/WDPA_WDOECM_{bYYYY}_Public_marine_shp.zip", - static=True, + storage( + f"https://d1gam3xoknrgr2.cloudfront.net/current/WDPA_WDOECM_{bYYYY}_Public_marine_shp.zip", keep_local=True, ), params: @@ -340,7 +317,7 @@ if config["enable"]["retrieve"]: for i in range(3): # vsizip is special driver for directly working with zipped shapefiles in ogr2ogr layer_path = f"/vsizip/{params.folder}/WDPA_WDOECM_{bYYYY}_Public_marine_shp_{i}.zip" - print(f"Adding layer {i+1} of 3 to combined output file.") + print(f"Adding layer {i + 1} of 3 to combined output file.") shell("ogr2ogr -f gpkg -update -append {output.gpkg} {layer_path}") @@ -349,15 +326,14 @@ if config["enable"]["retrieve"]: rule retrieve_monthly_co2_prices: input: - HTTP.remote( + storage( "https://www.eex.com/fileadmin/EEX/Downloads/EUA_Emission_Spot_Primary_Market_Auction_Report/Archive_Reports/emission-spot-primary-market-auction-report-2019-data.xls", keep_local=True, - static=True, ), output: "data/validation/emission-spot-primary-market-auction-report-2019-data.xls", log: - LOGS + "retrieve_monthly_co2_prices.log", + "logs/retrieve_monthly_co2_prices.log", resources: mem_mb=5000, retries: 2 @@ -371,11 +347,11 @@ if config["enable"]["retrieve"]: output: "data/validation/energy-price-trends-xlsx-5619002.xlsx", log: - LOGS + "retrieve_monthly_fuel_prices.log", + "logs/retrieve_monthly_fuel_prices.log", resources: mem_mb=5000, retries: 2 conda: - "../envs/environment.yaml" + "../envs/retrieve.yaml" script: "../scripts/retrieve_monthly_fuel_prices.py" diff --git a/rules/solve_electricity.smk b/rules/solve_electricity.smk index 7f6092be..389687a0 100644 --- a/rules/solve_electricity.smk +++ b/rules/solve_electricity.smk @@ -1,36 +1,37 @@ -# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2023-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT rule solve_network: params: - solving=config["solving"], - foresight=config["foresight"], - planning_horizons=config["scenario"]["planning_horizons"], - co2_sequestration_potential=config["sector"].get( - "co2_sequestration_potential", 200 + solving=config_provider("solving"), + foresight=config_provider("foresight"), + planning_horizons=config_provider("scenario", "planning_horizons"), + co2_sequestration_potential=config_provider( 
+ "sector", "co2_sequestration_potential", default=200 ), custom_extra_functionality=input_custom_extra_functionality, input: - network=RESOURCES + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", - config=RESULTS + "config.yaml", + network=resources("networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"), output: network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", + config=RESULTS + "configs/config.elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.yaml", log: solver=normpath( - LOGS + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_solver.log" + RESULTS + + "logs/solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_solver.log" ), - python=LOGS - + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_python.log", + python=RESULTS + + "logs/solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_python.log", benchmark: - BENCHMARKS + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}" - threads: 4 + (RESULTS + "benchmarks/solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}") + threads: solver_threads resources: mem_mb=memory, - walltime=config["solving"].get("walltime", "12:00:00"), + runtime=config_provider("solving", "runtime", default="6h"), shadow: - "minimal" + "shallow" conda: "../envs/environment.yaml" script: @@ -39,29 +40,29 @@ rule solve_network: rule solve_operations_network: params: - options=config["solving"]["options"], + options=config_provider("solving", "options"), input: network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", output: network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op.nc", log: solver=normpath( - LOGS - + "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_solver.log" + RESULTS + + "logs/solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_solver.log" ), - python=LOGS - + "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_python.log", + python=RESULTS + + "logs/solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_python.log", benchmark: ( - BENCHMARKS - + "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}" + RESULTS + + "benchmarks/solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}" ) threads: 4 resources: mem_mb=(lambda w: 10000 + 372 * int(w.clusters)), - walltime=config["solving"].get("walltime", "12:00:00"), + runtime=config_provider("solving", "runtime", default="6h"), shadow: - "minimal" + "shallow" conda: "../envs/environment.yaml" script: diff --git a/rules/solve_myopic.smk b/rules/solve_myopic.smk index 7ca8857d..6220af2a 100644 --- a/rules/solve_myopic.smk +++ b/rules/solve_myopic.smk @@ -1,43 +1,49 @@ -# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2023-4 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT rule add_existing_baseyear: params: - baseyear=config["scenario"]["planning_horizons"][0], - sector=config["sector"], - existing_capacities=config["existing_capacities"], - costs=config["costs"], + baseyear=config_provider("scenario", "planning_horizons", 0), + sector=config_provider("sector"), + existing_capacities=config_provider("existing_capacities"), + costs=config_provider("costs"), input: network=RESULTS + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", - powerplants=RESOURCES + "powerplants.csv", - busmap_s=RESOURCES + "busmap_elec_s{simpl}.csv", - busmap=RESOURCES + "busmap_elec_s{simpl}_{clusters}.csv", - clustered_pop_layout=RESOURCES + 
"pop_layout_elec_s{simpl}_{clusters}.csv", - costs="data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0]), - cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc", - cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc", - existing_heating="data/existing_infrastructure/existing_heating_raw.csv", - existing_solar="data/existing_infrastructure/solar_capacity_IRENA.csv", - existing_onwind="data/existing_infrastructure/onwind_capacity_IRENA.csv", - existing_offwind="data/existing_infrastructure/offwind_capacity_IRENA.csv", + powerplants=resources("powerplants.csv"), + busmap_s=resources("busmap_elec_s{simpl}.csv"), + busmap=resources("busmap_elec_s{simpl}_{clusters}.csv"), + clustered_pop_layout=resources("pop_layout_elec_s{simpl}_{clusters}.csv"), + costs=lambda w: resources( + "costs_{}.csv".format( + config_provider("scenario", "planning_horizons", 0)(w) + ) + ), + cop_soil_total=resources("cop_soil_total_elec_s{simpl}_{clusters}.nc"), + cop_air_total=resources("cop_air_total_elec_s{simpl}_{clusters}.nc"), + existing_heating_distribution=resources( + "existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv" + ), output: RESULTS + "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", wildcard_constraints: + # TODO: The first planning_horizon needs to be aligned across scenarios + # snakemake does not support passing functions to wildcard_constraints + # reference: https://github.com/snakemake/snakemake/issues/2703 planning_horizons=config["scenario"]["planning_horizons"][0], #only applies to baseyear threads: 1 resources: mem_mb=2000, log: - LOGS - + "add_existing_baseyear_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log", + RESULTS + + "logs/add_existing_baseyear_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log", benchmark: ( - BENCHMARKS - + "add_existing_baseyear/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" + RESULTS + + "benchmarks/add_existing_baseyear/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" ) conda: "../envs/environment.yaml" @@ -45,18 +51,34 @@ rule add_existing_baseyear: "../scripts/add_existing_baseyear.py" +def input_profile_tech_brownfield(w): + return { + f"profile_{tech}": resources(f"profile_{tech}.nc") + for tech in config_provider("electricity", "renewable_carriers")(w) + if tech != "hydro" + } + + rule add_brownfield: params: - H2_retrofit=config["sector"]["H2_retrofit"], - H2_retrofit_capacity_per_CH4=config["sector"]["H2_retrofit_capacity_per_CH4"], - threshold_capacity=config["existing_capacities"]["threshold_capacity"], + H2_retrofit=config_provider("sector", "H2_retrofit"), + H2_retrofit_capacity_per_CH4=config_provider( + "sector", "H2_retrofit_capacity_per_CH4" + ), + threshold_capacity=config_provider("existing_capacities", " threshold_capacity"), + snapshots=config_provider("snapshots"), + drop_leap_day=config_provider("enable", "drop_leap_day"), + carriers=config_provider("electricity", "renewable_carriers"), input: + unpack(input_profile_tech_brownfield), + simplify_busmap=resources("busmap_elec_s{simpl}.csv"), + cluster_busmap=resources("busmap_elec_s{simpl}_{clusters}.csv"), network=RESULTS + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", network_p=solved_previous_horizon, #solved network at previous time step - costs="data/costs_{planning_horizons}.csv", - cop_soil_total=RESOURCES + 
"cop_soil_total_elec_s{simpl}_{clusters}.nc", - cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc", + costs=resources("costs_{planning_horizons}.csv"), + cop_soil_total=resources("cop_soil_total_elec_s{simpl}_{clusters}.nc"), + cop_air_total=resources("cop_air_total_elec_s{simpl}_{clusters}.nc"), output: RESULTS + "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", @@ -64,12 +86,12 @@ rule add_brownfield: resources: mem_mb=10000, log: - LOGS - + "add_brownfield_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log", + RESULTS + + "logs/add_brownfield_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log", benchmark: ( - BENCHMARKS - + "add_brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" + RESULTS + + "benchmarks/add_brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" ) conda: "../envs/environment.yaml" @@ -82,36 +104,39 @@ ruleorder: add_existing_baseyear > add_brownfield rule solve_sector_network_myopic: params: - solving=config["solving"], - foresight=config["foresight"], - planning_horizons=config["scenario"]["planning_horizons"], - co2_sequestration_potential=config["sector"].get( - "co2_sequestration_potential", 200 + solving=config_provider("solving"), + foresight=config_provider("foresight"), + planning_horizons=config_provider("scenario", "planning_horizons"), + co2_sequestration_potential=config_provider( + "sector", "co2_sequestration_potential", default=200 ), custom_extra_functionality=input_custom_extra_functionality, input: network=RESULTS + "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", - costs="data/costs_{planning_horizons}.csv", - config=RESULTS + "config.yaml", + costs=resources("costs_{planning_horizons}.csv"), output: - RESULTS + network=RESULTS + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", + config=RESULTS + + "configs/config.elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.yaml", shadow: "shallow" log: - solver=LOGS - + "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_solver.log", - python=LOGS - + "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log", - threads: 4 + solver=RESULTS + + "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_solver.log", + memory=RESULTS + + "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_memory.log", + python=RESULTS + + "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log", + threads: solver_threads resources: - mem_mb=config["solving"]["mem"], - walltime=config["solving"].get("walltime", "12:00:00"), + mem_mb=config_provider("solving", "mem_mb"), + runtime=config_provider("solving", "runtime", default="6h"), benchmark: ( - BENCHMARKS - + "solve_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" + RESULTS + + "benchmarks/solve_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" ) conda: "../envs/environment.yaml" diff --git a/rules/solve_overnight.smk b/rules/solve_overnight.smk index a3fed042..26dee7a6 100644 --- a/rules/solve_overnight.smk +++ b/rules/solve_overnight.smk @@ -1,40 +1,42 @@ -# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2023-2024 The PyPSA-Eur Authors # # 
SPDX-License-Identifier: MIT rule solve_sector_network: params: - solving=config["solving"], - foresight=config["foresight"], - planning_horizons=config["scenario"]["planning_horizons"], - co2_sequestration_potential=config["sector"].get( - "co2_sequestration_potential", 200 + solving=config_provider("solving"), + foresight=config_provider("foresight"), + planning_horizons=config_provider("scenario", "planning_horizons"), + co2_sequestration_potential=config_provider( + "sector", "co2_sequestration_potential", default=200 ), custom_extra_functionality=input_custom_extra_functionality, input: network=RESULTS + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", - config=RESULTS + "config.yaml", output: - RESULTS + network=RESULTS + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", + config=RESULTS + + "configs/config.elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.yaml", shadow: "shallow" log: - solver=LOGS - + "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_solver.log", - python=LOGS - + "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log", - threads: config["solving"]["solver"].get("threads", 4) + solver=RESULTS + + "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_solver.log", + memory=RESULTS + + "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_memory.log", + python=RESULTS + + "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log", + threads: solver_threads resources: - mem_mb=config["solving"]["mem"], - walltime=config["solving"].get("walltime", "12:00:00"), + mem_mb=config_provider("solving", "mem_mb"), + runtime=config_provider("solving", "runtime", default="6h"), benchmark: ( RESULTS - + BENCHMARKS - + "solve_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" + + "benchmarks/solve_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" ) conda: "../envs/environment.yaml" diff --git a/rules/solve_perfect.smk b/rules/solve_perfect.smk index a7856fa9..51cb3920 100644 --- a/rules/solve_perfect.smk +++ b/rules/solve_perfect.smk @@ -1,26 +1,30 @@ -# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2023-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT rule add_existing_baseyear: params: - baseyear=config["scenario"]["planning_horizons"][0], - sector=config["sector"], - existing_capacities=config["existing_capacities"], - costs=config["costs"], + baseyear=config_provider("scenario", "planning_horizons", 0), + sector=config_provider("sector"), + existing_capacities=config_provider("existing_capacities"), + costs=config_provider("costs"), input: network=RESULTS + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", - powerplants=RESOURCES + "powerplants.csv", - busmap_s=RESOURCES + "busmap_elec_s{simpl}.csv", - busmap=RESOURCES + "busmap_elec_s{simpl}_{clusters}.csv", - clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv", - costs="data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0]), - cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc", - cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc", + powerplants=resources("powerplants.csv"), + busmap_s=resources("busmap_elec_s{simpl}.csv"), + 
busmap=resources("busmap_elec_s{simpl}_{clusters}.csv"), + clustered_pop_layout=resources("pop_layout_elec_s{simpl}_{clusters}.csv"), + costs=lambda w: resources( + "costs_{}.csv".format( + config_provider("scenario", "planning_horizons", 0)(w) + ) + ), + cop_soil_total=resources("cop_soil_total_elec_s{simpl}_{clusters}.nc"), + cop_air_total=resources("cop_air_total_elec_s{simpl}_{clusters}.nc"), + existing_heating_distribution=resources( + "existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv" + ), existing_heating="data/existing_infrastructure/existing_heating_raw.csv", - existing_solar="data/existing_infrastructure/solar_capacity_IRENA.csv", - existing_onwind="data/existing_infrastructure/onwind_capacity_IRENA.csv", - existing_offwind="data/existing_infrastructure/offwind_capacity_IRENA.csv", output: RESULTS + "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", @@ -28,14 +32,15 @@ rule add_existing_baseyear: planning_horizons=config["scenario"]["planning_horizons"][0], #only applies to baseyear threads: 1 resources: - mem_mb=2000, + mem_mb=config_provider("solving", "mem_mb"), + runtime=config_provider("solving", "runtime", default="24h"), log: - LOGS - + "add_existing_baseyear_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log", + logs( + "add_existing_baseyear_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log" + ), benchmark: - ( - BENCHMARKS - + "add_existing_baseyear/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" + benchmarks( + "add_existing_baseyear/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" ) conda: "../envs/environment.yaml" @@ -43,51 +48,28 @@ rule add_existing_baseyear: "../scripts/add_existing_baseyear.py" -rule add_brownfield: - params: - H2_retrofit=config["sector"]["H2_retrofit"], - H2_retrofit_capacity_per_CH4=config["sector"]["H2_retrofit_capacity_per_CH4"], - threshold_capacity=config["existing_capacities"]["threshold_capacity"], - input: - network=RESULTS - + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", - network_p=solved_previous_horizon, #solved network at previous time step - costs="data/costs_{planning_horizons}.csv", - cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc", - cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc", - output: - RESULTS - + "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", - threads: 4 - resources: - mem_mb=10000, - log: - LOGS - + "add_brownfield_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log", - benchmark: - ( - BENCHMARKS - + "add_brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" - ) - conda: - "../envs/environment.yaml" - script: - "../scripts/add_brownfield.py" +def input_network_year(w): + return { + f"network_{year}": RESULTS + + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}" + + f"_{year}.nc" + for year in config_provider("scenario", "planning_horizons")(w)[1:] + } rule prepare_perfect_foresight: + params: + costs=config_provider("costs"), + time_resolution=config_provider("clustering", "temporal", "sector"), input: - **{ - f"network_{year}": RESULTS - + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_" - + f"{year}.nc" - for year in config["scenario"]["planning_horizons"][1:] - }, + unpack(input_network_year), 
brownfield_network=lambda w: ( RESULTS + "prenetworks-brownfield/" + "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_" - + "{}.nc".format(str(config["scenario"]["planning_horizons"][0])) + + "{}.nc".format( + str(config_provider("scenario", "planning_horizons", 0)(w)) + ) ), output: RESULTS @@ -96,12 +78,12 @@ rule prepare_perfect_foresight: resources: mem_mb=10000, log: - LOGS - + "prepare_perfect_foresight{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}.log", + logs( + "prepare_perfect_foresight{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}.log" + ), benchmark: - ( - BENCHMARKS - + "prepare_perfect_foresight{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}" + benchmarks( + "prepare_perfect_foresight{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}" ) conda: "../envs/environment.yaml" @@ -111,25 +93,26 @@ rule prepare_perfect_foresight: rule solve_sector_network_perfect: params: - solving=config["solving"], - foresight=config["foresight"], - sector=config["sector"], - planning_horizons=config["scenario"]["planning_horizons"], - co2_sequestration_potential=config["sector"].get( - "co2_sequestration_potential", 200 + solving=config_provider("solving"), + foresight=config_provider("foresight"), + sector=config_provider("sector"), + planning_horizons=config_provider("scenario", "planning_horizons"), + co2_sequestration_potential=config_provider( + "sector", "co2_sequestration_potential", default=200 ), custom_extra_functionality=input_custom_extra_functionality, input: network=RESULTS + "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc", - costs="data/costs_2030.csv", - config=RESULTS + "config.yaml", + costs=resources("costs_2030.csv"), output: - RESULTS + network=RESULTS + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc", - threads: 4 + config=RESULTS + + "configs/config.elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.yaml", + threads: solver_threads resources: - mem_mb=config["solving"]["mem"], + mem_mb=config_provider("solving", "mem"), shadow: "shallow" log: @@ -141,8 +124,8 @@ rule solve_sector_network_perfect: + "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_memory.log", benchmark: ( - BENCHMARKS - + "solve_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years}" + RESULTS + + "benchmarks/solve_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years}" ) conda: "../envs/environment.yaml" @@ -150,18 +133,22 @@ rule solve_sector_network_perfect: "../scripts/solve_network.py" +def input_networks_make_summary_perfect(w): + return { + f"networks_{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}": RESULTS + + f"postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc" + for simpl in config_provider("scenario", "simpl")(w) + for clusters in config_provider("scenario", "clusters")(w) + for opts in config_provider("scenario", "opts")(w) + for sector_opts in config_provider("scenario", "sector_opts")(w) + for ll in config_provider("scenario", "ll")(w) + } + + rule make_summary_perfect: input: - **{ - f"networks_{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}": RESULTS - + f"postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc" - for simpl in config["scenario"]["simpl"] - for clusters in config["scenario"]["clusters"] - for opts in config["scenario"]["opts"] - for sector_opts in config["scenario"]["sector_opts"] - for ll in 
config["scenario"]["ll"] - }, - costs="data/costs_2020.csv", + unpack(input_networks_make_summary_perfect), + costs=resources("costs_2020.csv"), output: nodal_costs=RESULTS + "csvs/nodal_costs.csv", nodal_capacities=RESULTS + "csvs/nodal_capacities.csv", @@ -183,13 +170,10 @@ rule make_summary_perfect: resources: mem_mb=10000, log: - LOGS + "make_summary_perfect.log", + logs("make_summary_perfect.log"), benchmark: - (BENCHMARKS + "make_summary_perfect") + benchmarks("make_summary_perfect") conda: "../envs/environment.yaml" script: "../scripts/make_summary_perfect.py" - - -ruleorder: add_existing_baseyear > add_brownfield diff --git a/rules/validate.smk b/rules/validate.smk index 0fa1f607..91fe6e91 100644 --- a/rules/validate.smk +++ b/rules/validate.smk @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2023-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -17,12 +17,12 @@ rule build_electricity_production: The data is used for validation of the optimization results. """ params: - snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, - countries=config["countries"], + snapshots=config_provider("snapshots"), + countries=config_provider("countries"), output: - RESOURCES + "historical_electricity_production.csv", + resources("historical_electricity_production.csv"), log: - LOGS + "build_electricity_production.log", + logs("build_electricity_production.log"), resources: mem_mb=5000, script: @@ -35,14 +35,14 @@ rule build_cross_border_flows: The data is used for validation of the optimization results. """ params: - snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, - countries=config["countries"], + snapshots=config_provider("snapshots"), + countries=config_provider("countries"), input: - network=RESOURCES + "networks/base.nc", + network=resources("networks/base.nc"), output: - RESOURCES + "historical_cross_border_flows.csv", + resources("historical_cross_border_flows.csv"), log: - LOGS + "build_cross_border_flows.log", + logs("build_cross_border_flows.log"), resources: mem_mb=5000, script: @@ -55,12 +55,12 @@ rule build_electricity_prices: The data is used for validation of the optimization results. 
""" params: - snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, - countries=config["countries"], + snapshots=config_provider("snapshots"), + countries=config_provider("countries"), output: - RESOURCES + "historical_electricity_prices.csv", + resources("historical_electricity_prices.csv"), log: - LOGS + "build_electricity_prices.log", + logs("build_electricity_prices.log"), resources: mem_mb=5000, script: @@ -70,7 +70,7 @@ rule build_electricity_prices: rule plot_validation_electricity_production: input: network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", - electricity_production=RESOURCES + "historical_electricity_production.csv", + electricity_production=resources("historical_electricity_production.csv"), output: **{ plot: RESULTS @@ -85,10 +85,10 @@ rule plot_validation_electricity_production: rule plot_validation_cross_border_flows: params: - countries=config["countries"], + countries=config_provider("countries"), input: network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", - cross_border_flows=RESOURCES + "historical_cross_border_flows.csv", + cross_border_flows=resources("historical_cross_border_flows.csv"), output: **{ plot: RESULTS @@ -104,7 +104,7 @@ rule plot_validation_cross_border_flows: rule plot_validation_electricity_prices: input: network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", - electricity_prices=RESOURCES + "historical_electricity_prices.csv", + electricity_prices=resources("historical_electricity_prices.csv"), output: **{ plot: RESULTS diff --git a/scripts/__init__.py b/scripts/__init__.py new file mode 100644 index 00000000..fc781c2f --- /dev/null +++ b/scripts/__init__.py @@ -0,0 +1,4 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT diff --git a/scripts/_benchmark.py b/scripts/_benchmark.py index 4e3413e9..58fc3d39 100644 --- a/scripts/_benchmark.py +++ b/scripts/_benchmark.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -13,15 +13,15 @@ import os import sys import time +from memory_profiler import _get_memory, choose_backend + logger = logging.getLogger(__name__) # TODO: provide alternative when multiprocessing is not available try: from multiprocessing import Pipe, Process except ImportError: - from multiprocessing.dummy import Process, Pipe - -from memory_profiler import _get_memory, choose_backend + from multiprocessing.dummy import Pipe, Process # The memory logging facilities have been adapted from memory_profiler diff --git a/scripts/_helpers.py b/scripts/_helpers.py index 03bde840..dfedcaea 100644 --- a/scripts/_helpers.py +++ b/scripts/_helpers.py @@ -1,22 +1,25 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT import contextlib +import copy import hashlib import logging import os import re import urllib +from functools import partial +from os.path import exists from pathlib import Path +from shutil import copyfile import pandas as pd import pytz import requests import yaml -from pypsa.components import component_attrs, components -from pypsa.descriptors import Dict +from snakemake.utils import update_config from tqdm import tqdm logger = logging.getLogger(__name__) @@ -24,6 +27,114 @@ logger = 
logging.getLogger(__name__) REGION_COLS = ["geometry", "name", "x", "y", "country"] +def copy_default_files(workflow): + default_files = { + "config/config.default.yaml": "config/config.yaml", + "config/scenarios.template.yaml": "config/scenarios.yaml", + } + for template, target in default_files.items(): + target = os.path.join(workflow.current_basedir, target) + template = os.path.join(workflow.current_basedir, template) + if not exists(target) and exists(template): + copyfile(template, target) + + +def get_scenarios(run): + scenario_config = run.get("scenarios", {}) + if run["name"] and scenario_config.get("enable"): + fn = Path(scenario_config["file"]) + if fn.exists(): + scenarios = yaml.safe_load(fn.read_text()) + if run["name"] == "all": + run["name"] = list(scenarios.keys()) + return scenarios + return {} + + +def get_rdir(run): + scenario_config = run.get("scenarios", {}) + if run["name"] and scenario_config.get("enable"): + RDIR = "{run}/" + elif run["name"]: + RDIR = run["name"] + "/" + else: + RDIR = "" + + prefix = run.get("prefix", "") + if prefix: + RDIR = f"{prefix}/{RDIR}" + + return RDIR + + +def get_run_path(fn, dir, rdir, shared_resources): + """ + Dynamically provide paths based on shared resources and filename. + + Use this function for snakemake rule inputs or outputs that should be + optionally shared across runs or created individually for each run. + + Parameters + ---------- + fn : str + The filename for the path to be generated. + dir : str + The base directory. + rdir : str + Relative directory for non-shared resources. + shared_resources : str or bool + Specifies which resources should be shared. + - If string is "base", special handling for shared "base" resources (see notes). + - If random string other than "base", this folder is used instead of the `rdir` keyword. + - If boolean, directly specifies if the resource is shared. + + Returns + ------- + str + Full path where the resource should be stored. + + Notes + ----- + Special case for "base" allows no wildcards other than "technology", "year" + and "scope" and excludes filenames starting with "networks/elec" or + "add_electricity". All other resources are shared. + """ + if shared_resources == "base": + pattern = r"\{([^{}]+)\}" + existing_wildcards = set(re.findall(pattern, fn)) + irrelevant_wildcards = {"technology", "year", "scope", "kind"} + no_relevant_wildcards = not existing_wildcards - irrelevant_wildcards + no_elec_rule = not fn.startswith("networks/elec") and not fn.startswith( + "add_electricity" + ) + is_shared = no_relevant_wildcards and no_elec_rule + rdir = "" if is_shared else rdir + elif isinstance(shared_resources, str): + rdir = shared_resources + "/" + elif isinstance(shared_resources, bool): + rdir = "" if shared_resources else rdir + else: + raise ValueError( + "shared_resources must be a boolean, str, or 'base' for special handling." + ) + + return f"{dir}{rdir}{fn}" + + +def path_provider(dir, rdir, shared_resources): + """ + Returns a partial function that dynamically provides paths based on shared + resources and the filename. + + Returns + ------- + partial function + A partial function that takes a filename as input and + returns the path to the file based on the shared_resources parameter. + """ + return partial(get_run_path, dir=dir, rdir=rdir, shared_resources=shared_resources) + + def get_opt(opts, expr, flags=None): """ Return the first option matching the regular expression. 
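# --- Illustrative usage sketch (not part of the upstream diff) --------------------
# A minimal example of the new path_provider/get_run_path helpers introduced above.
# The directory names "resources/" and "myrun/" are hypothetical placeholders.
from _helpers import path_provider  # assuming the scripts/ directory is importable

resources = path_provider("resources/", "myrun/", shared_resources="base")

resources("costs_2030.csv")
# -> "resources/costs_2030.csv" (no run-relevant wildcards, so the file is shared)

resources("networks/elec_s{simpl}.nc")
# -> "resources/myrun/networks/elec_s{simpl}.nc" (electricity network, kept per run)
# -----------------------------------------------------------------------------------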
@@ -45,9 +156,9 @@ def find_opt(opts, expr): """ for o in opts: if expr in o: - m = re.findall("[0-9]*\.?[0-9]+$", o) + m = re.findall(r"m?\d+(?:[\.p]\d+)?", o) if len(m) > 0: - return True, float(m[0]) + return True, float(m[-1].replace("p", ".").replace("m", "-")) else: return True, None return False, None @@ -61,6 +172,21 @@ def mute_print(): yield +def set_scenario_config(snakemake): + scenario = snakemake.config["run"].get("scenarios", {}) + if scenario.get("enable") and "run" in snakemake.wildcards.keys(): + try: + with open(scenario["file"], "r") as f: + scenario_config = yaml.safe_load(f) + except FileNotFoundError: + # fallback for mock_snakemake + script_dir = Path(__file__).parent.resolve() + root_dir = script_dir.parent + with open(root_dir / scenario["file"], "r") as f: + scenario_config = yaml.safe_load(f) + update_config(snakemake.config, scenario_config[snakemake.wildcards.run]) + + def configure_logging(snakemake, skip_handlers=False): """ Configure the basic behaviour for the logging module. @@ -80,6 +206,7 @@ def configure_logging(snakemake, skip_handlers=False): Do (not) skip the default handlers created for redirecting output to STDERR and file. """ import logging + import sys kwargs = snakemake.config.get("logging", dict()).copy() kwargs.setdefault("level", "INFO") @@ -103,6 +230,16 @@ def configure_logging(snakemake, skip_handlers=False): ) logging.basicConfig(**kwargs) + # Setup a function to handle uncaught exceptions and include them with their stacktrace into logfiles + def handle_exception(exc_type, exc_value, exc_traceback): + # Log the exception + logger = logging.getLogger() + logger.error( + "Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback) + ) + + sys.excepthook = handle_exception + def update_p_nom_max(n): # if extendable carriers (solar/onwind/...) have capacity >= 0, @@ -223,7 +360,13 @@ def progress_retrieve(url, file, disable=False): urllib.request.urlretrieve(url, file, reporthook=update_to) -def mock_snakemake(rulename, root_dir=None, configfiles=[], **wildcards): +def mock_snakemake( + rulename, + root_dir=None, + configfiles=None, + submodule_dir="workflow/submodules/pypsa-eur", + **wildcards, +): """ This function is expected to be executed from the 'scripts'-directory of ' the snakemake project. It returns a snakemake.script.Snakemake object, @@ -239,6 +382,9 @@ def mock_snakemake(rulename, root_dir=None, configfiles=[], **wildcards): path to the root directory of the snakemake project configfiles: list, str list of configfiles to be used to update the config + submodule_dir: str, Path + in case PyPSA-Eur is used as a submodule, submodule_dir is + the path of pypsa-eur relative to the project directory. **wildcards: keyword arguments fixing the wildcards. Only necessary if wildcards are needed. 
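# --- Illustrative usage sketch (not part of the upstream diff) --------------------
# How the widened find_opt regex above extracts numeric values from wildcard
# options: "p" is read as a decimal point and a leading "m" as a minus sign.
# The option lists are hypothetical examples.
from _helpers import find_opt  # assuming the scripts/ directory is importable

find_opt(["Co2L0p05", "T", "H"], "Co2L")  # -> (True, 0.05)
find_opt(["Ep", "BAU"], "Ep")             # -> (True, None), value omitted
find_opt(["T", "H"], "Co2L")              # -> (False, None), option not present
# -----------------------------------------------------------------------------------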
@@ -246,9 +392,17 @@ def mock_snakemake(rulename, root_dir=None, configfiles=[], **wildcards): import os import snakemake as sm - from packaging.version import Version, parse from pypsa.descriptors import Dict + from snakemake.api import Workflow + from snakemake.common import SNAKEFILE_CHOICES from snakemake.script import Snakemake + from snakemake.settings import ( + ConfigSettings, + DAGSettings, + ResourceSettings, + StorageSettings, + WorkflowSettings, + ) script_dir = Path(__file__).parent.resolve() if root_dir is None: @@ -257,7 +411,10 @@ def mock_snakemake(rulename, root_dir=None, configfiles=[], **wildcards): root_dir = Path(root_dir).resolve() user_in_script_dir = Path.cwd().resolve() == script_dir - if user_in_script_dir: + if str(submodule_dir) in __file__: + # the submodule_dir path is only need to locate the project dir + os.chdir(Path(__file__[: __file__.find(str(submodule_dir))])) + elif user_in_script_dir: os.chdir(root_dir) elif Path.cwd().resolve() != root_dir: raise RuntimeError( @@ -265,17 +422,28 @@ def mock_snakemake(rulename, root_dir=None, configfiles=[], **wildcards): f" {root_dir} or scripts directory {script_dir}" ) try: - for p in sm.SNAKEFILE_CHOICES: + for p in SNAKEFILE_CHOICES: if os.path.exists(p): snakefile = p break - kwargs = ( - dict(rerun_triggers=[]) if parse(sm.__version__) > Version("7.7.0") else {} - ) - if isinstance(configfiles, str): + if configfiles is None: + configfiles = [] + elif isinstance(configfiles, str): configfiles = [configfiles] - workflow = sm.Workflow(snakefile, overwrite_configfiles=configfiles, **kwargs) + resource_settings = ResourceSettings() + config_settings = ConfigSettings(configfiles=map(Path, configfiles)) + workflow_settings = WorkflowSettings() + storage_settings = StorageSettings() + dag_settings = DAGSettings(rerun_triggers=[]) + workflow = Workflow( + config_settings, + resource_settings, + workflow_settings, + storage_settings, + dag_settings, + storage_provider_settings=dict(), + ) workflow.include(snakefile) if configfiles: @@ -292,7 +460,7 @@ def mock_snakemake(rulename, root_dir=None, configfiles=[], **wildcards): def make_accessable(*ios): for io in ios: - for i in range(len(io)): + for i, _ in enumerate(io): io[i] = os.path.abspath(io[i]) make_accessable(job.input, job.output, job.log) @@ -339,17 +507,202 @@ def generate_periodic_profiles(dt_index, nodes, weekly_profile, localize=None): return week_df -def parse(l): - return yaml.safe_load(l[0]) if len(l) == 1 else {l.pop(0): parse(l)} +def parse(infix): + """ + Recursively parse a chained wildcard expression into a dictionary or a YAML + object. + + Parameters + ---------- + list_to_parse : list + The list to parse. + + Returns + ------- + dict or YAML object + The parsed list. + """ + if len(infix) == 1: + return yaml.safe_load(infix[0]) + else: + return {infix.pop(0): parse(infix)} -def update_config_with_sector_opts(config, sector_opts): - from snakemake.utils import update_config +def update_config_from_wildcards(config, w, inplace=True): + """ + Parses configuration settings from wildcards and updates the config. 
+ """ - for o in sector_opts.split("-"): - if o.startswith("CF+"): - l = o.split("+")[1:] - update_config(config, parse(l)) + if not inplace: + config = copy.deepcopy(config) + + if w.get("opts"): + opts = w.opts.split("-") + + if nhours := get_opt(opts, r"^\d+(h|seg)$"): + config["clustering"]["temporal"]["resolution_elec"] = nhours + + co2l_enable, co2l_value = find_opt(opts, "Co2L") + if co2l_enable: + config["electricity"]["co2limit_enable"] = True + if co2l_value is not None: + config["electricity"]["co2limit"] = ( + co2l_value * config["electricity"]["co2base"] + ) + + gasl_enable, gasl_value = find_opt(opts, "CH4L") + if gasl_enable: + config["electricity"]["gaslimit_enable"] = True + if gasl_value is not None: + config["electricity"]["gaslimit"] = gasl_value * 1e6 + + if "Ept" in opts: + config["costs"]["emission_prices"]["co2_monthly_prices"] = True + + ep_enable, ep_value = find_opt(opts, "Ep") + if ep_enable: + config["costs"]["emission_prices"]["enable"] = True + if ep_value is not None: + config["costs"]["emission_prices"]["co2"] = ep_value + + if "ATK" in opts: + config["autarky"]["enable"] = True + if "ATKc" in opts: + config["autarky"]["by_country"] = True + + attr_lookup = { + "p": "p_nom_max", + "e": "e_nom_max", + "c": "capital_cost", + "m": "marginal_cost", + } + for o in opts: + flags = ["+e", "+p", "+m", "+c"] + if all(flag not in o for flag in flags): + continue + carrier, attr_factor = o.split("+") + attr = attr_lookup[attr_factor[0]] + factor = float(attr_factor[1:]) + if not isinstance(config["adjustments"]["electricity"], dict): + config["adjustments"]["electricity"] = dict() + update_config( + config["adjustments"]["electricity"], {attr: {carrier: factor}} + ) + + if w.get("sector_opts"): + opts = w.sector_opts.split("-") + + if "T" in opts: + config["sector"]["transport"] = True + + if "H" in opts: + config["sector"]["heating"] = True + + if "B" in opts: + config["sector"]["biomass"] = True + + if "I" in opts: + config["sector"]["industry"] = True + + if "A" in opts: + config["sector"]["agriculture"] = True + + if "CCL" in opts: + config["solving"]["constraints"]["CCL"] = True + + eq_value = get_opt(opts, r"^EQ+\d*\.?\d+(c|)") + for o in opts: + if eq_value is not None: + config["solving"]["constraints"]["EQ"] = eq_value + elif "EQ" in o: + config["solving"]["constraints"]["EQ"] = True + break + + if "BAU" in opts: + config["solving"]["constraints"]["BAU"] = True + + if "SAFE" in opts: + config["solving"]["constraints"]["SAFE"] = True + + if nhours := get_opt(opts, r"^\d+(h|sn|seg)$"): + config["clustering"]["temporal"]["resolution_sector"] = nhours + + if "decentral" in opts: + config["sector"]["electricity_transmission_grid"] = False + + if "noH2network" in opts: + config["sector"]["H2_network"] = False + + if "nowasteheat" in opts: + config["sector"]["use_fischer_tropsch_waste_heat"] = False + config["sector"]["use_methanolisation_waste_heat"] = False + config["sector"]["use_haber_bosch_waste_heat"] = False + config["sector"]["use_methanation_waste_heat"] = False + config["sector"]["use_fuel_cell_waste_heat"] = False + config["sector"]["use_electrolysis_waste_heat"] = False + + if "nodistrict" in opts: + config["sector"]["district_heating"]["progress"] = 0.0 + + dg_enable, dg_factor = find_opt(opts, "dist") + if dg_enable: + config["sector"]["electricity_distribution_grid"] = True + if dg_factor is not None: + config["sector"][ + "electricity_distribution_grid_cost_factor" + ] = dg_factor + + if "biomasstransport" in opts: + 
config["sector"]["biomass_transport"] = True + + _, maxext = find_opt(opts, "linemaxext") + if maxext is not None: + config["lines"]["max_extension"] = maxext * 1e3 + config["links"]["max_extension"] = maxext * 1e3 + + _, co2l_value = find_opt(opts, "Co2L") + if co2l_value is not None: + config["co2_budget"] = float(co2l_value) + + if co2_distribution := get_opt(opts, r"^(cb)\d+(\.\d+)?(ex|be)$"): + config["co2_budget"] = co2_distribution + + if co2_budget := get_opt(opts, r"^(cb)\d+(\.\d+)?$"): + config["co2_budget"] = float(co2_budget[2:]) + + attr_lookup = { + "p": "p_nom_max", + "e": "e_nom_max", + "c": "capital_cost", + "m": "marginal_cost", + } + for o in opts: + flags = ["+e", "+p", "+m", "+c"] + if all(flag not in o for flag in flags): + continue + carrier, attr_factor = o.split("+") + attr = attr_lookup[attr_factor[0]] + factor = float(attr_factor[1:]) + if not isinstance(config["adjustments"]["sector"], dict): + config["adjustments"]["sector"] = dict() + update_config(config["adjustments"]["sector"], {attr: {carrier: factor}}) + + _, sdr_value = find_opt(opts, "sdr") + if sdr_value is not None: + config["costs"]["social_discountrate"] = sdr_value / 100 + + _, seq_limit = find_opt(opts, "seq") + if seq_limit is not None: + config["sector"]["co2_sequestration_potential"] = seq_limit + + # any config option can be represented in wildcard + for o in opts: + if o.startswith("CF+"): + infix = o.split("+")[1:] + update_config(config, parse(infix)) + + if not inplace: + return config def get_checksum_from_zenodo(file_url): @@ -410,3 +763,15 @@ def validate_checksum(file_path, zenodo_url=None, checksum=None): assert ( calculated_checksum == checksum ), "Checksum is invalid. This may be due to an incomplete download. Delete the file and re-execute the rule." + + +def get_snapshots(snapshots, drop_leap_day=False, freq="h", **kwargs): + """ + Returns pandas DateTimeIndex potentially without leap days. + """ + + time = pd.date_range(freq=freq, **snapshots, **kwargs) + if drop_leap_day and time.is_leap_year.any(): + time = time[~((time.month == 2) & (time.day == 29))] + + return time diff --git a/scripts/add_brownfield.py b/scripts/add_brownfield.py index cb1f51c8..16b4e087 100644 --- a/scripts/add_brownfield.py +++ b/scripts/add_brownfield.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -8,16 +8,21 @@ Prepares brownfield data from previous planning horizon. 
import logging -logger = logging.getLogger(__name__) - -import pandas as pd - -idx = pd.IndexSlice - import numpy as np +import pandas as pd import pypsa -from _helpers import update_config_with_sector_opts +import xarray as xr +from _helpers import ( + configure_logging, + get_snapshots, + set_scenario_config, + update_config_from_wildcards, +) from add_existing_baseyear import add_build_year_to_new_assets +from pypsa.clustering.spatial import normed_or_uniform + +logger = logging.getLogger(__name__) +idx = pd.IndexSlice def add_brownfield(n, n_p, year): @@ -35,8 +40,8 @@ def add_brownfield(n, n_p, year): # CO2 or global EU values since these are already in n n_p.mremove(c.name, c.df.index[c.df.lifetime == np.inf]) - # remove assets whose build_year + lifetime < year - n_p.mremove(c.name, c.df.index[c.df.build_year + c.df.lifetime < year]) + # remove assets whose build_year + lifetime <= year + n_p.mremove(c.name, c.df.index[c.df.build_year + c.df.lifetime <= year]) # remove assets if their optimized nominal capacity is lower than a threshold # since CHP heat Link is proportional to CHP electric Link, make sure threshold is compatible @@ -120,29 +125,108 @@ def add_brownfield(n, n_p, year): n.links.loc[new_pipes, "p_nom_min"] = 0.0 -def disable_grid_expansion_if_LV_limit_hit(n): - if not "lv_limit" in n.global_constraints.index: - return +def disable_grid_expansion_if_limit_hit(n): + """ + Check if transmission expansion limit is already reached; then turn off. - total_expansion = ( - n.lines.eval("s_nom_min * length").sum() - + n.links.query("carrier == 'DC'").eval("p_nom_min * length").sum() - ).sum() + In particular, this function checks if the total transmission + capital cost or volume implied by s_nom_min and p_nom_min are + numerically close to the respective global limit set in + n.global_constraints. If so, the nominal capacities are set to the + minimum and extendable is turned off; the corresponding global + constraint is then dropped. 
+ """ + cols = {"cost": "capital_cost", "volume": "length"} + for limit_type in ["cost", "volume"]: + glcs = n.global_constraints.query( + f"type == 'transmission_expansion_{limit_type}_limit'" + ) - lv_limit = n.global_constraints.at["lv_limit", "constant"] + for name, glc in glcs.iterrows(): + total_expansion = ( + ( + n.lines.query("s_nom_extendable") + .eval(f"s_nom_min * {cols[limit_type]}") + .sum() + ) + + ( + n.links.query("carrier == 'DC' and p_nom_extendable") + .eval(f"p_nom_min * {cols[limit_type]}") + .sum() + ) + ).sum() - # allow small numerical differences - if lv_limit - total_expansion < 1: - logger.info(f"LV is already reached, disabling expansion and LV limit") - extendable_acs = n.lines.query("s_nom_extendable").index - n.lines.loc[extendable_acs, "s_nom_extendable"] = False - n.lines.loc[extendable_acs, "s_nom"] = n.lines.loc[extendable_acs, "s_nom_min"] + # Allow small numerical differences + if np.abs(glc.constant - total_expansion) / glc.constant < 1e-6: + logger.info( + f"Transmission expansion {limit_type} is already reached, disabling expansion and limit" + ) + extendable_acs = n.lines.query("s_nom_extendable").index + n.lines.loc[extendable_acs, "s_nom_extendable"] = False + n.lines.loc[extendable_acs, "s_nom"] = n.lines.loc[ + extendable_acs, "s_nom_min" + ] - extendable_dcs = n.links.query("carrier == 'DC' and p_nom_extendable").index - n.links.loc[extendable_dcs, "p_nom_extendable"] = False - n.links.loc[extendable_dcs, "p_nom"] = n.links.loc[extendable_dcs, "p_nom_min"] + extendable_dcs = n.links.query( + "carrier == 'DC' and p_nom_extendable" + ).index + n.links.loc[extendable_dcs, "p_nom_extendable"] = False + n.links.loc[extendable_dcs, "p_nom"] = n.links.loc[ + extendable_dcs, "p_nom_min" + ] - n.global_constraints.drop("lv_limit", inplace=True) + n.global_constraints.drop(name, inplace=True) + + +def adjust_renewable_profiles(n, input_profiles, params, year): + """ + Adjusts renewable profiles according to the renewable technology specified, + using the latest year below or equal to the selected year. 
+ """ + + # spatial clustering + cluster_busmap = pd.read_csv(snakemake.input.cluster_busmap, index_col=0).squeeze() + simplify_busmap = pd.read_csv( + snakemake.input.simplify_busmap, index_col=0 + ).squeeze() + clustermaps = simplify_busmap.map(cluster_busmap) + clustermaps.index = clustermaps.index.astype(str) + + # temporal clustering + dr = get_snapshots(params["snapshots"], params["drop_leap_day"]) + snapshotmaps = ( + pd.Series(dr, index=dr).where(lambda x: x.isin(n.snapshots), pd.NA).ffill() + ) + + for carrier in params["carriers"]: + if carrier == "hydro": + continue + with xr.open_dataset(getattr(input_profiles, "profile_" + carrier)) as ds: + if ds.indexes["bus"].empty or "year" not in ds.indexes: + continue + + closest_year = max( + (y for y in ds.year.values if y <= year), default=min(ds.year.values) + ) + + p_max_pu = ( + ds["profile"] + .sel(year=closest_year) + .transpose("time", "bus") + .to_pandas() + ) + + # spatial clustering + weight = ds["weight"].sel(year=closest_year).to_pandas() + weight = weight.groupby(clustermaps).transform(normed_or_uniform) + p_max_pu = (p_max_pu * weight).T.groupby(clustermaps).sum().T + p_max_pu.columns = p_max_pu.columns + f" {carrier}" + + # temporal_clustering + p_max_pu = p_max_pu.groupby(snapshotmaps).mean() + + # replace renewable time series + n.generators_t.p_max_pu.loc[:, p_max_pu.columns] = p_max_pu if __name__ == "__main__": @@ -155,13 +239,14 @@ if __name__ == "__main__": clusters="37", opts="", ll="v1.0", - sector_opts="168H-T-H-B-I-solar+p3-dist1", + sector_opts="168H-T-H-B-I-dist1", planning_horizons=2030, ) - logging.basicConfig(level=snakemake.config["logging"]["level"]) + configure_logging(snakemake) + set_scenario_config(snakemake) - update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts) + update_config_from_wildcards(snakemake.config, snakemake.wildcards) logger.info(f"Preparing brownfield from the file {snakemake.input.network_p}") @@ -169,13 +254,15 @@ if __name__ == "__main__": n = pypsa.Network(snakemake.input.network) + adjust_renewable_profiles(n, snakemake.input, snakemake.params, year) + add_build_year_to_new_assets(n, year) n_p = pypsa.Network(snakemake.input.network_p) add_brownfield(n, n_p, year) - disable_grid_expansion_if_LV_limit_hit(n) + disable_grid_expansion_if_limit_hit(n) n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards))) n.export_to_netcdf(snakemake.output[0]) diff --git a/scripts/add_electricity.py b/scripts/add_electricity.py index e626f456..7e60203f 100755 --- a/scripts/add_electricity.py +++ b/scripts/add_electricity.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -52,7 +52,7 @@ Inputs :scale: 34 % - ``data/geth2015_hydro_capacities.csv``: alternative to capacities above; not currently used! -- ``resources/load.csv`` Hourly per-country load profiles. +- ``resources/electricity_demand.csv`` Hourly per-country electricity demand profiles. 
- ``resources/regions_onshore.geojson``: confer :ref:`busregions` - ``resources/nuts3_shapes.geojson``: confer :ref:`shapes` - ``resources/powerplants.csv``: confer :ref:`powerplants` @@ -93,7 +93,12 @@ import powerplantmatching as pm import pypsa import scipy.sparse as sparse import xarray as xr -from _helpers import configure_logging, update_p_nom_max +from _helpers import ( + configure_logging, + get_snapshots, + set_scenario_config, + update_p_nom_max, +) from powerplantmatching.export import map_country_bus from shapely.prepared import prep @@ -178,6 +183,16 @@ def sanitize_carriers(n, config): n.carriers["color"] = n.carriers.color.where(n.carriers.color != "", colors) +def sanitize_locations(n): + if "location" in n.buses.columns: + n.buses["x"] = n.buses.x.where(n.buses.x != 0, n.buses.location.map(n.buses.x)) + n.buses["y"] = n.buses.y.where(n.buses.y != 0, n.buses.location.map(n.buses.y)) + n.buses["country"] = n.buses.country.where( + n.buses.country.ne("") & n.buses.country.notnull(), + n.buses.location.map(n.buses.country), + ) + + def add_co2_emissions(n, costs, carriers): """ Add CO2 emissions to the network's carriers attribute. @@ -288,16 +303,16 @@ def attach_load(n, regions, load, nuts3_shapes, ua_md_gdp, countries, scaling=1. ua_md_gdp = pd.read_csv(ua_md_gdp, dtype={"name": "str"}).set_index("name") - logger.info(f"Load data scaled with scalling factor {scaling}.") + logger.info(f"Load data scaled by factor {scaling}.") opsd_load *= scaling nuts3 = gpd.read_file(nuts3_shapes).set_index("index") def upsample(cntry, group): - l = opsd_load[cntry] + load = opsd_load[cntry] if len(group) == 1: - return pd.DataFrame({group.index[0]: l}) + return pd.DataFrame({group.index[0]: load}) nuts3_cntry = nuts3.loc[nuts3.country == cntry] transfer = shapes_to_shapes(group, nuts3_cntry.geometry).T.tocsr() gdp_n = pd.Series( @@ -314,8 +329,8 @@ def attach_load(n, regions, load, nuts3_shapes, ua_md_gdp, countries, scaling=1. # overwrite factor because nuts3 provides no data for UA+MD factors = normed(ua_md_gdp.loc[group.index, "GDP_PPP"].squeeze()) return pd.DataFrame( - factors.values * l.values[:, np.newaxis], - index=l.index, + factors.values * load.values[:, np.newaxis], + index=load.index, columns=factors.index, ) @@ -327,7 +342,9 @@ def attach_load(n, regions, load, nuts3_shapes, ua_md_gdp, countries, scaling=1. 
axis=1, ) - n.madd("Load", substation_lv_i, bus=substation_lv_i, p_set=load) + n.madd( + "Load", substation_lv_i, bus=substation_lv_i, p_set=load + ) # carrier="electricity" def update_transmission_costs(n, costs, length_factor=1.0): @@ -374,6 +391,10 @@ def attach_wind_and_solar( if ds.indexes["bus"].empty: continue + # if-statement for compatibility with old profiles + if "year" in ds.indexes: + ds = ds.sel(year=ds.year.min(), drop=True) + supcar = car.split("-", 2)[0] if supcar == "offwind": underwater_fraction = ds["underwater_fraction"].to_pandas() @@ -504,8 +525,8 @@ def attach_conventional_generators( snakemake.input[f"conventional_{carrier}_{attr}"], index_col=0 ).iloc[:, 0] bus_values = n.buses.country.map(values) - n.generators[attr].update( - n.generators.loc[idx].bus.map(bus_values).dropna() + n.generators.update( + {attr: n.generators.loc[idx].bus.map(bus_values).dropna()} ) else: # Single value affecting all generators of technology k indiscriminantely of country @@ -571,7 +592,7 @@ def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **par # fill missing max hours to params value and # assume no natural inflow due to lack of data max_hours = params.get("PHS_max_hours", 6) - phs = phs.replace({"max_hours": {0: max_hours}}) + phs = phs.replace({"max_hours": {0: max_hours, np.nan: max_hours}}) n.madd( "StorageUnit", phs.index, @@ -622,7 +643,7 @@ def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **par hydro.max_hours > 0, hydro.country.map(max_hours_country) ).fillna(6) - if flatten_dispatch := params.get("flatten_dispatch", False): + if params.get("flatten_dispatch", False): buffer = params.get("flatten_dispatch_buffer", 0.2) average_capacity_factor = inflow_t[hydro.index].mean() / hydro["p_nom"] p_max_pu = (average_capacity_factor + buffer).clip(upper=1) @@ -647,77 +668,6 @@ def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **par ) -def attach_extendable_generators(n, costs, ppl, carriers): - logger.warning( - "The function `attach_extendable_generators` is deprecated in v0.5.0." 
- ) - add_missing_carriers(n, carriers) - add_co2_emissions(n, costs, carriers) - - for tech in carriers: - if tech.startswith("OCGT"): - ocgt = ( - ppl.query("carrier in ['OCGT', 'CCGT']") - .groupby("bus", as_index=False) - .first() - ) - n.madd( - "Generator", - ocgt.index, - suffix=" OCGT", - bus=ocgt["bus"], - carrier=tech, - p_nom_extendable=True, - p_nom=0.0, - capital_cost=costs.at["OCGT", "capital_cost"], - marginal_cost=costs.at["OCGT", "marginal_cost"], - efficiency=costs.at["OCGT", "efficiency"], - ) - - elif tech.startswith("CCGT"): - ccgt = ( - ppl.query("carrier in ['OCGT', 'CCGT']") - .groupby("bus", as_index=False) - .first() - ) - n.madd( - "Generator", - ccgt.index, - suffix=" CCGT", - bus=ccgt["bus"], - carrier=tech, - p_nom_extendable=True, - p_nom=0.0, - capital_cost=costs.at["CCGT", "capital_cost"], - marginal_cost=costs.at["CCGT", "marginal_cost"], - efficiency=costs.at["CCGT", "efficiency"], - ) - - elif tech.startswith("nuclear"): - nuclear = ( - ppl.query("carrier == 'nuclear'").groupby("bus", as_index=False).first() - ) - n.madd( - "Generator", - nuclear.index, - suffix=" nuclear", - bus=nuclear["bus"], - carrier=tech, - p_nom_extendable=True, - p_nom=0.0, - capital_cost=costs.at["nuclear", "capital_cost"], - marginal_cost=costs.at["nuclear", "marginal_cost"], - efficiency=costs.at["nuclear", "efficiency"], - ) - - else: - raise NotImplementedError( - "Adding extendable generators for carrier " - "'{tech}' is not implemented, yet. " - "Only OCGT, CCGT and nuclear are allowed at the moment." - ) - - def attach_OPSD_renewables(n: pypsa.Network, tech_map: Dict[str, List[str]]) -> None: """ Attach renewable capacities from the OPSD dataset to the network. @@ -749,8 +699,8 @@ def attach_OPSD_renewables(n: pypsa.Network, tech_map: Dict[str, List[str]]) -> caps = caps.groupby(["bus"]).Capacity.sum() caps = caps / gens_per_bus.reindex(caps.index, fill_value=1) - n.generators.p_nom.update(gens.bus.map(caps).dropna()) - n.generators.p_nom_min.update(gens.bus.map(caps).dropna()) + n.generators.update({"p_nom": gens.bus.map(caps).dropna()}) + n.generators.update({"p_nom_min": gens.bus.map(caps).dropna()}) def estimate_renewable_capacities( @@ -846,10 +796,15 @@ if __name__ == "__main__": snakemake = mock_snakemake("add_electricity") configure_logging(snakemake) + set_scenario_config(snakemake) params = snakemake.params n = pypsa.Network(snakemake.input.base_network) + + time = get_snapshots(snakemake.params.snapshots, snakemake.params.drop_leap_day) + n.set_snapshots(time) + Nyears = n.snapshot_weightings.objective.sum() / 8760.0 costs = load_costs( diff --git a/scripts/add_existing_baseyear.py b/scripts/add_existing_baseyear.py index e7894324..77000ade 100644 --- a/scripts/add_existing_baseyear.py +++ b/scripts/add_existing_baseyear.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -8,25 +8,25 @@ horizon. 
""" import logging - -logger = logging.getLogger(__name__) - -import pandas as pd - -idx = pd.IndexSlice - from types import SimpleNamespace import country_converter as coco import numpy as np +import pandas as pd +import powerplantmatching as pm import pypsa import xarray as xr -from _helpers import update_config_with_sector_opts +from _helpers import ( + configure_logging, + set_scenario_config, + update_config_from_wildcards, +) from add_electricity import sanitize_carriers from prepare_sector_network import cluster_heat_buses, define_spatial, prepare_costs +logger = logging.getLogger(__name__) cc = coco.CountryConverter() - +idx = pd.IndexSlice spatial = SimpleNamespace() @@ -53,22 +53,30 @@ def add_build_year_to_new_assets(n, baseyear): "series" ) & n.component_attrs[c.name].status.str.contains("Input") for attr in n.component_attrs[c.name].index[selection]: - c.pnl[attr].rename(columns=rename, inplace=True) + c.pnl[attr] = c.pnl[attr].rename(columns=rename) -def add_existing_renewables(df_agg): +def add_existing_renewables(df_agg, costs): """ Append existing renewables to the df_agg pd.DataFrame with the conventional power plants. """ - carriers = {"solar": "solar", "onwind": "onwind", "offwind": "offwind-ac"} + tech_map = {"solar": "PV", "onwind": "Onshore", "offwind": "Offshore"} - for tech in ["solar", "onwind", "offwind"]: - carrier = carriers[tech] + countries = snakemake.config["countries"] + irena = pm.data.IRENASTAT().powerplant.convert_country_to_alpha2() + irena = irena.query("Country in @countries") + irena = irena.groupby(["Technology", "Country", "Year"]).Capacity.sum() - df = pd.read_csv(snakemake.input[f"existing_{tech}"], index_col=0).fillna(0.0) + irena = irena.unstack().reset_index() + + for carrier, tech in tech_map.items(): + df = ( + irena[irena.Technology.str.contains(tech)] + .drop(columns=["Technology"]) + .set_index("Country") + ) df.columns = df.columns.astype(int) - df.index = cc.convert(df.index, to="iso2") # calculate yearly differences df.insert(loc=0, value=0.0, column="1999") @@ -98,12 +106,16 @@ def add_existing_renewables(df_agg): for year in nodal_df.columns: for node in nodal_df.index: - name = f"{node}-{tech}-{year}" + name = f"{node}-{carrier}-{year}" capacity = nodal_df.loc[node, year] if capacity > 0.0: - df_agg.at[name, "Fueltype"] = tech + df_agg.at[name, "Fueltype"] = carrier df_agg.at[name, "Capacity"] = capacity df_agg.at[name, "DateIn"] = year + df_agg.at[name, "lifetime"] = costs.at[carrier, "lifetime"] + df_agg.at[name, "DateOut"] = ( + year + costs.at[carrier, "lifetime"] - 1 + ) df_agg.at[name, "cluster_bus"] = node @@ -168,14 +180,6 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas ) df_agg.loc[biomass_i, "DateOut"] = df_agg.loc[biomass_i, "DateOut"].fillna(dateout) - # drop assets which are already phased out / decommissioned - phased_out = df_agg[df_agg["DateOut"] < baseyear].index - df_agg.drop(phased_out, inplace=True) - - # calculate remaining lifetime before phase-out (+1 because assuming - # phase out date at the end of the year) - df_agg["lifetime"] = df_agg.DateOut - df_agg.DateIn + 1 - # assign clustered bus busmap_s = pd.read_csv(snakemake.input.busmap_s, index_col=0).squeeze() busmap = pd.read_csv(snakemake.input.busmap, index_col=0).squeeze() @@ -190,12 +194,31 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas df_agg["cluster_bus"] = df_agg.bus.map(clustermaps) # include renewables in df_agg - add_existing_renewables(df_agg) + 
add_existing_renewables(df_agg, costs) + + # drop assets which are already phased out / decommissioned + phased_out = df_agg[df_agg["DateOut"] < baseyear].index + df_agg.drop(phased_out, inplace=True) + + older_assets = (df_agg.DateIn < min(grouping_years)).sum() + if older_assets: + logger.warning( + f"There are {older_assets} assets with build year " + f"before first power grouping year {min(grouping_years)}. " + "These assets are dropped and not considered." + "Consider to redefine the grouping years to keep them." + ) + to_drop = df_agg[df_agg.DateIn < min(grouping_years)].index + df_agg.drop(to_drop, inplace=True) df_agg["grouping_year"] = np.take( - grouping_years, np.digitize(df_agg.DateIn, grouping_years, right=True) + grouping_years[::-1], np.digitize(df_agg.DateIn, grouping_years[::-1]) ) + # calculate (adjusted) remaining lifetime before phase-out (+1 because assuming + # phase out date at the end of the year) + df_agg["lifetime"] = df_agg.DateOut - df_agg["grouping_year"] + 1 + df = df_agg.pivot_table( index=["grouping_year", "Fueltype"], columns="cluster_bus", @@ -258,13 +281,21 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas # for offshore the splitting only includes coastal regions inv_ind = [ - i for i in inv_ind if (i + name_suffix) in n.generators.index + i + for i in inv_ind + if (i + name_suffix) + in n.generators.index.str.replace( + str(baseyear), str(grouping_year) + ) ] p_max_pu = n.generators_t.p_max_pu[ [i + name_suffix for i in inv_ind] ] - p_max_pu.columns = [i + name_suffix for i in inv_ind] + p_max_pu.columns = [ + i + name_suffix.replace(str(grouping_year), str(baseyear)) + for i in inv_ind + ] n.madd( "Generator", @@ -290,7 +321,7 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas n.madd( "Generator", new_capacity.index, - suffix=" " + name_suffix, + suffix=name_suffix, bus=new_capacity.index, carrier=generator, p_nom=new_capacity, @@ -353,13 +384,20 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas ) else: key = "central solid biomass CHP" + central_heat = n.buses.query( + "carrier == 'urban central heat'" + ).location.unique() + heat_buses = new_capacity.index.map( + lambda i: i + " urban central heat" if i in central_heat else "" + ) + n.madd( "Link", new_capacity.index, suffix=name_suffix, bus0=spatial.biomass.df.loc[new_capacity.index]["nodes"].values, bus1=new_capacity.index, - bus2=new_capacity.index + " urban central heat", + bus2=heat_buses, carrier=generator, p_nom=new_capacity / costs.at[key, "efficiency"], capital_cost=costs.at[key, "fixed"] @@ -407,104 +445,23 @@ def add_heating_capacities_installed_before_baseyear( """ logger.debug(f"Adding heating capacities installed before {baseyear}") - # Add existing heating capacities, data comes from the study - # "Mapping and analyses of the current and future (2020 - 2030) - # heating/cooling fuel deployment (fossil/renewables) " - # https://ec.europa.eu/energy/studies/mapping-and-analyses-current-and-future-2020-2030-heatingcooling-fuel-deployment_en?redir=1 - # file: "WP2_DataAnnex_1_BuildingTechs_ForPublication_201603.xls" -> "existing_heating_raw.csv". 
- # TODO start from original file - - # retrieve existing heating capacities - techs = [ - "gas boiler", - "oil boiler", - "resistive heater", - "air heat pump", - "ground heat pump", - ] - df = pd.read_csv(snakemake.input.existing_heating, index_col=0, header=0) - - # data for Albania, Montenegro and Macedonia not included in database - df.loc["Albania"] = np.nan - df.loc["Montenegro"] = np.nan - df.loc["Macedonia"] = np.nan - - df.fillna(0.0, inplace=True) - - # convert GW to MW - df *= 1e3 - - df.index = cc.convert(df.index, to="iso2") - - # coal and oil boilers are assimilated to oil boilers - df["oil boiler"] = df["oil boiler"] + df["coal boiler"] - df.drop(["coal boiler"], axis=1, inplace=True) - - # distribute technologies to nodes by population - pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) - - nodal_df = df.loc[pop_layout.ct] - nodal_df.index = pop_layout.index - nodal_df = nodal_df.multiply(pop_layout.fraction, axis=0) - - # split existing capacities between residential and services - # proportional to energy demand - p_set_sum = n.loads_t.p_set.sum() - ratio_residential = pd.Series( - [ - ( - p_set_sum[f"{node} residential rural heat"] - / ( - p_set_sum[f"{node} residential rural heat"] - + p_set_sum[f"{node} services rural heat"] - ) - ) - # if rural heating demand for one of the nodes doesn't exist, - # then columns were dropped before and heating demand share should be 0.0 - if all( - f"{node} {service} rural heat" in p_set_sum.index - for service in ["residential", "services"] - ) - else 0.0 - for node in nodal_df.index - ], - index=nodal_df.index, + existing_heating = pd.read_csv( + snakemake.input.existing_heating_distribution, header=[0, 1], index_col=0 ) - for tech in techs: - nodal_df["residential " + tech] = nodal_df[tech] * ratio_residential - nodal_df["services " + tech] = nodal_df[tech] * (1 - ratio_residential) + techs = existing_heating.columns.get_level_values(1).unique() - names = [ - "residential rural", - "services rural", - "residential urban decentral", - "services urban decentral", - "urban central", - ] - - nodes = {} - p_nom = {} - for name in names: + for name in existing_heating.columns.get_level_values(0).unique(): name_type = "central" if name == "urban central" else "decentral" - nodes[name] = pd.Index( - [ - n.buses.at[index, "location"] - for index in n.buses.index[ - n.buses.index.str.contains(name) - & n.buses.index.str.contains("heat") - ] - ] - ) - heat_pump_type = "air" if "urban" in name else "ground" - heat_type = "residential" if "residential" in name else "services" - if name == "urban central": - p_nom[name] = nodal_df["air heat pump"][nodes[name]] + nodes = pd.Index(n.buses.location[n.buses.index.str.contains(f"{name} heat")]) + + if (name_type != "central") and options["electricity_distribution_grid"]: + nodes_elec = nodes + " low voltage" else: - p_nom[name] = nodal_df[f"{heat_type} {heat_pump_type} heat pump"][ - nodes[name] - ] + nodes_elec = nodes + + heat_pump_type = "air" if "urban" in name else "ground" # Add heat pumps costs_name = f"decentral {heat_pump_type}-sourced heat pump" @@ -512,40 +469,54 @@ def add_heating_capacities_installed_before_baseyear( cop = {"air": ashp_cop, "ground": gshp_cop} if time_dep_hp_cop: - efficiency = cop[heat_pump_type][nodes[name]] + efficiency = cop[heat_pump_type][nodes] else: efficiency = costs.at[costs_name, "efficiency"] - for i, grouping_year in enumerate(grouping_years): - if int(grouping_year) + default_lifetime <= int(baseyear): - continue + 
valid_grouping_years = pd.Series( + [ + int(grouping_year) + for grouping_year in grouping_years + if int(grouping_year) + default_lifetime > int(baseyear) + and int(grouping_year) < int(baseyear) + ] + ) - # installation is assumed to be linear for the past 25 years (default lifetime) - ratio = (int(grouping_year) - int(grouping_years[i - 1])) / default_lifetime + # get number of years of each interval + _years = ( + valid_grouping_years.diff() + .shift(-1) + .fillna(baseyear - valid_grouping_years.iloc[-1]) + ) + # Installation is assumed to be linear for the past + ratios = _years / _years.sum() + + for ratio, grouping_year in zip(ratios, valid_grouping_years): n.madd( "Link", - nodes[name], + nodes, suffix=f" {name} {heat_pump_type} heat pump-{grouping_year}", - bus0=nodes[name], - bus1=nodes[name] + " " + name + " heat", + bus0=nodes_elec, + bus1=nodes + " " + name + " heat", carrier=f"{name} {heat_pump_type} heat pump", efficiency=efficiency, capital_cost=costs.at[costs_name, "efficiency"] * costs.at[costs_name, "fixed"], - p_nom=p_nom[name] * ratio / costs.at[costs_name, "efficiency"], + p_nom=existing_heating.loc[nodes, (name, f"{heat_pump_type} heat pump")] + * ratio + / costs.at[costs_name, "efficiency"], build_year=int(grouping_year), lifetime=costs.at[costs_name, "lifetime"], ) # add resistive heater, gas boilers and oil boilers - # (50% capacities to rural buses, 50% to urban buses) n.madd( "Link", - nodes[name], + nodes, suffix=f" {name} resistive heater-{grouping_year}", - bus0=nodes[name], - bus1=nodes[name] + " " + name + " heat", + bus0=nodes_elec, + bus1=nodes + " " + name + " heat", carrier=name + " resistive heater", efficiency=costs.at[f"{name_type} resistive heater", "efficiency"], capital_cost=( @@ -553,21 +524,20 @@ def add_heating_capacities_installed_before_baseyear( * costs.at[f"{name_type} resistive heater", "fixed"] ), p_nom=( - 0.5 - * nodal_df[f"{heat_type} resistive heater"][nodes[name]] + existing_heating.loc[nodes, (name, "resistive heater")] * ratio / costs.at[f"{name_type} resistive heater", "efficiency"] ), build_year=int(grouping_year), - lifetime=costs.at[costs_name, "lifetime"], + lifetime=costs.at[f"{name_type} resistive heater", "lifetime"], ) n.madd( "Link", - nodes[name], + nodes, suffix=f" {name} gas boiler-{grouping_year}", - bus0=spatial.gas.nodes, - bus1=nodes[name] + " " + name + " heat", + bus0="EU gas" if "EU gas" in spatial.gas.nodes else nodes + " gas", + bus1=nodes + " " + name + " heat", bus2="co2 atmosphere", carrier=name + " gas boiler", efficiency=costs.at[f"{name_type} gas boiler", "efficiency"], @@ -577,8 +547,7 @@ def add_heating_capacities_installed_before_baseyear( * costs.at[f"{name_type} gas boiler", "fixed"] ), p_nom=( - 0.5 - * nodal_df[f"{heat_type} gas boiler"][nodes[name]] + existing_heating.loc[nodes, (name, "gas boiler")] * ratio / costs.at[f"{name_type} gas boiler", "efficiency"] ), @@ -588,20 +557,21 @@ def add_heating_capacities_installed_before_baseyear( n.madd( "Link", - nodes[name], + nodes, suffix=f" {name} oil boiler-{grouping_year}", bus0=spatial.oil.nodes, - bus1=nodes[name] + " " + name + " heat", + bus1=nodes + " " + name + " heat", bus2="co2 atmosphere", carrier=name + " oil boiler", efficiency=costs.at["decentral oil boiler", "efficiency"], efficiency2=costs.at["oil", "CO2 intensity"], capital_cost=costs.at["decentral oil boiler", "efficiency"] * costs.at["decentral oil boiler", "fixed"], - p_nom=0.5 - * nodal_df[f"{heat_type} oil boiler"][nodes[name]] - * ratio - / costs.at["decentral oil boiler", 
"efficiency"], + p_nom=( + existing_heating.loc[nodes, (name, "oil boiler")] + * ratio + / costs.at["decentral oil boiler", "efficiency"] + ), build_year=int(grouping_year), lifetime=costs.at[f"{name_type} gas boiler", "lifetime"], ) @@ -627,32 +597,29 @@ def add_heating_capacities_installed_before_baseyear( ], ) - # drop assets which are at the end of their lifetime - links_i = n.links[(n.links.build_year + n.links.lifetime <= baseyear)].index - n.mremove("Link", links_i) - +# %% if __name__ == "__main__": if "snakemake" not in globals(): from _helpers import mock_snakemake snakemake = mock_snakemake( "add_existing_baseyear", - # configfiles="config/test/config.myopic.yaml", + configfiles="config/test/config.myopic.yaml", simpl="", clusters="37", ll="v1.0", opts="", - sector_opts="1p7-4380H-T-H-B-I-A-solar+p3-dist1", + sector_opts="8760-T-H-B-I-A-dist1", planning_horizons=2020, ) - logging.basicConfig(level=snakemake.config["logging"]["level"]) + configure_logging(snakemake) + set_scenario_config(snakemake) - update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts) + update_config_from_wildcards(snakemake.config, snakemake.wildcards) options = snakemake.params.sector - opts = snakemake.wildcards.sector_opts.split("-") baseyear = snakemake.params.baseyear @@ -675,7 +642,7 @@ if __name__ == "__main__": n, grouping_years_power, costs, baseyear ) - if "H" in opts: + if options["heating"]: time_dep_hp_cop = options["time_dep_hp_cop"] ashp_cop = ( xr.open_dataarray(snakemake.input.cop_air_total) @@ -687,7 +654,9 @@ if __name__ == "__main__": .to_pandas() .reindex(index=n.snapshots) ) - default_lifetime = snakemake.params.costs["fill_values"]["lifetime"] + default_lifetime = snakemake.params.existing_capacities[ + "default_heating_lifetime" + ] add_heating_capacities_installed_before_baseyear( n, baseyear, diff --git a/scripts/add_extra_components.py b/scripts/add_extra_components.py index e00e1e5f..eb14436e 100644 --- a/scripts/add_extra_components.py +++ b/scripts/add_extra_components.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -55,8 +55,8 @@ import logging import numpy as np import pandas as pd import pypsa -from _helpers import configure_logging -from add_electricity import load_costs, sanitize_carriers +from _helpers import configure_logging, set_scenario_config +from add_electricity import load_costs, sanitize_carriers, sanitize_locations idx = pd.IndexSlice @@ -100,10 +100,9 @@ def attach_stores(n, costs, extendable_carriers): n.madd("Carrier", carriers) buses_i = n.buses.index - bus_sub_dict = {k: n.buses[k].values for k in ["x", "y", "country"]} if "H2" in carriers: - h2_buses_i = n.madd("Bus", buses_i + " H2", carrier="H2", **bus_sub_dict) + h2_buses_i = n.madd("Bus", buses_i + " H2", carrier="H2", location=buses_i) n.madd( "Store", @@ -143,7 +142,7 @@ def attach_stores(n, costs, extendable_carriers): if "battery" in carriers: b_buses_i = n.madd( - "Bus", buses_i + " battery", carrier="battery", **bus_sub_dict + "Bus", buses_i + " battery", carrier="battery", location=buses_i ) n.madd( @@ -231,6 +230,7 @@ if __name__ == "__main__": snakemake = mock_snakemake("add_extra_components", simpl="", clusters=5) configure_logging(snakemake) + set_scenario_config(snakemake) n = pypsa.Network(snakemake.input.network) extendable_carriers = snakemake.params.extendable_carriers @@ -246,6 +246,7 @@ if __name__ == 
"__main__": attach_hydrogen_pipelines(n, costs, extendable_carriers) sanitize_carriers(n, snakemake.config) + sanitize_locations(n) n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards))) n.export_to_netcdf(snakemake.output[0]) diff --git a/scripts/base_network.py b/scripts/base_network.py index eeb87bf5..432813cf 100644 --- a/scripts/base_network.py +++ b/scripts/base_network.py @@ -1,22 +1,17 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT # coding: utf-8 """ -Creates the network topology from a `ENTSO-E map extract. - -`_ (March 2022) as a PyPSA -network. +Creates the network topology from an `ENTSO-E map extract `_ (March 2022) as a PyPSA network. Relevant Settings ----------------- .. code:: yaml - snapshots: - countries: electricity: @@ -61,8 +56,19 @@ Outputs .. image:: img/base.png :scale: 33 % +- ``resources/regions_onshore.geojson``: + + .. image:: img/regions_onshore.png + :scale: 33 % + +- ``resources/regions_offshore.geojson``: + + .. image:: img/regions_offshore.png + :scale: 33 % + Description ----------- +Creates the network topology from an ENTSO-E map extract, and create Voronoi shapes for each bus representing both onshore and offshore regions. """ import logging @@ -77,10 +83,13 @@ import shapely import shapely.prepared import shapely.wkt import yaml -from _helpers import configure_logging +from _helpers import REGION_COLS, configure_logging, get_snapshots, set_scenario_config +from packaging.version import Version, parse from scipy import spatial from scipy.sparse import csgraph -from shapely.geometry import LineString, Point +from shapely.geometry import LineString, Point, Polygon + +PD_GE_2_2 = parse(pd.__version__) >= Version("2.2") logger = logging.getLogger(__name__) @@ -138,7 +147,9 @@ def _load_buses_from_eg(eg_buses, europe_shape, config_elec): ) buses["carrier"] = buses.pop("dc").map({True: "DC", False: "AC"}) - buses["under_construction"] = buses["under_construction"].fillna(False).astype(bool) + buses["under_construction"] = buses.under_construction.where( + lambda s: s.notnull(), False + ).astype(bool) # remove all buses outside of all countries including exclusive economic zones (offshore) europe_shape = gpd.read_file(europe_shape).loc[0, "geometry"] @@ -522,12 +533,13 @@ def _set_countries_and_substations(n, config, country_shapes, offshore_shapes): ) return pd.Series(key, index) + compat_kws = dict(include_groups=False) if PD_GE_2_2 else {} gb = buses.loc[substation_b].groupby( ["x", "y"], as_index=False, group_keys=False, sort=False ) - bus_map_low = gb.apply(prefer_voltage, "min") + bus_map_low = gb.apply(prefer_voltage, "min", **compat_kws) lv_b = (bus_map_low == bus_map_low.index).reindex(buses.index, fill_value=False) - bus_map_high = gb.apply(prefer_voltage, "max") + bus_map_high = gb.apply(prefer_voltage, "max", **compat_kws) hv_b = (bus_map_high == bus_map_high.index).reindex(buses.index, fill_value=False) onshore_b = pd.Series(False, buses.index) @@ -553,6 +565,7 @@ def _set_countries_and_substations(n, config, country_shapes, offshore_shapes): for b, df in product(("bus0", "bus1"), (n.lines, n.links)): has_connections_b |= ~df.groupby(b).under_construction.min() + buses["onshore_bus"] = onshore_b buses["substation_lv"] = ( lv_b & onshore_b & (~buses["under_construction"]) & has_connections_b ) @@ -560,7 +573,7 @@ def _set_countries_and_substations(n, config, country_shapes, offshore_shapes): 
~buses["under_construction"] ) - c_nan_b = buses.country == "na" + c_nan_b = buses.country.fillna("na") == "na" if c_nan_b.sum() > 0: c_tag = _get_country(buses.loc[c_nan_b]) c_tag.loc[~c_tag.isin(countries)] = np.nan @@ -693,6 +706,22 @@ def _adjust_capacities_of_under_construction_branches(n, config): return n +def _set_shapes(n, country_shapes, offshore_shapes): + # Write the geodataframes country_shapes and offshore_shapes to the network.shapes component + country_shapes = gpd.read_file(country_shapes).rename(columns={"name": "idx"}) + country_shapes["type"] = "country" + offshore_shapes = gpd.read_file(offshore_shapes).rename(columns={"name": "idx"}) + offshore_shapes["type"] = "offshore" + all_shapes = pd.concat([country_shapes, offshore_shapes], ignore_index=True) + n.madd( + "Shape", + all_shapes.index, + geometry=all_shapes.geometry, + idx=all_shapes.idx, + type=all_shapes["type"], + ) + + def base_network( eg_buses, eg_converters, @@ -725,12 +754,12 @@ def base_network( transformers = _set_electrical_parameters_transformers(transformers, config) links = _set_electrical_parameters_links(links, config, links_p_nom) converters = _set_electrical_parameters_converters(converters, config) - snapshots = snakemake.params.snapshots n = pypsa.Network() n.name = "PyPSA-Eur" - n.set_snapshots(pd.date_range(freq="h", **snapshots)) + time = get_snapshots(snakemake.params.snapshots, snakemake.params.drop_leap_day) + n.set_snapshots(time) n.madd("Carrier", ["AC", "DC"]) n.import_components_from_dataframe(buses, "Bus") @@ -753,15 +782,157 @@ def base_network( n = _adjust_capacities_of_under_construction_branches(n, config) + _set_shapes(n, country_shapes, offshore_shapes) + return n +def voronoi_partition_pts(points, outline): + """ + Compute the polygons of a voronoi partition of `points` within the polygon + `outline`. Taken from + https://github.com/FRESNA/vresutils/blob/master/vresutils/graph.py. 
+ + Attributes + ---------- + points : Nx2 - ndarray[dtype=float] + outline : Polygon + Returns + ------- + polygons : N - ndarray[dtype=Polygon|MultiPolygon] + """ + points = np.asarray(points) + + if len(points) == 1: + polygons = [outline] + else: + xmin, ymin = np.amin(points, axis=0) + xmax, ymax = np.amax(points, axis=0) + xspan = xmax - xmin + yspan = ymax - ymin + + # to avoid any network positions outside all Voronoi cells, append + # the corners of a rectangle framing these points + vor = spatial.Voronoi( + np.vstack( + ( + points, + [ + [xmin - 3.0 * xspan, ymin - 3.0 * yspan], + [xmin - 3.0 * xspan, ymax + 3.0 * yspan], + [xmax + 3.0 * xspan, ymin - 3.0 * yspan], + [xmax + 3.0 * xspan, ymax + 3.0 * yspan], + ], + ) + ) + ) + + polygons = [] + for i in range(len(points)): + poly = Polygon(vor.vertices[vor.regions[vor.point_region[i]]]) + + if not poly.is_valid: + poly = poly.buffer(0) + + with np.errstate(invalid="ignore"): + poly = poly.intersection(outline) + + polygons.append(poly) + + return polygons + + +def build_bus_shapes(n, country_shapes, offshore_shapes, countries): + country_shapes = gpd.read_file(country_shapes).set_index("name")["geometry"] + offshore_shapes = gpd.read_file(offshore_shapes) + offshore_shapes = offshore_shapes.reindex(columns=REGION_COLS).set_index("name")[ + "geometry" + ] + + onshore_regions = [] + offshore_regions = [] + + for country in countries: + c_b = n.buses.country == country + + onshore_shape = country_shapes[country] + onshore_locs = ( + n.buses.loc[c_b & n.buses.onshore_bus] + .sort_values( + by="substation_lv", ascending=False + ) # preference for substations + .drop_duplicates(subset=["x", "y"], keep="first")[["x", "y"]] + ) + onshore_regions.append( + gpd.GeoDataFrame( + { + "name": onshore_locs.index, + "x": onshore_locs["x"], + "y": onshore_locs["y"], + "geometry": voronoi_partition_pts( + onshore_locs.values, onshore_shape + ), + "country": country, + } + ) + ) + + if country not in offshore_shapes.index: + continue + offshore_shape = offshore_shapes[country] + offshore_locs = n.buses.loc[c_b & n.buses.substation_off, ["x", "y"]] + offshore_regions_c = gpd.GeoDataFrame( + { + "name": offshore_locs.index, + "x": offshore_locs["x"], + "y": offshore_locs["y"], + "geometry": voronoi_partition_pts(offshore_locs.values, offshore_shape), + "country": country, + } + ) + offshore_regions_c = offshore_regions_c.loc[offshore_regions_c.area > 1e-2] + offshore_regions.append(offshore_regions_c) + + shapes = pd.concat(onshore_regions, ignore_index=True) + + return onshore_regions, offshore_regions, shapes + + +def append_bus_shapes(n, shapes, type): + """ + Append shapes to the network. If shapes with the same component and type + already exist, they will be removed. + + Parameters: + n (pypsa.Network): The network to which the shapes will be appended. + shapes (geopandas.GeoDataFrame): The shapes to be appended. + **kwargs: Additional keyword arguments used in `n.madd`. 
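As a usage sketch of the shape bookkeeping described here, assuming a PyPSA version that ships the Shape component (which this change relies on); the region names and geometries below are invented.

# Mirror of the n.madd("Shape", ...) call pattern used by _set_shapes and
# append_bus_shapes: one Shape row per region, tagged with its origin and type.
import geopandas as gpd
import pypsa
from shapely.geometry import box

n = pypsa.Network()
regions = gpd.GeoDataFrame(
    {"name": ["DE0 0", "DE0 1"], "geometry": [box(0, 0, 1, 1), box(1, 0, 2, 1)]}
)
n.madd(
    "Shape",
    regions.index,
    geometry=regions.geometry,
    idx=regions["name"],
    component="Bus",
    type="onshore",
)
print(n.shapes)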
+ + Returns: + None + """ + remove = n.shapes.query("component == 'Bus' and type == @type").index + n.mremove("Shape", remove) + + offset = n.shapes.index.astype(int).max() + 1 if not n.shapes.empty else 0 + shapes = shapes.rename(lambda x: int(x) + offset) + n.madd( + "Shape", + shapes.index, + geometry=shapes.geometry, + idx=shapes.name, + component="Bus", + type=type, + ) + + if __name__ == "__main__": if "snakemake" not in globals(): from _helpers import mock_snakemake snakemake = mock_snakemake("base_network") configure_logging(snakemake) + set_scenario_config(snakemake) n = base_network( snakemake.input.eg_buses, @@ -778,5 +949,22 @@ if __name__ == "__main__": snakemake.config, ) + onshore_regions, offshore_regions, shapes = build_bus_shapes( + n, + snakemake.input.country_shapes, + snakemake.input.offshore_shapes, + snakemake.params.countries, + ) + + shapes.to_file(snakemake.output.regions_onshore) + append_bus_shapes(n, shapes, "onshore") + + if offshore_regions: + shapes = pd.concat(offshore_regions, ignore_index=True) + shapes.to_file(snakemake.output.regions_offshore) + append_bus_shapes(n, shapes, "offshore") + else: + offshore_shapes.to_frame().to_file(snakemake.output.regions_offshore) + n.meta = snakemake.config - n.export_to_netcdf(snakemake.output[0]) + n.export_to_netcdf(snakemake.output.base_network) diff --git a/scripts/build_ammonia_production.py b/scripts/build_ammonia_production.py index 1bcdf9ae..84d547da 100644 --- a/scripts/build_ammonia_production.py +++ b/scripts/build_ammonia_production.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -8,6 +8,7 @@ Build historical annual ammonia production per country in ktonNH3/a. import country_converter as coco import pandas as pd +from _helpers import set_scenario_config cc = coco.CountryConverter() @@ -18,6 +19,8 @@ if __name__ == "__main__": snakemake = mock_snakemake("build_ammonia_production") + set_scenario_config(snakemake) + ammonia = pd.read_excel( snakemake.input.usgs, sheet_name="T12", @@ -25,13 +28,14 @@ if __name__ == "__main__": header=0, index_col=0, skipfooter=19, + na_values=["--"], ) ammonia.index = cc.convert(ammonia.index, to="iso2") years = [str(i) for i in range(2013, 2018)] - countries = ammonia.index.intersection(snakemake.params.countries) - ammonia = ammonia.loc[countries, years].astype(float) + + ammonia = ammonia[years] # convert from ktonN to ktonNH3 ammonia *= 17 / 14 diff --git a/scripts/build_biomass_potentials.py b/scripts/build_biomass_potentials.py index aae1fb98..79e2c203 100644 --- a/scripts/build_biomass_potentials.py +++ b/scripts/build_biomass_potentials.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2021-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2021-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -9,13 +9,15 @@ using data from JRC ENSPRESO. 
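The population-based NUTS0-to-NUTS2 disaggregation that `disaggregate_nuts0` performs further below can be summarised with a toy example; all population and potential figures are invented.

# Each NUTS2 region receives a share of the national potential proportional to
# its share of the national population.
import pandas as pd

pop_nuts2 = pd.DataFrame(
    {"ct": ["DE", "DE", "FR"], "total": [10e6, 30e6, 20e6]},
    index=["DE11", "DE21", "FR10"],
)
bio = pd.Series({"DE": 100.0, "FR": 50.0})  # national potentials in TWh/a

by_country = pop_nuts2.total.groupby(pop_nuts2.ct).sum()
pop_nuts2["fraction"] = pop_nuts2.total / pop_nuts2.ct.map(by_country)

bio_nodal = bio.loc[pop_nuts2.ct].set_axis(pop_nuts2.index) * pop_nuts2.fraction
print(bio_nodal)  # DE11: 25.0, DE21: 75.0, FR10: 50.0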
import logging -logger = logging.getLogger(__name__) import geopandas as gpd import numpy as np import pandas as pd +logger = logging.getLogger(__name__) AVAILABLE_BIOMASS_YEARS = [2010, 2020, 2030, 2040, 2050] +from _helpers import configure_logging, set_scenario_config + def build_nuts_population_data(year=2013): pop = pd.read_csv( @@ -132,14 +134,14 @@ def disaggregate_nuts0(bio): pop = build_nuts_population_data() # get population in nuts2 - pop_nuts2 = pop.loc[pop.index.str.len() == 4] + pop_nuts2 = pop.loc[pop.index.str.len() == 4].copy() by_country = pop_nuts2.total.groupby(pop_nuts2.ct).sum() - pop_nuts2.loc[:, "fraction"] = pop_nuts2.total / pop_nuts2.ct.map(by_country) + pop_nuts2["fraction"] = pop_nuts2.total / pop_nuts2.ct.map(by_country) # distribute nuts0 data to nuts2 by population bio_nodal = bio.loc[pop_nuts2.ct] bio_nodal.index = pop_nuts2.index - bio_nodal = bio_nodal.mul(pop_nuts2.fraction, axis=0) + bio_nodal = bio_nodal.mul(pop_nuts2.fraction, axis=0).astype(float) # update inplace bio.update(bio_nodal) @@ -221,6 +223,9 @@ if __name__ == "__main__": planning_horizons=2050, ) + configure_logging(snakemake) + set_scenario_config(snakemake) + overnight = snakemake.config["foresight"] == "overnight" params = snakemake.params.biomass investment_year = int(snakemake.wildcards.planning_horizons) diff --git a/scripts/build_biomass_transport_costs.py b/scripts/build_biomass_transport_costs.py index 9271b600..9c825c47 100644 --- a/scripts/build_biomass_transport_costs.py +++ b/scripts/build_biomass_transport_costs.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -80,4 +80,9 @@ def build_biomass_transport_costs(): if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake("build_biomass_transport_costs") + build_biomass_transport_costs() diff --git a/scripts/build_bus_regions.py b/scripts/build_bus_regions.py deleted file mode 100644 index a6500bb0..00000000 --- a/scripts/build_bus_regions.py +++ /dev/null @@ -1,177 +0,0 @@ -# -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors -# -# SPDX-License-Identifier: MIT -""" -Creates Voronoi shapes for each bus representing both onshore and offshore -regions. - -Relevant Settings ------------------ - -.. code:: yaml - - countries: - -.. seealso:: - Documentation of the configuration file ``config/config.yaml`` at - :ref:`toplevel_cf` - -Inputs ------- - -- ``resources/country_shapes.geojson``: confer :ref:`shapes` -- ``resources/offshore_shapes.geojson``: confer :ref:`shapes` -- ``networks/base.nc``: confer :ref:`base` - -Outputs -------- - -- ``resources/regions_onshore.geojson``: - - .. image:: img/regions_onshore.png - :scale: 33 % - -- ``resources/regions_offshore.geojson``: - - .. image:: img/regions_offshore.png - :scale: 33 % - -Description ------------ -""" - -import logging - -import geopandas as gpd -import numpy as np -import pandas as pd -import pypsa -from _helpers import REGION_COLS, configure_logging -from scipy.spatial import Voronoi -from shapely.geometry import Polygon - -logger = logging.getLogger(__name__) - - -def voronoi_partition_pts(points, outline): - """ - Compute the polygons of a voronoi partition of `points` within the polygon - `outline`. Taken from - https://github.com/FRESNA/vresutils/blob/master/vresutils/graph.py. 
- - Attributes - ---------- - points : Nx2 - ndarray[dtype=float] - outline : Polygon - Returns - ------- - polygons : N - ndarray[dtype=Polygon|MultiPolygon] - """ - points = np.asarray(points) - - if len(points) == 1: - polygons = [outline] - else: - xmin, ymin = np.amin(points, axis=0) - xmax, ymax = np.amax(points, axis=0) - xspan = xmax - xmin - yspan = ymax - ymin - - # to avoid any network positions outside all Voronoi cells, append - # the corners of a rectangle framing these points - vor = Voronoi( - np.vstack( - ( - points, - [ - [xmin - 3.0 * xspan, ymin - 3.0 * yspan], - [xmin - 3.0 * xspan, ymax + 3.0 * yspan], - [xmax + 3.0 * xspan, ymin - 3.0 * yspan], - [xmax + 3.0 * xspan, ymax + 3.0 * yspan], - ], - ) - ) - ) - - polygons = [] - for i in range(len(points)): - poly = Polygon(vor.vertices[vor.regions[vor.point_region[i]]]) - - if not poly.is_valid: - poly = poly.buffer(0) - - with np.errstate(invalid="ignore"): - poly = poly.intersection(outline) - - polygons.append(poly) - - return polygons - - -if __name__ == "__main__": - if "snakemake" not in globals(): - from _helpers import mock_snakemake - - snakemake = mock_snakemake("build_bus_regions") - configure_logging(snakemake) - - countries = snakemake.params.countries - - n = pypsa.Network(snakemake.input.base_network) - - country_shapes = gpd.read_file(snakemake.input.country_shapes).set_index("name")[ - "geometry" - ] - offshore_shapes = gpd.read_file(snakemake.input.offshore_shapes) - offshore_shapes = offshore_shapes.reindex(columns=REGION_COLS).set_index("name")[ - "geometry" - ] - - onshore_regions = [] - offshore_regions = [] - - for country in countries: - c_b = n.buses.country == country - - onshore_shape = country_shapes[country] - onshore_locs = n.buses.loc[c_b & n.buses.substation_lv, ["x", "y"]] - onshore_regions.append( - gpd.GeoDataFrame( - { - "name": onshore_locs.index, - "x": onshore_locs["x"], - "y": onshore_locs["y"], - "geometry": voronoi_partition_pts( - onshore_locs.values, onshore_shape - ), - "country": country, - } - ) - ) - - if country not in offshore_shapes.index: - continue - offshore_shape = offshore_shapes[country] - offshore_locs = n.buses.loc[c_b & n.buses.substation_off, ["x", "y"]] - offshore_regions_c = gpd.GeoDataFrame( - { - "name": offshore_locs.index, - "x": offshore_locs["x"], - "y": offshore_locs["y"], - "geometry": voronoi_partition_pts(offshore_locs.values, offshore_shape), - "country": country, - } - ) - offshore_regions_c = offshore_regions_c.loc[offshore_regions_c.area > 1e-2] - offshore_regions.append(offshore_regions_c) - - pd.concat(onshore_regions, ignore_index=True).to_file( - snakemake.output.regions_onshore - ) - if offshore_regions: - pd.concat(offshore_regions, ignore_index=True).to_file( - snakemake.output.regions_offshore - ) - else: - offshore_shapes.to_frame().to_file(snakemake.output.regions_offshore) diff --git a/scripts/build_clustered_population_layouts.py b/scripts/build_clustered_population_layouts.py index 2f237656..2d9c6acb 100644 --- a/scripts/build_clustered_population_layouts.py +++ b/scripts/build_clustered_population_layouts.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -11,6 +11,7 @@ import atlite import geopandas as gpd import pandas as pd import xarray as xr +from _helpers import set_scenario_config if __name__ == "__main__": if "snakemake" not in globals(): @@ -22,13 +23,15 @@ if __name__ == 
"__main__": clusters=48, ) + set_scenario_config(snakemake) + cutout = atlite.Cutout(snakemake.input.cutout) clustered_regions = ( gpd.read_file(snakemake.input.regions_onshore).set_index("name").buffer(0) ) - I = cutout.indicatormatrix(clustered_regions) + I = cutout.indicatormatrix(clustered_regions) # noqa: E741 pop = {} for item in ["total", "urban", "rural"]: diff --git a/scripts/build_cop_profiles.py b/scripts/build_cop_profiles.py index 4b1d952e..16e44c18 100644 --- a/scripts/build_cop_profiles.py +++ b/scripts/build_cop_profiles.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -14,6 +14,7 @@ https://doi.org/10.1039/C2EE22653G. """ import xarray as xr +from _helpers import set_scenario_config def coefficient_of_performance(delta_T, source="air"): @@ -35,6 +36,8 @@ if __name__ == "__main__": clusters=48, ) + set_scenario_config(snakemake) + for area in ["total", "urban", "rural"]: for source in ["air", "soil"]: source_T = xr.open_dataarray(snakemake.input[f"temp_{source}_{area}"]) diff --git a/scripts/build_cross_border_flows.py b/scripts/build_cross_border_flows.py index b9fc3fe8..d463d234 100644 --- a/scripts/build_cross_border_flows.py +++ b/scripts/build_cross_border_flows.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -8,7 +8,7 @@ import logging import pandas as pd import pypsa -from _helpers import configure_logging +from _helpers import configure_logging, set_scenario_config from entsoe import EntsoePandasClient from entsoe.exceptions import InvalidBusinessParameterError, NoMatchingDataError from requests import HTTPError @@ -21,6 +21,7 @@ if __name__ == "__main__": snakemake = mock_snakemake("build_cross_border_flows") configure_logging(snakemake) + set_scenario_config(snakemake) api_key = snakemake.config["private"]["keys"]["entsoe_api"] client = EntsoePandasClient(api_key=api_key) diff --git a/scripts/build_cutout.py b/scripts/build_cutout.py index 9a7f9e00..1edb18ce 100644 --- a/scripts/build_cutout.py +++ b/scripts/build_cutout.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -95,7 +95,7 @@ import logging import atlite import geopandas as gpd import pandas as pd -from _helpers import configure_logging +from _helpers import configure_logging, set_scenario_config logger = logging.getLogger(__name__) @@ -105,6 +105,7 @@ if __name__ == "__main__": snakemake = mock_snakemake("build_cutout", cutout="europe-2013-era5") configure_logging(snakemake) + set_scenario_config(snakemake) cutout_params = snakemake.params.cutouts[snakemake.wildcards.cutout] diff --git a/scripts/build_heat_demand.py b/scripts/build_daily_heat_demand.py similarity index 65% rename from scripts/build_heat_demand.py rename to scripts/build_daily_heat_demand.py index 77768404..54c5c386 100644 --- a/scripts/build_heat_demand.py +++ b/scripts/build_daily_heat_demand.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -11,6 +11,7 @@ import geopandas as gpd import numpy as np import 
pandas as pd import xarray as xr +from _helpers import get_snapshots, set_scenario_config from dask.distributed import Client, LocalCluster if __name__ == "__main__": @@ -18,23 +19,33 @@ if __name__ == "__main__": from _helpers import mock_snakemake snakemake = mock_snakemake( - "build_heat_demands", + "build_daily_heat_demands", + scope="total", simpl="", clusters=48, ) + set_scenario_config(snakemake) nprocesses = int(snakemake.threads) cluster = LocalCluster(n_workers=nprocesses, threads_per_worker=1) client = Client(cluster, asynchronous=True) - time = pd.date_range(freq="h", **snakemake.params.snapshots) - cutout = atlite.Cutout(snakemake.input.cutout).sel(time=time) + cutout_name = snakemake.input.cutout + + time = get_snapshots(snakemake.params.snapshots, snakemake.params.drop_leap_day) + daily = get_snapshots( + snakemake.params.snapshots, + snakemake.params.drop_leap_day, + freq="D", + ) + + cutout = atlite.Cutout(cutout_name).sel(time=time) clustered_regions = ( gpd.read_file(snakemake.input.regions_onshore).set_index("name").buffer(0) ) - I = cutout.indicatormatrix(clustered_regions) + I = cutout.indicatormatrix(clustered_regions) # noqa: E741 pop_layout = xr.open_dataarray(snakemake.input.pop_layout) @@ -46,6 +57,6 @@ if __name__ == "__main__": index=clustered_regions.index, dask_kwargs=dict(scheduler=client), show_progress=False, - ) + ).sel(time=daily) heat_demand.to_netcdf(snakemake.output.heat_demand) diff --git a/scripts/build_district_heat_share.py b/scripts/build_district_heat_share.py new file mode 100644 index 00000000..178f2c0d --- /dev/null +++ b/scripts/build_district_heat_share.py @@ -0,0 +1,85 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Build district heat shares at each node, depending on investment year. 
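The progression logic of this new script can be illustrated for a single node; the shares, potential and progress factor below are invented and are not defaults from the configuration.

# Close a `progress` fraction of the gap between today's district heating share
# and the maximum potential share of urban demand.
todays_share = 0.15    # current district heating share at the node
urban_fraction = 0.60  # urban share of the node's heat demand
potential = 0.60       # max. share of urban demand connectable to district heating
progress = 0.50        # assumed expansion progress by the planning horizon

urban_fraction = max(urban_fraction, todays_share)    # never below today's share
headroom = urban_fraction * potential - todays_share  # 0.36 - 0.15 = 0.21
new_share = todays_share + progress * headroom        # 0.15 + 0.105 = 0.255
print(f"district heating share rises from {todays_share:.0%} to {new_share:.1%}")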
+""" + +import logging + +import pandas as pd +from _helpers import configure_logging, set_scenario_config +from prepare_sector_network import get + +logger = logging.getLogger(__name__) + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "build_district_heat_share", + simpl="", + clusters=60, + planning_horizons="2050", + ) + configure_logging(snakemake) + set_scenario_config(snakemake) + + investment_year = int(snakemake.wildcards.planning_horizons[-4:]) + + pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) + + year = str(snakemake.params.energy_totals_year) + district_heat_share = pd.read_csv(snakemake.input.district_heat_share, index_col=0)[ + year + ] + + # make ct-based share nodal + district_heat_share = district_heat_share.reindex(pop_layout.ct).fillna(0) + district_heat_share.index = pop_layout.index + + # total urban population per country + ct_urban = pop_layout.urban.groupby(pop_layout.ct).sum() + + # distribution of urban population within a country + pop_layout["urban_ct_fraction"] = pop_layout.urban / pop_layout.ct.map(ct_urban.get) + + # fraction of node that is urban + urban_fraction = pop_layout.urban / pop_layout[["rural", "urban"]].sum(axis=1) + + # maximum potential of urban demand covered by district heating + central_fraction = snakemake.config["sector"]["district_heating"]["potential"] + + # district heating share at each node + dist_fraction_node = ( + district_heat_share * pop_layout["urban_ct_fraction"] / pop_layout["fraction"] + ) + + # if district heating share larger than urban fraction -> set urban + # fraction to district heating share + urban_fraction = pd.concat([urban_fraction, dist_fraction_node], axis=1).max(axis=1) + + # difference of max potential and today's share of district heating + diff = (urban_fraction * central_fraction) - dist_fraction_node + progress = get( + snakemake.config["sector"]["district_heating"]["progress"], investment_year + ) + dist_fraction_node += diff * progress + logger.info( + f"Increase district heating share by a progress factor of {progress:.2%} " + f"resulting in new average share of {dist_fraction_node.mean():.2%}" + ) + + df = pd.DataFrame( + { + "original district heat share": district_heat_share, + "district fraction of node": dist_fraction_node, + "urban fraction": urban_fraction, + }, + dtype=float, + ) + + df.to_csv(snakemake.output.district_heat_share) diff --git a/scripts/build_electricity_demand.py b/scripts/build_electricity_demand.py index d7d9927d..fc8af372 100755 --- a/scripts/build_electricity_demand.py +++ b/scripts/build_electricity_demand.py @@ -1,15 +1,13 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020 @JanFrederickUnnewehr, The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020 @JanFrederickUnnewehr, 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ -This rule downloads the load data from `Open Power System Data Time series. - +This rule downloads the load data from `Open Power System Data Time series `_. For all countries in -the network, the per country load timeseries with suffix -``_load_actual_entsoe_transparency`` are extracted from the dataset. After -filling small gaps linearly and large gaps by copying time-slice of a given -period, the load data is exported to a ``.csv`` file. +the network, the per country load timeseries are extracted from the dataset. 
+After filling small gaps linearly and large gaps by copying time-slice of a +given period, the load data is exported to a ``.csv`` file. Relevant Settings ----------------- @@ -19,9 +17,7 @@ Relevant Settings snapshots: load: - interpolate_limit: - time_shift_for_large_gaps: - manual_adjustments: + interpolate_limit: time_shift_for_large_gaps: manual_adjustments: .. seealso:: @@ -31,25 +27,25 @@ Relevant Settings Inputs ------ -- ``resources/load_raw.csv``: +- ``data/electricity_demand_raw.csv``: Outputs ------- -- ``resources/load.csv``: +- ``resources/electricity_demand.csv``: """ import logging -logger = logging.getLogger(__name__) -import dateutil import numpy as np import pandas as pd -from _helpers import configure_logging +from _helpers import configure_logging, get_snapshots, set_scenario_config from pandas import Timedelta as Delta +logger = logging.getLogger(__name__) -def load_timeseries(fn, years, countries, powerstatistics=True): + +def load_timeseries(fn, years, countries): """ Read load data from OPSD time-series package version 2020-10-06. @@ -62,29 +58,15 @@ def load_timeseries(fn, years, countries, powerstatistics=True): File name or url location (file format .csv) countries : listlike Countries for which to read load data. - powerstatistics: bool - Whether the electricity consumption data of the ENTSOE power - statistics (if true) or of the ENTSOE transparency map (if false) - should be parsed. Returns ------- load : pd.DataFrame Load time-series with UTC timestamps x ISO-2 countries """ - logger.info(f"Retrieving load data from '{fn}'.") - - pattern = "power_statistics" if powerstatistics else "transparency" - pattern = f"_load_actual_entsoe_{pattern}" - - def rename(s): - return s[: -len(pattern)] - return ( pd.read_csv(fn, index_col=0, parse_dates=[0], date_format="%Y-%m-%dT%H:%M:%SZ") .tz_localize(None) - .filter(like=pattern) - .rename(columns=rename) .dropna(how="all", axis=0) .rename(columns={"GB_UKM": "GB"}) .filter(items=countries) @@ -147,19 +129,20 @@ def copy_timeslice(load, cntry, start, stop, delta, fn_load=None): load.loc[start:stop, cntry] = load.loc[ start - delta : stop - delta, cntry ].values - elif fn_load is not None: + elif fn_load is not None and cntry in load: duration = pd.date_range(freq="h", start=start - delta, end=stop - delta) - load_raw = load_timeseries(fn_load, duration, [cntry], powerstatistics) + load_raw = load_timeseries(fn_load, duration, [cntry]) load.loc[start:stop, cntry] = load_raw.loc[ start - delta : stop - delta, cntry ].values -def manual_adjustment(load, fn_load, powerstatistics, countries): +def manual_adjustment(load, fn_load, countries): """ Adjust gaps manual for load data from OPSD time-series package. - 1. For the ENTSOE power statistics load data (if powerstatistics is True) + 1. For years later than 2015 for which the load data is mainly taken from the + ENTSOE power statistics Kosovo (KV) and Albania (AL) do not exist in the data set. Kosovo gets the same load curve as Serbia and Albania the same as Macdedonia, both scaled @@ -167,7 +150,8 @@ def manual_adjustment(load, fn_load, powerstatistics, countries): IEA Data browser [0] for the year 2013. - 2. For the ENTSOE transparency load data (if powerstatistics is False) + 2. For years earlier than 2015 for which the load data is mainly taken from the + ENTSOE transparency platforms Albania (AL) and Macedonia (MK) do not exist in the data set. 
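The proxy used for such missing countries is simply a neighbour's hourly profile rescaled by the ratio of reported annual totals, e.g. for Albania and Montenegro; the toy profile below is invented, while the 5.7/2.9 ratio is the one used in the code.

import numpy as np
import pandas as pd

hours = pd.date_range("2013-01-01", periods=48, freq="h")
load = pd.DataFrame(
    {"ME": 330 + 30 * np.sin(np.arange(48) / 24 * 2 * np.pi)}, index=hours
)

if "AL" not in load:
    load["AL"] = load["ME"] * (5.7 / 2.9)  # same shape, Albanian annual total

# the profile shape is preserved, only the level is rescaled
assert np.isclose(load["AL"].sum() / load["ME"].sum(), 5.7 / 2.9)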
Both get the same load curve as Montenegro, scaled by the corresponding ratio of total energy @@ -183,9 +167,6 @@ def manual_adjustment(load, fn_load, powerstatistics, countries): ---------- load : pd.DataFrame Load time-series with UTC timestamps x ISO-2 countries - powerstatistics: bool - Whether argument load comprises the electricity consumption data of - the ENTSOE power statistics or of the ENTSOE transparency map load_fn: str File name or url location (file format .csv) @@ -195,88 +176,72 @@ def manual_adjustment(load, fn_load, powerstatistics, countries): Manual adjusted and interpolated load time-series with UTC timestamps x ISO-2 countries """ - if powerstatistics: - if "MK" in load.columns: - if "AL" not in load.columns or load.AL.isnull().values.all(): - load["AL"] = load["MK"] * (4.1 / 7.4) - if "RS" in load.columns: - if "KV" not in load.columns or load.KV.isnull().values.all(): - load["KV"] = load["RS"] * (4.8 / 27.0) - copy_timeslice( - load, "GR", "2015-08-11 21:00", "2015-08-15 20:00", Delta(weeks=1) - ) - copy_timeslice( - load, "AT", "2018-12-31 22:00", "2019-01-01 22:00", Delta(days=2) - ) - copy_timeslice( - load, "CH", "2010-01-19 07:00", "2010-01-19 22:00", Delta(days=1) - ) - copy_timeslice( - load, "CH", "2010-03-28 00:00", "2010-03-28 21:00", Delta(days=1) - ) - # is a WE, so take WE before - copy_timeslice( - load, "CH", "2010-10-08 13:00", "2010-10-10 21:00", Delta(weeks=1) - ) - copy_timeslice( - load, "CH", "2010-11-04 04:00", "2010-11-04 22:00", Delta(days=1) - ) - copy_timeslice( - load, "NO", "2010-12-09 11:00", "2010-12-09 18:00", Delta(days=1) - ) - # whole january missing - copy_timeslice( - load, - "GB", - "2010-01-01 00:00", - "2010-01-31 23:00", - Delta(days=-365), - fn_load, - ) - # 1.1. at midnight gets special treatment - copy_timeslice( - load, - "IE", - "2016-01-01 00:00", - "2016-01-01 01:00", - Delta(days=-366), - fn_load, - ) - copy_timeslice( - load, - "PT", - "2016-01-01 00:00", - "2016-01-01 01:00", - Delta(days=-366), - fn_load, - ) - copy_timeslice( - load, - "GB", - "2016-01-01 00:00", - "2016-01-01 01:00", - Delta(days=-366), - fn_load, - ) - - else: + if "AL" not in load and "AL" in countries: if "ME" in load: - if "AL" not in load and "AL" in countries: - load["AL"] = load.ME * (5.7 / 2.9) - if "MK" not in load and "MK" in countries: + load["AL"] = load.ME * (5.7 / 2.9) + elif "MK" in load: + load["AL"] = load["MK"] * (4.1 / 7.4) + + if "MK" in countries and "MK" in countries: + if "MK" not in load or load.MK.isnull().sum() > len(load) / 2: + if "ME" in load: load["MK"] = load.ME * (6.7 / 2.9) - if "BA" not in load and "BA" in countries: - load["BA"] = load.HR * (11.0 / 16.2) - copy_timeslice( - load, "BG", "2018-10-27 21:00", "2018-10-28 22:00", Delta(weeks=1) - ) - copy_timeslice( - load, "LU", "2019-01-02 11:00", "2019-01-05 05:00", Delta(weeks=-1) - ) - copy_timeslice( - load, "LU", "2019-02-05 20:00", "2019-02-06 19:00", Delta(weeks=-1) - ) + + if "BA" not in load and "BA" in countries: + if "ME" in load: + load["BA"] = load.HR * (11.0 / 16.2) + + if ("KV" not in load or load.KV.isnull().values.all()) and "KV" in countries: + if "RS" in load: + load["KV"] = load["RS"] * (4.8 / 27.0) + + copy_timeslice(load, "GR", "2015-08-11 21:00", "2015-08-15 20:00", Delta(weeks=1)) + copy_timeslice(load, "AT", "2018-12-31 22:00", "2019-01-01 22:00", Delta(days=2)) + copy_timeslice(load, "CH", "2010-01-19 07:00", "2010-01-19 22:00", Delta(days=1)) + copy_timeslice(load, "CH", "2010-03-28 00:00", "2010-03-28 21:00", Delta(days=1)) + # is a WE, 
so take WE before + copy_timeslice(load, "CH", "2010-10-08 13:00", "2010-10-10 21:00", Delta(weeks=1)) + copy_timeslice(load, "CH", "2010-11-04 04:00", "2010-11-04 22:00", Delta(days=1)) + copy_timeslice(load, "NO", "2010-12-09 11:00", "2010-12-09 18:00", Delta(days=1)) + # whole january missing + copy_timeslice( + load, + "GB", + "2010-01-01 00:00", + "2010-01-31 23:00", + Delta(days=-365), + fn_load, + ) + # 1.1. at midnight gets special treatment + copy_timeslice( + load, + "IE", + "2016-01-01 00:00", + "2016-01-01 01:00", + Delta(days=-366), + fn_load, + ) + copy_timeslice( + load, + "PT", + "2016-01-01 00:00", + "2016-01-01 01:00", + Delta(days=-366), + fn_load, + ) + copy_timeslice( + load, + "GB", + "2016-01-01 00:00", + "2016-01-01 01:00", + Delta(days=-366), + fn_load, + ) + + copy_timeslice(load, "BG", "2018-10-27 21:00", "2018-10-28 22:00", Delta(weeks=1)) + copy_timeslice(load, "LU", "2019-01-02 11:00", "2019-01-05 05:00", Delta(weeks=-1)) + copy_timeslice(load, "LU", "2019-02-05 20:00", "2019-02-06 19:00", Delta(weeks=-1)) if "UA" in countries: copy_timeslice( @@ -296,24 +261,35 @@ if __name__ == "__main__": snakemake = mock_snakemake("build_electricity_demand") configure_logging(snakemake) + set_scenario_config(snakemake) + + snapshots = get_snapshots( + snakemake.params.snapshots, snakemake.params.drop_leap_day + ) + + fixed_year = snakemake.params["load"].get("fixed_year", False) + years = ( + slice(str(fixed_year), str(fixed_year)) + if fixed_year + else slice(snapshots[0], snapshots[-1]) + ) - powerstatistics = snakemake.params.load["power_statistics"] interpolate_limit = snakemake.params.load["interpolate_limit"] countries = snakemake.params.countries - snapshots = pd.date_range(freq="h", **snakemake.params.snapshots) - years = slice(snapshots[0], snapshots[-1]) + time_shift = snakemake.params.load["time_shift_for_large_gaps"] - load = load_timeseries(snakemake.input[0], years, countries, powerstatistics) + load = load_timeseries(snakemake.input.reported, years, countries) + + load = load.reindex(index=snapshots) if "UA" in countries: # attach load of UA (best data only for entsoe transparency) - load_ua = load_timeseries(snakemake.input[0], "2018", ["UA"], False) + load_ua = load_timeseries(snakemake.input.reported, "2018", ["UA"]) snapshot_year = str(snapshots.year.unique().item()) time_diff = pd.Timestamp("2018") - pd.Timestamp(snapshot_year) - load_ua.index -= ( - time_diff # hack indices (currently, UA is manually set to 2018) - ) + # hack indices (currently, UA is manually set to 2018) + load_ua.index -= time_diff load["UA"] = load_ua # attach load of MD (no time-series available, use 2020-totals and distribute according to UA): # https://www.iea.org/data-and-statistics/data-browser/?country=MOLDOVA&fuel=Energy%20consumption&indicator=TotElecCons @@ -321,10 +297,7 @@ if __name__ == "__main__": load["MD"] = 6.2e6 * (load_ua / load_ua.sum()) if snakemake.params.load["manual_adjustments"]: - load = manual_adjustment(load, snakemake.input[0], powerstatistics, countries) - - if load.empty: - logger.warning("Build electricity demand time series is empty.") + load = manual_adjustment(load, snakemake.input[0], countries) logger.info(f"Linearly interpolate gaps of size {interpolate_limit} and less.") load = load.interpolate(method="linear", limit=interpolate_limit) @@ -334,10 +307,23 @@ if __name__ == "__main__": ) load = load.apply(fill_large_gaps, shift=time_shift) + if snakemake.params.load["supplement_synthetic"]: + logger.info("Supplement missing data with synthetic 
data.") + fn = snakemake.input.synthetic + synthetic_load = pd.read_csv(fn, index_col=0, parse_dates=True) + # "UA" does not appear in synthetic load data + countries = list(set(countries) - set(["UA"])) + synthetic_load = synthetic_load.loc[snapshots, countries] + load = load.combine_first(synthetic_load) + assert not load.isna().any().any(), ( "Load data contains nans. Adjust the parameters " "`time_shift_for_large_gaps` or modify the `manual_adjustment` function " "for implementing the needed load data modifications." ) + # need to reindex load time series to target year + if fixed_year: + load.index = load.index.map(lambda t: t.replace(year=snapshots.year[0])) + load.to_csv(snakemake.output[0]) diff --git a/scripts/build_electricity_prices.py b/scripts/build_electricity_prices.py index 353ea7e3..f9b964bd 100644 --- a/scripts/build_electricity_prices.py +++ b/scripts/build_electricity_prices.py @@ -1,13 +1,13 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT import logging import pandas as pd -from _helpers import configure_logging +from _helpers import configure_logging, set_scenario_config from entsoe import EntsoePandasClient from entsoe.exceptions import NoMatchingDataError @@ -19,6 +19,7 @@ if __name__ == "__main__": snakemake = mock_snakemake("build_cross_border_flows") configure_logging(snakemake) + set_scenario_config(snakemake) api_key = snakemake.config["private"]["keys"]["entsoe_api"] client = EntsoePandasClient(api_key=api_key) diff --git a/scripts/build_electricity_production.py b/scripts/build_electricity_production.py index beb859bd..b81c6b45 100644 --- a/scripts/build_electricity_production.py +++ b/scripts/build_electricity_production.py @@ -1,13 +1,13 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT import logging import pandas as pd -from _helpers import configure_logging +from _helpers import configure_logging, set_scenario_config from entsoe import EntsoePandasClient from entsoe.exceptions import NoMatchingDataError @@ -39,6 +39,7 @@ if __name__ == "__main__": snakemake = mock_snakemake("build_electricity_production") configure_logging(snakemake) + set_scenario_config(snakemake) api_key = snakemake.config["private"]["keys"]["entsoe_api"] client = EntsoePandasClient(api_key=api_key) @@ -58,7 +59,7 @@ if __name__ == "__main__": gen = client.query_generation(country, start=start, end=end, nett=True) gen = gen.tz_localize(None).resample("1h").mean() gen = gen.loc[start.tz_localize(None) : end.tz_localize(None)] - gen = gen.rename(columns=carrier_grouper).groupby(level=0, axis=1).sum() + gen = gen.rename(columns=carrier_grouper).T.groupby(level=0).sum().T generation.append(gen) except NoMatchingDataError: unavailable_countries.append(country) diff --git a/scripts/build_energy_totals.py b/scripts/build_energy_totals.py index 67b86466..b56d3294 100644 --- a/scripts/build_energy_totals.py +++ b/scripts/build_energy_totals.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -7,21 +7,19 @@ Build total energy demands per country using JRC IDEES, eurostat, and EEA data. 
""" import logging - -logger = logging.getLogger(__name__) - import multiprocessing as mp +import os from functools import partial import country_converter as coco import geopandas as gpd import numpy as np import pandas as pd -from _helpers import mute_print +from _helpers import configure_logging, mute_print, set_scenario_config from tqdm import tqdm cc = coco.CountryConverter() - +logger = logging.getLogger(__name__) idx = pd.IndexSlice @@ -39,54 +37,6 @@ def reverse(dictionary): return {v: k for k, v in dictionary.items()} -eurostat_codes = { - "EU28": "EU", - "EA19": "EA", - "Belgium": "BE", - "Bulgaria": "BG", - "Czech Republic": "CZ", - "Denmark": "DK", - "Germany": "DE", - "Estonia": "EE", - "Ireland": "IE", - "Greece": "GR", - "Spain": "ES", - "France": "FR", - "Croatia": "HR", - "Italy": "IT", - "Cyprus": "CY", - "Latvia": "LV", - "Lithuania": "LT", - "Luxembourg": "LU", - "Hungary": "HU", - "Malta": "MA", - "Netherlands": "NL", - "Austria": "AT", - "Poland": "PL", - "Portugal": "PT", - "Romania": "RO", - "Slovenia": "SI", - "Slovakia": "SK", - "Finland": "FI", - "Sweden": "SE", - "United Kingdom": "GB", - "Iceland": "IS", - "Norway": "NO", - "Montenegro": "ME", - "FYR of Macedonia": "MK", - "Albania": "AL", - "Serbia": "RS", - "Turkey": "TU", - "Bosnia and Herzegovina": "BA", - "Kosovo\n(UNSCR 1244/99)": "KO", # 2017 version - # 2016 version - "Kosovo\n(under United Nations Security Council Resolution 1244/99)": "KO", - "Moldova": "MO", - "Ukraine": "UK", - "Switzerland": "CH", -} - - idees_rename = {"GR": "EL", "GB": "UK"} eu28 = cc.EU28as("ISO2").ISO2.tolist() @@ -119,51 +69,105 @@ to_ipcc = { } -def build_eurostat(input_eurostat, countries, report_year, year): +def eurostat_per_country(input_eurostat, country): + filename = ( + f"{input_eurostat}/{country}-Energy-balance-sheets-April-2023-edition.xlsb" + ) + sheet = pd.read_excel( + filename, + engine="pyxlsb", + sheet_name=None, + skiprows=4, + index_col=list(range(4)), + ) + sheet.pop("Cover") + return pd.concat(sheet) + + +def build_eurostat(input_eurostat, countries, nprocesses=1, disable_progressbar=False): """ Return multi-index for all countries' energy data in TWh/a. 
""" - filenames = { - 2016: f"/{year}-Energy-Balances-June2016edition.xlsx", - 2017: f"/{year}-ENERGY-BALANCES-June2017edition.xlsx", - } + countries = {idees_rename.get(country, country) for country in countries} - {"CH"} + func = partial(eurostat_per_country, input_eurostat) + tqdm_kwargs = dict( + ascii=False, + unit=" country", + total=len(countries), + desc="Build from eurostat database", + disable=disable_progressbar, + ) with mute_print(): - dfs = pd.read_excel( - input_eurostat + filenames[report_year], - sheet_name=None, - skiprows=1, - index_col=list(range(4)), - ) + with mp.Pool(processes=nprocesses) as pool: + dfs = list(tqdm(pool.imap(func, countries), **tqdm_kwargs)) - # sorted_index necessary for slicing - lookup = eurostat_codes - labelled_dfs = { - lookup[df.columns[0]]: df - for df in dfs.values() - if lookup[df.columns[0]] in countries + index_names = ["country", "year", "lvl1", "lvl2", "lvl3", "lvl4"] + df = pd.concat(dfs, keys=countries, names=index_names) + df.index = df.index.set_levels(df.index.levels[1].astype(int), level=1) + + # drop columns with all NaNs + unnamed_cols = df.columns[df.columns.astype(str).str.startswith("Unnamed")] + df.drop(unnamed_cols, axis=1, inplace=True) + df.drop(list(range(1990, 2022)), axis=1, inplace=True, errors="ignore") + + # make numeric values where possible + df.replace("Z", 0, inplace=True) + df = df.apply(pd.to_numeric, errors="coerce") + df = df.select_dtypes(include=[np.number]) + + # write 'International aviation' to the lower level of the multiindex + int_avia = df.index.get_level_values(3) == "International aviation" + temp = df.loc[int_avia] + temp.index = pd.MultiIndex.from_frame( + temp.index.to_frame().fillna("International aviation") + ) + df = pd.concat([temp, df.loc[~int_avia]]) + + # Fill in missing data on "Domestic aviation" for each country. + domestic_avia = df.index.get_level_values(4) == "Domestic aviation" + for country in countries: + slicer = idx[country, :, :, :, "Domestic aviation"] + # For the Total and Fossil energy columns, fill in zeros with + # the closest non-zero value in the year index. + for col in ["Total", "Fossil energy"]: + df.loc[slicer, col] = ( + df.loc[slicer, col].replace(0.0, np.nan).ffill().bfill() + ) + + # Renaming some indices + index_rename = { + "Households": "Residential", + "Commercial & public services": "Services", + "Domestic navigation": "Domestic Navigation", + "International maritime bunkers": "Bunkers", + "UK": "GB", } - df = pd.concat(labelled_dfs, sort=True).sort_index() + columns_rename = {"Total": "Total all products"} + df.rename(index=index_rename, columns=columns_rename, inplace=True) + df.sort_index(inplace=True) - # drop non-numeric and country columns - non_numeric_cols = df.columns[df.dtypes != float] - country_cols = df.columns.intersection(lookup.keys()) - to_drop = non_numeric_cols.union(country_cols) - df.drop(to_drop, axis=1, inplace=True) - - # convert ktoe/a to TWh/a + # convert to TWh/a from ktoe/a df *= 11.63 / 1e3 return df -def build_swiss(year): +def build_swiss(): """ - Return a pd.Series of Swiss energy data in TWh/a. + Return a pd.DataFrame of Swiss energy data in TWh/a. 
""" fn = snakemake.input.swiss - df = pd.read_csv(fn, index_col=[0, 1]).loc["CH", str(year)] + df = pd.read_csv(fn, index_col=[0, 1]) + + df.columns = df.columns.astype(int) + + df.columns.name = "year" + + df = df.stack().unstack("item") + + df.columns.name = None # convert PJ/a to TWh/a df /= 3.6 @@ -171,35 +175,35 @@ def build_swiss(year): return df -def idees_per_country(ct, year, base_dir): +def idees_per_country(ct, base_dir): ct_idees = idees_rename.get(ct, ct) fn_residential = f"{base_dir}/JRC-IDEES-2015_Residential_{ct_idees}.xlsx" fn_tertiary = f"{base_dir}/JRC-IDEES-2015_Tertiary_{ct_idees}.xlsx" fn_transport = f"{base_dir}/JRC-IDEES-2015_Transport_{ct_idees}.xlsx" + ct_totals = {} + # residential - df = pd.read_excel(fn_residential, "RES_hh_fec", index_col=0)[year] + df = pd.read_excel(fn_residential, "RES_hh_fec", index_col=0) rows = ["Advanced electric heating", "Conventional electric heating"] - ct_totals = { - "total residential space": df["Space heating"], - "electricity residential space": df[rows].sum(), - } - ct_totals["total residential water"] = df.at["Water heating"] + ct_totals["electricity residential space"] = df.loc[rows].sum() + ct_totals["total residential space"] = df.loc["Space heating"] + ct_totals["total residential water"] = df.loc["Water heating"] assert df.index[23] == "Electricity" ct_totals["electricity residential water"] = df.iloc[23] - ct_totals["total residential cooking"] = df["Cooking"] + ct_totals["total residential cooking"] = df.loc["Cooking"] assert df.index[30] == "Electricity" ct_totals["electricity residential cooking"] = df.iloc[30] - df = pd.read_excel(fn_residential, "RES_summary", index_col=0)[year] + df = pd.read_excel(fn_residential, "RES_summary", index_col=0) row = "Energy consumption by fuel - Eurostat structure (ktoe)" - ct_totals["total residential"] = df[row] + ct_totals["total residential"] = df.loc[row] assert df.index[47] == "Electricity" ct_totals["electricity residential"] = df.iloc[47] @@ -212,27 +216,27 @@ def idees_per_country(ct, year, base_dir): # services - df = pd.read_excel(fn_tertiary, "SER_hh_fec", index_col=0)[year] + df = pd.read_excel(fn_tertiary, "SER_hh_fec", index_col=0) - ct_totals["total services space"] = df["Space heating"] + ct_totals["total services space"] = df.loc["Space heating"] rows = ["Advanced electric heating", "Conventional electric heating"] - ct_totals["electricity services space"] = df[rows].sum() + ct_totals["electricity services space"] = df.loc[rows].sum() - ct_totals["total services water"] = df["Hot water"] + ct_totals["total services water"] = df.loc["Hot water"] assert df.index[24] == "Electricity" ct_totals["electricity services water"] = df.iloc[24] - ct_totals["total services cooking"] = df["Catering"] + ct_totals["total services cooking"] = df.loc["Catering"] assert df.index[31] == "Electricity" ct_totals["electricity services cooking"] = df.iloc[31] - df = pd.read_excel(fn_tertiary, "SER_summary", index_col=0)[year] + df = pd.read_excel(fn_tertiary, "SER_summary", index_col=0) row = "Energy consumption by fuel - Eurostat structure (ktoe)" - ct_totals["total services"] = df[row] + ct_totals["total services"] = df.loc[row] assert df.index[50] == "Electricity" ct_totals["electricity services"] = df.iloc[50] @@ -248,7 +252,7 @@ def idees_per_country(ct, year, base_dir): start = "Detailed split of energy consumption (ktoe)" end = "Market shares of energy uses (%)" - df = pd.read_excel(fn_tertiary, "AGR_fec", index_col=0).loc[start:end, year] + df = pd.read_excel(fn_tertiary, 
"AGR_fec", index_col=0).loc[start:end] rows = [ "Lighting", @@ -256,30 +260,30 @@ def idees_per_country(ct, year, base_dir): "Specific electricity uses", "Pumping devices (electric)", ] - ct_totals["total agriculture electricity"] = df[rows].sum() + ct_totals["total agriculture electricity"] = df.loc[rows].sum() rows = ["Specific heat uses", "Low enthalpy heat"] - ct_totals["total agriculture heat"] = df[rows].sum() + ct_totals["total agriculture heat"] = df.loc[rows].sum() rows = [ "Motor drives", "Farming machine drives (diesel oil incl. biofuels)", "Pumping devices (diesel oil incl. biofuels)", ] - ct_totals["total agriculture machinery"] = df[rows].sum() + ct_totals["total agriculture machinery"] = df.loc[rows].sum() row = "Agriculture, forestry and fishing" - ct_totals["total agriculture"] = df[row] + ct_totals["total agriculture"] = df.loc[row] # transport - df = pd.read_excel(fn_transport, "TrRoad_ene", index_col=0)[year] + df = pd.read_excel(fn_transport, "TrRoad_ene", index_col=0) - ct_totals["total road"] = df["by fuel (EUROSTAT DATA)"] + ct_totals["total road"] = df.loc["by fuel (EUROSTAT DATA)"] - ct_totals["electricity road"] = df["Electricity"] + ct_totals["electricity road"] = df.loc["Electricity"] - ct_totals["total two-wheel"] = df["Powered 2-wheelers (Gasoline)"] + ct_totals["total two-wheel"] = df.loc["Powered 2-wheelers (Gasoline)"] assert df.index[19] == "Passenger cars" ct_totals["total passenger cars"] = df.iloc[19] @@ -300,16 +304,16 @@ def idees_per_country(ct, year, base_dir): ct_totals["electricity light duty road freight"] = df.iloc[49] row = "Heavy duty vehicles (Diesel oil incl. biofuels)" - ct_totals["total heavy duty road freight"] = df[row] + ct_totals["total heavy duty road freight"] = df.loc[row] assert df.index[61] == "Passenger cars" ct_totals["passenger car efficiency"] = df.iloc[61] - df = pd.read_excel(fn_transport, "TrRail_ene", index_col=0)[year] + df = pd.read_excel(fn_transport, "TrRail_ene", index_col=0) - ct_totals["total rail"] = df["by fuel (EUROSTAT DATA)"] + ct_totals["total rail"] = df.loc["by fuel (EUROSTAT DATA)"] - ct_totals["electricity rail"] = df["Electricity"] + ct_totals["electricity rail"] = df.loc["Electricity"] assert df.index[15] == "Passenger transport" ct_totals["total rail passenger"] = df.iloc[15] @@ -325,7 +329,7 @@ def idees_per_country(ct, year, base_dir): assert df.index[23] == "Electric" ct_totals["electricity rail freight"] = df.iloc[23] - df = pd.read_excel(fn_transport, "TrAvia_ene", index_col=0)[year] + df = pd.read_excel(fn_transport, "TrAvia_ene", index_col=0) assert df.index[6] == "Passenger transport" ct_totals["total aviation passenger"] = df.iloc[6] @@ -356,24 +360,24 @@ def idees_per_country(ct, year, base_dir): + ct_totals["total international aviation passenger"] ) - df = pd.read_excel(fn_transport, "TrNavi_ene", index_col=0)[year] + df = pd.read_excel(fn_transport, "TrNavi_ene", index_col=0) # coastal and inland - ct_totals["total domestic navigation"] = df["by fuel (EUROSTAT DATA)"] + ct_totals["total domestic navigation"] = df.loc["by fuel (EUROSTAT DATA)"] - df = pd.read_excel(fn_transport, "TrRoad_act", index_col=0)[year] + df = pd.read_excel(fn_transport, "TrRoad_act", index_col=0) assert df.index[85] == "Passenger cars" ct_totals["passenger cars"] = df.iloc[85] - return pd.Series(ct_totals, name=ct) + return pd.DataFrame(ct_totals) -def build_idees(countries, year): +def build_idees(countries): nprocesses = snakemake.threads disable_progress = snakemake.config["run"].get("disable_progressbar", 
False) - func = partial(idees_per_country, year=year, base_dir=snakemake.input.idees) + func = partial(idees_per_country, base_dir=snakemake.input.idees) tqdm_kwargs = dict( ascii=False, unit=" country", @@ -385,60 +389,67 @@ def build_idees(countries, year): with mp.Pool(processes=nprocesses) as pool: totals_list = list(tqdm(pool.imap(func, countries), **tqdm_kwargs)) - totals = pd.concat(totals_list, axis=1) + totals = pd.concat( + totals_list, + keys=countries, + names=["country", "year"], + ) # convert ktoe to TWh - exclude = totals.index.str.fullmatch("passenger cars") - totals.loc[~exclude] *= 11.63 / 1e3 + exclude = totals.columns.str.fullmatch("passenger cars") + totals.loc[:, ~exclude] *= 11.63 / 1e3 # convert TWh/100km to kWh/km - totals.loc["passenger car efficiency"] *= 10 + totals.loc[:, "passenger car efficiency"] *= 10 - # district heating share - district_heat = totals.loc[ - ["derived heat residential", "derived heat services"] - ].sum() - total_heat = totals.loc[["thermal uses residential", "thermal uses services"]].sum() - totals.loc["district heat share"] = district_heat.div(total_heat) - - return totals.T + return totals def build_energy_totals(countries, eurostat, swiss, idees): eurostat_fuels = {"electricity": "Electricity", "total": "Total all products"} + eurostat_countries = eurostat.index.levels[0] + eurostat_years = eurostat.index.levels[1] to_drop = ["passenger cars", "passenger car efficiency"] - df = idees.reindex(countries).drop(to_drop, axis=1) + new_index = pd.MultiIndex.from_product( + [countries, eurostat_years], names=["country", "year"] + ) - eurostat_countries = eurostat.index.levels[0] - in_eurostat = df.index.intersection(eurostat_countries) + df = idees.reindex(new_index).drop(to_drop, axis=1) + + in_eurostat = df.index.levels[0].intersection(eurostat_countries) # add international navigation - slicer = idx[in_eurostat, :, "Bunkers", :] - fill_values = eurostat.loc[slicer, "Total all products"].groupby(level=0).sum() + slicer = idx[in_eurostat, :, :, "Bunkers", :] + fill_values = eurostat.loc[slicer, "Total all products"].groupby(level=[0, 1]).sum() df.loc[in_eurostat, "total international navigation"] = fill_values # add swiss energy data - df.loc["CH"] = swiss + df = pd.concat([df.drop("CH", errors="ignore"), swiss]).sort_index() # get values for missing countries based on Eurostat EnergyBalances # divide cooking/space/water according to averages in EU28 - missing = df.index[df["total residential"].isna()] - to_fill = missing.intersection(eurostat_countries) uses = ["space", "cooking", "water"] + to_fill = df.index[ + df["total residential"].isna() + & df.index.get_level_values("country").isin(eurostat_countries) + ] + c = to_fill.get_level_values("country") + y = to_fill.get_level_values("year") + for sector in ["residential", "services", "road", "rail"]: eurostat_sector = sector.capitalize() # fuel use for fuel in ["electricity", "total"]: - slicer = idx[to_fill, :, :, eurostat_sector] + slicer = idx[c, y, :, :, eurostat_sector] fill_values = ( - eurostat.loc[slicer, eurostat_fuels[fuel]].groupby(level=0).sum() + eurostat.loc[slicer, eurostat_fuels[fuel]].groupby(level=[0, 1]).sum() ) df.loc[to_fill, f"{fuel} {sector}"] = fill_values @@ -479,7 +490,7 @@ def build_energy_totals(countries, eurostat, swiss, idees): # The main heating source for about 73 per cent of the households is based on electricity # => 26% is non-electric - if "NO" in df: + if "NO" in df.index: elec_fraction = 0.73 no_norway = df.drop("NO") @@ -500,25 +511,27 @@ def 
build_energy_totals(countries, eurostat, swiss, idees): no_norway[f"total {sector}"] - no_norway[f"electricity {sector}"] ) fraction = nonelectric_use.div(nonelectric).mean() - df.loc["NO", f"total {sector} {use}"] = total_heating * fraction + df.loc["NO", f"total {sector} {use}"] = ( + total_heating * fraction + ).values df.loc["NO", f"electricity {sector} {use}"] = ( total_heating * fraction * elec_fraction - ) + ).values # Missing aviation - slicer = idx[to_fill, :, :, "Domestic aviation"] - fill_values = eurostat.loc[slicer, "Total all products"].groupby(level=0).sum() + slicer = idx[c, y, :, :, "Domestic aviation"] + fill_values = eurostat.loc[slicer, "Total all products"].groupby(level=[0, 1]).sum() df.loc[to_fill, "total domestic aviation"] = fill_values - slicer = idx[to_fill, :, :, "International aviation"] - fill_values = eurostat.loc[slicer, "Total all products"].groupby(level=0).sum() + slicer = idx[c, y, :, :, "International aviation"] + fill_values = eurostat.loc[slicer, "Total all products"].groupby(level=[0, 1]).sum() df.loc[to_fill, "total international aviation"] = fill_values # missing domestic navigation - slicer = idx[to_fill, :, :, "Domestic Navigation"] - fill_values = eurostat.loc[slicer, "Total all products"].groupby(level=0).sum() + slicer = idx[c, y, :, :, "Domestic Navigation"] + fill_values = eurostat.loc[slicer, "Total all products"].groupby(level=[0, 1]).sum() df.loc[to_fill, "total domestic navigation"] = fill_values # split road traffic for non-IDEES @@ -571,22 +584,49 @@ def build_energy_totals(countries, eurostat, swiss, idees): if "BA" in df.index: # fill missing data for BA (services and road energy data) # proportional to RS with ratio of total residential demand - missing = df.loc["BA"] == 0.0 - ratio = df.at["BA", "total residential"] / df.at["RS", "total residential"] - df.loc["BA", missing] = ratio * df.loc["RS", missing] - - # Missing district heating share - dh_share = pd.read_csv( - snakemake.input.district_heat_share, index_col=0, usecols=[0, 1] - ) - # make conservative assumption and take minimum from both data sets - df["district heat share"] = pd.concat( - [df["district heat share"], dh_share.reindex(index=df.index) / 100], axis=1 - ).min(axis=1) + mean_BA = df.loc["BA"].loc[2014:2021, "total residential"].mean() + mean_RS = df.loc["RS"].loc[2014:2021, "total residential"].mean() + ratio = mean_BA / mean_RS + df.loc["BA"] = df.loc["BA"].replace(0.0, np.nan).values + df.loc["BA"] = df.loc["BA"].combine_first(ratio * df.loc["RS"]).values return df +def build_district_heat_share(countries, idees): + # district heating share + district_heat = idees[["derived heat residential", "derived heat services"]].sum( + axis=1 + ) + total_heat = idees[["thermal uses residential", "thermal uses services"]].sum( + axis=1 + ) + + district_heat_share = district_heat / total_heat + + district_heat_share = district_heat_share.reindex(countries, level="country") + + # Missing district heating share + dh_share = ( + pd.read_csv(snakemake.input.district_heat_share, index_col=0, usecols=[0, 1]) + .div(100) + .squeeze() + ) + # make conservative assumption and take minimum from both data sets + district_heat_share = pd.concat( + [district_heat_share, dh_share.reindex_like(district_heat_share)], axis=1 + ).min(axis=1) + + district_heat_share.name = "district heat share" + + # restrict to available years + district_heat_share = ( + district_heat_share.unstack().dropna(how="all", axis=1).ffill(axis=1) + ) + + return district_heat_share + + def build_eea_co2(input_co2, 
year=1990, emissions_scope="CO2"): # https://www.eea.europa.eu/data-and-maps/data/national-emissions-reported-to-the-unfccc-and-to-the-eu-greenhouse-gas-monitoring-mechanism-16 # downloaded 201228 (modified by EEA last on 201221) @@ -597,8 +637,6 @@ def build_eea_co2(input_co2, year=1990, emissions_scope="CO2"): index_col = ["Country_code", "Pollutant_name", "Year", "Sector_name"] df = df.set_index(index_col).sort_index() - emissions_scope = emissions_scope - cts = ["CH", "EUA", "NO"] + eu28_eea slicer = idx[cts, emissions_scope, year, to_ipcc.values()] @@ -641,8 +679,8 @@ def build_eea_co2(input_co2, year=1990, emissions_scope="CO2"): return emissions / 1e3 -def build_eurostat_co2(input_eurostat, countries, report_year, year=1990): - eurostat = build_eurostat(input_eurostat, countries, report_year, year) +def build_eurostat_co2(eurostat, year=1990): + eurostat_year = eurostat.xs(year, level="year") specific_emissions = pd.Series(index=eurostat.columns, dtype=float) @@ -656,7 +694,7 @@ def build_eurostat_co2(input_eurostat, countries, report_year, year=1990): # Residual oil (No. 6) 0.298 # https://www.eia.gov/electricity/annual/html/epa_a_03.html - return eurostat.multiply(specific_emissions).sum(axis=1) + return eurostat_year.multiply(specific_emissions).sum(axis=1) def build_co2_totals(countries, eea_co2, eurostat_co2): @@ -664,12 +702,7 @@ def build_co2_totals(countries, eea_co2, eurostat_co2): for ct in pd.Index(countries).intersection(["BA", "RS", "AL", "ME", "MK"]): mappings = { - "electricity": ( - ct, - "+", - "Conventional Thermal Power Stations", - "of which From Coal", - ), + "electricity": (ct, "+", "Electricity & heat generation", np.nan), "residential non-elec": (ct, "+", "+", "Residential"), "services non-elec": (ct, "+", "+", "Services"), "road non-elec": (ct, "+", "+", "Road"), @@ -677,12 +710,12 @@ def build_co2_totals(countries, eea_co2, eurostat_co2): "domestic navigation": (ct, "+", "+", "Domestic Navigation"), "international navigation": (ct, "-", "Bunkers"), "domestic aviation": (ct, "+", "+", "Domestic aviation"), - "international aviation": (ct, "+", "+", "International aviation"), + "international aviation": (ct, "-", "International aviation"), # does not include industrial process emissions or fuel processing/refining - "industrial non-elec": (ct, "+", "Industry"), + "industrial non-elec": (ct, "+", "Industry sector"), # does not include non-energy emissions "agriculture": (eurostat_co2.index.get_level_values(0) == ct) - & eurostat_co2.index.isin(["Agriculture / Forestry", "Fishing"], level=3), + & eurostat_co2.index.isin(["Agriculture & forestry", "Fishing"], level=3), } for i, mi in mappings.items(): @@ -692,15 +725,30 @@ def build_co2_totals(countries, eea_co2, eurostat_co2): def build_transport_data(countries, population, idees): - transport_data = pd.DataFrame(index=countries) + # first collect number of cars - # collect number of cars + transport_data = pd.DataFrame(idees["passenger cars"]) - transport_data["number cars"] = idees["passenger cars"] + countries_without_ch = set(countries) - {"CH"} + new_index = pd.MultiIndex.from_product( + [countries_without_ch, transport_data.index.levels[1]], + names=["country", "year"], + ) - # CH from http://ec.europa.eu/eurostat/statistics-explained/index.php/Passenger_cars_in_the_EU#Luxembourg_has_the_highest_number_of_passenger_cars_per_inhabitant + transport_data = transport_data.reindex(index=new_index) + + # 
https://www.bfs.admin.ch/bfs/en/home/statistics/mobility-transport/transport-infrastructure-vehicles/vehicles/road-vehicles-stock-level-motorisation.html if "CH" in countries: - transport_data.at["CH", "number cars"] = 4.136e6 + fn = snakemake.input.swiss_transport + swiss_cars = pd.read_csv(fn, index_col=0).loc[2000:2015, ["passenger cars"]] + + swiss_cars.index = pd.MultiIndex.from_product( + [["CH"], swiss_cars.index], names=["country", "year"] + ) + + transport_data = pd.concat([transport_data, swiss_cars]).sort_index() + + transport_data.rename(columns={"passenger cars": "number cars"}, inplace=True) missing = transport_data.index[transport_data["number cars"].isna()] if not missing.empty: @@ -709,7 +757,16 @@ def build_transport_data(countries, population, idees): ) cars_pp = transport_data["number cars"] / population - transport_data.loc[missing, "number cars"] = cars_pp.mean() * population + + fill_values = { + year: cars_pp.mean() * population for year in transport_data.index.levels[1] + } + fill_values = pd.DataFrame(fill_values).stack() + fill_values = pd.DataFrame(fill_values, columns=["number cars"]) + fill_values.index.names = ["country", "year"] + fill_values = fill_values.reindex(transport_data.index) + + transport_data = transport_data.combine_first(fill_values) # collect average fuel efficiency in kWh/km @@ -718,7 +775,7 @@ def build_transport_data(countries, population, idees): missing = transport_data.index[transport_data["average fuel efficiency"].isna()] if not missing.empty: logger.info( - f"Missing data on fuel efficiency from:\n{list(missing)}\nFilling gapswith averaged data." + f"Missing data on fuel efficiency from:\n{list(missing)}\nFilling gaps with averaged data." ) fill_values = transport_data["average fuel efficiency"].mean() @@ -727,13 +784,173 @@ def build_transport_data(countries, population, idees): return transport_data +def rescale_idees_from_eurostat( + idees_countries, + energy, + eurostat, +): + """ + Takes JRC IDEES data from 2015 and rescales it by the ratio of the eurostat + data and the 2015 eurostat data. 
+ + missing data: ['passenger car efficiency', 'passenger cars'] + """ + main_cols = ["Total all products", "Electricity"] + # read in the eurostat data for 2015 + eurostat_2015 = eurostat.xs(2015, level="year")[main_cols] + # calculate the ratio of the two data sets + ratio = eurostat[main_cols] / eurostat_2015 + ratio = ratio.droplevel([2, 5]) + cols_rename = {"Total all products": "total", "Electricity": "ele"} + index_rename = {v: k for k, v in idees_rename.items()} + ratio.rename(columns=cols_rename, index=index_rename, inplace=True) + + mappings = { + "Residential": { + "total": [ + "total residential space", + "total residential water", + "total residential cooking", + "total residential", + "derived heat residential", + "thermal uses residential", + ], + "elec": [ + "electricity residential space", + "electricity residential water", + "electricity residential cooking", + "electricity residential", + ], + }, + "Services": { + "total": [ + "total services space", + "total services water", + "total services cooking", + "total services", + "derived heat services", + "thermal uses services", + ], + "elec": [ + "electricity services space", + "electricity services water", + "electricity services cooking", + "electricity services", + ], + }, + "Agriculture & forestry": { + "total": [ + "total agriculture heat", + "total agriculture machinery", + "total agriculture", + ], + "elec": [ + "total agriculture electricity", + ], + }, + "Road": { + "total": [ + "total road", + "total passenger cars", + "total other road passenger", + "total light duty road freight", + ], + "elec": [ + "electricity road", + "electricity passenger cars", + "electricity other road passenger", + "electricity light duty road freight", + ], + }, + "Rail": { + "total": [ + "total rail", + "total rail passenger", + "total rail freight", + ], + "elec": [ + "electricity rail", + "electricity rail passenger", + "electricity rail freight", + ], + }, + } + + avia_inter = [ + "total aviation passenger", + "total aviation freight", + "total international aviation passenger", + "total international aviation freight", + "total international aviation", + ] + avia_domestic = [ + "total domestic aviation passenger", + "total domestic aviation freight", + "total domestic aviation", + ] + navigation = [ + "total domestic navigation", + ] + + for country in idees_countries: + filling_years = [(2015, slice(2016, 2021)), (2000, slice(1990, 1999))] + + for source_year, target_years in filling_years: + + slicer_source = idx[country, source_year, :, :] + slicer_target = idx[country, target_years, :, :] + + for sector, mapping in mappings.items(): + sector_ratio = ratio.loc[ + (country, slice(None), slice(None), sector) + ].droplevel("lvl2") + + energy.loc[slicer_target, mapping["total"]] = cartesian( + sector_ratio.loc[target_years, "total"], + energy.loc[slicer_source, mapping["total"]].squeeze(axis=0), + ).values + energy.loc[slicer_target, mapping["elec"]] = cartesian( + sector_ratio.loc[target_years, "ele"], + energy.loc[slicer_source, mapping["elec"]].squeeze(axis=0), + ).values + + level_drops = ["country", "lvl2", "lvl3"] + + slicer = idx[country, :, :, "Domestic aviation"] + avi_d = ratio.loc[slicer, "total"].droplevel(level_drops) + + slicer = idx[country, :, :, "International aviation"] + avi_i = ratio.loc[slicer, "total"].droplevel(level_drops) + + slicer = idx[country, :, :, "Domestic Navigation"] + nav = ratio.loc[slicer, "total"].droplevel(level_drops) + + energy.loc[slicer_target, avia_inter] = cartesian( + 
avi_i.loc[target_years], + energy.loc[slicer_source, avia_inter].squeeze(axis=0), + ).values + + energy.loc[slicer_target, avia_domestic] = cartesian( + avi_d.loc[target_years], + energy.loc[slicer_source, avia_domestic].squeeze(axis=0), + ).values + + energy.loc[slicer_target, navigation] = cartesian( + nav.loc[target_years], + energy.loc[slicer_source, navigation].squeeze(axis=0), + ).values + + return energy + + if __name__ == "__main__": if "snakemake" not in globals(): from _helpers import mock_snakemake snakemake = mock_snakemake("build_energy_totals") - logging.basicConfig(level=snakemake.config["logging"]["level"]) + configure_logging(snakemake) + set_scenario_config(snakemake) params = snakemake.params.energy @@ -743,22 +960,34 @@ if __name__ == "__main__": countries = snakemake.params.countries idees_countries = pd.Index(countries).intersection(eu28) - data_year = params["energy_totals_year"] - report_year = snakemake.params.energy["eurostat_report_year"] input_eurostat = snakemake.input.eurostat - eurostat = build_eurostat(input_eurostat, countries, report_year, data_year) - swiss = build_swiss(data_year) - idees = build_idees(idees_countries, data_year) + eurostat = build_eurostat( + input_eurostat, + countries, + nprocesses=snakemake.threads, + disable_progressbar=snakemake.config["run"].get("disable_progressbar", False), + ) + swiss = build_swiss() + idees = build_idees(idees_countries) energy = build_energy_totals(countries, eurostat, swiss, idees) + + # Data from IDEES only exists from 2000-2015. + logger.info("Extrapolate IDEES data based on eurostat for years 2015-2021.") + energy = rescale_idees_from_eurostat(idees_countries, energy, eurostat) + energy.to_csv(snakemake.output.energy_name) + # use rescaled idees data to calculate district heat share + district_heat_share = build_district_heat_share( + countries, energy.loc[idees_countries] + ) + district_heat_share.to_csv(snakemake.output.district_heat_share) + base_year_emissions = params["base_emissions_year"] emissions_scope = snakemake.params.energy["emissions"] eea_co2 = build_eea_co2(snakemake.input.co2, base_year_emissions, emissions_scope) - eurostat_co2 = build_eurostat_co2( - input_eurostat, countries, report_year, base_year_emissions - ) + eurostat_co2 = build_eurostat_co2(eurostat, base_year_emissions) co2 = build_co2_totals(countries, eea_co2, eurostat_co2) co2.to_csv(snakemake.output.co2_name) diff --git a/scripts/build_existing_heating_distribution.py b/scripts/build_existing_heating_distribution.py new file mode 100644 index 00000000..eb2361c2 --- /dev/null +++ b/scripts/build_existing_heating_distribution.py @@ -0,0 +1,132 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Builds table of existing heat generation capacities for initial planning +horizon. +""" +import country_converter as coco +import numpy as np +import pandas as pd +from _helpers import set_scenario_config + +cc = coco.CountryConverter() + + +def build_existing_heating(): + # retrieve existing heating capacities + + # Add existing heating capacities, data comes from the study + # "Mapping and analyses of the current and future (2020 - 2030) + # heating/cooling fuel deployment (fossil/renewables) " + # https://energy.ec.europa.eu/publications/mapping-and-analyses-current-and-future-2020-2030-heatingcooling-fuel-deployment-fossilrenewables-1_en + # file: "WP2_DataAnnex_1_BuildingTechs_ForPublication_201603.xls" -> "existing_heating_raw.csv". 
+ # data is for buildings only (i.e. NOT district heating) and represents the year 2012 + # TODO start from original file + + existing_heating = pd.read_csv( + snakemake.input.existing_heating, index_col=0, header=0 + ) + + # data for Albania, Montenegro and Macedonia not included in database + existing_heating.loc["Albania"] = np.nan + existing_heating.loc["Montenegro"] = np.nan + existing_heating.loc["Macedonia"] = np.nan + + existing_heating.fillna(0.0, inplace=True) + + # convert GW to MW + existing_heating *= 1e3 + + existing_heating.index = cc.convert(existing_heating.index, to="iso2") + + # coal and oil boilers are assimilated to oil boilers + existing_heating["oil boiler"] = ( + existing_heating["oil boiler"] + existing_heating["coal boiler"] + ) + existing_heating.drop(["coal boiler"], axis=1, inplace=True) + + # distribute technologies to nodes by population + pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) + + nodal_heating = existing_heating.loc[pop_layout.ct] + nodal_heating.index = pop_layout.index + nodal_heating = nodal_heating.multiply(pop_layout.fraction, axis=0) + + district_heat_info = pd.read_csv(snakemake.input.district_heat_share, index_col=0) + dist_fraction = district_heat_info["district fraction of node"] + urban_fraction = district_heat_info["urban fraction"] + + energy_layout = pd.read_csv( + snakemake.input.clustered_pop_energy_layout, index_col=0 + ) + + uses = ["space", "water"] + sectors = ["residential", "services"] + + nodal_sectoral_totals = pd.DataFrame(dtype=float) + + for sector in sectors: + nodal_sectoral_totals[sector] = energy_layout[ + [f"total {sector} {use}" for use in uses] + ].sum(axis=1) + + nodal_sectoral_fraction = nodal_sectoral_totals.div( + nodal_sectoral_totals.sum(axis=1), axis=0 + ) + + nodal_heat_name_fraction = pd.DataFrame(index=district_heat_info.index, dtype=float) + + nodal_heat_name_fraction["urban central"] = 0.0 + + for sector in sectors: + nodal_heat_name_fraction[f"{sector} rural"] = nodal_sectoral_fraction[ + sector + ] * (1 - urban_fraction) + nodal_heat_name_fraction[f"{sector} urban decentral"] = ( + nodal_sectoral_fraction[sector] * urban_fraction + ) + + nodal_heat_name_tech = pd.concat( + { + name: nodal_heating.multiply(nodal_heat_name_fraction[name], axis=0) + for name in nodal_heat_name_fraction.columns + }, + axis=1, + names=["heat name", "technology"], + ) + + # move all ground HPs to rural, all air to urban + + for sector in sectors: + nodal_heat_name_tech[(f"{sector} rural", "ground heat pump")] += ( + nodal_heat_name_tech[("urban central", "ground heat pump")] + * nodal_sectoral_fraction[sector] + + nodal_heat_name_tech[(f"{sector} urban decentral", "ground heat pump")] + ) + nodal_heat_name_tech[(f"{sector} urban decentral", "ground heat pump")] = 0.0 + + nodal_heat_name_tech[ + (f"{sector} urban decentral", "air heat pump") + ] += nodal_heat_name_tech[(f"{sector} rural", "air heat pump")] + nodal_heat_name_tech[(f"{sector} rural", "air heat pump")] = 0.0 + + nodal_heat_name_tech[("urban central", "ground heat pump")] = 0.0 + + nodal_heat_name_tech.to_csv(snakemake.output.existing_heating_distribution) + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "build_existing_heating_distribution", + simpl="", + clusters=48, + planning_horizons=2050, + ) + set_scenario_config(snakemake) + + build_existing_heating() diff --git a/scripts/build_gas_input_locations.py b/scripts/build_gas_input_locations.py 
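The new build_existing_heating_distribution.py above splits each node's existing capacities between rural, urban-decentral and urban-central heat carriers using the urban population fraction and the residential/services demand split. A minimal, self-contained sketch of that allocation with invented numbers (node names, fractions and the helper frame are illustrative only, not part of the patch):

import pandas as pd

nodes = ["DE0 0", "DE0 1"]                                    # hypothetical node names
urban_fraction = pd.Series([0.8, 0.5], index=nodes)           # share of population living in urban areas
sectoral_fraction = pd.DataFrame(                             # share of space/water demand per sector
    {"residential": [0.7, 0.6], "services": [0.3, 0.4]}, index=nodes
)

shares = pd.DataFrame(index=nodes)
shares["urban central"] = 0.0                                 # filled from district heating shares in the script
for sector in ["residential", "services"]:
    shares[f"{sector} rural"] = sectoral_fraction[sector] * (1 - urban_fraction)
    shares[f"{sector} urban decentral"] = sectoral_fraction[sector] * urban_fraction

# the shares sum to 1 per node, so multiplying them onto a nodal capacity
# preserves the total installed capacity
print(shares.sum(axis=1))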
index 9ad3760d..67dbc986 100644 --- a/scripts/build_gas_input_locations.py +++ b/scripts/build_gas_input_locations.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2021-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2021-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -9,12 +9,13 @@ production sites with data from SciGRID_gas and Global Energy Monitor. import logging -logger = logging.getLogger(__name__) - import geopandas as gpd import pandas as pd +from _helpers import configure_logging, set_scenario_config from cluster_gas_network import load_bus_regions +logger = logging.getLogger(__name__) + def read_scigrid_gas(fn): df = gpd.read_file(fn) @@ -24,11 +25,14 @@ def read_scigrid_gas(fn): def build_gem_lng_data(fn): - df = pd.read_excel(fn[0], sheet_name="LNG terminals - data") + df = pd.read_excel(fn, sheet_name="LNG terminals - data") df = df.set_index("ComboID") - remove_country = ["Cyprus", "Turkey"] - remove_terminal = ["Puerto de la Luz LNG Terminal", "Gran Canaria LNG Terminal"] + remove_country = ["Cyprus", "Turkey"] # noqa: F841 + remove_terminal = [ # noqa: F841 + "Puerto de la Luz LNG Terminal", + "Gran Canaria LNG Terminal", + ] df = df.query( "Status != 'Cancelled' \ @@ -42,11 +46,11 @@ def build_gem_lng_data(fn): def build_gem_prod_data(fn): - df = pd.read_excel(fn[0], sheet_name="Gas extraction - main") + df = pd.read_excel(fn, sheet_name="Gas extraction - main") df = df.set_index("GEM Unit ID") - remove_country = ["Cyprus", "Türkiye"] - remove_fuel_type = ["oil"] + remove_country = ["Cyprus", "Türkiye"] # noqa: F841 + remove_fuel_type = ["oil"] # noqa: F841 df = df.query( "Status != 'shut in' \ @@ -56,7 +60,7 @@ def build_gem_prod_data(fn): & ~Longitude.isna()" ).copy() - p = pd.read_excel(fn[0], sheet_name="Gas extraction - production") + p = pd.read_excel(fn, sheet_name="Gas extraction - production") p = p.set_index("GEM Unit ID") p = p[p["Fuel description"] == "gas"] @@ -96,8 +100,8 @@ def build_gas_input_locations(gem_fn, entry_fn, sto_fn, countries): ] sto = read_scigrid_gas(sto_fn) - remove_country = ["RU", "UA", "TR", "BY"] - sto = sto.query("country_code != @remove_country") + remove_country = ["RU", "UA", "TR", "BY"] # noqa: F841 + sto = sto.query("country_code not in @remove_country") # production sites inside the model scope prod = build_gem_prod_data(gem_fn) @@ -131,7 +135,8 @@ if __name__ == "__main__": clusters="128", ) - logging.basicConfig(level=snakemake.config["logging"]["level"]) + configure_logging(snakemake) + set_scenario_config(snakemake) regions = load_bus_regions( snakemake.input.regions_onshore, snakemake.input.regions_offshore diff --git a/scripts/build_gas_network.py b/scripts/build_gas_network.py index 92e686cd..5e9a5c9a 100644 --- a/scripts/build_gas_network.py +++ b/scripts/build_gas_network.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -9,13 +9,14 @@ Preprocess gas network based on data from bthe SciGRID_gas project import logging -logger = logging.getLogger(__name__) - import geopandas as gpd import pandas as pd +from _helpers import configure_logging, set_scenario_config from pypsa.geo import haversine_pts from shapely.geometry import Point +logger = logging.getLogger(__name__) + def diameter_to_capacity(pipe_diameter_mm): """ @@ -114,12 +115,10 @@ def prepare_dataset( df["p_nom_diameter"] = df.diameter_mm.apply(diameter_to_capacity) 
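The rewritten correction logic in the next two hunks relies on pandas' Series.where, which keeps a value where the condition is True and substitutes `other` (NaN when omitted) elsewhere. A small reference sketch with invented pipeline capacities and an invented threshold, not part of the patch:

import pandas as pd

p_nom = pd.Series([400.0, 7600.0, 12000.0])           # reported capacities (invented)
p_nom_diameter = pd.Series([450.0, 7500.0, 11000.0])  # capacities estimated from pipe diameter (invented)
suspicious = (p_nom <= 500) | ((p_nom / p_nom_diameter) > 1.05)

print(p_nom_diameter.where(suspicious))          # rows failing the condition become NaN
print(p_nom_diameter.where(suspicious, p_nom))   # rows failing the condition keep the reported value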
ratio = df.p_nom / df.p_nom_diameter not_nordstream = df.max_pressure_bar < 220 - df.p_nom.update( - df.p_nom_diameter.where( - (df.p_nom <= 500) - | ((ratio > correction_threshold_p_nom) & not_nordstream) - | ((ratio < 1 / correction_threshold_p_nom) & not_nordstream) - ) + df["p_nom"] = df.p_nom_diameter.where( + (df.p_nom <= 500) + | ((ratio > correction_threshold_p_nom) & not_nordstream) + | ((ratio < 1 / correction_threshold_p_nom) & not_nordstream) ) # lines which have way too discrepant line lengths @@ -130,12 +129,10 @@ def prepare_dataset( axis=1, ) ratio = df.eval("length / length_haversine") - df["length"].update( - df.length_haversine.where( - (df["length"] < 20) - | (ratio > correction_threshold_length) - | (ratio < 1 / correction_threshold_length) - ) + df["length"] = df.length_haversine.where( + (df["length"] < 20) + | (ratio > correction_threshold_length) + | (ratio < 1 / correction_threshold_length) ) return df @@ -147,7 +144,8 @@ if __name__ == "__main__": snakemake = mock_snakemake("build_gas_network") - logging.basicConfig(level=snakemake.config["logging"]["level"]) + configure_logging(snakemake) + set_scenario_config(snakemake) gas_network = load_dataset(snakemake.input.gas_network) diff --git a/scripts/build_heat_totals.py b/scripts/build_heat_totals.py new file mode 100644 index 00000000..9bee63e5 --- /dev/null +++ b/scripts/build_heat_totals.py @@ -0,0 +1,81 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Approximate heat demand for all weather years. +""" + +from itertools import product + +import pandas as pd +from numpy.polynomial import Polynomial + +idx = pd.IndexSlice + + +def approximate_heat_demand(energy_totals, hdd): + + countries = hdd.columns.intersection(energy_totals.index.levels[0]) + + demands = {} + + for kind, sector in product(["total", "electricity"], ["services", "residential"]): + # reduced number years (2007-2021) for regression because it implicitly + # assumes a constant building stock + row = idx[:, 2007:2021] + col = f"{kind} {sector} space" + demand = energy_totals.loc[row, col].unstack(0) + + # ffill for GB in 2020- and bfill for CH 2007-2009 + # compromise to have more years available for the fit + demand = demand.ffill(axis=0).bfill(axis=0) + + demand_approx = {} + + for c in countries: + Y = demand[c].dropna() + X = hdd.loc[Y.index, c] + + # Sometimes (looking at you, Switzerland) we only have + # _one_ year of heating data to base the prediction on. In + # this case we add a point at 0, 0 to make a "polynomial" + # fit work. 
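Both this new build_heat_totals.py and the extended build_hydro_profile.py further below lean on numpy's Polynomial.fit for a degree-1 regression that is then evaluated for years without observations. A minimal sketch of that fit-and-predict pattern with invented data (not part of the patch); the actual gap handling continues just below:

import pandas as pd
from numpy.polynomial import Polynomial

hdd_years = pd.Series({2018: 2900.0, 2019: 2750.0, 2020: 2600.0, 2021: 2950.0})  # heating degree days (invented)
observed = pd.Series({2019: 210.0, 2020: 200.0, 2021: 215.0})                    # space heat demand in TWh (invented)

p = Polynomial.fit(hdd_years.loc[observed.index], observed, 1)  # linear fit: demand ≈ a + b * HDD
to_predict = hdd_years.index.difference(observed.index)         # years with weather but no demand data
predicted = pd.Series(p(hdd_years.loc[to_predict].values), index=to_predict)
print(predicted)  # extrapolated demand for 2018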
+ if len(X) == len(Y) == 1: + X.loc[-1] = 0 + Y.loc[-1] = 0 + + to_predict = hdd.index.difference(Y.index) + X_pred = hdd.loc[to_predict, c] + + p = Polynomial.fit(X, Y, 1) + Y_pred = p(X_pred) + + demand_approx[c] = pd.Series(Y_pred, index=to_predict) + + demand_approx = pd.DataFrame(demand_approx) + demand_approx = pd.concat([demand, demand_approx]).sort_index() + demands[f"{kind} {sector} space"] = demand_approx.groupby( + demand_approx.index + ).sum() + + demands = pd.concat(demands).unstack().T.clip(lower=0) + demands.index.names = ["country", "year"] + + return demands + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake("build_heat_totals") + + hdd = pd.read_csv(snakemake.input.hdd, index_col=0).T + hdd.index = hdd.index.astype(int) + + energy_totals = pd.read_csv(snakemake.input.energy_totals, index_col=[0, 1]) + + heat_demand = approximate_heat_demand(energy_totals, hdd) + + heat_demand.to_csv(snakemake.output.heat_totals) diff --git a/scripts/build_hourly_heat_demand.py b/scripts/build_hourly_heat_demand.py new file mode 100644 index 00000000..1fb4f5a4 --- /dev/null +++ b/scripts/build_hourly_heat_demand.py @@ -0,0 +1,66 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Build hourly heat demand time series from daily ones. +""" + +from itertools import product + +import pandas as pd +import xarray as xr +from _helpers import generate_periodic_profiles, get_snapshots, set_scenario_config + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "build_hourly_heat_demands", + scope="total", + simpl="", + clusters=48, + ) + set_scenario_config(snakemake) + + snapshots = get_snapshots( + snakemake.params.snapshots, snakemake.params.drop_leap_day + ) + + daily_space_heat_demand = ( + xr.open_dataarray(snakemake.input.heat_demand) + .to_pandas() + .reindex(index=snapshots, method="ffill") + ) + + intraday_profiles = pd.read_csv(snakemake.input.heat_profile, index_col=0) + + sectors = ["residential", "services"] + uses = ["water", "space"] + + heat_demand = {} + for sector, use in product(sectors, uses): + weekday = list(intraday_profiles[f"{sector} {use} weekday"]) + weekend = list(intraday_profiles[f"{sector} {use} weekend"]) + weekly_profile = weekday * 5 + weekend * 2 + intraday_year_profile = generate_periodic_profiles( + daily_space_heat_demand.index.tz_localize("UTC"), + nodes=daily_space_heat_demand.columns, + weekly_profile=weekly_profile, + ) + + if use == "space": + heat_demand[f"{sector} {use}"] = ( + daily_space_heat_demand * intraday_year_profile + ) + else: + heat_demand[f"{sector} {use}"] = intraday_year_profile + + heat_demand = pd.concat(heat_demand, axis=1, names=["sector use", "node"]) + + heat_demand.index.name = "snapshots" + + ds = heat_demand.stack().to_xarray() + + ds.to_netcdf(snakemake.output.heat_demand) diff --git a/scripts/build_hydro_profile.py b/scripts/build_hydro_profile.py index 65cc22b7..6a0315c7 100644 --- a/scripts/build_hydro_profile.py +++ b/scripts/build_hydro_profile.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -65,17 +65,17 @@ import atlite import country_converter as coco import geopandas as gpd import pandas as pd -from 
_helpers import configure_logging +from _helpers import configure_logging, get_snapshots, set_scenario_config +from numpy.polynomial import Polynomial cc = coco.CountryConverter() -def get_eia_annual_hydro_generation(fn, countries): +def get_eia_annual_hydro_generation(fn, countries, capacities=False): # in billion kWh/a = TWh/a - df = pd.read_csv( - fn, skiprows=2, index_col=1, na_values=[" ", "--"], decimal="," - ).iloc[1:, 1:] + df = pd.read_csv(fn, skiprows=2, index_col=1, na_values=[" ", "--"]).iloc[1:, 1:] df.index = df.index.str.strip() + df.columns = df.columns.astype(int) former_countries = { "Former Czechoslovakia": dict( @@ -99,7 +99,7 @@ def get_eia_annual_hydro_generation(fn, countries): } for k, v in former_countries.items(): - period = [str(i) for i in range(v["start"], v["end"] + 1)] + period = [i for i in range(v["start"], v["end"] + 1)] ratio = df.loc[v["countries"]].T.dropna().sum() ratio /= ratio.sum() for country in v["countries"]: @@ -118,11 +118,52 @@ def get_eia_annual_hydro_generation(fn, countries): df.index = cc.convert(df.index, to="iso2") df.index.name = "countries" - df = df.T[countries] * 1e6 # in MWh/a + # convert to MW of MWh/a + factor = 1e3 if capacities else 1e6 + df = df.T[countries] * factor return df +def correct_eia_stats_by_capacity(eia_stats, fn, countries, baseyear=2019): + cap = get_eia_annual_hydro_generation(fn, countries, capacities=True) + ratio = cap / cap.loc[baseyear] + eia_stats_corrected = eia_stats / ratio + to_keep = ["AL", "AT", "CH", "DE", "GB", "NL", "RS", "RO", "SK"] + to_correct = eia_stats_corrected.columns.difference(to_keep) + eia_stats.loc[:, to_correct] = eia_stats_corrected.loc[:, to_correct] + + +def approximate_missing_eia_stats(eia_stats, runoff_fn, countries): + runoff = pd.read_csv(runoff_fn, index_col=0).T[countries] + runoff.index = runoff.index.astype(int) + + # fix outliers; exceptional floods in 1977-1979 in ES & PT + if "ES" in runoff: + runoff.loc[1978, "ES"] = runoff.loc[1979, "ES"] + if "PT" in runoff: + runoff.loc[1978, "PT"] = runoff.loc[1979, "PT"] + + runoff_eia = runoff.loc[eia_stats.index] + + eia_stats_approximated = {} + + for c in countries: + X = runoff_eia[c] + Y = eia_stats[c] + + to_predict = runoff.index.difference(eia_stats.index) + X_pred = runoff.loc[to_predict, c] + + p = Polynomial.fit(X, Y, 1) + Y_pred = p(X_pred) + + eia_stats_approximated[c] = pd.Series(Y_pred, index=to_predict) + + eia_stats_approximated = pd.DataFrame(eia_stats_approximated) + return pd.concat([eia_stats, eia_stats_approximated]).sort_index() + + logger = logging.getLogger(__name__) if __name__ == "__main__": @@ -131,9 +172,13 @@ if __name__ == "__main__": snakemake = mock_snakemake("build_hydro_profile") configure_logging(snakemake) + set_scenario_config(snakemake) params_hydro = snakemake.params.hydro - cutout = atlite.Cutout(snakemake.input.cutout) + + time = get_snapshots(snakemake.params.snapshots, snakemake.params.drop_leap_day) + + cutout = atlite.Cutout(snakemake.input.cutout).sel(time=time) countries = snakemake.params.countries country_shapes = ( @@ -146,6 +191,24 @@ if __name__ == "__main__": fn = snakemake.input.eia_hydro_generation eia_stats = get_eia_annual_hydro_generation(fn, countries) + config_hydro = snakemake.config["renewable"]["hydro"] + + if config_hydro.get("eia_correct_by_capacity"): + fn = snakemake.input.eia_hydro_capacity + correct_eia_stats_by_capacity(eia_stats, fn, countries) + + if config_hydro.get("eia_approximate_missing"): + fn = snakemake.input.era5_runoff + eia_stats = 
approximate_missing_eia_stats(eia_stats, fn, countries) + + contained_years = pd.date_range(freq="YE", **snakemake.params.snapshots).year + norm_year = config_hydro.get("eia_norm_year") + missing_years = contained_years.difference(eia_stats.index) + if norm_year: + eia_stats.loc[contained_years] = eia_stats.loc[norm_year] + elif missing_years.any(): + eia_stats.loc[missing_years] = eia_stats.median() + inflow = cutout.runoff( shapes=country_shapes, smooth=True, @@ -156,4 +219,4 @@ if __name__ == "__main__": if "clip_min_inflow" in params_hydro: inflow = inflow.where(inflow > params_hydro["clip_min_inflow"], 0) - inflow.to_netcdf(snakemake.output[0]) + inflow.to_netcdf(snakemake.output.profile) diff --git a/scripts/build_industrial_distribution_key.py b/scripts/build_industrial_distribution_key.py index e6d515b0..7cba0af5 100644 --- a/scripts/build_industrial_distribution_key.py +++ b/scripts/build_industrial_distribution_key.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -7,17 +7,15 @@ Build spatial distribution of industries from Hotmaps database. """ import logging - -logger = logging.getLogger(__name__) - import uuid from itertools import product import country_converter as coco import geopandas as gpd import pandas as pd -from packaging.version import Version, parse +from _helpers import configure_logging, set_scenario_config +logger = logging.getLogger(__name__) cc = coco.CountryConverter() @@ -32,7 +30,7 @@ def locate_missing_industrial_sites(df): try: from geopy.extra.rate_limiter import RateLimiter from geopy.geocoders import Nominatim - except: + except ImportError: raise ModuleNotFoundError( "Optional dependency 'geopy' not found." 
"Install via 'conda install -c conda-forge geopy'" @@ -86,12 +84,7 @@ def prepare_hotmaps_database(regions): gdf = gpd.GeoDataFrame(df, geometry="coordinates", crs="EPSG:4326") - kws = ( - dict(op="within") - if parse(gpd.__version__) < Version("0.10") - else dict(predicate="within") - ) - gdf = gpd.sjoin(gdf, regions, how="inner", **kws) + gdf = gpd.sjoin(gdf, regions, how="inner", predicate="within") gdf.rename(columns={"index_right": "bus"}, inplace=True) gdf["country"] = gdf.bus.str[:2] @@ -101,7 +94,7 @@ def prepare_hotmaps_database(regions): # get all duplicated entries duplicated_i = gdf.index[gdf.index.duplicated()] # convert from raw data country name to iso-2-code - code = cc.convert(gdf.loc[duplicated_i, "Country"], to="iso2") + code = cc.convert(gdf.loc[duplicated_i, "Country"], to="iso2") # noqa: F841 # screen out malformed country allocation gdf_filtered = gdf.loc[duplicated_i].query("country == @code") # concat not duplicated and filtered gdf @@ -156,8 +149,8 @@ if __name__ == "__main__": simpl="", clusters=128, ) - - logging.basicConfig(level=snakemake.config["logging"]["level"]) + configure_logging(snakemake) + set_scenario_config(snakemake) countries = snakemake.params.countries diff --git a/scripts/build_industrial_energy_demand_per_country_today.py b/scripts/build_industrial_energy_demand_per_country_today.py index d1c672f1..8129177a 100644 --- a/scripts/build_industrial_energy_demand_per_country_today.py +++ b/scripts/build_industrial_energy_demand_per_country_today.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -11,6 +11,7 @@ from functools import partial import country_converter as coco import pandas as pd +from _helpers import set_scenario_config from tqdm import tqdm cc = coco.CountryConverter() @@ -73,7 +74,7 @@ def industrial_energy_demand_per_country(country, year, jrc_dir): def get_subsector_data(sheet): df = df_dict[sheet][year].groupby(fuels).sum() - df["ammonia"] = 0.0 + df["hydrogen"] = 0.0 df["other"] = df["all"] - df.loc[df.index != "all"].sum() @@ -94,51 +95,50 @@ def industrial_energy_demand_per_country(country, year, jrc_dir): return df -def add_ammonia_energy_demand(demand): - # MtNH3/a - fn = snakemake.input.ammonia_production - ammonia = pd.read_csv(fn, index_col=0)[str(year)] / 1e3 +def separate_basic_chemicals(demand, production): - def get_ammonia_by_fuel(x): - fuels = { - "gas": params["MWh_CH4_per_tNH3_SMR"], - "electricity": params["MWh_elec_per_tNH3_SMR"], + ammonia = pd.DataFrame( + { + "hydrogen": production["Ammonia"] * params["MWh_H2_per_tNH3_electrolysis"], + "electricity": production["Ammonia"] + * params["MWh_elec_per_tNH3_electrolysis"], } - - return pd.Series({k: x * v for k, v in fuels.items()}) - - ammonia_by_fuel = ammonia.apply(get_ammonia_by_fuel).T - ammonia_by_fuel = ammonia_by_fuel.unstack().reindex( - index=demand.index, fill_value=0.0 - ) - - ammonia = pd.DataFrame({"ammonia": ammonia * params["MWh_NH3_per_tNH3"]}).T + ).T + chlorine = pd.DataFrame( + { + "hydrogen": production["Chlorine"] * params["MWh_H2_per_tCl"], + "electricity": production["Chlorine"] * params["MWh_elec_per_tCl"], + } + ).T + methanol = pd.DataFrame( + { + "gas": production["Methanol"] * params["MWh_CH4_per_tMeOH"], + "electricity": production["Methanol"] * params["MWh_elec_per_tMeOH"], + } + ).T demand["Ammonia"] = ammonia.unstack().reindex(index=demand.index, fill_value=0.0) + demand["Chlorine"] 
= chlorine.unstack().reindex(index=demand.index, fill_value=0.0) + demand["Methanol"] = methanol.unstack().reindex(index=demand.index, fill_value=0.0) - demand["Basic chemicals (without ammonia)"] = ( - demand["Basic chemicals"] - ammonia_by_fuel + demand["HVC"] = ( + demand["Basic chemicals"] + - demand["Ammonia"] + - demand["Methanol"] + - demand["Chlorine"] ) - demand["Basic chemicals (without ammonia)"].clip(lower=0, inplace=True) - demand.drop(columns="Basic chemicals", inplace=True) + demand["HVC"].clip(lower=0, inplace=True) + return demand -def add_non_eu28_industrial_energy_demand(countries, demand): +def add_non_eu28_industrial_energy_demand(countries, demand, production): non_eu28 = countries.difference(eu28) if non_eu28.empty: return demand - # output in MtMaterial/a - fn = snakemake.input.industrial_production_per_country - production = pd.read_csv(fn, index_col=0) / 1e3 - - # recombine HVC, Chlorine and Methanol to Basic chemicals (without ammonia) - chemicals = ["HVC", "Chlorine", "Methanol"] - production["Basic chemicals (without ammonia)"] = production[chemicals].sum(axis=1) - production.drop(columns=chemicals, inplace=True) eu28_production = production.loc[countries.intersection(eu28)].sum() eu28_energy = demand.groupby(level=1).sum() @@ -175,6 +175,7 @@ if __name__ == "__main__": from _helpers import mock_snakemake snakemake = mock_snakemake("build_industrial_energy_demand_per_country_today") + set_scenario_config(snakemake) params = snakemake.params.industry year = params.get("reference_year", 2015) @@ -182,9 +183,15 @@ if __name__ == "__main__": demand = industrial_energy_demand(countries.intersection(eu28), year) - demand = add_ammonia_energy_demand(demand) + # output in MtMaterial/a + production = ( + pd.read_csv(snakemake.input.industrial_production_per_country, index_col=0) + / 1e3 + ) - demand = add_non_eu28_industrial_energy_demand(countries, demand) + demand = separate_basic_chemicals(demand, production) + + demand = add_non_eu28_industrial_energy_demand(countries, demand, production) # for format compatibility demand = demand.stack(dropna=False).unstack(level=[0, 2]) diff --git a/scripts/build_industrial_energy_demand_per_node.py b/scripts/build_industrial_energy_demand_per_node.py index 55c10c5d..ce72ea7a 100644 --- a/scripts/build_industrial_energy_demand_per_node.py +++ b/scripts/build_industrial_energy_demand_per_node.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -7,6 +7,7 @@ Build industrial energy demand per model region. 
""" import pandas as pd +from _helpers import set_scenario_config if __name__ == "__main__": if "snakemake" not in globals(): @@ -18,24 +19,33 @@ if __name__ == "__main__": clusters=48, planning_horizons=2030, ) + set_scenario_config(snakemake) - # import EU ratios df as csv + # import ratios fn = snakemake.input.industry_sector_ratios - industry_sector_ratios = pd.read_csv(fn, index_col=0) + sector_ratios = pd.read_csv(fn, header=[0, 1], index_col=0) - # material demand per node and industry (kton/a) + # material demand per node and industry (Mton/a) fn = snakemake.input.industrial_production_per_node - nodal_production = pd.read_csv(fn, index_col=0) + nodal_production = pd.read_csv(fn, index_col=0) / 1e3 # energy demand today to get current electricity fn = snakemake.input.industrial_energy_demand_per_node_today nodal_today = pd.read_csv(fn, index_col=0) - # final energy consumption per node and industry (TWh/a) - nodal_df = nodal_production.dot(industry_sector_ratios.T) + nodal_sector_ratios = pd.concat( + {node: sector_ratios[node[:2]] for node in nodal_production.index}, axis=1 + ) - # convert GWh to TWh and ktCO2 to MtCO2 - nodal_df *= 0.001 + nodal_production_stacked = nodal_production.stack() + nodal_production_stacked.index.names = [None, None] + + # final energy consumption per node and industry (TWh/a) + nodal_df = ( + (nodal_sector_ratios.multiply(nodal_production_stacked)) + .T.groupby(level=0) + .sum() + ) rename_sectors = { "elec": "electricity", diff --git a/scripts/build_industrial_energy_demand_per_node_today.py b/scripts/build_industrial_energy_demand_per_node_today.py index d845e704..8b2b70a0 100644 --- a/scripts/build_industrial_energy_demand_per_node_today.py +++ b/scripts/build_industrial_energy_demand_per_node_today.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -10,6 +10,7 @@ from itertools import product import numpy as np import pandas as pd +from _helpers import set_scenario_config # map JRC/our sectors to hotmaps sector, where mapping exist sector_mapping = { @@ -75,5 +76,6 @@ if __name__ == "__main__": simpl="", clusters=48, ) + set_scenario_config(snakemake) build_nodal_industrial_energy_demand() diff --git a/scripts/build_industrial_production_per_country.py b/scripts/build_industrial_production_per_country.py index 74cb1949..5c14b065 100644 --- a/scripts/build_industrial_production_per_country.py +++ b/scripts/build_industrial_production_per_country.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -7,18 +7,16 @@ Build industrial production per country. 
""" import logging -from functools import partial - -logger = logging.getLogger(__name__) - import multiprocessing as mp +from functools import partial import country_converter as coco import numpy as np import pandas as pd -from _helpers import mute_print +from _helpers import configure_logging, mute_print, set_scenario_config from tqdm import tqdm +logger = logging.getLogger(__name__) cc = coco.CountryConverter() tj_to_ktoe = 0.0238845 @@ -99,33 +97,18 @@ fields = { "Other Industrial Sectors": "Physical output (index)", } -eb_names = { - "NO": "Norway", - "AL": "Albania", - "BA": "Bosnia and Herzegovina", - "MK": "FYR of Macedonia", - "GE": "Georgia", - "IS": "Iceland", - "KO": "Kosovo", - "MD": "Moldova", - "ME": "Montenegro", - "RS": "Serbia", - "UA": "Ukraine", - "TR": "Turkey", -} - eb_sectors = { - "Iron & steel industry": "Iron and steel", - "Chemical and Petrochemical industry": "Chemicals Industry", - "Non-ferrous metal industry": "Non-metallic mineral products", - "Paper, Pulp and Print": "Pulp, paper and printing", - "Food and Tabacco": "Food, beverages and tobacco", - "Non-metallic Minerals (Glass, pottery & building mat. Industry)": "Non Ferrous Metals", - "Transport Equipment": "Transport Equipment", + "Iron & steel": "Iron and steel", + "Chemical & petrochemical": "Chemicals Industry", + "Non-ferrous metals": "Non-metallic mineral products", + "Paper, pulp & printing": "Pulp, paper and printing", + "Food, beverages & tobacco": "Food, beverages and tobacco", + "Non-metallic minerals": "Non Ferrous Metals", + "Transport equipment": "Transport Equipment", "Machinery": "Machinery Equipment", - "Textile and Leather": "Textiles and leather", - "Wood and Wood Products": "Wood and wood products", - "Non-specified (Industry)": "Other Industrial Sectors", + "Textile & leather": "Textiles and leather", + "Wood & wood products": "Wood and wood products", + "Not elsewhere specified (industry)": "Other Industrial Sectors", } # TODO: this should go in a csv in `data` @@ -162,12 +145,15 @@ def get_energy_ratio(country, eurostat_dir, jrc_dir, year): e_country = e_switzerland * tj_to_ktoe else: # estimate physical output, energy consumption in the sector and country - fn = f"{eurostat_dir}/{eb_names[country]}.XLSX" - with mute_print(): - df = pd.read_excel( - fn, sheet_name="2016", index_col=2, header=0, skiprows=1 - ).squeeze("columns") - e_country = df.loc[eb_sectors.keys(), "Total all products"].rename(eb_sectors) + fn = f"{eurostat_dir}/{country}-Energy-balance-sheets-April-2023-edition.xlsb" + df = pd.read_excel( + fn, + sheet_name=str(min(2021, year)), + index_col=2, + header=0, + skiprows=4, + ) + e_country = df.loc[eb_sectors.keys(), "Total"].rename(eb_sectors) fn = f"{jrc_dir}/JRC-IDEES-2015_Industry_EU28.xlsx" @@ -263,7 +249,11 @@ def separate_basic_chemicals(demand, year): demand["Basic chemicals"].clip(lower=0.0, inplace=True) # assume HVC, methanol, chlorine production proportional to non-ammonia basic chemicals - distribution_key = demand["Basic chemicals"] / demand["Basic chemicals"].sum() + distribution_key = ( + demand["Basic chemicals"] + / params["basic_chemicals_without_NH3_production_today"] + / 1e3 + ) demand["HVC"] = params["HVC_production_today"] * 1e3 * distribution_key demand["Chlorine"] = params["chlorine_production_today"] * 1e3 * distribution_key demand["Methanol"] = params["methanol_production_today"] * 1e3 * distribution_key @@ -276,8 +266,8 @@ if __name__ == "__main__": from _helpers import mock_snakemake snakemake = 
mock_snakemake("build_industrial_production_per_country") - - logging.basicConfig(level=snakemake.config["logging"]["level"]) + configure_logging(snakemake) + set_scenario_config(snakemake) countries = snakemake.params.countries diff --git a/scripts/build_industrial_production_per_country_tomorrow.py b/scripts/build_industrial_production_per_country_tomorrow.py index ffed5195..a8b6c312 100644 --- a/scripts/build_industrial_production_per_country_tomorrow.py +++ b/scripts/build_industrial_production_per_country_tomorrow.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -7,6 +7,7 @@ Build future industrial production per country. """ import pandas as pd +from _helpers import set_scenario_config from prepare_sector_network import get if __name__ == "__main__": @@ -14,6 +15,7 @@ if __name__ == "__main__": from _helpers import mock_snakemake snakemake = mock_snakemake("build_industrial_production_per_country_tomorrow") + set_scenario_config(snakemake) params = snakemake.params.industry diff --git a/scripts/build_industrial_production_per_node.py b/scripts/build_industrial_production_per_node.py index 7b69948a..1eeecbae 100644 --- a/scripts/build_industrial_production_per_node.py +++ b/scripts/build_industrial_production_per_node.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -9,6 +9,7 @@ Build industrial production per model region. from itertools import product import pandas as pd +from _helpers import set_scenario_config # map JRC/our sectors to hotmaps sector, where mapping exist sector_mapping = { @@ -72,5 +73,6 @@ if __name__ == "__main__": simpl="", clusters=48, ) + set_scenario_config(snakemake) build_nodal_industrial_production() diff --git a/scripts/build_industry_sector_ratios.py b/scripts/build_industry_sector_ratios.py index 45705002..52e83f60 100644 --- a/scripts/build_industry_sector_ratios.py +++ b/scripts/build_industry_sector_ratios.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -7,7 +7,7 @@ Build specific energy consumption by carrier and industries. """ import pandas as pd -from _helpers import mute_print +from _helpers import mute_print, set_scenario_config # GWh/ktoe OR MWh/toe toe_to_MWh = 11.630 @@ -303,7 +303,7 @@ def chemicals_industry(): # There are Solids, Refinery gas, LPG, Diesel oil, Residual fuel oil, # Other liquids, Naphtha, Natural gas for feedstock. # Naphta represents 47%, methane 17%. 
LPG (18%) solids, refinery gas, - # diesel oil, residual fuel oils and other liquids are asimilated to Naphtha + # diesel oil, residual fuel oils and other liquids are assimilated to Naphtha s_fec = idees["fec"][13:22] assert s_fec.index[0] == subsector @@ -408,15 +408,15 @@ def chemicals_industry(): df.loc["methane", sector] -= ammonia_total * params["MWh_CH4_per_tNH3_SMR"] df.loc["elec", sector] -= ammonia_total * params["MWh_elec_per_tNH3_SMR"] - # subtract chlorine demand + # subtract chlorine demand (in MtCl/a) chlorine_total = params["chlorine_production_today"] - df.loc["hydrogen", sector] -= chlorine_total * params["MWh_H2_per_tCl"] - df.loc["elec", sector] -= chlorine_total * params["MWh_elec_per_tCl"] + df.loc["hydrogen", sector] -= chlorine_total * params["MWh_H2_per_tCl"] * 1e3 + df.loc["elec", sector] -= chlorine_total * params["MWh_elec_per_tCl"] * 1e3 - # subtract methanol demand + # subtract methanol demand (in MtMeOH/a) methanol_total = params["methanol_production_today"] - df.loc["methane", sector] -= methanol_total * params["MWh_CH4_per_tMeOH"] - df.loc["elec", sector] -= methanol_total * params["MWh_elec_per_tMeOH"] + df.loc["methane", sector] -= methanol_total * params["MWh_CH4_per_tMeOH"] * 1e3 + df.loc["elec", sector] -= methanol_total * params["MWh_elec_per_tMeOH"] * 1e3 # MWh/t material df.loc[sources, sector] = df.loc[sources, sector] / s_out @@ -1464,6 +1464,7 @@ if __name__ == "__main__": from _helpers import mock_snakemake snakemake = mock_snakemake("build_industry_sector_ratios") + set_scenario_config(snakemake) # TODO make params option year = 2015 diff --git a/scripts/build_industry_sector_ratios_intermediate.py b/scripts/build_industry_sector_ratios_intermediate.py new file mode 100644 index 00000000..14e09505 --- /dev/null +++ b/scripts/build_industry_sector_ratios_intermediate.py @@ -0,0 +1,81 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Build specific energy consumption by carrier and industries and by country, +that interpolates between the current average energy consumption (from +2015-2020) and the ideal future best-in-class consumption. 
+""" + +import pandas as pd +from prepare_sector_network import get + + +def build_industry_sector_ratios_intermediate(): + + # in TWh/a + demand = pd.read_csv( + snakemake.input.industrial_energy_demand_per_country_today, + header=[0, 1], + index_col=0, + ) + + # in Mt/a + production = ( + pd.read_csv(snakemake.input.industrial_production_per_country, index_col=0) + / 1e3 + ).stack() + production.index.names = [None, None] + + # in MWh/t + future_sector_ratios = pd.read_csv( + snakemake.input.industry_sector_ratios, index_col=0 + ) + + today_sector_ratios = demand.div(production, axis=1) + + today_sector_ratios.dropna(how="all", axis=1, inplace=True) + + rename = { + "waste": "biomass", + "electricity": "elec", + "solid": "coke", + "gas": "methane", + "other": "biomass", + "liquid": "naphtha", + } + today_sector_ratios = today_sector_ratios.rename(rename).groupby(level=0).sum() + + fraction_future = get(params["sector_ratios_fraction_future"], year) + + intermediate_sector_ratios = {} + for ct, group in today_sector_ratios.T.groupby(level=0): + today_sector_ratios_ct = ( + group.droplevel(0) + .T.reindex_like(future_sector_ratios) + .fillna(future_sector_ratios) + ) + intermediate_sector_ratios[ct] = ( + today_sector_ratios_ct * (1 - fraction_future) + + future_sector_ratios * fraction_future + ) + intermediate_sector_ratios = pd.concat(intermediate_sector_ratios, axis=1) + + intermediate_sector_ratios.to_csv(snakemake.output.industry_sector_ratios) + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "build_industry_sector_ratios_intermediate", + planning_horizons="2030", + ) + + year = int(snakemake.wildcards.planning_horizons[-4:]) + + params = snakemake.params.industry + + build_industry_sector_ratios_intermediate() diff --git a/scripts/build_line_rating.py b/scripts/build_line_rating.py index 589f3656..f9c71ea3 100755 --- a/scripts/build_line_rating.py +++ b/scripts/build_line_rating.py @@ -50,7 +50,6 @@ With a heat balance considering the maximum temperature threshold of the transmi the maximal possible capacity factor "s_max_pu" for each transmission line at each time step is calculated. """ -import logging import re import atlite @@ -59,7 +58,7 @@ import numpy as np import pandas as pd import pypsa import xarray as xr -from _helpers import configure_logging +from _helpers import configure_logging, get_snapshots, set_scenario_config from shapely.geometry import LineString as Line from shapely.geometry import Point @@ -99,7 +98,7 @@ def calculate_line_rating(n, cutout): ------- xarray DataArray object with maximal power. 
""" - relevant_lines = n.lines[(n.lines["underground"] == False)] + relevant_lines = n.lines[~n.lines["underground"]].copy() buses = relevant_lines[["bus0", "bus1"]].values x = n.buses.x y = n.buses.y @@ -145,11 +144,11 @@ if __name__ == "__main__": opts="Co2L-4H", ) configure_logging(snakemake) - - snapshots = snakemake.params.snapshots + set_scenario_config(snakemake) n = pypsa.Network(snakemake.input.base_network) - time = pd.date_range(freq="h", **snapshots) + time = get_snapshots(snakemake.params.snapshots, snakemake.params.drop_leap_day) + cutout = atlite.Cutout(snakemake.input.cutout).sel(time=time) da = calculate_line_rating(n, cutout) diff --git a/scripts/build_monthly_prices.py b/scripts/build_monthly_prices.py index 1c6d461b..d35243c3 100644 --- a/scripts/build_monthly_prices.py +++ b/scripts/build_monthly_prices.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -43,7 +43,7 @@ Data was accessed at 16.5.2023 import logging import pandas as pd -from _helpers import configure_logging +from _helpers import configure_logging, set_scenario_config logger = logging.getLogger(__name__) @@ -111,6 +111,7 @@ if __name__ == "__main__": snakemake = mock_snakemake("build_monthly_prices") configure_logging(snakemake) + set_scenario_config(snakemake) fuel_price = get_fuel_price() fuel_price.to_csv(snakemake.output.fuel_price) diff --git a/scripts/build_natura_raster.py b/scripts/build_natura_raster.py index 8fdb4ea3..35fb0dbd 100644 --- a/scripts/build_natura_raster.py +++ b/scripts/build_natura_raster.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -46,7 +46,7 @@ import logging import atlite import geopandas as gpd import rasterio as rio -from _helpers import configure_logging +from _helpers import configure_logging, set_scenario_config from rasterio.features import geometry_mask from rasterio.warp import transform_bounds @@ -92,10 +92,10 @@ if __name__ == "__main__": snakemake = mock_snakemake("build_natura_raster") configure_logging(snakemake) + set_scenario_config(snakemake) - cutouts = snakemake.input.cutouts - xs, Xs, ys, Ys = zip(*(determine_cutout_xXyY(cutout) for cutout in cutouts)) - bounds = transform_bounds(4326, 3035, min(xs), min(ys), max(Xs), max(Ys)) + x, X, y, Y = determine_cutout_xXyY(snakemake.input.cutout) + bounds = transform_bounds(4326, 3035, x, y, X, Y) transform, out_shape = get_transform_and_shape(bounds, res=100) # adjusted boundaries diff --git a/scripts/build_population_layouts.py b/scripts/build_population_layouts.py index e864d925..dc4cf2f8 100644 --- a/scripts/build_population_layouts.py +++ b/scripts/build_population_layouts.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -8,22 +8,25 @@ Build mapping between cutout grid cells and population (total, urban, rural). 
import logging -logger = logging.getLogger(__name__) - - import atlite import geopandas as gpd import numpy as np import pandas as pd import xarray as xr +from _helpers import configure_logging, set_scenario_config + +logger = logging.getLogger(__name__) if __name__ == "__main__": if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake("build_population_layouts") + snakemake = mock_snakemake( + "build_population_layouts", + ) - logging.basicConfig(level=snakemake.config["logging"]["level"]) + configure_logging(snakemake) + set_scenario_config(snakemake) cutout = atlite.Cutout(snakemake.input.cutout) @@ -34,7 +37,7 @@ if __name__ == "__main__": nuts3 = gpd.read_file(snakemake.input.nuts3_shapes).set_index("index") # Indicator matrix NUTS3 -> grid cells - I = atlite.cutout.compute_indicatormatrix(nuts3.geometry, grid_cells) + I = atlite.cutout.compute_indicatormatrix(nuts3.geometry, grid_cells) # noqa: E741 # Indicator matrix grid_cells -> NUTS3; inprinciple Iinv*I is identity # but imprecisions mean not perfect @@ -84,7 +87,8 @@ if __name__ == "__main__": # correct for imprecision of Iinv*I pop_ct = nuts3.loc[nuts3.country == ct, "pop"].sum() - pop_cells_ct *= pop_ct / pop_cells_ct.sum() + if pop_cells_ct.sum() != 0: + pop_cells_ct *= pop_ct / pop_cells_ct.sum() # The first low density grid cells to reach rural fraction are rural asc_density_i = density_cells_ct.sort_values().index diff --git a/scripts/build_population_weighted_energy_totals.py b/scripts/build_population_weighted_energy_totals.py index 879e3b9b..60af66aa 100644 --- a/scripts/build_population_weighted_energy_totals.py +++ b/scripts/build_population_weighted_energy_totals.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -7,6 +7,7 @@ Distribute country-level energy demands by population. """ import pandas as pd +from _helpers import set_scenario_config if __name__ == "__main__": if "snakemake" not in globals(): @@ -14,16 +15,28 @@ if __name__ == "__main__": snakemake = mock_snakemake( "build_population_weighted_energy_totals", + kind="heat", simpl="", - clusters=48, + clusters=60, ) + set_scenario_config(snakemake) + + config = snakemake.config["energy"] + + if snakemake.wildcards.kind == "heat": + years = pd.date_range(freq="h", **snakemake.params.snapshots).year.unique() + assert len(years) == 1, "Currently only works for single year." 
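The guard added to build_population_layouts.py above prevents a division by zero when a country ends up with no population in any grid cell, for instance because it lies outside the cutout. A minimal illustration of what would otherwise happen:

import pandas as pd

pop_cells_ct = pd.Series([0.0, 0.0, 0.0])  # hypothetical country entirely outside the cutout
pop_ct = 1.5e6

if pop_cells_ct.sum() != 0:  # the new guard
    pop_cells_ct *= pop_ct / pop_cells_ct.sum()
# without the guard: pop_ct / 0.0 is inf and 0.0 * inf is NaN for every cell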
+ data_year = years[0] + else: + data_year = int(config["energy_totals_year"]) pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) - energy_totals = pd.read_csv(snakemake.input.energy_totals, index_col=0) + totals = pd.read_csv(snakemake.input.energy_totals, index_col=[0, 1]) + totals = totals.xs(data_year, level="year") - nodal_energy_totals = energy_totals.loc[pop_layout.ct].fillna(0.0) - nodal_energy_totals.index = pop_layout.index - nodal_energy_totals = nodal_energy_totals.multiply(pop_layout.fraction, axis=0) + nodal_totals = totals.loc[pop_layout.ct].fillna(0.0) + nodal_totals.index = pop_layout.index + nodal_totals = nodal_totals.multiply(pop_layout.fraction, axis=0) - nodal_energy_totals.to_csv(snakemake.output[0]) + nodal_totals.to_csv(snakemake.output[0]) diff --git a/scripts/build_powerplants.py b/scripts/build_powerplants.py index 8f99cd41..66a01624 100755 --- a/scripts/build_powerplants.py +++ b/scripts/build_powerplants.py @@ -91,7 +91,7 @@ import numpy as np import pandas as pd import powerplantmatching as pm import pypsa -from _helpers import configure_logging +from _helpers import configure_logging, set_scenario_config from powerplantmatching.export import map_country_bus logger = logging.getLogger(__name__) @@ -165,6 +165,7 @@ if __name__ == "__main__": snakemake = mock_snakemake("build_powerplants") configure_logging(snakemake) + set_scenario_config(snakemake) n = pypsa.Network(snakemake.input.base_network) countries = snakemake.params.countries diff --git a/scripts/build_renewable_profiles.py b/scripts/build_renewable_profiles.py index 97763611..7056e61d 100644 --- a/scripts/build_renewable_profiles.py +++ b/scripts/build_renewable_profiles.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -186,9 +186,8 @@ import time import atlite import geopandas as gpd import numpy as np -import pandas as pd import xarray as xr -from _helpers import configure_logging +from _helpers import configure_logging, get_snapshots, set_scenario_config from dask.distributed import Client from pypsa.geo import haversine from shapely.geometry import LineString @@ -200,17 +199,24 @@ if __name__ == "__main__": if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake("build_renewable_profiles", technology="solar") + snakemake = mock_snakemake("build_renewable_profiles", technology="offwind-dc") configure_logging(snakemake) + set_scenario_config(snakemake) nprocesses = int(snakemake.threads) noprogress = snakemake.config["run"].get("disable_progressbar", True) noprogress = noprogress or not snakemake.config["atlite"]["show_progress"] params = snakemake.params.renewable[snakemake.wildcards.technology] resource = params["resource"] # pv panel params / wind turbine params + + tech = next(t for t in ["panel", "turbine"] if t in resource) + models = resource[tech] + if not isinstance(models, dict): + models = {0: models} + resource[tech] = models[next(iter(models))] + correction_factor = params.get("correction_factor", 1.0) capacity_per_sqkm = params["capacity_per_sqkm"] - snapshots = snakemake.params.snapshots if correction_factor != 1.0: logger.info(f"correction_factor is set as {correction_factor}") @@ -220,7 +226,8 @@ if __name__ == "__main__": else: client = None - sns = pd.date_range(freq="h", **snapshots) + sns = get_snapshots(snakemake.params.snapshots, 
snakemake.params.drop_leap_day) + cutout = atlite.Cutout(snakemake.input.cutout).sel(time=sns) regions = gpd.read_file(snakemake.input.regions) assert not regions.empty, ( @@ -329,24 +336,42 @@ if __name__ == "__main__": duration = time.time() - start logger.info(f"Completed average capacity factor calculation ({duration:2.2f}s)") - logger.info("Calculate weighted capacity factor time series...") - start = time.time() + profiles = [] + capacities = [] + for year, model in models.items(): - profile, capacities = func( - matrix=availability.stack(spatial=["y", "x"]), - layout=layout, - index=buses, - per_unit=True, - return_capacity=True, - **resource, - ) + logger.info( + f"Calculate weighted capacity factor time series for model {model}..." + ) + start = time.time() - duration = time.time() - start - logger.info( - f"Completed weighted capacity factor time series calculation ({duration:2.2f}s)" - ) + resource[tech] = model - logger.info(f"Calculating maximal capacity per bus") + profile, capacity = func( + matrix=availability.stack(spatial=["y", "x"]), + layout=layout, + index=buses, + per_unit=True, + return_capacity=True, + **resource, + ) + + dim = {"year": [year]} + profile = profile.expand_dims(dim) + capacity = capacity.expand_dims(dim) + + profiles.append(profile.rename("profile")) + capacities.append(capacity.rename("weight")) + + duration = time.time() - start + logger.info( + f"Completed weighted capacity factor time series calculation for model {model} ({duration:2.2f}s)" + ) + + profiles = xr.merge(profiles) + capacities = xr.merge(capacities) + + logger.info("Calculating maximal capacity per bus") p_nom_max = capacity_per_sqkm * availability @ area logger.info("Calculate average distances.") @@ -371,8 +396,8 @@ if __name__ == "__main__": ds = xr.merge( [ - (correction_factor * profile).rename("profile"), - capacities.rename("weight"), + correction_factor * profiles, + capacities, p_nom_max.rename("p_nom_max"), potential.rename("potential"), average_distance.rename("average_distance"), @@ -392,9 +417,13 @@ if __name__ == "__main__": ds["underwater_fraction"] = xr.DataArray(underwater_fraction, [buses]) # select only buses with some capacity and minimal capacity factor + mean_profile = ds["profile"].mean("time") + if "year" in ds.indexes: + mean_profile = mean_profile.max("year") + ds = ds.sel( bus=( - (ds["profile"].mean("time") > params.get("min_p_max_pu", 0.0)) + (mean_profile > params.get("min_p_max_pu", 0.0)) & (ds["p_nom_max"] > params.get("min_p_nom_max", 0.0)) ) ) diff --git a/scripts/build_retro_cost.py b/scripts/build_retro_cost.py index d2aae140..52f545e9 100755 --- a/scripts/build_retro_cost.py +++ b/scripts/build_retro_cost.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -68,6 +68,7 @@ The script has the following structure: """ import pandas as pd import xarray as xr +from _helpers import set_scenario_config # (i) --- FIXED PARAMETER / STANDARD VALUES ----------------------------------- @@ -297,8 +298,8 @@ def prepare_building_stock_data(): errors="ignore", ) - u_values.subsector.replace(rename_sectors, inplace=True) - u_values.btype.replace(rename_sectors, inplace=True) + u_values["subsector"] = u_values.subsector.replace(rename_sectors) + u_values["btype"] = u_values.btype.replace(rename_sectors) # for missing weighting of surfaces of building types assume MFH 
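In build_renewable_profiles.py above, the resource section may now hold several panel or turbine models keyed by year; a single model is wrapped into a one-entry dict so the same per-model loop applies either way, and each resulting profile is tagged with a new "year" dimension before merging. Reduced sketch (model names are hypothetical placeholders):

# hypothetical config excerpt
resource = {"turbine": {2000: "OldTurbineModel", 2010: "NewTurbineModel"}}

tech = next(t for t in ["panel", "turbine"] if t in resource)
models = resource[tech]
if not isinstance(models, dict):
    models = {0: models}  # wrap a single model so the loop below still works

for year, model in models.items():
    resource[tech] = model
    # ... compute profile and capacity for this model, then
    # profile = profile.expand_dims({"year": [year]}) before xr.merge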
u_values["assumed_subsector"] = u_values.subsector @@ -306,8 +307,8 @@ def prepare_building_stock_data(): ~u_values.subsector.isin(rename_sectors.values()), "assumed_subsector" ] = "MFH" - u_values.country_code.replace({"UK": "GB"}, inplace=True) - u_values.bage.replace({"Berfore 1945": "Before 1945"}, inplace=True) + u_values["country_code"] = u_values.country_code.replace({"UK": "GB"}) + u_values["bage"] = u_values.bage.replace({"Berfore 1945": "Before 1945"}) u_values = u_values[~u_values.bage.isna()] u_values.set_index(["country_code", "subsector", "bage", "type"], inplace=True) @@ -554,7 +555,7 @@ def prepare_temperature_data(): # windows --------------------------------------------------------------- -def window_limit(l, window_assumptions): +def window_limit(l, window_assumptions): # noqa: E741 """ Define limit u value from which on window is retrofitted. """ @@ -567,7 +568,7 @@ def window_limit(l, window_assumptions): return m * l + a -def u_retro_window(l, window_assumptions): +def u_retro_window(l, window_assumptions): # noqa: E741 """ Define retrofitting value depending on renovation strength. """ @@ -580,7 +581,7 @@ def u_retro_window(l, window_assumptions): return max(m * l + a, 0.8) -def window_cost(u, cost_retro, window_assumptions): +def window_cost(u, cost_retro, window_assumptions): # noqa: E741 """ Get costs for new windows depending on u value. """ @@ -600,33 +601,40 @@ def window_cost(u, cost_retro, window_assumptions): return window_cost -def calculate_costs(u_values, l, cost_retro, window_assumptions): +def calculate_costs(u_values, l, cost_retro, window_assumptions): # noqa: E741 """ Returns costs for a given retrofitting strength weighted by the average surface/volume ratio of the component for each building type. """ return u_values.apply( lambda x: ( - cost_retro.loc[x.name[3], "cost_var"] - * 100 - * float(l) - * l_weight.loc[x.name[3]].iloc[0] - + cost_retro.loc[x.name[3], "cost_fix"] - ) - * x.A_element - / x.A_C_Ref - if x.name[3] != "Window" - else ( - (window_cost(x[f"new_U_{l}"], cost_retro, window_assumptions) * x.A_element) + ( + cost_retro.loc[x.name[3], "cost_var"] + * 100 + * float(l) + * l_weight.loc[x.name[3]].iloc[0] + + cost_retro.loc[x.name[3], "cost_fix"] + ) + * x.A_element / x.A_C_Ref - ) - if x.value > window_limit(float(l), window_assumptions) - else 0, + if x.name[3] != "Window" + else ( + ( + ( + window_cost(x[f"new_U_{l}"], cost_retro, window_assumptions) + * x.A_element + ) + / x.A_C_Ref + ) + if x.value > window_limit(float(l), window_assumptions) + else 0 + ) + ), axis=1, ) -def calculate_new_u(u_values, l, l_weight, window_assumptions, k=0.035): +def calculate_new_u(u_values, l, l_weight, window_assumptions, k=0.035): # noqa: E741 """ Calculate U-values after building retrofitting, depending on the old U-values (u_values). 
This is for simple insulation measuers, adding an @@ -648,12 +656,14 @@ def calculate_new_u(u_values, l, l_weight, window_assumptions, k=0.035): k: thermal conductivity """ return u_values.apply( - lambda x: k / ((k / x.value) + (float(l) * l_weight.loc[x.name[3]])) - if x.name[3] != "Window" - else ( - min(x.value, u_retro_window(float(l), window_assumptions)) - if x.value > window_limit(float(l), window_assumptions) - else x.value + lambda x: ( + k / ((k / x.value) + (float(l) * l_weight.loc[x.name[3]])) + if x.name[3] != "Window" + else ( + min(x.value, u_retro_window(float(l), window_assumptions)) + if x.value > window_limit(float(l), window_assumptions) + else x.value + ) ), axis=1, ) @@ -746,7 +756,7 @@ def calculate_heat_losses(u_values, data_tabula, l_strength, temperature_factor) """ # (1) by transmission # calculate new U values of building elements due to additional insulation - for l in l_strength: + for l in l_strength: # noqa: E741 u_values[f"new_U_{l}"] = calculate_new_u( u_values, l, l_weight, window_assumptions ) @@ -1044,6 +1054,7 @@ if __name__ == "__main__": ll="v1.0", sector_opts="Co2L0-168H-T-H-B-I-solar3-dist1", ) + set_scenario_config(snakemake) # ******** config ********************************************************* diff --git a/scripts/build_salt_cavern_potentials.py b/scripts/build_salt_cavern_potentials.py index ed039772..f2c2ce8f 100644 --- a/scripts/build_salt_cavern_potentials.py +++ b/scripts/build_salt_cavern_potentials.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -24,6 +24,7 @@ onshore (>50km from sea), offshore (Figure 7). import geopandas as gpd import pandas as pd +from _helpers import set_scenario_config def concat_gdf(gdf_list, crs="EPSG:4326"): @@ -77,6 +78,8 @@ if __name__ == "__main__": "build_salt_cavern_potentials", simpl="", clusters="37" ) + set_scenario_config(snakemake) + fn_onshore = snakemake.input.regions_onshore fn_offshore = snakemake.input.regions_offshore diff --git a/scripts/build_sequestration_potentials.py b/scripts/build_sequestration_potentials.py index f6ad3526..0d70448d 100644 --- a/scripts/build_sequestration_potentials.py +++ b/scripts/build_sequestration_potentials.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2023-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -10,6 +10,7 @@ database_en>`_. 
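On the calculate_new_u refactor in build_retro_cost.py above: the underlying formula is the series thermal-resistance relation U_new = k / (k / U_old + l), shown here with the element-specific weight taken as 1 for brevity. Rough illustration:

k = 0.035    # thermal conductivity of the insulation in W/mK
u_old = 1.0  # U-value of the un-retrofitted element in W/m2K
l = 0.1      # additional insulation thickness in m

u_new = k / ((k / u_old) + l)
# ~0.26 W/m2K, i.e. 10 cm of insulation cuts transmission losses by roughly three quarters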
import geopandas as gpd import pandas as pd +from _helpers import set_scenario_config def area(gdf): @@ -22,13 +23,15 @@ def area(gdf): def allocate_sequestration_potential( gdf, regions, attr="conservative estimate Mt", threshold=3 ): - gdf = gdf.loc[gdf[attr] > threshold, [attr, "geometry"]] + if isinstance(attr, str): + attr = [attr] + gdf = gdf.loc[gdf[attr].sum(axis=1) > threshold, attr + ["geometry"]] gdf["area_sqkm"] = area(gdf) overlay = gpd.overlay(regions, gdf, keep_geom_type=True) overlay["share"] = area(overlay) / overlay["area_sqkm"] adjust_cols = overlay.columns.difference({"name", "area_sqkm", "geometry", "share"}) overlay[adjust_cols] = overlay[adjust_cols].multiply(overlay["share"], axis=0) - return overlay.dissolve("name", aggfunc="sum")[attr] + return overlay.dissolve("name", aggfunc="sum")[attr].sum(axis=1) if __name__ == "__main__": @@ -36,12 +39,14 @@ if __name__ == "__main__": from _helpers import mock_snakemake snakemake = mock_snakemake( - "build_sequestration_potentials", simpl="", clusters="181" + "build_sequestration_potentials", simpl="", clusters="128" ) + set_scenario_config(snakemake) + cf = snakemake.params.sequestration_potential - gdf = gpd.read_file(snakemake.input.sequestration_potential[0]) + gdf = gpd.read_file(snakemake.input.sequestration_potential) regions = gpd.read_file(snakemake.input.regions_offshore) if cf["include_onshore"]: diff --git a/scripts/build_shapes.py b/scripts/build_shapes.py index 35bae147..fd64411a 100644 --- a/scripts/build_shapes.py +++ b/scripts/build_shapes.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -77,7 +77,7 @@ import geopandas as gpd import numpy as np import pandas as pd import pycountry as pyc -from _helpers import configure_logging +from _helpers import configure_logging, set_scenario_config from shapely.geometry import MultiPolygon, Polygon logger = logging.getLogger(__name__) @@ -158,7 +158,7 @@ def country_cover(country_shapes, eez_shapes=None): shapes = pd.concat([shapes, eez_shapes]) europe_shape = shapes.unary_union if isinstance(europe_shape, MultiPolygon): - europe_shape = max(europe_shape, key=attrgetter("area")) + europe_shape = max(europe_shape.geoms, key=attrgetter("area")) return Polygon(shell=europe_shape.exterior) @@ -254,6 +254,7 @@ if __name__ == "__main__": snakemake = mock_snakemake("build_shapes") configure_logging(snakemake) + set_scenario_config(snakemake) country_shapes = countries(snakemake.input.naturalearth, snakemake.params.countries) country_shapes.reset_index().to_file(snakemake.output.country_shapes) diff --git a/scripts/build_ship_raster.py b/scripts/build_ship_raster.py index da8c8b28..47d725d8 100644 --- a/scripts/build_ship_raster.py +++ b/scripts/build_ship_raster.py @@ -46,7 +46,7 @@ import zipfile from pathlib import Path import rioxarray -from _helpers import configure_logging +from _helpers import configure_logging, set_scenario_config from build_natura_raster import determine_cutout_xXyY logger = logging.getLogger(__name__) @@ -57,9 +57,9 @@ if __name__ == "__main__": snakemake = mock_snakemake("build_ship_raster") configure_logging(snakemake) + set_scenario_config(snakemake) - cutouts = snakemake.input.cutouts - xs, Xs, ys, Ys = zip(*(determine_cutout_xXyY(cutout) for cutout in cutouts)) + x, X, y, Y = determine_cutout_xXyY(snakemake.input.cutout) with zipfile.ZipFile(snakemake.input.ship_density) as zip_f: resources = 
Path(snakemake.output[0]).parent @@ -67,7 +67,7 @@ if __name__ == "__main__": zip_f.extract(fn, resources) with rioxarray.open_rasterio(resources / fn) as ship_density: ship_density = ship_density.drop_vars(["band"]).sel( - x=slice(min(xs), max(Xs)), y=slice(max(Ys), min(ys)) + x=slice(x, X), y=slice(Y, y) ) ship_density.rio.to_raster(snakemake.output[0]) diff --git a/scripts/build_shipping_demand.py b/scripts/build_shipping_demand.py index 8000c66c..b50cd316 100644 --- a/scripts/build_shipping_demand.py +++ b/scripts/build_shipping_demand.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2023-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -11,22 +11,25 @@ import json import geopandas as gpd import pandas as pd +from _helpers import set_scenario_config if __name__ == "__main__": if "snakemake" not in globals(): from _helpers import mock_snakemake snakemake = mock_snakemake( - "build_shipping_demand_per_node", + "build_shipping_demand", simpl="", clusters=48, ) + set_scenario_config(snakemake) scope = gpd.read_file(snakemake.input.scope).geometry[0] regions = gpd.read_file(snakemake.input.regions).set_index("name") - demand = pd.read_csv(snakemake.input.demand, index_col=0)[ + demand = pd.read_csv(snakemake.input.demand, index_col=[0, 1])[ "total international navigation" ] + demand = demand.xs(snakemake.params.energy_totals_year, level=1) # read port data into GeoDataFrame with open(snakemake.input.ports, "r", encoding="latin_1") as f: diff --git a/scripts/build_solar_thermal_profiles.py b/scripts/build_solar_thermal_profiles.py index ee6ed881..bb5180b9 100644 --- a/scripts/build_solar_thermal_profiles.py +++ b/scripts/build_solar_thermal_profiles.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -9,8 +9,8 @@ Build solar thermal collector time series. import atlite import geopandas as gpd import numpy as np -import pandas as pd import xarray as xr +from _helpers import get_snapshots, set_scenario_config from dask.distributed import Client, LocalCluster if __name__ == "__main__": @@ -22,14 +22,17 @@ if __name__ == "__main__": simpl="", clusters=48, ) + set_scenario_config(snakemake) nprocesses = int(snakemake.threads) cluster = LocalCluster(n_workers=nprocesses, threads_per_worker=1) client = Client(cluster, asynchronous=True) config = snakemake.params.solar_thermal + config.pop("cutout", None) + + time = get_snapshots(snakemake.params.snapshots, snakemake.params.drop_leap_day) - time = pd.date_range(freq="h", **snakemake.params.snapshots) cutout = atlite.Cutout(snakemake.input.cutout).sel(time=time) clustered_regions = ( diff --git a/scripts/build_temperature_profiles.py b/scripts/build_temperature_profiles.py index a13ec3c2..00c88b5b 100644 --- a/scripts/build_temperature_profiles.py +++ b/scripts/build_temperature_profiles.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -9,8 +9,8 @@ Build time series for air and soil temperatures per clustered model region. 
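In build_sequestration_potentials.py above, allocate_sequestration_potential now also accepts a list of estimate columns, which are summed before the threshold is applied; each storage site's potential is then split across model regions in proportion to the overlapping area. The arithmetic in reduced form, with made-up numbers:

import pandas as pd

site_potential = 100.0  # Mt, hypothetical storage site
site_area = 200.0       # km2, the site's area_sqkm
overlap = pd.Series({"region A": 120.0, "region B": 80.0})  # km2 of the site inside each region

share = overlap / site_area         # 0.6 and 0.4
allocated = site_potential * share  # 60 Mt to region A, 40 Mt to region B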
import atlite import geopandas as gpd import numpy as np -import pandas as pd import xarray as xr +from _helpers import get_snapshots, set_scenario_config from dask.distributed import Client, LocalCluster if __name__ == "__main__": @@ -22,19 +22,21 @@ if __name__ == "__main__": simpl="", clusters=48, ) + set_scenario_config(snakemake) nprocesses = int(snakemake.threads) cluster = LocalCluster(n_workers=nprocesses, threads_per_worker=1) client = Client(cluster, asynchronous=True) - time = pd.date_range(freq="h", **snakemake.params.snapshots) + time = get_snapshots(snakemake.params.snapshots, snakemake.params.drop_leap_day) + cutout = atlite.Cutout(snakemake.input.cutout).sel(time=time) clustered_regions = ( gpd.read_file(snakemake.input.regions_onshore).set_index("name").buffer(0) ) - I = cutout.indicatormatrix(clustered_regions) + I = cutout.indicatormatrix(clustered_regions) # noqa: E741 pop_layout = xr.open_dataarray(snakemake.input.pop_layout) diff --git a/scripts/build_transport_demand.py b/scripts/build_transport_demand.py index 0bcfb7ed..35f22a80 100644 --- a/scripts/build_transport_demand.py +++ b/scripts/build_transport_demand.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -8,14 +8,24 @@ improvements due to drivetrain changes, time series for electric vehicle availability and demand-side management constraints. """ +import logging + import numpy as np import pandas as pd import xarray as xr -from _helpers import generate_periodic_profiles +from _helpers import ( + configure_logging, + generate_periodic_profiles, + get_snapshots, + set_scenario_config, +) + +logger = logging.getLogger(__name__) -def build_nodal_transport_data(fn, pop_layout): - transport_data = pd.read_csv(fn, index_col=0) +def build_nodal_transport_data(fn, pop_layout, year): + transport_data = pd.read_csv(fn, index_col=[0, 1]) + transport_data = transport_data.xs(min(2015, year), level="year") nodal_transport_data = transport_data.loc[pop_layout.ct].fillna(0.0) nodal_transport_data.index = pop_layout.index @@ -130,6 +140,12 @@ def bev_availability_profile(fn, snapshots, nodes, options): traffic.mean() - traffic.min() ) + if not avail[avail < 0].empty: + logger.warning( + "The BEV availability weekly profile has negative values which can " + "lead to infeasibility." 
+ ) + return generate_periodic_profiles( dt_index=snapshots, nodes=nodes, @@ -158,8 +174,10 @@ if __name__ == "__main__": snakemake = mock_snakemake( "build_transport_demand", simpl="", - clusters=48, + clusters=60, ) + configure_logging(snakemake) + set_scenario_config(snakemake) pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) @@ -171,12 +189,15 @@ if __name__ == "__main__": options = snakemake.params.sector - snapshots = pd.date_range(freq="h", **snakemake.params.snapshots, tz="UTC") + snapshots = get_snapshots( + snakemake.params.snapshots, snakemake.params.drop_leap_day, tz="UTC" + ) nyears = len(snapshots) / 8760 + energy_totals_year = snakemake.params.energy_totals_year nodal_transport_data = build_nodal_transport_data( - snakemake.input.transport_data, pop_layout + snakemake.input.transport_data, pop_layout, energy_totals_year ) transport_demand = build_transport_demand( diff --git a/scripts/cluster_gas_network.py b/scripts/cluster_gas_network.py index e7554dff..19585aa9 100755 --- a/scripts/cluster_gas_network.py +++ b/scripts/cluster_gas_network.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -8,14 +8,14 @@ Cluster gas transmission network to clustered model regions. import logging -logger = logging.getLogger(__name__) - import geopandas as gpd import pandas as pd -from packaging.version import Version, parse +from _helpers import configure_logging, set_scenario_config from pypsa.geo import haversine_pts from shapely import wkt +logger = logging.getLogger(__name__) + def concat_gdf(gdf_list, crs="EPSG:4326"): """ @@ -41,12 +41,9 @@ def build_clustered_gas_network(df, bus_regions, length_factor=1.25): for i in [0, 1]: gdf = gpd.GeoDataFrame(geometry=df[f"point{i}"], crs="EPSG:4326") - kws = ( - dict(op="within") - if parse(gpd.__version__) < Version("0.10") - else dict(predicate="within") - ) - bus_mapping = gpd.sjoin(gdf, bus_regions, how="left", **kws).index_right + bus_mapping = gpd.sjoin( + gdf, bus_regions, how="left", predicate="within" + ).index_right bus_mapping = bus_mapping.groupby(bus_mapping.index).first() df[f"bus{i}"] = bus_mapping @@ -75,10 +72,10 @@ def build_clustered_gas_network(df, bus_regions, length_factor=1.25): return df -def reindex_pipes(df): +def reindex_pipes(df, prefix="gas pipeline"): def make_index(x): connector = " <-> " if x.bidirectional else " -> " - return "gas pipeline " + x.bus0 + connector + x.bus1 + return prefix + " " + x.bus0 + connector + x.bus1 df.index = df.apply(make_index, axis=1) @@ -109,8 +106,8 @@ if __name__ == "__main__": from _helpers import mock_snakemake snakemake = mock_snakemake("cluster_gas_network", simpl="", clusters="37") - - logging.basicConfig(level=snakemake.config["logging"]["level"]) + configure_logging(snakemake) + set_scenario_config(snakemake) fn = snakemake.input.cleaned_gas_network df = pd.read_csv(fn, index_col=0) diff --git a/scripts/cluster_network.py b/scripts/cluster_network.py index 28f08396..87762b36 100644 --- a/scripts/cluster_network.py +++ b/scripts/cluster_network.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -122,17 +122,21 @@ Exemplary unsolved network clustered to 37 nodes: """ import logging +import os import warnings from functools import reduce import geopandas 
as gpd +import linopy import matplotlib.pyplot as plt import numpy as np import pandas as pd -import pyomo.environ as po import pypsa import seaborn as sns -from _helpers import configure_logging, update_p_nom_max +from _helpers import configure_logging, set_scenario_config, update_p_nom_max +from add_electricity import load_costs +from base_network import append_bus_shapes +from packaging.version import Version, parse from pypsa.clustering.spatial import ( busmap_by_greedy_modularity, busmap_by_hac, @@ -140,12 +144,10 @@ from pypsa.clustering.spatial import ( get_clustering_from_busmap, ) +PD_GE_2_2 = parse(pd.__version__) >= Version("2.2") + warnings.filterwarnings(action="ignore", category=UserWarning) - -from add_electricity import load_costs - idx = pd.IndexSlice - logger = logging.getLogger(__name__) @@ -217,7 +219,7 @@ def get_feature_for_hac(n, buses_i=None, feature=None): return feature_data -def distribute_clusters(n, n_clusters, focus_weights=None, solver_name="cbc"): +def distribute_clusters(n, n_clusters, focus_weights=None, solver_name="scip"): """ Determine the number of clusters per country. """ @@ -230,7 +232,7 @@ def distribute_clusters(n, n_clusters, focus_weights=None, solver_name="cbc"): .pipe(normed) ) - N = n.buses.groupby(["country", "sub_network"]).size() + N = n.buses.groupby(["country", "sub_network"]).size()[L.index] assert ( n_clusters >= len(N) and n_clusters <= N.sum() @@ -257,31 +259,22 @@ def distribute_clusters(n, n_clusters, focus_weights=None, solver_name="cbc"): L.sum(), 1.0, rtol=1e-3 ), f"Country weights L must sum up to 1.0 when distributing clusters. Is {L.sum()}." - m = po.ConcreteModel() - - def n_bounds(model, *n_id): - return (1, N[n_id]) - - m.n = po.Var(list(L.index), bounds=n_bounds, domain=po.Integers) - m.tot = po.Constraint(expr=(po.summation(m.n) == n_clusters)) - m.objective = po.Objective( - expr=sum((m.n[i] - L.loc[i] * n_clusters) ** 2 for i in L.index), - sense=po.minimize, + m = linopy.Model() + clusters = m.add_variables( + lower=1, upper=N, coords=[L.index], name="n", integer=True ) - - opt = po.SolverFactory(solver_name) - if solver_name == "appsi_highs" or not opt.has_capability("quadratic_objective"): - logger.warning( - f"The configured solver `{solver_name}` does not support quadratic objectives. Falling back to `ipopt`." + m.add_constraints(clusters.sum() == n_clusters, name="tot") + # leave out constant in objective (L * n_clusters) ** 2 + m.objective = (clusters * clusters - 2 * clusters * L * n_clusters).sum() + if solver_name == "gurobi": + logging.getLogger("gurobipy").propagate = False + elif solver_name not in ["scip", "cplex", "xpress", "copt", "mosek"]: + logger.info( + f"The configured solver `{solver_name}` does not support quadratic objectives. Falling back to `scip`." ) - opt = po.SolverFactory("ipopt") - - results = opt.solve(m) - assert ( - results["Solver"][0]["Status"] == "ok" - ), f"Solver returned non-optimally: {results}" - - return pd.Series(m.n.get_values(), index=L.index).round().astype(int) + solver_name = "scip" + m.solve(solver_name=solver_name) + return m.solution["n"].to_series().astype(int) def busmap_for_n_clusters( @@ -373,9 +366,11 @@ def busmap_for_n_clusters( f"`algorithm` must be one of 'kmeans' or 'hac'. Is {algorithm}." 
) + compat_kws = dict(include_groups=False) if PD_GE_2_2 else {} + return ( n.buses.groupby(["country", "sub_network"], group_keys=False) - .apply(busmap_for_country) + .apply(busmap_for_country, **compat_kws) .squeeze() .rename("busmap") ) @@ -388,7 +383,7 @@ def clustering_for_n_clusters( aggregate_carriers=None, line_length_factor=1.25, aggregation_strategies=dict(), - solver_name="cbc", + solver_name="scip", algorithm="hac", feature=None, extended_link_costs=0, @@ -434,20 +429,27 @@ return clustering -def cluster_regions(busmaps, input=None, output=None): +def cluster_regions(busmaps, regions): + """ + Cluster regions based on busmaps and return the clustered regions as a + GeoDataFrame. + + Parameters: + - busmaps (list): A list of busmaps used for clustering. + - regions (GeoDataFrame): The regions to cluster, one row per bus. + + Returns: + GeoDataFrame with the clustered regions. + """ busmap = reduce(lambda x, y: x.map(y), busmaps[1:], busmaps[0]) - - for which in ("regions_onshore", "regions_offshore"): - regions = gpd.read_file(getattr(input, which)) - regions = regions.reindex(columns=["name", "geometry"]).set_index("name") - regions_c = regions.dissolve(busmap) - regions_c.index.name = "name" - regions_c = regions_c.reset_index() - regions_c.to_file(getattr(output, which)) + regions = regions.reindex(columns=["name", "geometry"]).set_index("name") + regions_c = regions.dissolve(busmap) + regions_c.index.name = "name" + return regions_c.reset_index() -def plot_busmap_for_n_clusters(n, n_clusters, fn=None): - busmap = busmap_for_n_clusters(n, n_clusters) +def plot_busmap_for_n_clusters(n, n_clusters, solver_name="scip", fn=None): + busmap = busmap_for_n_clusters(n, n_clusters, solver_name) cs = busmap.unique() cr = sns.color_palette("hls", len(cs)) n.plot(bus_colors=busmap.map(dict(zip(cs, cr)))) @@ -460,12 +462,12 @@ if __name__ == "__main__": if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake("cluster_network", simpl="", clusters="37") + snakemake = mock_snakemake("cluster_network", simpl="", clusters="40") configure_logging(snakemake) + set_scenario_config(snakemake) params = snakemake.params solver_name = snakemake.config["solving"]["solver"]["name"] - solver_name = "appsi_highs" if solver_name == "highs" else solver_name n = pypsa.Network(snakemake.input.network) @@ -477,7 +479,7 @@ conventional_carriers = set(params.conventional_carriers) if snakemake.wildcards.clusters.endswith("m"): n_clusters = int(snakemake.wildcards.clusters[:-1]) - aggregate_carriers = params.conventional_carriers & aggregate_carriers + aggregate_carriers = conventional_carriers & aggregate_carriers elif snakemake.wildcards.clusters.endswith("c"): n_clusters = int(snakemake.wildcards.clusters[:-1]) aggregate_carriers = aggregate_carriers - conventional_carriers @@ -500,7 +502,9 @@ gens.efficiency, bins=[0, low, high, 1], labels=labels ).astype(str) carriers += [f"{c} {label} efficiency" for label in labels] - n.generators.carrier.update(gens.carrier + " " + suffix + " efficiency") + n.generators.update( + {"carrier": gens.carrier + " " + suffix + " efficiency"} + ) aggregate_carriers = carriers if n_clusters == len(n.buses): @@ -523,8 +527,8 @@ custom_busmap = params.custom_busmap if custom_busmap: custom_busmap = pd.read_csv( - snakemake.input.custom_busmap, index_col=0, squeeze=True - ) + snakemake.input.custom_busmap, index_col=0 + ).squeeze() custom_busmap.index = custom_busmap.index.astype(str)
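The linopy rewrite of distribute_clusters above drops the constant term of the quadratic objective: sum_i (n_i - L_i N)^2 = sum_i (n_i^2 - 2 L_i N n_i) + sum_i (L_i N)^2, and the last sum does not depend on the decision variables, so the minimiser is unchanged. A quick numerical check of that identity with made-up weights and candidate assignments:

import numpy as np

L = np.array([0.5, 0.3, 0.2])  # hypothetical country weights, summing to 1
N = 10                         # total number of clusters

for n in ([5, 3, 2], [6, 2, 2], [4, 4, 2]):
    n = np.asarray(n)
    full = ((n - L * N) ** 2).sum()
    reduced = (n * n - 2 * n * L * N).sum()
    print(full - reduced)  # always ((L * N) ** 2).sum() = 38.0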
logger.info(f"Imported custom busmap from {snakemake.input.custom_busmap}") @@ -542,21 +546,25 @@ if __name__ == "__main__": params.focus_weights, ) - update_p_nom_max(clustering.network) + nc = clustering.network + update_p_nom_max(nc) if params.cluster_network.get("consider_efficiency_classes"): labels = [f" {label} efficiency" for label in ["low", "medium", "high"]] - nc = clustering.network nc.generators["carrier"] = nc.generators.carrier.replace(labels, "", regex=True) - clustering.network.meta = dict( - snakemake.config, **dict(wildcards=dict(snakemake.wildcards)) - ) - clustering.network.export_to_netcdf(snakemake.output.network) for attr in ( "busmap", "linemap", ): # also available: linemap_positive, linemap_negative getattr(clustering, attr).to_csv(snakemake.output[attr]) - cluster_regions((clustering.busmap,), snakemake.input, snakemake.output) + nc.shapes = n.shapes.copy() + for which in ["regions_onshore", "regions_offshore"]: + regions = gpd.read_file(snakemake.input[which]) + clustered_regions = cluster_regions((clustering.busmap,), regions) + clustered_regions.to_file(snakemake.output[which]) + append_bus_shapes(nc, clustered_regions, type=which.split("_")[1]) + + nc.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards))) + nc.export_to_netcdf(snakemake.output.network) diff --git a/scripts/copy_config.py b/scripts/copy_config.py deleted file mode 100644 index a549d893..00000000 --- a/scripts/copy_config.py +++ /dev/null @@ -1,27 +0,0 @@ -# -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors -# -# SPDX-License-Identifier: MIT -""" -Copy used configuration files and important scripts for archiving. -""" - -from pathlib import Path -from shutil import copy - -import yaml - -if __name__ == "__main__": - if "snakemake" not in globals(): - from _helpers import mock_snakemake - - snakemake = mock_snakemake("copy_config") - - with open(snakemake.output[0], "w") as yaml_file: - yaml.dump( - snakemake.config, - yaml_file, - default_flow_style=False, - allow_unicode=True, - sort_keys=False, - ) diff --git a/scripts/determine_availability_matrix_MD_UA.py b/scripts/determine_availability_matrix_MD_UA.py index efe9a712..80c04083 100644 --- a/scripts/determine_availability_matrix_MD_UA.py +++ b/scripts/determine_availability_matrix_MD_UA.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -15,7 +15,7 @@ import fiona import geopandas as gpd import matplotlib.pyplot as plt import numpy as np -from _helpers import configure_logging +from _helpers import configure_logging, set_scenario_config from atlite.gis import shape_availability from rasterio.plot import show @@ -38,6 +38,7 @@ if __name__ == "__main__": "determine_availability_matrix_MD_UA", technology="solar" ) configure_logging(snakemake) + set_scenario_config(snakemake) nprocesses = None # snakemake.config["atlite"].get("nprocesses") noprogress = not snakemake.config["atlite"].get("show_progress", True) diff --git a/scripts/make_summary.py b/scripts/make_summary.py index fb13e91e..8c2a1aea 100644 --- a/scripts/make_summary.py +++ b/scripts/make_summary.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -8,18 +8,16 @@ capacity factors, curtailment, energy balances, prices and other 
metrics. """ import logging - -logger = logging.getLogger(__name__) - import sys import numpy as np import pandas as pd import pypsa +from _helpers import configure_logging, get_snapshots, set_scenario_config from prepare_sector_network import prepare_costs idx = pd.IndexSlice - +logger = logging.getLogger(__name__) opt_name = {"Store": "e", "Line": "s", "Transformer": "s"} @@ -509,21 +507,15 @@ def calculate_weighted_prices(n, label, weighted_prices): if carrier in ["H2", "gas"]: load = pd.DataFrame(index=n.snapshots, columns=buses, data=0.0) - elif carrier[:5] == "space": - load = heat_demand_df[buses.str[:2]].rename( - columns=lambda i: str(i) + suffix - ) else: - load = n.loads_t.p_set[buses] + load = n.loads_t.p_set[buses.intersection(n.loads.index)] for tech in value: names = n.links.index[n.links.index.to_series().str[-len(tech) :] == tech] if not names.empty: load += ( - n.links_t.p0[names] - .groupby(n.links.loc[names, "bus0"], axis=1) - .sum() + n.links_t.p0[names].T.groupby(n.links.loc[names, "bus0"]).sum().T ) # Add H2 Store when charging @@ -563,14 +555,16 @@ def calculate_market_values(n, label, market_values): dispatch = ( n.generators_t.p[gens] - .groupby(n.generators.loc[gens, "bus"], axis=1) + .T.groupby(n.generators.loc[gens, "bus"]) .sum() - .reindex(columns=buses, fill_value=0.0) + .T.reindex(columns=buses, fill_value=0.0) ) - revenue = dispatch * n.buses_t.marginal_price[buses] - market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum() + if total_dispatch := dispatch.sum().sum(): + market_values.at[tech, label] = revenue.sum().sum() / total_dispatch + else: + market_values.at[tech, label] = np.nan ## Now do market value of links ## @@ -586,14 +580,17 @@ def calculate_market_values(n, label, market_values): dispatch = ( n.links_t["p" + i][links] - .groupby(n.links.loc[links, "bus" + i], axis=1) + .T.groupby(n.links.loc[links, "bus" + i]) .sum() - .reindex(columns=buses, fill_value=0.0) + .T.reindex(columns=buses, fill_value=0.0) ) revenue = dispatch * n.buses_t.marginal_price[buses] - market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum() + if total_dispatch := dispatch.sum().sum(): + market_values.at[tech, label] = revenue.sum().sum() / total_dispatch + else: + market_values.at[tech, label] = np.nan return market_values @@ -648,7 +645,8 @@ def make_summaries(networks_dict): ] columns = pd.MultiIndex.from_tuples( - networks_dict.keys(), names=["cluster", "ll", "opt", "planning_horizon"] + networks_dict.keys(), + names=["cluster", "ll", "opt", "planning_horizon"], ) df = {output: pd.DataFrame(columns=columns, dtype=float) for output in outputs} @@ -677,7 +675,8 @@ if __name__ == "__main__": snakemake = mock_snakemake("make_summary") - logging.basicConfig(level=snakemake.config["logging"]["level"]) + configure_logging(snakemake) + set_scenario_config(snakemake) networks_dict = { (cluster, ll, opt + sector_opt, planning_horizon): "results/" @@ -691,7 +690,8 @@ if __name__ == "__main__": for planning_horizon in snakemake.params.scenario["planning_horizons"] } - Nyears = len(pd.date_range(freq="h", **snakemake.params.snapshots)) / 8760 + time = get_snapshots(snakemake.params.snapshots, snakemake.params.drop_leap_day) + Nyears = len(time) / 8760 costs_db = prepare_costs( snakemake.input.costs, diff --git a/scripts/make_summary_perfect.py b/scripts/make_summary_perfect.py index c387c6cf..76bd4ad0 100644 --- a/scripts/make_summary_perfect.py +++ b/scripts/make_summary_perfect.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# 
SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -12,15 +12,13 @@ other metrics. import numpy as np import pandas as pd import pypsa -from make_summary import ( - assign_carriers, - assign_locations, - calculate_cfs, - calculate_nodal_cfs, - calculate_nodal_costs, -) +from _helpers import set_scenario_config +from make_summary import calculate_cfs # noqa: F401 +from make_summary import calculate_nodal_cfs # noqa: F401 +from make_summary import calculate_nodal_costs # noqa: F401 +from make_summary import assign_carriers, assign_locations from prepare_sector_network import prepare_costs -from pypsa.descriptors import get_active_assets, nominal_attrs +from pypsa.descriptors import get_active_assets from six import iteritems idx = pd.IndexSlice @@ -249,8 +247,9 @@ def calculate_energy(n, label, energy): .groupby(level=0) .sum() .multiply(c.df.sign) - .groupby(c.df.carrier, axis=1) + .T.groupby(c.df.carrier) .sum() + .T ) else: c_energies = pd.DataFrame( @@ -268,7 +267,7 @@ def calculate_energy(n, label, energy): totals[no_bus] = float( n.component_attrs[c.name].loc["p" + port, "default"] ) - c_energies -= totals.groupby(c.df.carrier, axis=1).sum() + c_energies -= totals.T.groupby(c.df.carrier).sum().T c_energies = pd.concat([c_energies.T], keys=[c.list_name]) @@ -379,9 +378,8 @@ def calculate_supply_energy(n, label, supply_energy): .groupby(level=0) .sum() .multiply(c.df.loc[items, "sign"]) - .groupby(c.df.loc[items, "carrier"], axis=1) + .T.groupby(c.df.loc[items, "carrier"]) .sum() - .T ) s = pd.concat([s], keys=[c.list_name]) s = pd.concat([s], keys=[i]) @@ -398,16 +396,9 @@ def calculate_supply_energy(n, label, supply_energy): if len(items) == 0: continue - s = ( - (-1) - * c.pnl["p" + end] - .reindex(items, axis=1) - .multiply(n.snapshot_weightings.objective, axis=0) - .groupby(level=0) - .sum() - .groupby(c.df.loc[items, "carrier"], axis=1) - .sum() - ).T + s = (-1) * c.pnl["p" + end].reindex(items, axis=1).multiply( + n.snapshot_weightings.objective, axis=0 + ).groupby(level=0).sum().T.groupby(c.df.loc[items, "carrier"]).sum() s.index = s.index + end s = pd.concat([s], keys=[c.list_name]) s = pd.concat([s], keys=[i]) @@ -502,7 +493,7 @@ def calculate_weighted_prices(n, label, weighted_prices): else: suffix = " " + carrier - buses = n.buses.index[n.buses.index.str[2:] == suffix] + buses = n.buses.index[n.buses.index.str[5:] == suffix] if buses.empty: continue @@ -513,14 +504,14 @@ def calculate_weighted_prices(n, label, weighted_prices): else n.loads_t.p_set.reindex(buses, axis=1) ) for tech in value: - names = n.links.index[n.links.index.to_series().str[-len(tech) :] == tech] + names = n.links.index[ + n.links.index.to_series().str[-len(tech) - 5 : -5] == tech + ] if names.empty: continue - load += ( - n.links_t.p0[names].groupby(n.links.loc[names, "bus0"], axis=1).sum() - ) + load += n.links_t.p0[names].T.groupby(n.links.loc[names, "bus0"]).sum().T # Add H2 Store when charging # if carrier == "H2": @@ -528,9 +519,12 @@ def calculate_weighted_prices(n, label, weighted_prices): # stores[stores > 0.] = 0. 
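A pattern that recurs throughout make_summary.py and make_summary_perfect.py in this diff: pandas deprecates groupby(..., axis=1), so column-wise groupbys are rewritten as a transpose, a row-wise groupby and a transpose back. The two forms are equivalent:

import pandas as pd

df = pd.DataFrame([[1.0, 2.0, 3.0]], columns=["solar a", "solar b", "wind a"])
carrier = pd.Series({"solar a": "solar", "solar b": "solar", "wind a": "wind"})

# old, deprecated since pandas 2.1: df.groupby(carrier, axis=1).sum()
new = df.T.groupby(carrier).sum().T
# columns are now "solar" (3.0) and "wind" (3.0)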
# load += -stores - weighted_prices.loc[carrier, label] = ( - load * n.buses_t.marginal_price[buses] - ).sum().sum() / load.sum().sum() + if total_load := load.sum().sum(): + weighted_prices.loc[carrier, label] = ( + load * n.buses_t.marginal_price[buses] + ).sum().sum() / total_load + else: + weighted_prices.loc[carrier, label] = np.nan if carrier[:5] == "space": print(load * n.buses_t.marginal_price[buses]) @@ -558,14 +552,17 @@ def calculate_market_values(n, label, market_values): dispatch = ( n.generators_t.p[gens] - .groupby(n.generators.loc[gens, "bus"], axis=1) + .T.groupby(n.generators.loc[gens, "bus"]) .sum() - .reindex(columns=buses, fill_value=0.0) + .T.reindex(columns=buses, fill_value=0.0) ) revenue = dispatch * n.buses_t.marginal_price[buses] - market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum() + if total_dispatch := dispatch.sum().sum(): + market_values.at[tech, label] = revenue.sum().sum() / total_dispatch + else: + market_values.at[tech, label] = np.nan ## Now do market value of links ## @@ -581,14 +578,17 @@ def calculate_market_values(n, label, market_values): dispatch = ( n.links_t["p" + i][links] - .groupby(n.links.loc[links, "bus" + i], axis=1) + .T.groupby(n.links.loc[links, "bus" + i]) .sum() - .reindex(columns=buses, fill_value=0.0) + .T.reindex(columns=buses, fill_value=0.0) ) revenue = dispatch * n.buses_t.marginal_price[buses] - market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum() + if total_dispatch := dispatch.sum().sum(): + market_values.at[tech, label] = revenue.sum().sum() / total_dispatch + else: + market_values.at[tech, label] = np.nan return market_values @@ -647,7 +647,7 @@ def calculate_co2_emissions(n, label, df): emitted = n.generators_t.p[gens.index].mul(em_pu) emitted_grouped = ( - emitted.groupby(level=0).sum().groupby(n.generators.carrier, axis=1).sum().T + emitted.groupby(level=0).sum().T.groupby(n.generators.carrier).sum() ) df = df.reindex(emitted_grouped.index.union(df.index)) @@ -723,6 +723,7 @@ if __name__ == "__main__": from _helpers import mock_snakemake snakemake = mock_snakemake("make_summary_perfect") + set_scenario_config(snakemake) run = snakemake.config["run"]["name"] if run != "": diff --git a/scripts/plot_gas_network.py b/scripts/plot_gas_network.py new file mode 100644 index 00000000..26186d51 --- /dev/null +++ b/scripts/plot_gas_network.py @@ -0,0 +1,253 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Creates map of optimised gas network, storage and selected other +infrastructure. 
+""" + +import logging + +import geopandas as gpd +import matplotlib.pyplot as plt +import pandas as pd +import pypsa +from _helpers import configure_logging, set_scenario_config +from plot_power_network import assign_location, load_projection +from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches + +logger = logging.getLogger(__name__) + + +def plot_ch4_map(n): + # if "gas pipeline" not in n.links.carrier.unique(): + # return + + assign_location(n) + + bus_size_factor = 8e7 + linewidth_factor = 1e4 + # MW below which not drawn + line_lower_threshold = 1e3 + + # Drop non-electric buses so they don't clutter the plot + n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) + + fossil_gas_i = n.generators[n.generators.carrier == "gas"].index + fossil_gas = ( + n.generators_t.p.loc[:, fossil_gas_i] + .mul(n.snapshot_weightings.generators, axis=0) + .sum() + .groupby(n.generators.loc[fossil_gas_i, "bus"]) + .sum() + / bus_size_factor + ) + fossil_gas.rename(index=lambda x: x.replace(" gas", ""), inplace=True) + fossil_gas = fossil_gas.reindex(n.buses.index).fillna(0) + # make a fake MultiIndex so that area is correct for legend + fossil_gas.index = pd.MultiIndex.from_product([fossil_gas.index, ["fossil gas"]]) + + methanation_i = n.links.query("carrier == 'Sabatier'").index + methanation = ( + abs( + n.links_t.p1.loc[:, methanation_i].mul( + n.snapshot_weightings.generators, axis=0 + ) + ) + .sum() + .groupby(n.links.loc[methanation_i, "bus1"]) + .sum() + / bus_size_factor + ) + methanation = ( + methanation.groupby(methanation.index) + .sum() + .rename(index=lambda x: x.replace(" gas", "")) + ) + # make a fake MultiIndex so that area is correct for legend + methanation.index = pd.MultiIndex.from_product([methanation.index, ["methanation"]]) + + biogas_i = n.stores[n.stores.carrier == "biogas"].index + biogas = ( + n.stores_t.p.loc[:, biogas_i] + .mul(n.snapshot_weightings.generators, axis=0) + .sum() + .groupby(n.stores.loc[biogas_i, "bus"]) + .sum() + / bus_size_factor + ) + biogas = ( + biogas.groupby(biogas.index) + .sum() + .rename(index=lambda x: x.replace(" biogas", "")) + ) + # make a fake MultiIndex so that area is correct for legend + biogas.index = pd.MultiIndex.from_product([biogas.index, ["biogas"]]) + + bus_sizes = pd.concat([fossil_gas, methanation, biogas]) + bus_sizes.sort_index(inplace=True) + + to_remove = n.links.index[~n.links.carrier.str.contains("gas pipeline")] + n.links.drop(to_remove, inplace=True) + + link_widths_rem = n.links.p_nom_opt / linewidth_factor + link_widths_rem[n.links.p_nom_opt < line_lower_threshold] = 0.0 + + link_widths_orig = n.links.p_nom / linewidth_factor + link_widths_orig[n.links.p_nom < line_lower_threshold] = 0.0 + + max_usage = n.links_t.p0[n.links.index].abs().max(axis=0) + link_widths_used = max_usage / linewidth_factor + link_widths_used[max_usage < line_lower_threshold] = 0.0 + + tech_colors = snakemake.params.plotting["tech_colors"] + + pipe_colors = { + "gas pipeline": "#f08080", + "gas pipeline new": "#c46868", + "gas pipeline (in 2020)": "lightgrey", + "gas pipeline (available)": "#e8d1d1", + } + + link_color_used = n.links.carrier.map(pipe_colors) + + n.links.bus0 = n.links.bus0.str.replace(" gas", "") + n.links.bus1 = n.links.bus1.str.replace(" gas", "") + + bus_colors = { + "fossil gas": tech_colors["fossil gas"], + "methanation": tech_colors["methanation"], + "biogas": "seagreen", + } + + fig, ax = plt.subplots(figsize=(7, 6), subplot_kw={"projection": proj}) + + n.plot( + bus_sizes=bus_sizes, 
+ bus_colors=bus_colors, + link_colors=pipe_colors["gas pipeline (in 2020)"], + link_widths=link_widths_orig, + branch_components=["Link"], + ax=ax, + **map_opts, + ) + + n.plot( + ax=ax, + bus_sizes=0.0, + link_colors=pipe_colors["gas pipeline (available)"], + link_widths=link_widths_rem, + branch_components=["Link"], + color_geomap=False, + boundaries=map_opts["boundaries"], + ) + + n.plot( + ax=ax, + bus_sizes=0.0, + link_colors=link_color_used, + link_widths=link_widths_used, + branch_components=["Link"], + color_geomap=False, + boundaries=map_opts["boundaries"], + ) + + sizes = [100, 10] + labels = [f"{s} TWh" for s in sizes] + sizes = [s / bus_size_factor * 1e6 for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0, 1.03), + labelspacing=0.8, + frameon=False, + handletextpad=1, + title="gas sources", + ) + + add_legend_circles( + ax, + sizes, + labels, + srid=n.srid, + patch_kw=dict(facecolor="lightgrey"), + legend_kw=legend_kw, + ) + + sizes = [50, 10] + labels = [f"{s} GW" for s in sizes] + scale = 1e3 / linewidth_factor + sizes = [s * scale for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0.25, 1.03), + frameon=False, + labelspacing=0.8, + handletextpad=1, + title="gas pipeline", + ) + + add_legend_lines( + ax, + sizes, + labels, + patch_kw=dict(color="lightgrey"), + legend_kw=legend_kw, + ) + + colors = list(pipe_colors.values()) + list(bus_colors.values()) + labels = list(pipe_colors.keys()) + list(bus_colors.keys()) + + # legend on the side + # legend_kw = dict( + # bbox_to_anchor=(1.47, 1.04), + # frameon=False, + # ) + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0, 1.24), + ncol=2, + frameon=False, + ) + + add_legend_patches( + ax, + colors, + labels, + legend_kw=legend_kw, + ) + + fig.savefig(snakemake.output.map, bbox_inches="tight") + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "plot_gas_network", + simpl="", + opts="", + clusters="37", + ll="v1.0", + sector_opts="4380H-T-H-B-I-A-dist1", + ) + + configure_logging(snakemake) + set_scenario_config(snakemake) + + n = pypsa.Network(snakemake.input.network) + + regions = gpd.read_file(snakemake.input.regions).set_index("name") + + map_opts = snakemake.params.plotting["map"] + + if map_opts["boundaries"] is None: + map_opts["boundaries"] = regions.total_bounds[[0, 2, 1, 3]] + [-1, 1, -1, 1] + + proj = load_projection(snakemake.params.plotting) + + plot_ch4_map(n) diff --git a/scripts/plot_hydrogen_network.py b/scripts/plot_hydrogen_network.py new file mode 100644 index 00000000..b4585fb2 --- /dev/null +++ b/scripts/plot_hydrogen_network.py @@ -0,0 +1,273 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Creates map of optimised hydrogen network, storage and selected other +infrastructure. +""" + +import logging + +import geopandas as gpd +import matplotlib.pyplot as plt +import pandas as pd +import pypsa +from _helpers import configure_logging, set_scenario_config +from plot_power_network import assign_location, load_projection +from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches + +logger = logging.getLogger(__name__) + + +def group_pipes(df, drop_direction=False): + """ + Group pipes which connect same buses and return overall capacity. 
+ """ + df = df.copy() + if drop_direction: + positive_order = df.bus0 < df.bus1 + df_p = df[positive_order] + swap_buses = {"bus0": "bus1", "bus1": "bus0"} + df_n = df[~positive_order].rename(columns=swap_buses) + df = pd.concat([df_p, df_n]) + + # there are pipes for each investment period rename to AC buses name for plotting + df["index_orig"] = df.index + df.index = df.apply( + lambda x: f"H2 pipeline {x.bus0.replace(' H2', '')} -> {x.bus1.replace(' H2', '')}", + axis=1, + ) + return df.groupby(level=0).agg( + {"p_nom_opt": "sum", "bus0": "first", "bus1": "first", "index_orig": "first"} + ) + + +def plot_h2_map(n, regions): + # if "H2 pipeline" not in n.links.carrier.unique(): + # return + + assign_location(n) + + h2_storage = n.stores.query("carrier == 'H2'") + regions["H2"] = ( + h2_storage.rename(index=h2_storage.bus.map(n.buses.location)) + .e_nom_opt.groupby(level=0) + .sum() + .div(1e6) + ) # TWh + regions["H2"] = regions["H2"].where(regions["H2"] > 0.1) + + bus_size_factor = 1e5 + linewidth_factor = 7e3 + # MW below which not drawn + line_lower_threshold = 750 + + # Drop non-electric buses so they don't clutter the plot + n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) + + carriers = ["H2 Electrolysis", "H2 Fuel Cell"] + + elec = n.links[n.links.carrier.isin(carriers)].index + + bus_sizes = ( + n.links.loc[elec, "p_nom_opt"].groupby([n.links["bus0"], n.links.carrier]).sum() + / bus_size_factor + ) + + # make a fake MultiIndex so that area is correct for legend + bus_sizes.rename(index=lambda x: x.replace(" H2", ""), level=0, inplace=True) + # drop all links which are not H2 pipelines + n.links.drop( + n.links.index[~n.links.carrier.str.contains("H2 pipeline")], inplace=True + ) + + h2_new = n.links[n.links.carrier == "H2 pipeline"] + h2_retro = n.links[n.links.carrier == "H2 pipeline retrofitted"] + + if snakemake.params.foresight == "myopic": + # sum capacitiy for pipelines from different investment periods + h2_new = group_pipes(h2_new) + + if not h2_retro.empty: + h2_retro = ( + group_pipes(h2_retro, drop_direction=True) + .reindex(h2_new.index) + .fillna(0) + ) + + if not h2_retro.empty: + if snakemake.params.foresight != "myopic": + positive_order = h2_retro.bus0 < h2_retro.bus1 + h2_retro_p = h2_retro[positive_order] + swap_buses = {"bus0": "bus1", "bus1": "bus0"} + h2_retro_n = h2_retro[~positive_order].rename(columns=swap_buses) + h2_retro = pd.concat([h2_retro_p, h2_retro_n]) + + h2_retro["index_orig"] = h2_retro.index + h2_retro.index = h2_retro.apply( + lambda x: f"H2 pipeline {x.bus0.replace(' H2', '')} -> {x.bus1.replace(' H2', '')}", + axis=1, + ) + + retro_w_new_i = h2_retro.index.intersection(h2_new.index) + h2_retro_w_new = h2_retro.loc[retro_w_new_i] + + retro_wo_new_i = h2_retro.index.difference(h2_new.index) + h2_retro_wo_new = h2_retro.loc[retro_wo_new_i] + h2_retro_wo_new.index = h2_retro_wo_new.index_orig + + to_concat = [h2_new, h2_retro_w_new, h2_retro_wo_new] + h2_total = pd.concat(to_concat).p_nom_opt.groupby(level=0).sum() + + else: + h2_total = h2_new.p_nom_opt + + link_widths_total = h2_total / linewidth_factor + + n.links.rename(index=lambda x: x.split("-2")[0], inplace=True) + n.links = n.links.groupby(level=0).first() + link_widths_total = link_widths_total.reindex(n.links.index).fillna(0.0) + link_widths_total[n.links.p_nom_opt < line_lower_threshold] = 0.0 + + retro = n.links.p_nom_opt.where( + n.links.carrier == "H2 pipeline retrofitted", other=0.0 + ) + link_widths_retro = retro / linewidth_factor + 
link_widths_retro[n.links.p_nom_opt < line_lower_threshold] = 0.0 + + n.links.bus0 = n.links.bus0.str.replace(" H2", "") + n.links.bus1 = n.links.bus1.str.replace(" H2", "") + + regions = regions.to_crs(proj.proj4_init) + + fig, ax = plt.subplots(figsize=(7, 6), subplot_kw={"projection": proj}) + + color_h2_pipe = "#b3f3f4" + color_retrofit = "#499a9c" + + bus_colors = {"H2 Electrolysis": "#ff29d9", "H2 Fuel Cell": "#805394"} + + n.plot( + geomap=True, + bus_sizes=bus_sizes, + bus_colors=bus_colors, + link_colors=color_h2_pipe, + link_widths=link_widths_total, + branch_components=["Link"], + ax=ax, + **map_opts, + ) + + n.plot( + geomap=True, + bus_sizes=0, + link_colors=color_retrofit, + link_widths=link_widths_retro, + branch_components=["Link"], + ax=ax, + **map_opts, + ) + + regions.plot( + ax=ax, + column="H2", + cmap="Blues", + linewidths=0, + legend=True, + vmax=6, + vmin=0, + legend_kwds={ + "label": "Hydrogen Storage [TWh]", + "shrink": 0.7, + "extend": "max", + }, + ) + + sizes = [50, 10] + labels = [f"{s} GW" for s in sizes] + sizes = [s / bus_size_factor * 1e3 for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0, 1), + labelspacing=0.8, + handletextpad=0, + frameon=False, + ) + + add_legend_circles( + ax, + sizes, + labels, + srid=n.srid, + patch_kw=dict(facecolor="lightgrey"), + legend_kw=legend_kw, + ) + + sizes = [30, 10] + labels = [f"{s} GW" for s in sizes] + scale = 1e3 / linewidth_factor + sizes = [s * scale for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0.23, 1), + frameon=False, + labelspacing=0.8, + handletextpad=1, + ) + + add_legend_lines( + ax, + sizes, + labels, + patch_kw=dict(color="lightgrey"), + legend_kw=legend_kw, + ) + + colors = [bus_colors[c] for c in carriers] + [color_h2_pipe, color_retrofit] + labels = carriers + ["H2 pipeline (total)", "H2 pipeline (repurposed)"] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0, 1.13), + ncol=2, + frameon=False, + ) + + add_legend_patches(ax, colors, labels, legend_kw=legend_kw) + + ax.set_facecolor("white") + + fig.savefig(snakemake.output.map, bbox_inches="tight") + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "plot_hydrogen_network", + simpl="", + opts="", + clusters="37", + ll="v1.0", + sector_opts="4380H-T-H-B-I-A-dist1", + ) + + configure_logging(snakemake) + set_scenario_config(snakemake) + + n = pypsa.Network(snakemake.input.network) + + regions = gpd.read_file(snakemake.input.regions).set_index("name") + + map_opts = snakemake.params.plotting["map"] + + if map_opts["boundaries"] is None: + map_opts["boundaries"] = regions.total_bounds[[0, 2, 1, 3]] + [-1, 1, -1, 1] + + proj = load_projection(snakemake.params.plotting) + + plot_h2_map(n, regions) diff --git a/scripts/plot_network.py b/scripts/plot_network.py deleted file mode 100644 index 67481120..00000000 --- a/scripts/plot_network.py +++ /dev/null @@ -1,1106 +0,0 @@ -# -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors -# -# SPDX-License-Identifier: MIT -""" -Creates plots for optimised network topologies, including electricity, gas and -hydrogen networks, and regional generation, storage and conversion capacities -built. - -This rule plots a map of the network with technology capacities at the -nodes. 
-""" - -import logging - -logger = logging.getLogger(__name__) - -import cartopy.crs as ccrs -import geopandas as gpd -import matplotlib.pyplot as plt -import pandas as pd -import pypsa -from make_summary import assign_carriers -from plot_summary import preferred_order, rename_techs -from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches - -plt.style.use(["ggplot"]) - - -def rename_techs_tyndp(tech): - tech = rename_techs(tech) - if "heat pump" in tech or "resistive heater" in tech: - return "power-to-heat" - elif tech in ["H2 Electrolysis", "methanation", "H2 liquefaction"]: - return "power-to-gas" - elif tech == "H2": - return "H2 storage" - elif tech in ["NH3", "Haber-Bosch", "ammonia cracker", "ammonia store"]: - return "ammonia" - elif tech in ["OCGT", "CHP", "gas boiler", "H2 Fuel Cell"]: - return "gas-to-power/heat" - # elif "solar" in tech: - # return "solar" - elif tech in ["Fischer-Tropsch", "methanolisation"]: - return "power-to-liquid" - elif "offshore wind" in tech: - return "offshore wind" - elif "CC" in tech or "sequestration" in tech: - return "CCS" - else: - return tech - - -def assign_location(n): - for c in n.iterate_components(n.one_port_components | n.branch_components): - ifind = pd.Series(c.df.index.str.find(" ", start=4), c.df.index) - for i in ifind.value_counts().index: - # these have already been assigned defaults - if i == -1: - continue - names = ifind.index[ifind == i] - c.df.loc[names, "location"] = names.str[:i] - - -def plot_map( - network, - components=["links", "stores", "storage_units", "generators"], - bus_size_factor=1.7e10, - transmission=False, - with_legend=True, -): - tech_colors = snakemake.params.plotting["tech_colors"] - - n = network.copy() - assign_location(n) - # Drop non-electric buses so they don't clutter the plot - n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) - - costs = pd.DataFrame(index=n.buses.index) - - for comp in components: - df_c = getattr(n, comp) - - if df_c.empty: - continue - - df_c["nice_group"] = df_c.carrier.map(rename_techs_tyndp) - - attr = "e_nom_opt" if comp == "stores" else "p_nom_opt" - - costs_c = ( - (df_c.capital_cost * df_c[attr]) - .groupby([df_c.location, df_c.nice_group]) - .sum() - .unstack() - .fillna(0.0) - ) - costs = pd.concat([costs, costs_c], axis=1) - - logger.debug(f"{comp}, {costs}") - - costs = costs.groupby(costs.columns, axis=1).sum() - - costs.drop(list(costs.columns[(costs == 0.0).all()]), axis=1, inplace=True) - - new_columns = preferred_order.intersection(costs.columns).append( - costs.columns.difference(preferred_order) - ) - costs = costs[new_columns] - - for item in new_columns: - if item not in tech_colors: - logger.warning(f"{item} not in config/plotting/tech_colors") - - costs = costs.stack() # .sort_index() - - # hack because impossible to drop buses... 
- eu_location = snakemake.params.plotting.get("eu_node_location", dict(x=-5.5, y=46)) - n.buses.loc["EU gas", "x"] = eu_location["x"] - n.buses.loc["EU gas", "y"] = eu_location["y"] - - n.links.drop( - n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")], - inplace=True, - ) - - # drop non-bus - to_drop = costs.index.levels[0].symmetric_difference(n.buses.index) - if len(to_drop) != 0: - logger.info(f"Dropping non-buses {to_drop.tolist()}") - costs.drop(to_drop, level=0, inplace=True, axis=0, errors="ignore") - - # make sure they are removed from index - costs.index = pd.MultiIndex.from_tuples(costs.index.values) - - threshold = 100e6 # 100 mEUR/a - carriers = costs.groupby(level=1).sum() - carriers = carriers.where(carriers > threshold).dropna() - carriers = list(carriers.index) - - # PDF has minimum width, so set these to zero - line_lower_threshold = 500.0 - line_upper_threshold = 1e4 - linewidth_factor = 4e3 - ac_color = "rosybrown" - dc_color = "darkseagreen" - - title = "added grid" - - if snakemake.wildcards["ll"] == "v1.0": - # should be zero - line_widths = n.lines.s_nom_opt - n.lines.s_nom - link_widths = n.links.p_nom_opt - n.links.p_nom - if transmission: - line_widths = n.lines.s_nom_opt - link_widths = n.links.p_nom_opt - linewidth_factor = 2e3 - line_lower_threshold = 0.0 - title = "current grid" - else: - line_widths = n.lines.s_nom_opt - n.lines.s_nom_min - link_widths = n.links.p_nom_opt - n.links.p_nom_min - if transmission: - line_widths = n.lines.s_nom_opt - link_widths = n.links.p_nom_opt - title = "total grid" - - line_widths = line_widths.clip(line_lower_threshold, line_upper_threshold) - link_widths = link_widths.clip(line_lower_threshold, line_upper_threshold) - - line_widths = line_widths.replace(line_lower_threshold, 0) - link_widths = link_widths.replace(line_lower_threshold, 0) - - fig, ax = plt.subplots(subplot_kw={"projection": ccrs.EqualEarth()}) - fig.set_size_inches(7, 6) - - n.plot( - bus_sizes=costs / bus_size_factor, - bus_colors=tech_colors, - line_colors=ac_color, - link_colors=dc_color, - line_widths=line_widths / linewidth_factor, - link_widths=link_widths / linewidth_factor, - ax=ax, - **map_opts, - ) - - sizes = [20, 10, 5] - labels = [f"{s} bEUR/a" for s in sizes] - sizes = [s / bus_size_factor * 1e9 for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0.01, 1.06), - labelspacing=0.8, - frameon=False, - handletextpad=0, - title="system cost", - ) - - add_legend_circles( - ax, - sizes, - labels, - srid=n.srid, - patch_kw=dict(facecolor="lightgrey"), - legend_kw=legend_kw, - ) - - sizes = [10, 5] - labels = [f"{s} GW" for s in sizes] - scale = 1e3 / linewidth_factor - sizes = [s * scale for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0.27, 1.06), - frameon=False, - labelspacing=0.8, - handletextpad=1, - title=title, - ) - - add_legend_lines( - ax, sizes, labels, patch_kw=dict(color="lightgrey"), legend_kw=legend_kw - ) - - legend_kw = dict( - bbox_to_anchor=(1.52, 1.04), - frameon=False, - ) - - if with_legend: - colors = [tech_colors[c] for c in carriers] + [ac_color, dc_color] - labels = carriers + ["HVAC line", "HVDC link"] - - add_legend_patches( - ax, - colors, - labels, - legend_kw=legend_kw, - ) - - fig.savefig(snakemake.output.map, transparent=True, bbox_inches="tight") - - -def group_pipes(df, drop_direction=False): - """ - Group pipes which connect same buses and return overall capacity. 
- """ - if drop_direction: - positive_order = df.bus0 < df.bus1 - df_p = df[positive_order] - swap_buses = {"bus0": "bus1", "bus1": "bus0"} - df_n = df[~positive_order].rename(columns=swap_buses) - df = pd.concat([df_p, df_n]) - - # there are pipes for each investment period rename to AC buses name for plotting - df.index = df.apply( - lambda x: f"H2 pipeline {x.bus0.replace(' H2', '')} -> {x.bus1.replace(' H2', '')}", - axis=1, - ) - return df.groupby(level=0).agg({"p_nom_opt": sum, "bus0": "first", "bus1": "first"}) - - -def plot_h2_map(network, regions): - n = network.copy() - if "H2 pipeline" not in n.links.carrier.unique(): - return - - assign_location(n) - - h2_storage = n.stores.query("carrier == 'H2'") - regions["H2"] = h2_storage.rename( - index=h2_storage.bus.map(n.buses.location) - ).e_nom_opt.div( - 1e6 - ) # TWh - regions["H2"] = regions["H2"].where(regions["H2"] > 0.1) - - bus_size_factor = 1e5 - linewidth_factor = 7e3 - # MW below which not drawn - line_lower_threshold = 750 - - # Drop non-electric buses so they don't clutter the plot - n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) - - carriers = ["H2 Electrolysis", "H2 Fuel Cell"] - - elec = n.links[n.links.carrier.isin(carriers)].index - - bus_sizes = ( - n.links.loc[elec, "p_nom_opt"].groupby([n.links["bus0"], n.links.carrier]).sum() - / bus_size_factor - ) - - # make a fake MultiIndex so that area is correct for legend - bus_sizes.rename(index=lambda x: x.replace(" H2", ""), level=0, inplace=True) - # drop all links which are not H2 pipelines - n.links.drop( - n.links.index[~n.links.carrier.str.contains("H2 pipeline")], inplace=True - ) - - h2_new = n.links[n.links.carrier == "H2 pipeline"] - h2_retro = n.links[n.links.carrier == "H2 pipeline retrofitted"] - - if snakemake.params.foresight == "myopic": - # sum capacitiy for pipelines from different investment periods - h2_new = group_pipes(h2_new) - - if not h2_retro.empty: - h2_retro = ( - group_pipes(h2_retro, drop_direction=True) - .reindex(h2_new.index) - .fillna(0) - ) - - if not h2_retro.empty: - positive_order = h2_retro.bus0 < h2_retro.bus1 - h2_retro_p = h2_retro[positive_order] - swap_buses = {"bus0": "bus1", "bus1": "bus0"} - h2_retro_n = h2_retro[~positive_order].rename(columns=swap_buses) - h2_retro = pd.concat([h2_retro_p, h2_retro_n]) - - h2_retro["index_orig"] = h2_retro.index - h2_retro.index = h2_retro.apply( - lambda x: f"H2 pipeline {x.bus0.replace(' H2', '')} -> {x.bus1.replace(' H2', '')}", - axis=1, - ) - - retro_w_new_i = h2_retro.index.intersection(h2_new.index) - h2_retro_w_new = h2_retro.loc[retro_w_new_i] - - retro_wo_new_i = h2_retro.index.difference(h2_new.index) - h2_retro_wo_new = h2_retro.loc[retro_wo_new_i] - h2_retro_wo_new.index = h2_retro_wo_new.index_orig - - to_concat = [h2_new, h2_retro_w_new, h2_retro_wo_new] - h2_total = pd.concat(to_concat).p_nom_opt.groupby(level=0).sum() - - else: - h2_total = h2_new.p_nom_opt - - link_widths_total = h2_total / linewidth_factor - - n.links.rename(index=lambda x: x.split("-2")[0], inplace=True) - n.links = n.links.groupby(level=0).first() - link_widths_total = link_widths_total.reindex(n.links.index).fillna(0.0) - link_widths_total[n.links.p_nom_opt < line_lower_threshold] = 0.0 - - retro = n.links.p_nom_opt.where( - n.links.carrier == "H2 pipeline retrofitted", other=0.0 - ) - link_widths_retro = retro / linewidth_factor - link_widths_retro[n.links.p_nom_opt < line_lower_threshold] = 0.0 - - n.links.bus0 = n.links.bus0.str.replace(" H2", "") - n.links.bus1 = 
n.links.bus1.str.replace(" H2", "") - - proj = ccrs.EqualEarth() - regions = regions.to_crs(proj.proj4_init) - - fig, ax = plt.subplots(figsize=(7, 6), subplot_kw={"projection": proj}) - - color_h2_pipe = "#b3f3f4" - color_retrofit = "#499a9c" - - bus_colors = {"H2 Electrolysis": "#ff29d9", "H2 Fuel Cell": "#805394"} - - n.plot( - geomap=True, - bus_sizes=bus_sizes, - bus_colors=bus_colors, - link_colors=color_h2_pipe, - link_widths=link_widths_total, - branch_components=["Link"], - ax=ax, - **map_opts, - ) - - n.plot( - geomap=True, - bus_sizes=0, - link_colors=color_retrofit, - link_widths=link_widths_retro, - branch_components=["Link"], - ax=ax, - **map_opts, - ) - - regions.plot( - ax=ax, - column="H2", - cmap="Blues", - linewidths=0, - legend=True, - vmax=6, - vmin=0, - legend_kwds={ - "label": "Hydrogen Storage [TWh]", - "shrink": 0.7, - "extend": "max", - }, - ) - - sizes = [50, 10] - labels = [f"{s} GW" for s in sizes] - sizes = [s / bus_size_factor * 1e3 for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0, 1), - labelspacing=0.8, - handletextpad=0, - frameon=False, - ) - - add_legend_circles( - ax, - sizes, - labels, - srid=n.srid, - patch_kw=dict(facecolor="lightgrey"), - legend_kw=legend_kw, - ) - - sizes = [30, 10] - labels = [f"{s} GW" for s in sizes] - scale = 1e3 / linewidth_factor - sizes = [s * scale for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0.23, 1), - frameon=False, - labelspacing=0.8, - handletextpad=1, - ) - - add_legend_lines( - ax, - sizes, - labels, - patch_kw=dict(color="lightgrey"), - legend_kw=legend_kw, - ) - - colors = [bus_colors[c] for c in carriers] + [color_h2_pipe, color_retrofit] - labels = carriers + ["H2 pipeline (total)", "H2 pipeline (repurposed)"] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0, 1.13), - ncol=2, - frameon=False, - ) - - add_legend_patches(ax, colors, labels, legend_kw=legend_kw) - - ax.set_facecolor("white") - - fig.savefig( - snakemake.output.map.replace("-costs-all", "-h2_network"), bbox_inches="tight" - ) - - -def plot_ch4_map(network): - n = network.copy() - - if "gas pipeline" not in n.links.carrier.unique(): - return - - assign_location(n) - - bus_size_factor = 8e7 - linewidth_factor = 1e4 - # MW below which not drawn - line_lower_threshold = 1e3 - - # Drop non-electric buses so they don't clutter the plot - n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) - - fossil_gas_i = n.generators[n.generators.carrier == "gas"].index - fossil_gas = ( - n.generators_t.p.loc[:, fossil_gas_i] - .mul(n.snapshot_weightings.generators, axis=0) - .sum() - .groupby(n.generators.loc[fossil_gas_i, "bus"]) - .sum() - / bus_size_factor - ) - fossil_gas.rename(index=lambda x: x.replace(" gas", ""), inplace=True) - fossil_gas = fossil_gas.reindex(n.buses.index).fillna(0) - # make a fake MultiIndex so that area is correct for legend - fossil_gas.index = pd.MultiIndex.from_product([fossil_gas.index, ["fossil gas"]]) - - methanation_i = n.links.query("carrier == 'Sabatier'").index - methanation = ( - abs( - n.links_t.p1.loc[:, methanation_i].mul( - n.snapshot_weightings.generators, axis=0 - ) - ) - .sum() - .groupby(n.links.loc[methanation_i, "bus1"]) - .sum() - / bus_size_factor - ) - methanation = ( - methanation.groupby(methanation.index) - .sum() - .rename(index=lambda x: x.replace(" gas", "")) - ) - # make a fake MultiIndex so that area is correct for legend - methanation.index = pd.MultiIndex.from_product([methanation.index, ["methanation"]]) - - biogas_i = 
n.stores[n.stores.carrier == "biogas"].index - biogas = ( - n.stores_t.p.loc[:, biogas_i] - .mul(n.snapshot_weightings.generators, axis=0) - .sum() - .groupby(n.stores.loc[biogas_i, "bus"]) - .sum() - / bus_size_factor - ) - biogas = ( - biogas.groupby(biogas.index) - .sum() - .rename(index=lambda x: x.replace(" biogas", "")) - ) - # make a fake MultiIndex so that area is correct for legend - biogas.index = pd.MultiIndex.from_product([biogas.index, ["biogas"]]) - - bus_sizes = pd.concat([fossil_gas, methanation, biogas]) - bus_sizes.sort_index(inplace=True) - - to_remove = n.links.index[~n.links.carrier.str.contains("gas pipeline")] - n.links.drop(to_remove, inplace=True) - - link_widths_rem = n.links.p_nom_opt / linewidth_factor - link_widths_rem[n.links.p_nom_opt < line_lower_threshold] = 0.0 - - link_widths_orig = n.links.p_nom / linewidth_factor - link_widths_orig[n.links.p_nom < line_lower_threshold] = 0.0 - - max_usage = n.links_t.p0.abs().max(axis=0) - link_widths_used = max_usage / linewidth_factor - link_widths_used[max_usage < line_lower_threshold] = 0.0 - - tech_colors = snakemake.params.plotting["tech_colors"] - - pipe_colors = { - "gas pipeline": "#f08080", - "gas pipeline new": "#c46868", - "gas pipeline (in 2020)": "lightgrey", - "gas pipeline (available)": "#e8d1d1", - } - - link_color_used = n.links.carrier.map(pipe_colors) - - n.links.bus0 = n.links.bus0.str.replace(" gas", "") - n.links.bus1 = n.links.bus1.str.replace(" gas", "") - - bus_colors = { - "fossil gas": tech_colors["fossil gas"], - "methanation": tech_colors["methanation"], - "biogas": "seagreen", - } - - fig, ax = plt.subplots(figsize=(7, 6), subplot_kw={"projection": ccrs.EqualEarth()}) - - n.plot( - bus_sizes=bus_sizes, - bus_colors=bus_colors, - link_colors=pipe_colors["gas pipeline (in 2020)"], - link_widths=link_widths_orig, - branch_components=["Link"], - ax=ax, - **map_opts, - ) - - n.plot( - ax=ax, - bus_sizes=0.0, - link_colors=pipe_colors["gas pipeline (available)"], - link_widths=link_widths_rem, - branch_components=["Link"], - color_geomap=False, - boundaries=map_opts["boundaries"], - ) - - n.plot( - ax=ax, - bus_sizes=0.0, - link_colors=link_color_used, - link_widths=link_widths_used, - branch_components=["Link"], - color_geomap=False, - boundaries=map_opts["boundaries"], - ) - - sizes = [100, 10] - labels = [f"{s} TWh" for s in sizes] - sizes = [s / bus_size_factor * 1e6 for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0, 1.03), - labelspacing=0.8, - frameon=False, - handletextpad=1, - title="gas sources", - ) - - add_legend_circles( - ax, - sizes, - labels, - srid=n.srid, - patch_kw=dict(facecolor="lightgrey"), - legend_kw=legend_kw, - ) - - sizes = [50, 10] - labels = [f"{s} GW" for s in sizes] - scale = 1e3 / linewidth_factor - sizes = [s * scale for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0.25, 1.03), - frameon=False, - labelspacing=0.8, - handletextpad=1, - title="gas pipeline", - ) - - add_legend_lines( - ax, - sizes, - labels, - patch_kw=dict(color="lightgrey"), - legend_kw=legend_kw, - ) - - colors = list(pipe_colors.values()) + list(bus_colors.values()) - labels = list(pipe_colors.keys()) + list(bus_colors.keys()) - - # legend on the side - # legend_kw = dict( - # bbox_to_anchor=(1.47, 1.04), - # frameon=False, - # ) - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0, 1.24), - ncol=2, - frameon=False, - ) - - add_legend_patches( - ax, - colors, - labels, - legend_kw=legend_kw, - ) - - fig.savefig( - 
snakemake.output.map.replace("-costs-all", "-ch4_network"), bbox_inches="tight" - ) - - -def plot_map_without(network): - n = network.copy() - assign_location(n) - - # Drop non-electric buses so they don't clutter the plot - n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) - - fig, ax = plt.subplots(figsize=(7, 6), subplot_kw={"projection": ccrs.EqualEarth()}) - - # PDF has minimum width, so set these to zero - line_lower_threshold = 200.0 - line_upper_threshold = 1e4 - linewidth_factor = 3e3 - ac_color = "rosybrown" - dc_color = "darkseagreen" - - # hack because impossible to drop buses... - if "EU gas" in n.buses.index: - eu_location = snakemake.params.plotting.get( - "eu_node_location", dict(x=-5.5, y=46) - ) - n.buses.loc["EU gas", "x"] = eu_location["x"] - n.buses.loc["EU gas", "y"] = eu_location["y"] - - to_drop = n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")] - n.links.drop(to_drop, inplace=True) - - if snakemake.wildcards["ll"] == "v1.0": - line_widths = n.lines.s_nom - link_widths = n.links.p_nom - else: - line_widths = n.lines.s_nom_min - link_widths = n.links.p_nom_min - - line_widths = line_widths.clip(line_lower_threshold, line_upper_threshold) - link_widths = link_widths.clip(line_lower_threshold, line_upper_threshold) - - line_widths = line_widths.replace(line_lower_threshold, 0) - link_widths = link_widths.replace(line_lower_threshold, 0) - - n.plot( - bus_colors="k", - line_colors=ac_color, - link_colors=dc_color, - line_widths=line_widths / linewidth_factor, - link_widths=link_widths / linewidth_factor, - ax=ax, - **map_opts, - ) - - handles = [] - labels = [] - - for s in (10, 5): - handles.append( - plt.Line2D([0], [0], color=ac_color, linewidth=s * 1e3 / linewidth_factor) - ) - labels.append(f"{s} GW") - l1_1 = ax.legend( - handles, - labels, - loc="upper left", - bbox_to_anchor=(0.05, 1.01), - frameon=False, - labelspacing=0.8, - handletextpad=1.5, - title="Today's transmission", - ) - ax.add_artist(l1_1) - - fig.savefig(snakemake.output.today, transparent=True, bbox_inches="tight") - - -def plot_series(network, carrier="AC", name="test"): - n = network.copy() - assign_location(n) - assign_carriers(n) - - buses = n.buses.index[n.buses.carrier.str.contains(carrier)] - - supply = pd.DataFrame(index=n.snapshots) - for c in n.iterate_components(n.branch_components): - n_port = 4 if c.name == "Link" else 2 - for i in range(n_port): - supply = pd.concat( - ( - supply, - ( - -1 - * c.pnl[f"p{str(i)}"] - .loc[:, c.df.index[c.df[f"bus{str(i)}"].isin(buses)]] - .groupby(c.df.carrier, axis=1) - .sum() - ), - ), - axis=1, - ) - - for c in n.iterate_components(n.one_port_components): - comps = c.df.index[c.df.bus.isin(buses)] - supply = pd.concat( - ( - supply, - ((c.pnl["p"].loc[:, comps]).multiply(c.df.loc[comps, "sign"])) - .groupby(c.df.carrier, axis=1) - .sum(), - ), - axis=1, - ) - - supply = supply.groupby(rename_techs_tyndp, axis=1).sum() - - both = supply.columns[(supply < 0.0).any() & (supply > 0.0).any()] - - positive_supply = supply[both] - negative_supply = supply[both] - - positive_supply[positive_supply < 0.0] = 0.0 - negative_supply[negative_supply > 0.0] = 0.0 - - supply[both] = positive_supply - - suffix = " charging" - - negative_supply.columns = negative_supply.columns + suffix - - supply = pd.concat((supply, negative_supply), axis=1) - - # 14-21.2 for flaute - # 19-26.1 for flaute - - start = "2013-02-19" - stop = "2013-02-26" - - threshold = 10e3 - - to_drop = supply.columns[(abs(supply) < threshold).all()] - - if 
len(to_drop) != 0: - logger.info(f"Dropping {to_drop.tolist()} from supply") - supply.drop(columns=to_drop, inplace=True) - - supply.index.name = None - - supply = supply / 1e3 - - supply.rename( - columns={"electricity": "electric demand", "heat": "heat demand"}, inplace=True - ) - supply.columns = supply.columns.str.replace("residential ", "") - supply.columns = supply.columns.str.replace("services ", "") - supply.columns = supply.columns.str.replace("urban decentral ", "decentral ") - - preferred_order = pd.Index( - [ - "electric demand", - "transmission lines", - "hydroelectricity", - "hydro reservoir", - "run of river", - "pumped hydro storage", - "CHP", - "onshore wind", - "offshore wind", - "solar PV", - "solar thermal", - "building retrofitting", - "ground heat pump", - "air heat pump", - "resistive heater", - "OCGT", - "gas boiler", - "gas", - "natural gas", - "methanation", - "hydrogen storage", - "battery storage", - "hot water storage", - ] - ) - - new_columns = preferred_order.intersection(supply.columns).append( - supply.columns.difference(preferred_order) - ) - - supply = supply.groupby(supply.columns, axis=1).sum() - fig, ax = plt.subplots() - fig.set_size_inches((8, 5)) - - ( - supply.loc[start:stop, new_columns].plot( - ax=ax, - kind="area", - stacked=True, - linewidth=0.0, - color=[ - snakemake.params.plotting["tech_colors"][i.replace(suffix, "")] - for i in new_columns - ], - ) - ) - - handles, labels = ax.get_legend_handles_labels() - - handles.reverse() - labels.reverse() - - new_handles = [] - new_labels = [] - - for i, item in enumerate(labels): - if "charging" not in item: - new_handles.append(handles[i]) - new_labels.append(labels[i]) - - ax.legend(new_handles, new_labels, ncol=3, loc="upper left", frameon=False) - ax.set_xlim([start, stop]) - ax.set_ylim([-1300, 1900]) - ax.grid(True) - ax.set_ylabel("Power [GW]") - fig.tight_layout() - - fig.savefig( - "{}/{RDIR}maps/series-{}-{}-{}-{}-{}.pdf".format( - "results", - snakemake.params.RDIR, - snakemake.wildcards["ll"], - carrier, - start, - stop, - name, - ), - transparent=True, - ) - - -def plot_map_perfect( - network, - components=["Link", "Store", "StorageUnit", "Generator"], - bus_size_factor=1.7e10, -): - n = network.copy() - assign_location(n) - # Drop non-electric buses so they don't clutter the plot - n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) - # investment periods - investments = n.snapshots.levels[0] - - costs = {} - for comp in components: - df_c = n.df(comp) - if df_c.empty: - continue - df_c["nice_group"] = df_c.carrier.map(rename_techs_tyndp) - - attr = "e_nom_opt" if comp == "Store" else "p_nom_opt" - - active = pd.concat( - [n.get_active_assets(comp, inv_p).rename(inv_p) for inv_p in investments], - axis=1, - ).astype(int) - capital_cost = n.df(comp)[attr] * n.df(comp).capital_cost - capital_cost_t = ( - (active.mul(capital_cost, axis=0)) - .groupby([n.df(comp).location, n.df(comp).nice_group]) - .sum() - ) - - capital_cost_t.drop("load", level=1, inplace=True, errors="ignore") - - costs[comp] = capital_cost_t - - costs = pd.concat(costs).groupby(level=[1, 2]).sum() - costs.drop(costs[costs.sum(axis=1) == 0].index, inplace=True) - - new_columns = preferred_order.intersection(costs.index.levels[1]).append( - costs.index.levels[1].difference(preferred_order) - ) - costs = costs.reindex(new_columns, level=1) - - for item in new_columns: - if item not in snakemake.config["plotting"]["tech_colors"]: - print( - "Warning!", - item, - "not in config/plotting/tech_colors, assign 
random color", - ) - snakemake.config["plotting"]["tech_colors"] = "pink" - - n.links.drop( - n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")], - inplace=True, - ) - - # drop non-bus - to_drop = costs.index.levels[0].symmetric_difference(n.buses.index) - if len(to_drop) != 0: - print("dropping non-buses", to_drop) - costs.drop(to_drop, level=0, inplace=True, axis=0, errors="ignore") - - # make sure they are removed from index - costs.index = pd.MultiIndex.from_tuples(costs.index.values) - - # PDF has minimum width, so set these to zero - line_lower_threshold = 500.0 - line_upper_threshold = 1e4 - linewidth_factor = 2e3 - ac_color = "gray" - dc_color = "m" - - line_widths = n.lines.s_nom_opt - link_widths = n.links.p_nom_opt - linewidth_factor = 2e3 - line_lower_threshold = 0.0 - title = "Today's transmission" - - line_widths[line_widths < line_lower_threshold] = 0.0 - link_widths[link_widths < line_lower_threshold] = 0.0 - - line_widths[line_widths > line_upper_threshold] = line_upper_threshold - link_widths[link_widths > line_upper_threshold] = line_upper_threshold - - for year in costs.columns: - fig, ax = plt.subplots(subplot_kw={"projection": ccrs.PlateCarree()}) - fig.set_size_inches(7, 6) - fig.suptitle(year) - - n.plot( - bus_sizes=costs[year] / bus_size_factor, - bus_colors=snakemake.config["plotting"]["tech_colors"], - line_colors=ac_color, - link_colors=dc_color, - line_widths=line_widths / linewidth_factor, - link_widths=link_widths / linewidth_factor, - ax=ax, - **map_opts, - ) - - sizes = [20, 10, 5] - labels = [f"{s} bEUR/a" for s in sizes] - sizes = [s / bus_size_factor * 1e9 for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0.01, 1.06), - labelspacing=0.8, - frameon=False, - handletextpad=0, - title="system cost", - ) - - add_legend_circles( - ax, - sizes, - labels, - srid=n.srid, - patch_kw=dict(facecolor="lightgrey"), - legend_kw=legend_kw, - ) - - sizes = [10, 5] - labels = [f"{s} GW" for s in sizes] - scale = 1e3 / linewidth_factor - sizes = [s * scale for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0.27, 1.06), - frameon=False, - labelspacing=0.8, - handletextpad=1, - title=title, - ) - - add_legend_lines( - ax, sizes, labels, patch_kw=dict(color="lightgrey"), legend_kw=legend_kw - ) - - legend_kw = dict( - bbox_to_anchor=(1.52, 1.04), - frameon=False, - ) - - fig.savefig( - snakemake.output[f"map_{year}"], transparent=True, bbox_inches="tight" - ) - - -if __name__ == "__main__": - if "snakemake" not in globals(): - from _helpers import mock_snakemake - - snakemake = mock_snakemake( - "plot_network", - simpl="", - opts="", - clusters="37", - ll="v1.0", - sector_opts="4380H-T-H-B-I-A-solar+p3-dist1", - ) - - logging.basicConfig(level=snakemake.config["logging"]["level"]) - - n = pypsa.Network(snakemake.input.network) - - regions = gpd.read_file(snakemake.input.regions).set_index("name") - - map_opts = snakemake.params.plotting["map"] - - if map_opts["boundaries"] is None: - map_opts["boundaries"] = regions.total_bounds[[0, 2, 1, 3]] + [-1, 1, -1, 1] - - if snakemake.params["foresight"] == "perfect": - plot_map_perfect( - n, - components=["Link", "Store", "StorageUnit", "Generator"], - bus_size_factor=2e10, - ) - else: - plot_map( - n, - components=["generators", "links", "stores", "storage_units"], - bus_size_factor=2e10, - transmission=False, - ) - - plot_h2_map(n, regions) - plot_ch4_map(n) - plot_map_without(n) - - # plot_series(n, carrier="AC", name=suffix) - # plot_series(n, carrier="heat", 
name=suffix) diff --git a/scripts/plot_power_network.py b/scripts/plot_power_network.py new file mode 100644 index 00000000..6db53bcc --- /dev/null +++ b/scripts/plot_power_network.py @@ -0,0 +1,273 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Creates plots for optimised power network topologies and regional generation, +storage and conversion capacities built. +""" + +import logging + +import cartopy.crs as ccrs +import geopandas as gpd +import matplotlib.pyplot as plt +import pandas as pd +import pypsa +from _helpers import configure_logging, set_scenario_config +from plot_summary import preferred_order, rename_techs +from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches + +logger = logging.getLogger(__name__) + + +def rename_techs_tyndp(tech): + tech = rename_techs(tech) + if "heat pump" in tech or "resistive heater" in tech: + return "power-to-heat" + elif tech in ["H2 Electrolysis", "methanation", "H2 liquefaction"]: + return "power-to-gas" + elif tech == "H2": + return "H2 storage" + elif tech in ["NH3", "Haber-Bosch", "ammonia cracker", "ammonia store"]: + return "ammonia" + elif tech in ["OCGT", "CHP", "gas boiler", "H2 Fuel Cell"]: + return "gas-to-power/heat" + # elif "solar" in tech: + # return "solar" + elif tech in ["Fischer-Tropsch", "methanolisation"]: + return "power-to-liquid" + elif "offshore wind" in tech: + return "offshore wind" + elif "CC" in tech or "sequestration" in tech: + return "CCS" + else: + return tech + + +def assign_location(n): + for c in n.iterate_components(n.one_port_components | n.branch_components): + ifind = pd.Series(c.df.index.str.find(" ", start=4), c.df.index) + for i in ifind.value_counts().index: + # these have already been assigned defaults + if i == -1: + continue + names = ifind.index[ifind == i] + c.df.loc[names, "location"] = names.str[:i] + + +def load_projection(plotting_params): + proj_kwargs = plotting_params.get("projection", dict(name="EqualEarth")) + proj_func = getattr(ccrs, proj_kwargs.pop("name")) + return proj_func(**proj_kwargs) + + +def plot_map( + n, + components=["links", "stores", "storage_units", "generators"], + bus_size_factor=2e10, + transmission=False, + with_legend=True, +): + tech_colors = snakemake.params.plotting["tech_colors"] + + assign_location(n) + # Drop non-electric buses so they don't clutter the plot + n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) + + costs = pd.DataFrame(index=n.buses.index) + + for comp in components: + df_c = getattr(n, comp) + + if df_c.empty: + continue + + df_c["nice_group"] = df_c.carrier.map(rename_techs_tyndp) + + attr = "e_nom_opt" if comp == "stores" else "p_nom_opt" + + costs_c = ( + (df_c.capital_cost * df_c[attr]) + .groupby([df_c.location, df_c.nice_group]) + .sum() + .unstack() + .fillna(0.0) + ) + costs = pd.concat([costs, costs_c], axis=1) + + logger.debug(f"{comp}, {costs}") + + costs = costs.T.groupby(costs.columns).sum().T + + costs.drop(list(costs.columns[(costs == 0.0).all()]), axis=1, inplace=True) + + new_columns = preferred_order.intersection(costs.columns).append( + costs.columns.difference(preferred_order) + ) + costs = costs[new_columns] + + for item in new_columns: + if item not in tech_colors: + logger.warning(f"{item} not in config/plotting/tech_colors") + + costs = costs.stack() # .sort_index() + + # hack because impossible to drop buses... 
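+ # instead, move the "EU gas" bus to the configurable plotting
+ # option "eu_node_location" (default x=-5.5, y=46) before plotting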
+ eu_location = snakemake.params.plotting.get("eu_node_location", dict(x=-5.5, y=46)) + n.buses.loc["EU gas", "x"] = eu_location["x"] + n.buses.loc["EU gas", "y"] = eu_location["y"] + + n.links.drop( + n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")], + inplace=True, + ) + + # drop non-bus + to_drop = costs.index.levels[0].symmetric_difference(n.buses.index) + if len(to_drop) != 0: + logger.info(f"Dropping non-buses {to_drop.tolist()}") + costs.drop(to_drop, level=0, inplace=True, axis=0, errors="ignore") + + # make sure they are removed from index + costs.index = pd.MultiIndex.from_tuples(costs.index.values) + + threshold = 100e6 # 100 mEUR/a + carriers = costs.groupby(level=1).sum() + carriers = carriers.where(carriers > threshold).dropna() + carriers = list(carriers.index) + + # PDF has minimum width, so set these to zero + line_lower_threshold = 500.0 + line_upper_threshold = 1e4 + linewidth_factor = 4e3 + ac_color = "rosybrown" + dc_color = "darkseagreen" + + title = "added grid" + + if snakemake.wildcards["ll"] == "v1.0": + # should be zero + line_widths = n.lines.s_nom_opt - n.lines.s_nom + link_widths = n.links.p_nom_opt - n.links.p_nom + if transmission: + line_widths = n.lines.s_nom_opt + link_widths = n.links.p_nom_opt + linewidth_factor = 2e3 + line_lower_threshold = 0.0 + title = "current grid" + else: + line_widths = n.lines.s_nom_opt - n.lines.s_nom_min + link_widths = n.links.p_nom_opt - n.links.p_nom_min + if transmission: + line_widths = n.lines.s_nom_opt + link_widths = n.links.p_nom_opt + title = "total grid" + + line_widths = line_widths.clip(line_lower_threshold, line_upper_threshold) + link_widths = link_widths.clip(line_lower_threshold, line_upper_threshold) + + line_widths = line_widths.replace(line_lower_threshold, 0) + link_widths = link_widths.replace(line_lower_threshold, 0) + + fig, ax = plt.subplots(subplot_kw={"projection": proj}) + fig.set_size_inches(7, 6) + + n.plot( + bus_sizes=costs / bus_size_factor, + bus_colors=tech_colors, + line_colors=ac_color, + link_colors=dc_color, + line_widths=line_widths / linewidth_factor, + link_widths=link_widths / linewidth_factor, + ax=ax, + **map_opts, + ) + + sizes = [20, 10, 5] + labels = [f"{s} bEUR/a" for s in sizes] + sizes = [s / bus_size_factor * 1e9 for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0.01, 1.06), + labelspacing=0.8, + frameon=False, + handletextpad=0, + title="system cost", + ) + + add_legend_circles( + ax, + sizes, + labels, + srid=n.srid, + patch_kw=dict(facecolor="lightgrey"), + legend_kw=legend_kw, + ) + + sizes = [10, 5] + labels = [f"{s} GW" for s in sizes] + scale = 1e3 / linewidth_factor + sizes = [s * scale for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0.27, 1.06), + frameon=False, + labelspacing=0.8, + handletextpad=1, + title=title, + ) + + add_legend_lines( + ax, sizes, labels, patch_kw=dict(color="lightgrey"), legend_kw=legend_kw + ) + + legend_kw = dict( + bbox_to_anchor=(1.52, 1.04), + frameon=False, + ) + + if with_legend: + colors = [tech_colors[c] for c in carriers] + [ac_color, dc_color] + labels = carriers + ["HVAC line", "HVDC link"] + + add_legend_patches( + ax, + colors, + labels, + legend_kw=legend_kw, + ) + + fig.savefig(snakemake.output.map, bbox_inches="tight") + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "plot_power_network", + simpl="", + opts="", + clusters="37", + ll="v1.0", + 
sector_opts="4380H-T-H-B-I-A-dist1", + ) + + configure_logging(snakemake) + set_scenario_config(snakemake) + + n = pypsa.Network(snakemake.input.network) + + regions = gpd.read_file(snakemake.input.regions).set_index("name") + + map_opts = snakemake.params.plotting["map"] + + if map_opts["boundaries"] is None: + map_opts["boundaries"] = regions.total_bounds[[0, 2, 1, 3]] + [-1, 1, -1, 1] + + proj = load_projection(snakemake.params.plotting) + + plot_map(n) diff --git a/scripts/plot_power_network_clustered.py b/scripts/plot_power_network_clustered.py new file mode 100644 index 00000000..0c3dc635 --- /dev/null +++ b/scripts/plot_power_network_clustered.py @@ -0,0 +1,79 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2023-2024 PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Plot clustered electricity transmission network. +""" + +import geopandas as gpd +import matplotlib.pyplot as plt +import pypsa +from _helpers import set_scenario_config +from matplotlib.lines import Line2D +from plot_power_network import load_projection +from pypsa.plot import add_legend_lines + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "plot_power_network_clustered", + clusters=128, + configfiles=["../../config/config.test.yaml"], + ) + set_scenario_config(snakemake) + + lw_factor = 2e3 + + n = pypsa.Network(snakemake.input.network) + + regions = gpd.read_file(snakemake.input.regions_onshore).set_index("name") + + proj = load_projection(snakemake.params.plotting) + + fig, ax = plt.subplots(figsize=(8, 8), subplot_kw={"projection": proj}) + regions.to_crs(proj.proj4_init).plot( + ax=ax, facecolor="none", edgecolor="lightgray", linewidth=0.75 + ) + n.plot( + ax=ax, + margin=0.06, + line_widths=n.lines.s_nom / lw_factor, + link_colors=n.links.p_nom.apply( + lambda x: "darkseagreen" if x > 0 else "skyblue" + ), + link_widths=2.0, + ) + + sizes = [10, 20] + labels = [f"HVAC ({s} GW)" for s in sizes] + scale = 1e3 / lw_factor + sizes = [s * scale for s in sizes] + + legend_kw = dict( + loc=[0.25, 0.9], + frameon=False, + labelspacing=0.5, + handletextpad=1, + fontsize=13, + ) + + add_legend_lines( + ax, sizes, labels, patch_kw=dict(color="rosybrown"), legend_kw=legend_kw + ) + + handles = [ + Line2D([0], [0], color="darkseagreen", lw=2), + Line2D([0], [0], color="skyblue", lw=2), + ] + plt.legend( + handles, + ["HVDC existing", "HVDC planned"], + frameon=False, + loc=[0.0, 0.9], + fontsize=13, + ) + + plt.savefig(snakemake.output.map, bbox_inches="tight") diff --git a/scripts/plot_power_network_perfect.py b/scripts/plot_power_network_perfect.py new file mode 100644 index 00000000..f7506a00 --- /dev/null +++ b/scripts/plot_power_network_perfect.py @@ -0,0 +1,200 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Creates plots for optimised power network topologies and regional generation, +storage and conversion capacities built for the perfect foresight scenario. 
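+
+One map is written per investment period, saved to the rule's "map_<year>"
+outputs.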
+""" + +import logging + +import geopandas as gpd +import matplotlib.pyplot as plt +import pandas as pd +import pypsa +from _helpers import configure_logging, set_scenario_config +from plot_power_network import assign_location, load_projection, rename_techs_tyndp +from plot_summary import preferred_order +from pypsa.plot import add_legend_circles, add_legend_lines + +logger = logging.getLogger(__name__) + + +def plot_map_perfect( + n, + components=["Link", "Store", "StorageUnit", "Generator"], + bus_size_factor=2e10, +): + assign_location(n) + # Drop non-electric buses so they don't clutter the plot + n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) + # investment periods + investments = n.snapshots.levels[0] + + costs = {} + for comp in components: + df_c = n.df(comp) + if df_c.empty: + continue + df_c["nice_group"] = df_c.carrier.map(rename_techs_tyndp) + + attr = "e_nom_opt" if comp == "Store" else "p_nom_opt" + + active = pd.concat( + [n.get_active_assets(comp, inv_p).rename(inv_p) for inv_p in investments], + axis=1, + ).astype(int) + capital_cost = n.df(comp)[attr] * n.df(comp).capital_cost + capital_cost_t = ( + (active.mul(capital_cost, axis=0)) + .groupby([n.df(comp).location, n.df(comp).nice_group]) + .sum() + ) + + capital_cost_t.drop("load", level=1, inplace=True, errors="ignore") + + costs[comp] = capital_cost_t + + costs = pd.concat(costs).groupby(level=[1, 2]).sum() + costs.drop(costs[costs.sum(axis=1) == 0].index, inplace=True) + + new_columns = preferred_order.intersection(costs.index.levels[1]).append( + costs.index.levels[1].difference(preferred_order) + ) + costs = costs.reindex(new_columns, level=1) + + for item in new_columns: + if item not in snakemake.config["plotting"]["tech_colors"]: + print( + "Warning!", + item, + "not in config/plotting/tech_colors, assign random color", + ) + snakemake.config["plotting"]["tech_colors"] = "pink" + + n.links.drop( + n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")], + inplace=True, + ) + + # drop non-bus + to_drop = costs.index.levels[0].symmetric_difference(n.buses.index) + if len(to_drop) != 0: + print("dropping non-buses", to_drop) + costs.drop(to_drop, level=0, inplace=True, axis=0, errors="ignore") + + # make sure they are removed from index + costs.index = pd.MultiIndex.from_tuples(costs.index.values) + + # PDF has minimum width, so set these to zero + line_lower_threshold = 500.0 + line_upper_threshold = 1e4 + linewidth_factor = 2e3 + ac_color = "gray" + dc_color = "m" + + line_widths = n.lines.s_nom_opt + link_widths = n.links.p_nom_opt + linewidth_factor = 2e3 + line_lower_threshold = 0.0 + title = "Today's transmission" + + line_widths[line_widths < line_lower_threshold] = 0.0 + link_widths[link_widths < line_lower_threshold] = 0.0 + + line_widths[line_widths > line_upper_threshold] = line_upper_threshold + link_widths[link_widths > line_upper_threshold] = line_upper_threshold + + for year in costs.columns: + fig, ax = plt.subplots(subplot_kw={"projection": proj}) + fig.set_size_inches(7, 6) + fig.suptitle(year) + + n.plot( + bus_sizes=costs[year] / bus_size_factor, + bus_colors=snakemake.config["plotting"]["tech_colors"], + line_colors=ac_color, + link_colors=dc_color, + line_widths=line_widths / linewidth_factor, + link_widths=link_widths / linewidth_factor, + ax=ax, + **map_opts, + ) + + sizes = [20, 10, 5] + labels = [f"{s} bEUR/a" for s in sizes] + sizes = [s / bus_size_factor * 1e9 for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0.01, 1.06), + 
labelspacing=0.8, + frameon=False, + handletextpad=0, + title="system cost", + ) + + add_legend_circles( + ax, + sizes, + labels, + srid=n.srid, + patch_kw=dict(facecolor="lightgrey"), + legend_kw=legend_kw, + ) + + sizes = [10, 5] + labels = [f"{s} GW" for s in sizes] + scale = 1e3 / linewidth_factor + sizes = [s * scale for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0.27, 1.06), + frameon=False, + labelspacing=0.8, + handletextpad=1, + title=title, + ) + + add_legend_lines( + ax, sizes, labels, patch_kw=dict(color="lightgrey"), legend_kw=legend_kw + ) + + legend_kw = dict( + bbox_to_anchor=(1.52, 1.04), + frameon=False, + ) + + fig.savefig(snakemake.output[f"map_{year}"], bbox_inches="tight") + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "plot_power_network_perfect", + simpl="", + opts="", + clusters="37", + ll="v1.0", + sector_opts="4380H-T-H-B-I-A-dist1", + ) + + configure_logging(snakemake) + set_scenario_config(snakemake) + + n = pypsa.Network(snakemake.input.network) + + regions = gpd.read_file(snakemake.input.regions).set_index("name") + + map_opts = snakemake.params.plotting["map"] + + if map_opts["boundaries"] is None: + map_opts["boundaries"] = regions.total_bounds[[0, 2, 1, 3]] + [-1, 1, -1, 1] + + proj = load_projection(snakemake.params.plotting) + + plot_map_perfect(n) diff --git a/scripts/plot_statistics.py b/scripts/plot_statistics.py index b2728931..738fa618 100644 --- a/scripts/plot_statistics.py +++ b/scripts/plot_statistics.py @@ -1,13 +1,13 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT import matplotlib.pyplot as plt import pypsa import seaborn as sns -from _helpers import configure_logging +from _helpers import configure_logging, set_scenario_config sns.set_theme("paper", style="whitegrid") @@ -24,6 +24,7 @@ if __name__ == "__main__": ll="v1.0", ) configure_logging(snakemake) + set_scenario_config(snakemake) n = pypsa.Network(snakemake.input.network) @@ -58,7 +59,7 @@ if __name__ == "__main__": fig, ax = plt.subplots() ds = n.statistics.installed_capacity().dropna() ds = ds.drop("Line") - ds = ds.drop(("Generator", "Load")) + ds = ds.drop(("Generator", "Load"), errors="ignore") ds = ds / 1e3 ds.attrs["unit"] = "GW" plot_static_per_carrier(ds, ax) @@ -67,7 +68,7 @@ if __name__ == "__main__": fig, ax = plt.subplots() ds = n.statistics.optimal_capacity() ds = ds.drop("Line") - ds = ds.drop(("Generator", "Load")) + ds = ds.drop(("Generator", "Load"), errors="ignore") ds = ds / 1e3 ds.attrs["unit"] = "GW" plot_static_per_carrier(ds, ax) diff --git a/scripts/plot_summary.py b/scripts/plot_summary.py index 9f5cdffe..8d983ce9 100644 --- a/scripts/plot_summary.py +++ b/scripts/plot_summary.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -8,17 +8,15 @@ Creates plots from summary CSV files. 
import logging -logger = logging.getLogger(__name__) - import matplotlib.gridspec as gridspec import matplotlib.pyplot as plt -import numpy as np import pandas as pd - -plt.style.use("ggplot") - +from _helpers import configure_logging, set_scenario_config from prepare_sector_network import co2_emissions_year +logger = logging.getLogger(__name__) +plt.style.use("ggplot") + # consolidate and rename def rename_techs(label): @@ -155,7 +153,7 @@ def plot_costs(): df = df.drop(to_drop) - logger.info(f"Total system cost of {round(df.sum()[0])} EUR billion per year") + logger.info(f"Total system cost of {round(df.sum().iloc[0])} EUR billion per year") new_index = preferred_order.intersection(df.index).append( df.index.difference(preferred_order) @@ -215,7 +213,7 @@ def plot_energy(): df = df.drop(to_drop) - logger.info(f"Total energy of {round(df.sum()[0])} TWh/a") + logger.info(f"Total energy of {round(df.sum().iloc[0])} TWh/a") if df.empty: fig, ax = plt.subplots(figsize=(12, 8)) @@ -285,9 +283,14 @@ def plot_balances(): # remove trailing link ports df.index = [ - i[:-1] - if ((i not in ["co2", "NH3", "H2"]) and (i[-1:] in ["0", "1", "2", "3"])) - else i + ( + i[:-1] + if ( + (i not in ["co2", "NH3", "H2"]) + and (i[-1:] in ["0", "1", "2", "3", "4"]) + ) + else i + ) for i in df.index ] @@ -305,7 +308,9 @@ def plot_balances(): df = df.drop(to_drop) - logger.debug(f"Total energy balance for {v} of {round(df.sum()[0],2)} {units}") + logger.debug( + f"Total energy balance for {v} of {round(df.sum().iloc[0],2)} {units}" + ) if df.empty: continue @@ -424,13 +429,13 @@ def historical_emissions(countries): ) emissions = co2_totals.loc["electricity"] - if "T" in opts: + if options["transport"]: emissions += co2_totals.loc[[i + " non-elec" for i in ["rail", "road"]]].sum() - if "H" in opts: + if options["heating"]: emissions += co2_totals.loc[ [i + " non-elec" for i in ["residential", "services"]] ].sum() - if "I" in opts: + if options["industry"]: emissions += co2_totals.loc[ [ "industrial non-elec", @@ -444,7 +449,7 @@ def historical_emissions(countries): return emissions -def plot_carbon_budget_distribution(input_eurostat): +def plot_carbon_budget_distribution(input_eurostat, options): """ Plot historical carbon emissions in the EU and decarbonization path. 
""" @@ -458,7 +463,6 @@ def plot_carbon_budget_distribution(input_eurostat): plt.rcParams["ytick.labelsize"] = 20 emissions_scope = snakemake.params.emissions_scope - report_year = snakemake.params.eurostat_report_year input_co2 = snakemake.input.co2 # historic emissions @@ -466,9 +470,8 @@ def plot_carbon_budget_distribution(input_eurostat): e_1990 = co2_emissions_year( countries, input_eurostat, - opts, + options, emissions_scope, - report_year, input_co2, year=1990, ) @@ -569,7 +572,8 @@ if __name__ == "__main__": snakemake = mock_snakemake("plot_summary") - logging.basicConfig(level=snakemake.config["logging"]["level"]) + configure_logging(snakemake) + set_scenario_config(snakemake) n_header = 4 @@ -579,7 +583,9 @@ if __name__ == "__main__": plot_balances() - for sector_opts in snakemake.params.sector_opts: - opts = sector_opts.split("-") - if any("cb" in o for o in opts) or snakemake.config["foresight"] == "perfect": - plot_carbon_budget_distribution(snakemake.input.eurostat) + co2_budget = snakemake.params["co2_budget"] + if ( + isinstance(co2_budget, str) and co2_budget.startswith("cb") + ) or snakemake.params["foresight"] == "perfect": + options = snakemake.params.sector + plot_carbon_budget_distribution(snakemake.input.eurostat, options) diff --git a/scripts/plot_validation_cross_border_flows.py b/scripts/plot_validation_cross_border_flows.py index 65f4f8c7..8de7d8a1 100644 --- a/scripts/plot_validation_cross_border_flows.py +++ b/scripts/plot_validation_cross_border_flows.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -9,7 +9,7 @@ import matplotlib.pyplot as plt import pandas as pd import pypsa import seaborn as sns -from _helpers import configure_logging +from _helpers import configure_logging, set_scenario_config sns.set_theme("paper", style="whitegrid") @@ -187,6 +187,7 @@ if __name__ == "__main__": ll="v1.0", ) configure_logging(snakemake) + set_scenario_config(snakemake) countries = snakemake.params.countries diff --git a/scripts/plot_validation_electricity_prices.py b/scripts/plot_validation_electricity_prices.py index 2a187b9f..9efd6c46 100644 --- a/scripts/plot_validation_electricity_prices.py +++ b/scripts/plot_validation_electricity_prices.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -8,8 +8,7 @@ import matplotlib.pyplot as plt import pandas as pd import pypsa import seaborn as sns -from _helpers import configure_logging -from pypsa.statistics import get_bus_and_carrier +from _helpers import configure_logging, set_scenario_config sns.set_theme("paper", style="whitegrid") @@ -25,6 +24,7 @@ if __name__ == "__main__": ll="v1.0", ) configure_logging(snakemake) + set_scenario_config(snakemake) n = pypsa.Network(snakemake.input.network) n.loads.carrier = "load" diff --git a/scripts/plot_validation_electricity_production.py b/scripts/plot_validation_electricity_production.py index 5c5569d0..5a68cfa5 100644 --- a/scripts/plot_validation_electricity_production.py +++ b/scripts/plot_validation_electricity_production.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: 
MIT @@ -8,7 +8,7 @@ import matplotlib.pyplot as plt import pandas as pd import pypsa import seaborn as sns -from _helpers import configure_logging +from _helpers import configure_logging, set_scenario_config from pypsa.statistics import get_bus_and_carrier sns.set_theme("paper", style="whitegrid") @@ -35,6 +35,7 @@ if __name__ == "__main__": ll="v1.0", ) configure_logging(snakemake) + set_scenario_config(snakemake) n = pypsa.Network(snakemake.input.network) n.loads.carrier = "load" @@ -45,6 +46,12 @@ if __name__ == "__main__": header=[0, 1], parse_dates=True, ) + subset_technologies = ["Geothermal", "Nuclear", "Biomass", "Lignite", "Oil", "Coal"] + lowercase_technologies = [ + technology.lower() if technology in subset_technologies else technology + for technology in historic.columns.levels[1] + ] + historic.columns = historic.columns.set_levels(lowercase_technologies, level=1) colors = n.carriers.set_index("nice_name").color.where( lambda s: s != "", "lightgrey" diff --git a/scripts/prepare_links_p_nom.py b/scripts/prepare_links_p_nom.py index 4b915d22..7c1ed211 100644 --- a/scripts/prepare_links_p_nom.py +++ b/scripts/prepare_links_p_nom.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -40,7 +40,7 @@ Description import logging import pandas as pd -from _helpers import configure_logging +from _helpers import configure_logging, set_scenario_config logger = logging.getLogger(__name__) @@ -69,6 +69,7 @@ if __name__ == "__main__": snakemake = mock_snakemake("prepare_links_p_nom", simpl="") configure_logging(snakemake) + set_scenario_config(snakemake) links_p_nom = pd.read_html( "https://en.wikipedia.org/wiki/List_of_HVDC_projects", header=0, match="SwePol" diff --git a/scripts/prepare_network.py b/scripts/prepare_network.py index 632e6078..00cb00bf 100755 --- a/scripts/prepare_network.py +++ b/scripts/prepare_network.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -58,12 +58,15 @@ Description """ import logging -import re import numpy as np import pandas as pd import pypsa -from _helpers import configure_logging, find_opt, get_opt +from _helpers import ( + configure_logging, + set_scenario_config, + update_config_from_wildcards, +) from add_electricity import load_costs, update_transmission_costs from pypsa.descriptors import expand_series @@ -72,6 +75,28 @@ idx = pd.IndexSlice logger = logging.getLogger(__name__) +def maybe_adjust_costs_and_potentials(n, adjustments): + if not adjustments: + return + + for attr, carrier_factor in adjustments.items(): + for carrier, factor in carrier_factor.items(): + # beware if factor is 0 and p_nom_max is np.inf, 0*np.inf is nan + if carrier == "AC": # lines do not have carrier + n.lines[attr] *= factor + continue + comps = { + "p_nom_max": {"Generator", "Link", "StorageUnit"}, + "e_nom_max": {"Store"}, + "capital_cost": {"Generator", "Link", "StorageUnit", "Store"}, + "marginal_cost": {"Generator", "Link", "StorageUnit", "Store"}, + } + for c in n.iterate_components(comps[attr]): + sel = c.df.index[c.df.carrier == carrier] + c.df.loc[sel, attr] *= factor + logger.info(f"changing {attr} for {carrier} by factor {factor}") + + def add_co2limit(n, co2limit, Nyears=1.0): n.add( "GlobalConstraint", @@ -179,6 +204,9 @@ def 
average_every_nhours(n, offset): m = n.copy(with_time=False) snapshot_weightings = n.snapshot_weightings.resample(offset).sum() + sns = snapshot_weightings.index + if snakemake.params.drop_leap_day: + sns = sns[~((sns.month == 2) & (sns.day == 29))] m.set_snapshots(snapshot_weightings.index) m.snapshot_weightings = snapshot_weightings @@ -195,7 +223,7 @@ def apply_time_segmentation(n, segments, solver_name="cbc"): logger.info(f"Aggregating time series to {segments} segments.") try: import tsam.timeseriesaggregation as tsam - except: + except ImportError: raise ModuleNotFoundError( "Optional dependency 'tsam' not found." "Install via 'pip install tsam'" ) @@ -266,12 +294,12 @@ def set_line_nom_max( n.lines["s_nom_max"] = n.lines["s_nom"] + s_nom_max_ext if np.isfinite(p_nom_max_ext) and p_nom_max_ext > 0: - logger.info(f"Limiting line extensions to {p_nom_max_ext} MW") + logger.info(f"Limiting link extensions to {p_nom_max_ext} MW") hvdc = n.links.index[n.links.carrier == "DC"] n.links.loc[hvdc, "p_nom_max"] = n.links.loc[hvdc, "p_nom"] + p_nom_max_ext - n.lines.s_nom_max.clip(upper=s_nom_max_set, inplace=True) - n.links.p_nom_max.clip(upper=p_nom_max_set, inplace=True) + n.lines["s_nom_max"] = n.lines.s_nom_max.clip(upper=s_nom_max_set) + n.links["p_nom_max"] = n.links.p_nom_max.clip(upper=p_nom_max_set) if __name__ == "__main__": @@ -279,11 +307,15 @@ if __name__ == "__main__": from _helpers import mock_snakemake snakemake = mock_snakemake( - "prepare_network", simpl="", clusters="37", ll="v1.0", opts="Ept" + "prepare_network", + simpl="", + clusters="37", + ll="v1.0", + opts="Co2L-4H", ) configure_logging(snakemake) - - opts = snakemake.wildcards.opts.split("-") + set_scenario_config(snakemake) + update_config_from_wildcards(snakemake.config, snakemake.wildcards) n = pypsa.Network(snakemake.input[0]) Nyears = n.snapshot_weightings.objective.sum() / 8760.0 @@ -297,81 +329,35 @@ if __name__ == "__main__": set_line_s_max_pu(n, snakemake.params.lines["s_max_pu"]) # temporal averaging - nhours_config = snakemake.params.snapshots.get("resolution", False) - nhours_wildcard = get_opt(opts, r"^\d+h$") - nhours = nhours_wildcard or nhours_config - if nhours: - n = average_every_nhours(n, nhours) + time_resolution = snakemake.params.time_resolution + is_string = isinstance(time_resolution, str) + if is_string and time_resolution.lower().endswith("h"): + n = average_every_nhours(n, time_resolution) # segments with package tsam - time_seg_config = snakemake.params.snapshots.get("segmentation", False) - time_seg_wildcard = get_opt(opts, r"^\d+seg$") - time_seg = time_seg_wildcard or time_seg_config - if time_seg: + if is_string and time_resolution.lower().endswith("seg"): solver_name = snakemake.config["solving"]["solver"]["name"] - n = apply_time_segmentation(n, time_seg.replace("seg", ""), solver_name) + segments = int(time_resolution.replace("seg", "")) + n = apply_time_segmentation(n, segments, solver_name) - Co2L_config = snakemake.params.co2limit_enable - Co2L_wildcard, co2limit_wildcard = find_opt(opts, "Co2L") - if Co2L_wildcard or Co2L_config: - if co2limit_wildcard is not None: - co2limit = co2limit_wildcard * snakemake.params.co2base - add_co2limit(n, co2limit, Nyears) - logger.info("Setting CO2 limit according to wildcard value.") - else: - add_co2limit(n, snakemake.params.co2limit, Nyears) - logger.info("Setting CO2 limit according to config value.") + if snakemake.params.co2limit_enable: + add_co2limit(n, snakemake.params.co2limit, Nyears) - CH4L_config = snakemake.params.gaslimit_enable 
- CH4L_wildcard, gaslimit_wildcard = find_opt(opts, "CH4L") - if CH4L_wildcard or CH4L_config: - if gaslimit_wildcard is not None: - gaslimit = gaslimit_wildcard * 1e6 - add_gaslimit(n, gaslimit, Nyears) - logger.info("Setting gas usage limit according to wildcard value.") - else: - add_gaslimit(n, snakemake.params.gaslimit, Nyears) - logger.info("Setting gas usage limit according to config value.") + if snakemake.params.gaslimit_enable: + add_gaslimit(n, snakemake.params.gaslimit, Nyears) - for o in opts: - if "+" not in o: - continue - oo = o.split("+") - suptechs = map(lambda c: c.split("-", 2)[0], n.carriers.index) - if oo[0].startswith(tuple(suptechs)): - carrier = oo[0] - # handles only p_nom_max as stores and lines have no potentials - attr_lookup = {"p": "p_nom_max", "c": "capital_cost", "m": "marginal_cost"} - attr = attr_lookup[oo[1][0]] - factor = float(oo[1][1:]) - if carrier == "AC": # lines do not have carrier - n.lines[attr] *= factor - else: - comps = {"Generator", "Link", "StorageUnit", "Store"} - for c in n.iterate_components(comps): - sel = c.df.carrier.str.contains(carrier) - c.df.loc[sel, attr] *= factor + maybe_adjust_costs_and_potentials(n, snakemake.params["adjustments"]) emission_prices = snakemake.params.costs["emission_prices"] - Ept_config = emission_prices.get("co2_monthly_prices", False) - Ept_wildcard = "Ept" in opts - Ep_config = emission_prices.get("enable", False) - Ep_wildcard, co2_wildcard = find_opt(opts, "Ep") - - if Ept_wildcard or Ept_config: + if emission_prices["co2_monthly_prices"]: logger.info( "Setting time dependent emission prices according spot market price" ) add_dynamic_emission_prices(n) - elif Ep_wildcard or Ep_config: - if co2_wildcard is not None: - logger.info("Setting CO2 prices according to wildcard value.") - add_emission_prices(n, dict(co2=co2_wildcard)) - else: - logger.info("Setting CO2 prices according to config value.") - add_emission_prices( - n, dict(co2=snakemake.params.costs["emission_prices"]["co2"]) - ) + elif emission_prices["enable"]: + add_emission_prices( + n, dict(co2=snakemake.params.costs["emission_prices"]["co2"]) + ) ll_type, factor = snakemake.wildcards.ll[0], snakemake.wildcards.ll[1:] set_transmission_limit(n, ll_type, factor, costs, Nyears) @@ -384,11 +370,8 @@ if __name__ == "__main__": p_nom_max_ext=snakemake.params.links.get("max_extension", np.inf), ) - autarky_config = snakemake.params.autarky - if "ATK" in opts or autarky_config.get("enable", False): - only_crossborder = False - if "ATKc" in opts or autarky_config.get("by_country", False): - only_crossborder = True + if snakemake.params.autarky["enable"]: + only_crossborder = snakemake.params.autarky["by_country"] enforce_autarky(n, only_crossborder=only_crossborder) n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards))) diff --git a/scripts/prepare_perfect_foresight.py b/scripts/prepare_perfect_foresight.py index 00f23fab..fea0cef4 100644 --- a/scripts/prepare_perfect_foresight.py +++ b/scripts/prepare_perfect_foresight.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -12,7 +12,11 @@ import re import numpy as np import pandas as pd import pypsa -from _helpers import update_config_with_sector_opts +from _helpers import ( + configure_logging, + set_scenario_config, + update_config_from_wildcards, +) from add_existing_baseyear import add_build_year_to_new_assets from 
pypsa.descriptors import expand_series from pypsa.io import import_components_from_dataframe @@ -56,7 +60,9 @@ def get_investment_weighting(time_weighting, r=0.01): end = time_weighting.cumsum() start = time_weighting.cumsum().shift().fillna(0) return pd.concat([start, end], axis=1).apply( - lambda x: sum(get_social_discount(t, r) for t in range(int(x[0]), int(x[1]))), + lambda x: sum( + get_social_discount(t, r) for t in range(int(x.iloc[0]), int(x.iloc[1])) + ), axis=1, ) @@ -162,15 +168,17 @@ def concat_networks(years): add_build_year_to_new_assets(network, year) # static ---------------------------------- - # (1) add buses and carriers - for component in network.iterate_components(["Bus", "Carrier"]): - df_year = component.df - # get missing assets - missing = get_missing(df_year, n, component.list_name) - import_components_from_dataframe(n, missing, component.name) - # (2) add generators, links, stores and loads for component in network.iterate_components( - ["Generator", "Link", "Store", "Load", "Line", "StorageUnit"] + [ + "Bus", + "Carrier", + "Generator", + "Link", + "Store", + "Load", + "Line", + "StorageUnit", + ] ): df_year = component.df.copy() missing = get_missing(df_year, n, component.list_name) @@ -186,7 +194,7 @@ def concat_networks(years): pnl = getattr(n, component.list_name + "_t") for k in iterkeys(component.pnl): pnl_year = component.pnl[k].copy().reindex(snapshots, level=1) - if pnl_year.empty and ~(component.name == "Load" and k == "p_set"): + if pnl_year.empty and (not (component.name == "Load" and k == "p_set")): continue if component.name == "Load": static_load = network.loads.loc[network.loads.p_set != 0] @@ -199,8 +207,13 @@ def concat_networks(years): pnl[k].loc[pnl_year.index, pnl_year.columns] = pnl_year else: - # this is to avoid adding multiple times assets with - # infinite lifetime as ror + # For components that aren't new, we just extend + # time-varying data from the previous investment + # period. + if i > 0: + pnl[k].loc[(year,)] = pnl[k].loc[(years[i - 1],)].values + + # Now, add time-varying data for new components. cols = pnl_year.columns.difference(pnl[k].columns) pnl[k] = pd.concat([pnl[k], pnl_year[cols]], axis=1) @@ -214,7 +227,7 @@ def concat_networks(years): # set investment periods n.investment_periods = n.snapshots.levels[0] # weighting of the investment period -> assuming last period same weighting as the period before - time_w = n.investment_periods.to_series().diff().shift(-1).fillna(method="ffill") + time_w = n.investment_periods.to_series().diff().shift(-1).ffill() n.investment_period_weightings["years"] = time_w # set objective weightings objective_w = get_investment_weighting( @@ -295,17 +308,14 @@ def set_all_phase_outs(n): n.mremove("Link", remove_i) -def set_carbon_constraints(n, opts): +def set_carbon_constraints(n): """ Add global constraints for carbon emissions. 
""" - budget = None - for o in opts: - # other budgets - m = re.match(r"^\d+p\d$", o, re.IGNORECASE) - if m is not None: - budget = snakemake.config["co2_budget"][m.group(0)] * 1e9 - if budget != None: + budget = snakemake.config["co2_budget"] + if budget and isinstance(budget, float): + budget *= 1e9 # convert to t CO2 + logger.info(f"add carbon budget of {budget}") n.add( "GlobalConstraint", @@ -332,7 +342,7 @@ def set_carbon_constraints(n, opts): ) # set minimum CO2 emission constraint to avoid too fast reduction - if "co2min" in opts: + if "co2min" in snakemake.wildcards.sector_opts.split("-"): emissions_1990 = 4.53693 emissions_2019 = 3.344096 target_2030 = 0.45 * emissions_1990 @@ -428,7 +438,7 @@ def apply_time_segmentation_perfect( """ try: import tsam.timeseriesaggregation as tsam - except: + except ImportError: raise ModuleNotFoundError( "Optional dependency 'tsam' not found." "Install via 'pip install tsam'" ) @@ -478,21 +488,6 @@ def apply_time_segmentation_perfect( return n -def set_temporal_aggregation_SEG(n, opts, solver_name): - """ - Aggregate network temporally with tsam. - """ - for o in opts: - # segments with package tsam - m = re.match(r"^(\d+)seg$", o, re.IGNORECASE) - if m is not None: - segments = int(m[1]) - logger.info(f"Use temporal segmentation with {segments} segments") - n = apply_time_segmentation_perfect(n, segments, solver_name=solver_name) - break - return n - - if __name__ == "__main__": if "snakemake" not in globals(): from _helpers import mock_snakemake @@ -503,17 +498,15 @@ if __name__ == "__main__": opts="", clusters="37", ll="v1.5", - sector_opts="1p7-4380H-T-H-B-I-A-solar+p3-dist1", + sector_opts="1p7-4380H-T-H-B-I-A-dist1", ) + configure_logging(snakemake) + set_scenario_config(snakemake) - update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts) + update_config_from_wildcards(snakemake.config, snakemake.wildcards) # parameters ----------------------------------------------------------- years = snakemake.config["scenario"]["planning_horizons"] - opts = snakemake.wildcards.sector_opts.split("-") - social_discountrate = snakemake.config["costs"]["social_discountrate"] - for o in opts: - if "sdr" in o: - social_discountrate = float(o.replace("sdr", "")) / 100 + social_discountrate = snakemake.params.costs["social_discountrate"] logger.info( f"Concat networks of investment period {years} with social discount rate of {social_discountrate * 100}%" @@ -523,9 +516,10 @@ if __name__ == "__main__": n = concat_networks(years) # temporal aggregate - opts = snakemake.wildcards.sector_opts.split("-") solver_name = snakemake.config["solving"]["solver"]["name"] - n = set_temporal_aggregation_SEG(n, opts, solver_name) + segments = snakemake.params.time_resolution + if isinstance(segments, (int, float)): + n = apply_time_segmentation_perfect(n, segments, solver_name=solver_name) # adjust global constraints lv limit if the same for all years n = adjust_lvlimit(n) @@ -541,8 +535,10 @@ if __name__ == "__main__": add_H2_boilers(n) # set carbon constraints - opts = snakemake.wildcards.sector_opts.split("-") - n = set_carbon_constraints(n, opts) + n = set_carbon_constraints(n) + + # update meta + n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards))) # export network n.export_to_netcdf(snakemake.output[0]) diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py index 37d6f0d2..9f53e317 100755 --- a/scripts/prepare_sector_network.py +++ b/scripts/prepare_sector_network.py @@ -1,5 +1,5 @@ # -*- 
coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -9,33 +9,35 @@ technologies for the buildings, transport and industry sectors. import logging import os -import re from itertools import product +from types import SimpleNamespace import networkx as nx import numpy as np import pandas as pd import pypsa import xarray as xr -from _helpers import generate_periodic_profiles, update_config_with_sector_opts -from add_electricity import calculate_annuity, sanitize_carriers -from build_energy_totals import build_co2_totals, build_eea_co2, build_eurostat_co2 +from _helpers import ( + configure_logging, + set_scenario_config, + update_config_from_wildcards, +) +from add_electricity import calculate_annuity, sanitize_carriers, sanitize_locations +from build_energy_totals import ( + build_co2_totals, + build_eea_co2, + build_eurostat, + build_eurostat_co2, +) from networkx.algorithms import complement from networkx.algorithms.connectivity.edge_augmentation import k_edge_augmentation +from prepare_network import maybe_adjust_costs_and_potentials from pypsa.geo import haversine_pts from pypsa.io import import_components_from_dataframe from scipy.stats import beta -logger = logging.getLogger(__name__) - -from types import SimpleNamespace - spatial = SimpleNamespace() - -from packaging.version import Version, parse - -pd_version = parse(pd.__version__) -agg_group_kwargs = dict(numeric_only=False) if pd_version >= Version("1.3") else {} +logger = logging.getLogger(__name__) def define_spatial(nodes, options): @@ -95,7 +97,7 @@ def define_spatial(nodes, options): spatial.gas.industry = nodes + " gas for industry" spatial.gas.industry_cc = nodes + " gas for industry CC" spatial.gas.biogas_to_gas = nodes + " biogas to gas" - spatial.gas.biogas_to_gas_cc = nodes + "biogas to gas CC" + spatial.gas.biogas_to_gas_cc = nodes + " biogas to gas CC" else: spatial.gas.nodes = ["EU gas"] spatial.gas.locations = ["EU"] @@ -179,6 +181,13 @@ def define_spatial(nodes, options): spatial.coal.nodes = ["EU coal"] spatial.coal.locations = ["EU"] + if options["regional_coal_demand"]: + spatial.coal.demand_locations = nodes + spatial.coal.industry = nodes + " coal for industry" + else: + spatial.coal.demand_locations = ["EU"] + spatial.coal.industry = ["EU coal for industry"] + # lignite spatial.lignite = SimpleNamespace() spatial.lignite.nodes = ["EU lignite"] @@ -187,18 +196,16 @@ def define_spatial(nodes, options): return spatial -from types import SimpleNamespace - spatial = SimpleNamespace() -def emission_sectors_from_opts(opts): +def determine_emission_sectors(options): sectors = ["electricity"] - if "T" in opts: + if options["transport"]: sectors += ["rail non-elec", "road non-elec"] - if "H" in opts: + if options["heating"]: sectors += ["residential non-elec", "services non-elec"] - if "I" in opts: + if options["industry"]: sectors += [ "industrial non-elec", "industrial processes", @@ -207,7 +214,7 @@ def emission_sectors_from_opts(opts): "domestic navigation", "international navigation", ] - if "A" in opts: + if options["agriculture"]: sectors += ["agriculture"] return sectors @@ -217,29 +224,50 @@ def get(item, investment_year=None): """ Check whether item depends on investment year. 
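    Example (illustrative values): for a dictionary keyed by planning-horizon
    years, get({2030: 0.6, 2050: 1.0}, 2040) returns 0.8 by linear interpolation
    between the two neighbouring years, while years outside the covered range
    fall back to the nearest key.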
""" - return item[investment_year] if isinstance(item, dict) else item + if not isinstance(item, dict): + return item + elif investment_year in item.keys(): + return item[investment_year] + else: + logger.warning( + f"Investment key {investment_year} not found in dictionary {item}." + ) + keys = sorted(item.keys()) + if investment_year < keys[0]: + logger.warning(f"Lower than minimum key. Taking minimum key {keys[0]}") + return item[keys[0]] + elif investment_year > keys[-1]: + logger.warning(f"Higher than maximum key. Taking maximum key {keys[0]}") + return item[keys[-1]] + else: + logger.warning( + "Interpolate linearly between the next lower and next higher year." + ) + lower_key = max(k for k in keys if k < investment_year) + higher_key = min(k for k in keys if k > investment_year) + lower = item[lower_key] + higher = item[higher_key] + return lower + (higher - lower) * (investment_year - lower_key) / ( + higher_key - lower_key + ) def co2_emissions_year( - countries, input_eurostat, opts, emissions_scope, report_year, input_co2, year + countries, input_eurostat, options, emissions_scope, input_co2, year ): """ Calculate CO2 emissions in one specific year (e.g. 1990 or 2018). """ eea_co2 = build_eea_co2(input_co2, year, emissions_scope) - # TODO: read Eurostat data from year > 2014 + eurostat = build_eurostat(input_eurostat, countries) + # this only affects the estimation of CO2 emissions for BA, RS, AL, ME, MK - if year > 2014: - eurostat_co2 = build_eurostat_co2( - input_eurostat, countries, report_year, year=2014 - ) - else: - eurostat_co2 = build_eurostat_co2(input_eurostat, countries, report_year, year) + eurostat_co2 = build_eurostat_co2(eurostat, year) co2_totals = build_co2_totals(countries, eea_co2, eurostat_co2) - sectors = emission_sectors_from_opts(opts) + sectors = determine_emission_sectors(options) co2_emissions = co2_totals.loc[countries, sectors].sum().sum() @@ -250,11 +278,10 @@ def co2_emissions_year( # TODO: move to own rule with sector-opts wildcard? -def build_carbon_budget(o, input_eurostat, fn, emissions_scope, report_year): +def build_carbon_budget(o, input_eurostat, fn, emissions_scope, input_co2, options): """ Distribute carbon budget following beta or exponential transition path. """ - # opts? 
if "be" in o: # beta decay @@ -270,9 +297,8 @@ def build_carbon_budget(o, input_eurostat, fn, emissions_scope, report_year): e_1990 = co2_emissions_year( countries, input_eurostat, - opts, + options, emissions_scope, - report_year, input_co2, year=1990, ) @@ -281,9 +307,8 @@ def build_carbon_budget(o, input_eurostat, fn, emissions_scope, report_year): e_0 = co2_emissions_year( countries, input_eurostat, - opts, + options, emissions_scope, - report_year, input_co2, year=2018, ) @@ -425,8 +450,15 @@ def update_wind_solar_costs(n, costs): # code adapted from pypsa-eur/scripts/add_electricity.py for connection in ["dc", "ac"]: tech = "offwind-" + connection - profile = snakemake.input["profile_offwind_" + connection] + if tech not in n.generators.carrier.values: + continue + profile = snakemake.input["profile_offwind-" + connection] with xr.open_dataset(profile) as ds: + + # if-statement for compatibility with old profiles + if "year" in ds.indexes: + ds = ds.sel(year=ds.year.min(), drop=True) + underwater_fraction = ds["underwater_fraction"].to_pandas() connection_cost = ( snakemake.params.length_factor @@ -464,9 +496,9 @@ def update_wind_solar_costs(n, costs): ) ) - n.generators.loc[ - n.generators.carrier == tech, "capital_cost" - ] = capital_cost.rename(index=lambda node: node + " " + tech) + n.generators.loc[n.generators.carrier == tech, "capital_cost"] = ( + capital_cost.rename(index=lambda node: node + " " + tech) + ) def add_carrier_buses(n, carrier, nodes=None): @@ -552,6 +584,17 @@ def patch_electricity_network(n): n.loads_t.p_set.rename(lambda x: x.strip(), axis=1, inplace=True) +def add_eu_bus(n, x=-5.5, y=46): + """ + Add EU bus to the network. + + This cosmetic bus serves as a reference point for the location of + the EU buses in the plots and summaries. 
+ """ + n.add("Bus", "EU", location="EU", x=x, y=y, carrier="none") + n.add("Carrier", "none") + + def add_co2_tracking(n, costs, options): # minus sign because opposite to how fossil fuels used: # CH4 burning puts CH4 down, atmosphere up @@ -716,38 +759,38 @@ def add_dac(n, costs): heat_buses = n.buses.index[n.buses.carrier.isin(heat_carriers)] locations = n.buses.location[heat_buses] - efficiency2 = -( + electricity_input = ( costs.at["direct air capture", "electricity-input"] + costs.at["direct air capture", "compression-electricity-input"] - ) - efficiency3 = -( + ) # MWh_el / tCO2 + heat_input = ( costs.at["direct air capture", "heat-input"] - costs.at["direct air capture", "compression-heat-output"] - ) + ) # MWh_th / tCO2 n.madd( "Link", heat_buses.str.replace(" heat", " DAC"), - bus0="co2 atmosphere", - bus1=spatial.co2.df.loc[locations, "nodes"].values, - bus2=locations.values, - bus3=heat_buses, + bus0=locations.values, + bus1=heat_buses, + bus2="co2 atmosphere", + bus3=spatial.co2.df.loc[locations, "nodes"].values, carrier="DAC", - capital_cost=costs.at["direct air capture", "fixed"], - efficiency=1.0, - efficiency2=efficiency2, - efficiency3=efficiency3, + capital_cost=costs.at["direct air capture", "fixed"] / electricity_input, + efficiency=-heat_input / electricity_input, + efficiency2=-1 / electricity_input, + efficiency3=1 / electricity_input, p_nom_extendable=True, lifetime=costs.at["direct air capture", "lifetime"], ) -def add_co2limit(n, nyears=1.0, limit=0.0): +def add_co2limit(n, options, nyears=1.0, limit=0.0): logger.info(f"Adding CO2 budget limit as per unit of 1990 levels of {limit}") countries = snakemake.params.countries - sectors = emission_sectors_from_opts(opts) + sectors = determine_emission_sectors(options) # convert Mt to tCO2 co2_totals = 1e6 * pd.read_csv(snakemake.input.co2_totals_name, index_col=0) @@ -761,6 +804,7 @@ def add_co2limit(n, nyears=1.0, limit=0.0): "CO2Limit", carrier_attribute="co2_emissions", sense="<=", + type="co2_atmosphere", constant=co2_limit, ) @@ -771,6 +815,10 @@ def average_every_nhours(n, offset): m = n.copy(with_time=False) snapshot_weightings = n.snapshot_weightings.resample(offset).sum() + sns = snapshot_weightings.index + if snakemake.params.drop_leap_day: + sns = sns[~((sns.month == 2) & (sns.day == 29))] + snapshot_weightings = snapshot_weightings.loc[sns] m.set_snapshots(snapshot_weightings.index) m.snapshot_weightings = snapshot_weightings @@ -913,47 +961,6 @@ def add_ammonia(n, costs): ) -def add_wave(n, wave_cost_factor): - # TODO: handle in Snakefile - wave_fn = "data/WindWaveWEC_GLTB.xlsx" - - # in kW - capacity = pd.Series({"Attenuator": 750, "F2HB": 1000, "MultiPA": 600}) - - # in EUR/MW - annuity_factor = calculate_annuity(25, 0.07) + 0.03 - costs = ( - 1e6 - * wave_cost_factor - * annuity_factor - * pd.Series({"Attenuator": 2.5, "F2HB": 2, "MultiPA": 1.5}) - ) - - sheets = pd.read_excel( - wave_fn, - sheet_name=["FirthForth", "Hebrides"], - usecols=["Attenuator", "F2HB", "MultiPA"], - index_col=0, - skiprows=[0], - parse_dates=True, - ) - - wave = pd.concat( - [sheets[l].divide(capacity, axis=1) for l in locations], keys=locations, axis=1 - ) - - for wave_type in costs.index: - n.add( - "Generator", - "Hebrides " + wave_type, - bus="GB4 0", # TODO this location is hardcoded - p_nom_extendable=True, - carrier="wave", - capital_cost=costs[wave_type], - p_max_pu=wave["Hebrides", wave_type], - ) - - def insert_electricity_distribution_grid(n, costs): # TODO pop_layout? # TODO options? 
@@ -1051,6 +1058,7 @@ def insert_electricity_distribution_grid(n, costs): "Store", nodes + " home battery", bus=nodes + " home battery", + location=nodes, e_cyclic=True, e_nom_extendable=True, carrier="home battery", @@ -1499,7 +1507,6 @@ def add_land_transport(n, costs): # TODO options? logger.info("Add land transport") - nhours = n.snapshot_weightings.generators.sum() transport = pd.read_csv( snakemake.input.transport_demand, index_col=0, parse_dates=True @@ -1669,40 +1676,25 @@ def add_land_transport(n, costs): def build_heat_demand(n): - # copy forward the daily average heat demand into each hour, so it can be multiplied by the intraday profile - daily_space_heat_demand = ( - xr.open_dataarray(snakemake.input.heat_demand_total) - .to_pandas() - .reindex(index=n.snapshots, method="ffill") + heat_demand_shape = ( + xr.open_dataset(snakemake.input.hourly_heat_demand_total) + .to_dataframe() + .unstack(level=1) ) - intraday_profiles = pd.read_csv(snakemake.input.heat_profile, index_col=0) - sectors = ["residential", "services"] uses = ["water", "space"] heat_demand = {} electric_heat_supply = {} for sector, use in product(sectors, uses): - weekday = list(intraday_profiles[f"{sector} {use} weekday"]) - weekend = list(intraday_profiles[f"{sector} {use} weekend"]) - weekly_profile = weekday * 5 + weekend * 2 - intraday_year_profile = generate_periodic_profiles( - daily_space_heat_demand.index.tz_localize("UTC"), - nodes=daily_space_heat_demand.columns, - weekly_profile=weekly_profile, - ) + name = f"{sector} {use}" - if use == "space": - heat_demand_shape = daily_space_heat_demand * intraday_year_profile - else: - heat_demand_shape = intraday_year_profile - - heat_demand[f"{sector} {use}"] = ( - heat_demand_shape / heat_demand_shape.sum() + heat_demand[name] = ( + heat_demand_shape[name] / heat_demand_shape[name].sum() ).multiply(pop_weighted_energy_totals[f"total {sector} {use}"]) * 1e6 - electric_heat_supply[f"{sector} {use}"] = ( - heat_demand_shape / heat_demand_shape.sum() + electric_heat_supply[name] = ( + heat_demand_shape[name] / heat_demand_shape[name].sum() ).multiply(pop_weighted_energy_totals[f"electricity {sector} {use}"]) * 1e6 heat_demand = pd.concat(heat_demand, axis=1) @@ -1725,7 +1717,11 @@ def add_heat(n, costs): heat_demand = build_heat_demand(n) - nodes, dist_fraction, urban_fraction = create_nodes_for_heat_sector() + overdim_factor = options["overdimension_individual_heating"] + + district_heat_info = pd.read_csv(snakemake.input.district_heat_share, index_col=0) + dist_fraction = district_heat_info["district fraction of node"] + urban_fraction = district_heat_info["urban fraction"] # NB: must add costs of central heating afterwards (EUR 400 / kWpeak, 50a, 1% FOM from Fraunhofer ISE) @@ -1765,12 +1761,17 @@ def add_heat(n, costs): for name in heat_systems: name_type = "central" if name == "urban central" else "decentral" + if name == "urban central": + nodes = dist_fraction.index[dist_fraction > 0] + else: + nodes = pop_layout.index + n.add("Carrier", name + " heat") n.madd( "Bus", - nodes[name] + f" {name} heat", - location=nodes[name], + nodes + f" {name} heat", + location=nodes, carrier=name + " heat", unit="MWh_th", ) @@ -1778,9 +1779,9 @@ def add_heat(n, costs): if name == "urban central" and options.get("central_heat_vent"): n.madd( "Generator", - nodes[name] + f" {name} heat vent", - bus=nodes[name] + f" {name} heat", - location=nodes[name], + nodes + f" {name} heat vent", + bus=nodes + f" {name} heat", + location=nodes, carrier=name + " heat vent", 
p_nom_extendable=True, p_max_pu=0, @@ -1793,11 +1794,11 @@ def add_heat(n, costs): for sector in sectors: # heat demand weighting if "rural" in name: - factor = 1 - urban_fraction[nodes[name]] + factor = 1 - urban_fraction[nodes] elif "urban central" in name: - factor = dist_fraction[nodes[name]] + factor = dist_fraction[nodes] elif "urban decentral" in name: - factor = urban_fraction[nodes[name]] - dist_fraction[nodes[name]] + factor = urban_fraction[nodes] - dist_fraction[nodes] else: raise NotImplementedError( f" {name} not in " f"heat systems: {heat_systems}" @@ -1808,7 +1809,7 @@ def add_heat(n, costs): heat_demand[[sector + " water", sector + " space"]] .T.groupby(level=1) .sum() - .T[nodes[name]] + .T[nodes] .multiply(factor) ) @@ -1816,7 +1817,7 @@ def add_heat(n, costs): heat_load = ( heat_demand.T.groupby(level=1) .sum() - .T[nodes[name]] + .T[nodes] .multiply( factor * (1 + options["district_heating"]["district_heating_loss"]) ) @@ -1824,54 +1825,56 @@ def add_heat(n, costs): n.madd( "Load", - nodes[name], + nodes, suffix=f" {name} heat", - bus=nodes[name] + f" {name} heat", + bus=nodes + f" {name} heat", carrier=name + " heat", p_set=heat_load, ) ## Add heat pumps - heat_pump_type = "air" if "urban" in name else "ground" + heat_pump_types = ["air"] if "urban" in name else ["ground", "air"] - costs_name = f"{name_type} {heat_pump_type}-sourced heat pump" - efficiency = ( - cop[heat_pump_type][nodes[name]] - if options["time_dep_hp_cop"] - else costs.at[costs_name, "efficiency"] - ) + for heat_pump_type in heat_pump_types: + costs_name = f"{name_type} {heat_pump_type}-sourced heat pump" + efficiency = ( + cop[heat_pump_type][nodes] + if options["time_dep_hp_cop"] + else costs.at[costs_name, "efficiency"] + ) - n.madd( - "Link", - nodes[name], - suffix=f" {name} {heat_pump_type} heat pump", - bus0=nodes[name], - bus1=nodes[name] + f" {name} heat", - carrier=f"{name} {heat_pump_type} heat pump", - efficiency=efficiency, - capital_cost=costs.at[costs_name, "efficiency"] - * costs.at[costs_name, "fixed"], - p_nom_extendable=True, - lifetime=costs.at[costs_name, "lifetime"], - ) + n.madd( + "Link", + nodes, + suffix=f" {name} {heat_pump_type} heat pump", + bus0=nodes, + bus1=nodes + f" {name} heat", + carrier=f"{name} {heat_pump_type} heat pump", + efficiency=efficiency, + capital_cost=costs.at[costs_name, "efficiency"] + * costs.at[costs_name, "fixed"] + * overdim_factor, + p_nom_extendable=True, + lifetime=costs.at[costs_name, "lifetime"], + ) if options["tes"]: n.add("Carrier", name + " water tanks") n.madd( "Bus", - nodes[name] + f" {name} water tanks", - location=nodes[name], + nodes + f" {name} water tanks", + location=nodes, carrier=name + " water tanks", unit="MWh_th", ) n.madd( "Link", - nodes[name] + f" {name} water tanks charger", - bus0=nodes[name] + f" {name} heat", - bus1=nodes[name] + f" {name} water tanks", + nodes + f" {name} water tanks charger", + bus0=nodes + f" {name} heat", + bus1=nodes + f" {name} water tanks", efficiency=costs.at["water tank charger", "efficiency"], carrier=name + " water tanks charger", p_nom_extendable=True, @@ -1879,29 +1882,20 @@ def add_heat(n, costs): n.madd( "Link", - nodes[name] + f" {name} water tanks discharger", - bus0=nodes[name] + f" {name} water tanks", - bus1=nodes[name] + f" {name} heat", + nodes + f" {name} water tanks discharger", + bus0=nodes + f" {name} water tanks", + bus1=nodes + f" {name} heat", carrier=name + " water tanks discharger", efficiency=costs.at["water tank discharger", "efficiency"], p_nom_extendable=True, ) - 
if isinstance(options["tes_tau"], dict): - tes_time_constant_days = options["tes_tau"][name_type] - else: - logger.warning( - "Deprecated: a future version will require you to specify 'tes_tau' ", - "for 'decentral' and 'central' separately.", - ) - tes_time_constant_days = ( - options["tes_tau"] if name_type == "decentral" else 180.0 - ) + tes_time_constant_days = options["tes_tau"][name_type] n.madd( "Store", - nodes[name] + f" {name} water tanks", - bus=nodes[name] + f" {name} water tanks", + nodes + f" {name} water tanks", + bus=nodes + f" {name} water tanks", e_cyclic=True, e_nom_extendable=True, carrier=name + " water tanks", @@ -1915,12 +1909,14 @@ def add_heat(n, costs): n.madd( "Link", - nodes[name] + f" {name} resistive heater", - bus0=nodes[name], - bus1=nodes[name] + f" {name} heat", + nodes + f" {name} resistive heater", + bus0=nodes, + bus1=nodes + f" {name} heat", carrier=name + " resistive heater", efficiency=costs.at[key, "efficiency"], - capital_cost=costs.at[key, "efficiency"] * costs.at[key, "fixed"], + capital_cost=costs.at[key, "efficiency"] + * costs.at[key, "fixed"] + * overdim_factor, p_nom_extendable=True, lifetime=costs.at[key, "lifetime"], ) @@ -1930,15 +1926,17 @@ def add_heat(n, costs): n.madd( "Link", - nodes[name] + f" {name} gas boiler", + nodes + f" {name} gas boiler", p_nom_extendable=True, - bus0=spatial.gas.df.loc[nodes[name], "nodes"].values, - bus1=nodes[name] + f" {name} heat", + bus0=spatial.gas.df.loc[nodes, "nodes"].values, + bus1=nodes + f" {name} heat", bus2="co2 atmosphere", carrier=name + " gas boiler", efficiency=costs.at[key, "efficiency"], efficiency2=costs.at["gas", "CO2 intensity"], - capital_cost=costs.at[key, "efficiency"] * costs.at[key, "fixed"], + capital_cost=costs.at[key, "efficiency"] + * costs.at[key, "fixed"] + * overdim_factor, lifetime=costs.at[key, "lifetime"], ) @@ -1947,13 +1945,14 @@ def add_heat(n, costs): n.madd( "Generator", - nodes[name], + nodes, suffix=f" {name} solar thermal collector", - bus=nodes[name] + f" {name} heat", + bus=nodes + f" {name} heat", carrier=name + " solar thermal", p_nom_extendable=True, - capital_cost=costs.at[name_type + " solar thermal", "fixed"], - p_max_pu=solar_thermal[nodes[name]], + capital_cost=costs.at[name_type + " solar thermal", "fixed"] + * overdim_factor, + p_max_pu=solar_thermal[nodes], lifetime=costs.at[name_type + " solar thermal", "lifetime"], ) @@ -1961,10 +1960,10 @@ def add_heat(n, costs): # add gas CHP; biomass CHP is added in biomass section n.madd( "Link", - nodes[name] + " urban central gas CHP", - bus0=spatial.gas.df.loc[nodes[name], "nodes"].values, - bus1=nodes[name], - bus2=nodes[name] + " urban central heat", + nodes + " urban central gas CHP", + bus0=spatial.gas.df.loc[nodes, "nodes"].values, + bus1=nodes, + bus2=nodes + " urban central heat", bus3="co2 atmosphere", carrier="urban central gas CHP", p_nom_extendable=True, @@ -1980,12 +1979,12 @@ def add_heat(n, costs): n.madd( "Link", - nodes[name] + " urban central gas CHP CC", - bus0=spatial.gas.df.loc[nodes[name], "nodes"].values, - bus1=nodes[name], - bus2=nodes[name] + " urban central heat", + nodes + " urban central gas CHP CC", + bus0=spatial.gas.df.loc[nodes, "nodes"].values, + bus1=nodes, + bus2=nodes + " urban central heat", bus3="co2 atmosphere", - bus4=spatial.co2.df.loc[nodes[name], "nodes"].values, + bus4=spatial.co2.df.loc[nodes, "nodes"].values, carrier="urban central gas CHP CC", p_nom_extendable=True, capital_cost=costs.at["central gas CHP", "fixed"] @@ -2017,11 +2016,11 @@ def add_heat(n, costs): 
if options["chp"] and options["micro_chp"] and name != "urban central": n.madd( "Link", - nodes[name] + f" {name} micro gas CHP", + nodes + f" {name} micro gas CHP", p_nom_extendable=True, - bus0=spatial.gas.df.loc[nodes[name], "nodes"].values, - bus1=nodes[name], - bus2=nodes[name] + f" {name} heat", + bus0=spatial.gas.df.loc[nodes, "nodes"].values, + bus1=nodes, + bus2=nodes + f" {name} heat", bus3="co2 atmosphere", carrier=name + " micro gas CHP", efficiency=costs.at["micro CHP", "efficiency"], @@ -2034,13 +2033,6 @@ def add_heat(n, costs): if options["retrofitting"]["retro_endogen"]: logger.info("Add retrofitting endogenously") - # resample heat demand temporal 'heat_demand_r' depending on in config - # specified temporal resolution, to not overestimate retrofitting - hours = list(filter(re.compile(r"^\d+h$", re.IGNORECASE).search, opts)) - if len(hours) == 0: - hours = [n.snapshots[1] - n.snapshots[0]] - heat_demand_r = heat_demand.resample(hours[0]).mean() - # retrofitting data 'retro_data' with 'costs' [EUR/m^2] and heat # demand 'dE' [per unit of original heat demand] for each country and # different retrofitting strengths [additional insulation thickness in m] @@ -2058,12 +2050,12 @@ def add_heat(n, costs): # share of space heat demand 'w_space' of total heat demand w_space = {} for sector in sectors: - w_space[sector] = heat_demand_r[sector + " space"] / ( - heat_demand_r[sector + " space"] + heat_demand_r[sector + " water"] + w_space[sector] = heat_demand[sector + " space"] / ( + heat_demand[sector + " space"] + heat_demand[sector + " water"] ) w_space["tot"] = ( - heat_demand_r["services space"] + heat_demand_r["residential space"] - ) / heat_demand_r.T.groupby(level=[1]).sum().T + heat_demand["services space"] + heat_demand["residential space"] + ) / heat_demand.T.groupby(level=[1]).sum().T for name in n.loads[ n.loads.carrier.isin([x + " heat" for x in heat_systems]) @@ -2093,7 +2085,7 @@ def add_heat(n, costs): pop_layout.loc[node].fraction * floor_area.loc[ct, "value"] * 10**6 ).loc[sec] * f # total heat demand at node [MWh] - demand = n.loads_t.p_set[name].resample(hours[0]).mean() + demand = n.loads_t.p_set[name] # space heat demand at node [MWh] space_heat_demand = demand * w_space[sec][node] @@ -2152,50 +2144,6 @@ def add_heat(n, costs): ) -def create_nodes_for_heat_sector(): - # TODO pop_layout - - # rural are areas with low heating density and individual heating - # urban are areas with high heating density - # urban can be split into district heating (central) and individual heating (decentral) - - ct_urban = pop_layout.urban.groupby(pop_layout.ct).sum() - # distribution of urban population within a country - pop_layout["urban_ct_fraction"] = pop_layout.urban / pop_layout.ct.map(ct_urban.get) - - sectors = ["residential", "services"] - - nodes = {} - urban_fraction = pop_layout.urban / pop_layout[["rural", "urban"]].sum(axis=1) - - for sector in sectors: - nodes[sector + " rural"] = pop_layout.index - nodes[sector + " urban decentral"] = pop_layout.index - - district_heat_share = pop_weighted_energy_totals["district heat share"] - - # maximum potential of urban demand covered by district heating - central_fraction = options["district_heating"]["potential"] - # district heating share at each node - dist_fraction_node = ( - district_heat_share * pop_layout["urban_ct_fraction"] / pop_layout["fraction"] - ) - nodes["urban central"] = dist_fraction_node.index - # if district heating share larger than urban fraction -> set urban - # fraction to district heating share - 
urban_fraction = pd.concat([urban_fraction, dist_fraction_node], axis=1).max(axis=1) - # difference of max potential and today's share of district heating - diff = (urban_fraction * central_fraction) - dist_fraction_node - progress = get(options["district_heating"]["progress"], investment_year) - dist_fraction_node += diff * progress - logger.info( - f"Increase district heating share by a progress factor of {progress:.2%} " - f"resulting in new average share of {dist_fraction_node.mean():.2%}" - ) - - return nodes, dist_fraction_node, urban_fraction - - def add_biomass(n, costs): logger.info("Add biomass") @@ -2413,7 +2361,7 @@ def add_biomass(n, costs): if options["biomass_boiler"]: # TODO: Add surcharge for pellets - nodes_heat = create_nodes_for_heat_sector()[0] + nodes = pop_layout.index for name in [ "residential rural", "services rural", @@ -2422,14 +2370,15 @@ def add_biomass(n, costs): ]: n.madd( "Link", - nodes_heat[name] + f" {name} biomass boiler", + nodes + f" {name} biomass boiler", p_nom_extendable=True, - bus0=spatial.biomass.df.loc[nodes_heat[name], "nodes"].values, - bus1=nodes_heat[name] + f" {name} heat", + bus0=spatial.biomass.df.loc[nodes, "nodes"].values, + bus1=nodes + f" {name} heat", carrier=name + " biomass boiler", efficiency=costs.at["biomass boiler", "efficiency"], capital_cost=costs.at["biomass boiler", "efficiency"] - * costs.at["biomass boiler", "fixed"], + * costs.at["biomass boiler", "fixed"] + * options["overdimension_individual_heating"], marginal_cost=costs.at["biomass boiler", "pelletizing cost"], lifetime=costs.at["biomass boiler", "lifetime"], ) @@ -2868,7 +2817,7 @@ def add_industry(n, costs): ) if options["oil_boilers"]: - nodes_heat = create_nodes_for_heat_sector()[0] + nodes = pop_layout.index for name in [ "residential rural", @@ -2878,16 +2827,17 @@ def add_industry(n, costs): ]: n.madd( "Link", - nodes_heat[name] + f" {name} oil boiler", + nodes + f" {name} oil boiler", p_nom_extendable=True, bus0=spatial.oil.nodes, - bus1=nodes_heat[name] + f" {name} heat", + bus1=nodes + f" {name} heat", bus2="co2 atmosphere", carrier=f"{name} oil boiler", efficiency=costs.at["decentral oil boiler", "efficiency"], efficiency2=costs.at["oil", "CO2 intensity"], capital_cost=costs.at["decentral oil boiler", "efficiency"] - * costs.at["decentral oil boiler", "fixed"], + * costs.at["decentral oil boiler", "fixed"] + * options["overdimension_individual_heating"], lifetime=costs.at["decentral oil boiler", "lifetime"], ) @@ -3013,9 +2963,11 @@ def add_industry(n, costs): nodes, suffix=" low-temperature heat for industry", bus=[ - node + " urban central heat" - if node + " urban central heat" in n.buses.index - else node + " services urban decentral heat" + ( + node + " urban central heat" + if node + " urban central heat" in n.buses.index + else node + " services urban decentral heat" + ) for node in nodes ], carrier="low-temperature heat for industry", @@ -3129,24 +3081,48 @@ def add_industry(n, costs): mwh_coal_per_mwh_coke = 1.366 # from eurostat energy balance p_set = ( - industrial_demand["coal"].sum() - + mwh_coal_per_mwh_coke * industrial_demand["coke"].sum() + industrial_demand["coal"] + + mwh_coal_per_mwh_coke * industrial_demand["coke"] ) / nhours + p_set.rename(lambda x: x + " coal for industry", inplace=True) + + if not options["regional_coal_demand"]: + p_set = p_set.sum() + + n.madd( + "Bus", + spatial.coal.industry, + location=spatial.coal.demand_locations, + carrier="coal for industry", + unit="MWh_LHV", + ) + n.madd( "Load", - spatial.coal.nodes, - 
suffix=" for industry", - bus=spatial.coal.nodes, + spatial.coal.industry, + bus=spatial.coal.industry, carrier="coal for industry", p_set=p_set, ) + n.madd( + "Link", + spatial.coal.industry, + bus0=spatial.coal.nodes, + bus1=spatial.coal.industry, + bus2="co2 atmosphere", + carrier="coal for industry", + p_nom_extendable=True, + efficiency2=costs.at["coal", "CO2 intensity"], + ) + def add_waste_heat(n): # TODO options? logger.info("Add possibility to use industrial waste heat in district heating") + cf_industry = snakemake.params.industry # AC buses with district heating urban_central = n.buses.index[n.buses.carrier == "urban central heat"] @@ -3344,47 +3320,6 @@ def remove_h2_network(n): n.stores.drop("EU H2 Store", inplace=True) -def maybe_adjust_costs_and_potentials(n, opts): - for o in opts: - flags = ["+e", "+p", "+m"] - if all(flag not in o for flag in flags): - continue - oo = o.split("+") - carrier_list = np.hstack( - ( - n.generators.carrier.unique(), - n.links.carrier.unique(), - n.stores.carrier.unique(), - n.storage_units.carrier.unique(), - ) - ) - suptechs = map(lambda c: c.split("-", 2)[0], carrier_list) - if oo[0].startswith(tuple(suptechs)): - carrier = oo[0] - attr_lookup = {"p": "p_nom_max", "e": "e_nom_max", "c": "capital_cost"} - attr = attr_lookup[oo[1][0]] - factor = float(oo[1][1:]) - # beware if factor is 0 and p_nom_max is np.inf, 0*np.inf is nan - if carrier == "AC": # lines do not have carrier - n.lines[attr] *= factor - else: - if attr == "p_nom_max": - comps = {"Generator", "Link", "StorageUnit"} - elif attr == "e_nom_max": - comps = {"Store"} - else: - comps = {"Generator", "Link", "StorageUnit", "Store"} - for c in n.iterate_components(comps): - if carrier == "solar": - sel = c.df.carrier.str.contains( - carrier - ) & ~c.df.carrier.str.contains("solar rooftop") - else: - sel = c.df.carrier.str.contains(carrier) - c.df.loc[sel, attr] *= factor - logger.info(f"changing {attr} for {carrier} by factor {factor}") - - def limit_individual_line_extension(n, maxext): logger.info(f"Limiting new HVAC and HVDC extensions to {maxext} MW") n.lines["s_nom_max"] = n.lines["s_nom"] + maxext @@ -3467,7 +3402,7 @@ def cluster_heat_buses(n): # cluster heat nodes # static dataframe agg = define_clustering(df.columns, aggregate_dict) - df = df.groupby(level=0).agg(agg, **agg_group_kwargs) + df = df.groupby(level=0).agg(agg, numeric_only=False) # time-varying data pnl = c.pnl agg = define_clustering(pd.Index(pnl.keys()), aggregate_dict) @@ -3476,7 +3411,7 @@ def cluster_heat_buses(n): def renamer(s): return s.replace("residential ", "").replace("services ", "") - pnl[k] = pnl[k].T.groupby(renamer).agg(agg[k], **agg_group_kwargs).T + pnl[k] = pnl[k].T.groupby(renamer).agg(agg[k], numeric_only=False).T # remove unclustered assets of service/residential to_drop = c.df.index.difference(df.index) @@ -3502,7 +3437,7 @@ def apply_time_segmentation( """ try: import tsam.timeseriesaggregation as tsam - except: + except ImportError: raise ModuleNotFoundError( "Optional dependency 'tsam' not found." "Install via 'pip install tsam'" ) @@ -3554,31 +3489,31 @@ def apply_time_segmentation( return n -def set_temporal_aggregation(n, opts, solver_name): +def set_temporal_aggregation(n, resolution, solver_name): """ Aggregate network temporally. 
""" - for o in opts: - # temporal averaging - m = re.match(r"^\d+h$", o, re.IGNORECASE) - if m is not None: - n = average_every_nhours(n, m.group(0)) - break - # representative snapshots - m = re.match(r"(^\d+)sn$", o, re.IGNORECASE) - if m is not None: - sn = int(m[1]) - logger.info(f"Use every {sn} snapshot as representative") - n.set_snapshots(n.snapshots[::sn]) - n.snapshot_weightings *= sn - break - # segments with package tsam - m = re.match(r"^(\d+)seg$", o, re.IGNORECASE) - if m is not None: - segments = int(m[1]) - logger.info(f"Use temporal segmentation with {segments} segments") - n = apply_time_segmentation(n, segments, solver_name=solver_name) - break + if not resolution: + return n + + # representative snapshots + if "sn" in resolution.lower(): + sn = int(resolution[:-2]) + logger.info("Use every %s snapshot as representative", sn) + n.set_snapshots(n.snapshots[::sn]) + n.snapshot_weightings *= sn + + # segments with package tsam + elif "seg" in resolution.lower(): + segments = int(resolution[:-3]) + logger.info("Use temporal segmentation with %s segments", segments) + n = apply_time_segmentation(n, segments, solver_name=solver_name) + + # temporal averaging + elif "h" in resolution.lower(): + logger.info("Aggregate to frequency %s", resolution) + n = average_every_nhours(n, resolution) + return n @@ -3604,10 +3539,9 @@ def lossy_bidirectional_links(n, carrier, efficiencies={}): ) n.links.loc[carrier_i, "p_min_pu"] = 0 - n.links.loc[ - carrier_i, "efficiency" - ] = efficiency_static * efficiency_per_1000km ** ( - n.links.loc[carrier_i, "length"] / 1e3 + n.links.loc[carrier_i, "efficiency"] = ( + efficiency_static + * efficiency_per_1000km ** (n.links.loc[carrier_i, "length"] / 1e3) ) rev_links = ( n.links.loc[carrier_i].copy().rename({"bus0": "bus1", "bus1": "bus0"}, axis=1) @@ -3642,20 +3576,18 @@ if __name__ == "__main__": configfiles="test/config.overnight.yaml", simpl="", opts="", - clusters="5", - ll="v1.5", - sector_opts="CO2L0-24H-T-H-B-I-A-solar+p3-dist1", + clusters="37", + ll="v1.0", + sector_opts="CO2L0-24H-T-H-B-I-A-dist1", planning_horizons="2030", ) - logging.basicConfig(level=snakemake.config["logging"]["level"]) - - update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts) + configure_logging(snakemake) + set_scenario_config(snakemake) + update_config_from_wildcards(snakemake.config, snakemake.wildcards) options = snakemake.params.sector - opts = snakemake.wildcards.sector_opts.split("-") - investment_year = int(snakemake.wildcards.planning_horizons[-4:]) n = pypsa.Network(snakemake.input.network) @@ -3673,6 +3605,10 @@ if __name__ == "__main__": pop_weighted_energy_totals = ( pd.read_csv(snakemake.input.pop_weighted_energy_totals, index_col=0) * nyears ) + pop_weighted_heat_totals = ( + pd.read_csv(snakemake.input.pop_weighted_heat_totals, index_col=0) * nyears + ) + pop_weighted_energy_totals.update(pop_weighted_heat_totals) patch_electricity_network(n) @@ -3685,68 +3621,42 @@ if __name__ == "__main__": for carrier in conventional: add_carrier_buses(n, carrier) + add_eu_bus(n) + add_co2_tracking(n, costs, options) add_generation(n, costs) add_storage_and_grids(n, costs) - # TODO merge with opts cost adjustment below - for o in opts: - if o[:4] == "wave": - wave_cost_factor = float(o[4:].replace("p", ".").replace("m", "-")) - logger.info( - f"Including wave generators with cost factor of {wave_cost_factor}" - ) - add_wave(n, wave_cost_factor) - if o[:4] == "dist": - options["electricity_distribution_grid"] = True - 
options["electricity_distribution_grid_cost_factor"] = float( - o[4:].replace("p", ".").replace("m", "-") - ) - if o == "biomasstransport": - options["biomass_transport"] = True - - if "nodistrict" in opts: - options["district_heating"]["progress"] = 0.0 - - if "nowasteheat" in opts: - logger.info("Disabling waste heat.") - options["use_fischer_tropsch_waste_heat"] = False - options["use_methanolisation_waste_heat"] = False - options["use_haber_bosch_waste_heat"] = False - options["use_methanation_waste_heat"] = False - options["use_fuel_cell_waste_heat"] = False - options["use_electrolysis_waste_heat"] = False - - if "T" in opts: + if options["transport"]: add_land_transport(n, costs) - if "H" in opts: + if options["heating"]: add_heat(n, costs) - if "B" in opts: + if options["biomass"]: add_biomass(n, costs) if options["ammonia"]: add_ammonia(n, costs) - if "I" in opts: + if options["industry"]: add_industry(n, costs) - if "H" in opts: + if options["heating"]: add_waste_heat(n) - if "A" in opts: # requires H and I + if options["agriculture"]: # requires H and I add_agriculture(n, costs) if options["dac"]: add_dac(n, costs) - if "decentral" in opts: + if not options["electricity_transmission_grid"]: decentral(n) - if "noH2network" in opts: + if not options["H2_network"]: remove_h2_network(n) if options["co2network"]: @@ -3756,51 +3666,37 @@ if __name__ == "__main__": add_allam(n, costs) solver_name = snakemake.config["solving"]["solver"]["name"] - n = set_temporal_aggregation(n, opts, solver_name) + resolution = snakemake.params.time_resolution + n = set_temporal_aggregation(n, resolution, solver_name) - limit_type = "config" - limit = get(snakemake.params.co2_budget, investment_year) - for o in opts: - if "cb" not in o: - continue - limit_type = "carbon budget" + co2_budget = snakemake.params.co2_budget + if isinstance(co2_budget, str) and co2_budget.startswith("cb"): fn = "results/" + snakemake.params.RDIR + "/csvs/carbon_budget_distribution.csv" if not os.path.exists(fn): emissions_scope = snakemake.params.emissions_scope - report_year = snakemake.params.eurostat_report_year input_co2 = snakemake.input.co2 build_carbon_budget( - o, + co2_budget, snakemake.input.eurostat, fn, emissions_scope, - report_year, input_co2, + options, ) co2_cap = pd.read_csv(fn, index_col=0).squeeze() limit = co2_cap.loc[investment_year] - break - for o in opts: - if "Co2L" not in o: - continue - limit_type = "wildcard" - limit = o[o.find("Co2L") + 4 :] - limit = float(limit.replace("p", ".").replace("m", "-")) - break - logger.info(f"Add CO2 limit from {limit_type}") - add_co2limit(n, nyears, limit) + else: + limit = get(co2_budget, investment_year) + add_co2limit(n, options, nyears, limit) - for o in opts: - if not o[:10] == "linemaxext": - continue - maxext = float(o[10:]) * 1e3 + maxext = snakemake.params["lines"]["max_extension"] + if maxext is not None: limit_individual_line_extension(n, maxext) - break if options["electricity_distribution_grid"]: insert_electricity_distribution_grid(n, costs) - maybe_adjust_costs_and_potentials(n, opts) + maybe_adjust_costs_and_potentials(n, snakemake.params["adjustments"]) if options["gas_distribution_grid"]: insert_gas_distribution_costs(n, costs) @@ -3830,5 +3726,6 @@ if __name__ == "__main__": n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards))) sanitize_carriers(n, snakemake.config) + sanitize_locations(n) n.export_to_netcdf(snakemake.output[0]) diff --git a/scripts/retrieve_cost_data.py b/scripts/retrieve_cost_data.py new file mode 100644 
index 00000000..e236fbfd --- /dev/null +++ b/scripts/retrieve_cost_data.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Retrieve cost data from ``technology-data``. +""" + +import logging +from pathlib import Path + +from _helpers import configure_logging, progress_retrieve, set_scenario_config + +logger = logging.getLogger(__name__) + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake("retrieve_cost_data", year=2030) + rootpath = ".." + else: + rootpath = "." + configure_logging(snakemake) + set_scenario_config(snakemake) + + version = snakemake.params.version + if "/" in version: + baseurl = f"https://raw.githubusercontent.com/{version}/outputs/" + else: + baseurl = f"https://raw.githubusercontent.com/PyPSA/technology-data/{version}/outputs/" + filepath = Path(snakemake.output[0]) + url = baseurl + filepath.name + + print(url) + + to_fn = Path(rootpath) / filepath + + print(to_fn) + + logger.info(f"Downloading technology data from '{url}'.") + disable_progress = snakemake.config["run"].get("disable_progressbar", False) + progress_retrieve(url, to_fn, disable=disable_progress) + + logger.info(f"Technology data available at {to_fn}") diff --git a/scripts/retrieve_databundle.py b/scripts/retrieve_databundle.py index 25894063..996bbeab 100644 --- a/scripts/retrieve_databundle.py +++ b/scripts/retrieve_databundle.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2019-2022 Fabian Hofmann (TUB, FIAS) -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -36,7 +36,12 @@ import logging import tarfile from pathlib import Path -from _helpers import configure_logging, progress_retrieve, validate_checksum +from _helpers import ( + configure_logging, + progress_retrieve, + set_scenario_config, + validate_checksum, +) logger = logging.getLogger(__name__) @@ -49,9 +54,8 @@ if __name__ == "__main__": rootpath = ".." else: rootpath = "." - configure_logging( - snakemake - ) # TODO Make logging compatible with progressbar (see PR #102) + configure_logging(snakemake) + set_scenario_config(snakemake) if snakemake.config["tutorial"]: url = "https://zenodo.org/record/3517921/files/pypsa-eur-tutorial-data-bundle.tar.xz" diff --git a/scripts/retrieve_electricity_demand.py b/scripts/retrieve_electricity_demand.py new file mode 100644 index 00000000..94077fdf --- /dev/null +++ b/scripts/retrieve_electricity_demand.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: 2023-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Retrieve electricity demand from OPSD. +""" + +import logging + +import pandas as pd + +logger = logging.getLogger(__name__) + +from _helpers import configure_logging, set_scenario_config + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake("retrieve_electricity_demand") + rootpath = ".." + else: + rootpath = "."
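    # Sketch of what follows: the two OPSD time_series releases given by
    # snakemake.params.versions are read and concatenated without overlap, and the
    # "_load_actual_entsoe_transparency" load columns are back-filled with the
    # "_load_actual_entsoe_power_statistics" columns where the former are missing.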
+ configure_logging(snakemake) + set_scenario_config(snakemake) + + url = "https://data.open-power-system-data.org/time_series/{version}/time_series_60min_singleindex.csv" + + df1, df2 = [ + pd.read_csv(url.format(version=version), index_col=0) + for version in snakemake.params.versions + ] + combined = pd.concat([df1, df2[df2.index > df1.index[-1]]]) + + pattern = "_load_actual_entsoe_transparency" + transparency = combined.filter(like=pattern).rename( + columns=lambda x: x.replace(pattern, "") + ) + pattern = "_load_actual_entsoe_power_statistics" + powerstatistics = combined.filter(like=pattern).rename( + columns=lambda x: x.replace(pattern, "") + ) + + res = transparency.fillna(powerstatistics) + + res.to_csv(snakemake.output[0]) diff --git a/scripts/retrieve_eurostat_data.py b/scripts/retrieve_eurostat_data.py new file mode 100644 index 00000000..4b4cea4a --- /dev/null +++ b/scripts/retrieve_eurostat_data.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2024- The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Retrieve and extract eurostat energy balances data. +""" + + +import logging +import zipfile +from pathlib import Path + +from _helpers import configure_logging, progress_retrieve, set_scenario_config + +logger = logging.getLogger(__name__) + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake("retrieve_eurostat_data") + rootpath = ".." + else: + rootpath = "." + configure_logging(snakemake) + set_scenario_config(snakemake) + + disable_progress = snakemake.config["run"].get("disable_progressbar", False) + url_eurostat = "https://ec.europa.eu/eurostat/documents/38154/4956218/Balances-December2022.zip/f7cf0d19-5c0f-60ad-4e48-098a5ddd6e48?t=1671184070589" + tarball_fn = Path(f"{rootpath}/data/eurostat/eurostat_2023.zip") + to_fn = Path( + f"{rootpath}/data/eurostat/eurostat-energy_balances-april_2023_edition/" + ) + + logger.info(f"Downloading Eurostat data from '{url_eurostat}'.") + progress_retrieve(url_eurostat, tarball_fn, disable=disable_progress) + + logger.info("Extracting Eurostat data.") + with zipfile.ZipFile(tarball_fn, "r") as zip_ref: + zip_ref.extractall(to_fn) + + logger.info(f"Eurostat data available in '{to_fn}'.") diff --git a/scripts/retrieve_gas_infrastructure_data.py b/scripts/retrieve_gas_infrastructure_data.py index d984b9fe..8d7d0e08 100644 --- a/scripts/retrieve_gas_infrastructure_data.py +++ b/scripts/retrieve_gas_infrastructure_data.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2021-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2021-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -11,7 +11,12 @@ import logging import zipfile from pathlib import Path -from _helpers import progress_retrieve, validate_checksum +from _helpers import ( + configure_logging, + progress_retrieve, + set_scenario_config, + validate_checksum, +) logger = logging.getLogger(__name__) @@ -24,6 +29,8 @@ if __name__ == "__main__": rootpath = ".." else: rootpath = "." 
+ configure_logging(snakemake) + set_scenario_config(snakemake) url = "https://zenodo.org/record/4767098/files/IGGIELGN.zip" diff --git a/scripts/retrieve_irena.py b/scripts/retrieve_irena.py deleted file mode 100644 index 7b123475..00000000 --- a/scripts/retrieve_irena.py +++ /dev/null @@ -1,107 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Thomas Gilon (Climact) -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors -# -# SPDX-License-Identifier: MIT -""" -This rule downloads the existing capacities from `IRENASTAT `_ and extracts it in the ``data/existing_capacities`` sub-directory. - -**Relevant Settings** - -.. code:: yaml - - enable: - retrieve_irena: - -.. seealso:: - Documentation of the configuration file ``config.yaml`` at - :ref:`enable_cf` - -**Outputs** - -- ``data/existing_capacities``: existing capacities for offwind, onwind and solar - -""" - -import logging - -import pandas as pd -from _helpers import configure_logging - -logger = logging.getLogger(__name__) - -REGIONS = [ - "Albania", - "Austria", - "Belgium", - "Bosnia and Herzegovina", - "Bulgaria", - "Croatia", - "Czechia", - "Denmark", - "Estonia", - "Finland", - "France", - "Germany", - "Greece", - "Hungary", - "Ireland", - "Italy", - "Latvia", - "Lithuania", - "Luxembourg", - "Montenegro", - # "Netherlands", - "Netherlands (Kingdom of the)", - "North Macedonia", - "Norway", - "Poland", - "Portugal", - "Romania", - "Serbia", - "Slovakia", - "Slovenia", - "Spain", - "Sweden", - "Switzerland", - # "United Kingdom", - "United Kingdom of Great Britain and Northern Ireland (the)", -] - -REGIONS_DICT = { - "Bosnia and Herzegovina": "Bosnia Herzg", - "Netherlands (Kingdom of the)": "Netherlands", - "United Kingdom of Great Britain and Northern Ireland (the)": "UK", -} - -if __name__ == "__main__": - if "snakemake" not in globals(): - from _helpers import mock_snakemake - - snakemake = mock_snakemake("retrieve_irena") - configure_logging(snakemake) - - irena_raw = pd.read_csv( - "https://pxweb.irena.org:443/sq/99e64b12-fe03-4a7b-92ea-a22cc3713b92", - skiprows=2, - index_col=[0, 1, 3], - encoding="latin-1", - ) - - var = "Installed electricity capacity (MW)" - irena = irena_raw[var].unstack(level=2).reset_index(level=1).replace(0, "") - - irena = irena[irena.index.isin(REGIONS)] - irena.rename(index=REGIONS_DICT, inplace=True) - - df_offwind = irena[irena.Technology.str.contains("Offshore")].drop( - columns=["Technology"] - ) - df_onwind = irena[irena.Technology.str.contains("Onshore")].drop( - columns=["Technology"] - ) - df_pv = irena[irena.Technology.str.contains("Solar")].drop(columns=["Technology"]) - - df_offwind.to_csv(snakemake.output["offwind"]) - df_onwind.to_csv(snakemake.output["onwind"]) - df_pv.to_csv(snakemake.output["solar"]) diff --git a/scripts/retrieve_monthly_fuel_prices.py b/scripts/retrieve_monthly_fuel_prices.py index 11e351ce..74861c42 100644 --- a/scripts/retrieve_monthly_fuel_prices.py +++ b/scripts/retrieve_monthly_fuel_prices.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2023-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -7,12 +7,11 @@ Retrieve monthly fuel prices from Destatis. 
""" import logging - -logger = logging.getLogger(__name__) - from pathlib import Path -from _helpers import configure_logging, progress_retrieve +from _helpers import configure_logging, progress_retrieve, set_scenario_config + +logger = logging.getLogger(__name__) if __name__ == "__main__": if "snakemake" not in globals(): @@ -23,6 +22,7 @@ if __name__ == "__main__": else: rootpath = "." configure_logging(snakemake) + set_scenario_config(snakemake) url = "https://www.destatis.de/EN/Themes/Economy/Prices/Publications/Downloads-Energy-Price-Trends/energy-price-trends-xlsx-5619002.xlsx?__blob=publicationFile" diff --git a/scripts/retrieve_sector_databundle.py b/scripts/retrieve_sector_databundle.py index cb6cc969..3b825da2 100644 --- a/scripts/retrieve_sector_databundle.py +++ b/scripts/retrieve_sector_databundle.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2021-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2021-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -7,13 +7,17 @@ Retrieve and extract data bundle for sector-coupled studies. """ import logging - -logger = logging.getLogger(__name__) - import tarfile from pathlib import Path -from _helpers import configure_logging, progress_retrieve, validate_checksum +from _helpers import ( + configure_logging, + progress_retrieve, + set_scenario_config, + validate_checksum, +) + +logger = logging.getLogger(__name__) if __name__ == "__main__": if "snakemake" not in globals(): @@ -24,6 +28,7 @@ if __name__ == "__main__": else: rootpath = "." configure_logging(snakemake) + set_scenario_config(snakemake) url = "https://zenodo.org/record/5824485/files/pypsa-eur-sec-data-bundle.tar.gz" diff --git a/scripts/simplify_network.py b/scripts/simplify_network.py index f88d10d4..558e4cf2 100644 --- a/scripts/simplify_network.py +++ b/scripts/simplify_network.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -86,14 +86,16 @@ The rule :mod:`simplify_network` does up to four things: """ import logging -from functools import partial, reduce +from functools import reduce +import geopandas as gpd import numpy as np import pandas as pd import pypsa import scipy as sp -from _helpers import configure_logging, update_p_nom_max +from _helpers import configure_logging, set_scenario_config, update_p_nom_max from add_electricity import load_costs +from base_network import append_bus_shapes from cluster_network import cluster_regions, clustering_for_n_clusters from pypsa.clustering.spatial import ( aggregateoneport, @@ -207,7 +209,7 @@ def _compute_connection_costs_to_bus( return connection_costs_to_bus -def _adjust_capital_costs_using_connection_costs(n, connection_costs_to_bus, output): +def _adjust_capital_costs_using_connection_costs(n, connection_costs_to_bus): connection_costs = {} for tech in connection_costs_to_bus: tech_b = n.generators.carrier == tech @@ -228,14 +230,12 @@ def _adjust_capital_costs_using_connection_costs(n, connection_costs_to_bus, out ) ) connection_costs[tech] = costs - pd.DataFrame(connection_costs).to_csv(output.connection_costs) def _aggregate_and_move_components( n, busmap, connection_costs_to_bus, - output, aggregate_one_ports={"Load", "StorageUnit"}, aggregation_strategies=dict(), exclude_carriers=None, @@ -248,7 +248,7 @@ def _aggregate_and_move_components( if not df.empty: import_series_from_dataframe(n, df, c, attr) - 
_adjust_capital_costs_using_connection_costs(n, connection_costs_to_bus, output) + _adjust_capital_costs_using_connection_costs(n, connection_costs_to_bus) generator_strategies = aggregation_strategies["generators"] @@ -281,7 +281,6 @@ def simplify_links( length_factor, p_max_pu, exclude_carriers, - output, aggregation_strategies=dict(), ): ## Complex multi-node links are folded into end-points @@ -406,7 +405,6 @@ def simplify_links( n, busmap, connection_costs_to_bus, - output, aggregation_strategies=aggregation_strategies, exclude_carriers=exclude_carriers, ) @@ -419,7 +417,6 @@ def remove_stubs( renewable_carriers, length_factor, simplify_network, - output, aggregation_strategies=dict(), ): logger.info("Removing stubs") @@ -436,7 +433,6 @@ def remove_stubs( n, busmap, connection_costs_to_bus, - output, aggregation_strategies=aggregation_strategies, exclude_carriers=simplify_network["exclude_carriers"], ) @@ -468,9 +464,9 @@ def aggregate_to_substations(n, aggregation_strategies=dict(), buses_i=None): dijkstra(adj, directed=False, indices=bus_indexer), buses_i, n.buses.index ) - dist[ - buses_i - ] = np.inf # bus in buses_i should not be assigned to different bus in buses_i + dist[buses_i] = ( + np.inf + ) # bus in buses_i should not be assigned to different bus in buses_i for c in n.buses.country.unique(): incountry_b = n.buses.country == c @@ -529,6 +525,7 @@ if __name__ == "__main__": snakemake = mock_snakemake("simplify_network", simpl="") configure_logging(snakemake) + set_scenario_config(snakemake) params = snakemake.params solver_name = snakemake.config["solving"]["solver"]["name"] @@ -555,7 +552,6 @@ if __name__ == "__main__": params.length_factor, params.p_max_pu, params.simplify_network["exclude_carriers"], - snakemake.output, params.aggregation_strategies, ) @@ -568,7 +564,6 @@ if __name__ == "__main__": params.renewable_carriers, params.length_factor, params.simplify_network, - snakemake.output, aggregation_strategies=params.aggregation_strategies, ) busmaps.append(stub_map) @@ -593,7 +588,23 @@ if __name__ == "__main__": ) busmaps.append(busmap_hac) + # some entries in n.buses are not updated in previous functions, therefore can be wrong. as they are not needed + # and are lost when clustering (for example with the simpl wildcard), we remove them for consistency: + remove = [ + "symbol", + "tags", + "under_construction", + "onshore_bus", + "substation_lv", + "substation_off", + "geometry", + "underground", + ] + n.buses.drop(remove, axis=1, inplace=True, errors="ignore") + n.lines.drop(remove, axis=1, errors="ignore", inplace=True) + if snakemake.wildcards.simpl: + shapes = n.shapes n, cluster_map = cluster( n, int(snakemake.wildcards.simpl), @@ -603,28 +614,19 @@ if __name__ == "__main__": params.simplify_network["feature"], params.aggregation_strategies, ) + n.shapes = shapes busmaps.append(cluster_map) - # some entries in n.buses are not updated in previous functions, therefore can be wrong. 
as they are not needed - # and are lost when clustering (for example with the simpl wildcard), we remove them for consistency: - remove = [ - "symbol", - "tags", - "under_construction", - "substation_lv", - "substation_off", - "geometry", - "underground", - ] - n.buses.drop(remove, axis=1, inplace=True, errors="ignore") - n.lines.drop(remove, axis=1, errors="ignore", inplace=True) - update_p_nom_max(n) - n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards))) - n.export_to_netcdf(snakemake.output.network) - busmap_s = reduce(lambda x, y: x.map(y), busmaps[1:], busmaps[0]) busmap_s.to_csv(snakemake.output.busmap) - cluster_regions(busmaps, snakemake.input, snakemake.output) + for which in ["regions_onshore", "regions_offshore"]: + regions = gpd.read_file(snakemake.input[which]) + clustered_regions = cluster_regions(busmaps, regions) + clustered_regions.to_file(snakemake.output[which]) + append_bus_shapes(n, clustered_regions, type=which.split("_")[1]) + + n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards))) + n.export_to_netcdf(snakemake.output.network) diff --git a/scripts/solve_network.py b/scripts/solve_network.py index d5f8d308..9a6384b5 100644 --- a/scripts/solve_network.py +++ b/scripts/solve_network.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -36,13 +36,18 @@ import numpy as np import pandas as pd import pypsa import xarray as xr +import yaml from _benchmark import memory_logger -from _helpers import configure_logging, get_opt, update_config_with_sector_opts +from _helpers import ( + configure_logging, + set_scenario_config, + update_config_from_wildcards, +) from pypsa.descriptors import get_activity_mask +from pypsa.descriptors import get_switchable_as_dense as get_as_dense logger = logging.getLogger(__name__) pypsa.pf.logger.setLevel(logging.WARNING) -from pypsa.descriptors import get_switchable_as_dense as get_as_dense def add_land_use_constraint(n, planning_horizons, config): @@ -150,10 +155,13 @@ def _add_land_use_constraint(n): def _add_land_use_constraint_m(n, planning_horizons, config): # if generators clustering is lower than network clustering, land_use accounting is at generators clusters - grouping_years = config["existing_capacities"]["grouping_years"] + grouping_years = config["existing_capacities"]["grouping_years_power"] current_horizon = snakemake.wildcards.planning_horizons for carrier in ["solar", "onwind", "offwind-ac", "offwind-dc", "offwind-float"]: + extendable_i = (n.generators.carrier == carrier) & n.generators.p_nom_extendable + n.generators.loc[extendable_i, "p_nom_min"] = 0 + existing = n.generators.loc[n.generators.carrier == carrier, "p_nom"] ind = list( {i.split(sep=" ")[0] + " " + i.split(sep=" ")[1] for i in existing.index} @@ -161,7 +169,7 @@ def _add_land_use_constraint_m(n, planning_horizons, config): previous_years = [ str(y) - for y in planning_horizons + grouping_years + for y in set(planning_horizons + grouping_years) if y < int(snakemake.wildcards.planning_horizons) ] @@ -175,19 +183,26 @@ def _add_land_use_constraint_m(n, planning_horizons, config): sel_p_year ].rename(lambda x: x[:-4] + current_horizon) + # check if existing capacities are larger than technical potential + existing_large = n.generators[ + n.generators["p_nom_min"] > n.generators["p_nom_max"] + ].index + if len(existing_large): + logger.warning( + f"Existing capacities 
larger than technical potential for {existing_large},\ + adjust technical potential to existing capacities" + ) + n.generators.loc[existing_large, "p_nom_max"] = n.generators.loc[ + existing_large, "p_nom_min" + ] + n.generators.p_nom_max.clip(lower=0, inplace=True) -def add_co2_sequestration_limit(n, config, limit=200): +def add_co2_sequestration_limit(n, limit=200): """ Add a global constraint on the amount of Mt CO2 that can be sequestered. """ - limit = limit * 1e6 - for o in opts: - if "seq" not in o: - continue - limit = float(o[o.find("seq") + 3 :]) * 1e6 - break if not n.investment_periods.empty: periods = n.investment_periods @@ -200,7 +215,7 @@ def add_co2_sequestration_limit(n, config, limit=200): "GlobalConstraint", names, sense=">=", - constant=-limit, + constant=-limit * 1e6, type="operational_limit", carrier_attribute="co2 sequestered", investment_period=periods, @@ -208,7 +223,7 @@ def add_co2_sequestration_limit(n, config, limit=200): def add_carbon_constraint(n, snapshots): - glcs = n.global_constraints.query('type == "co2_limit"') + glcs = n.global_constraints.query('type == "co2_atmosphere"') if glcs.empty: return for name, glc in glcs.iterrows(): @@ -260,7 +275,7 @@ def add_carbon_budget_constraint(n, snapshots): n.model.add_constraints(lhs <= rhs, name=f"GlobalConstraint-{name}") -def add_max_growth(n, config): +def add_max_growth(n): """ Add maximum growth rates for different carriers. """ @@ -341,6 +356,8 @@ def prepare_network( for df in ( n.generators_t.p_max_pu, n.generators_t.p_min_pu, + n.links_t.p_max_pu, + n.links_t.p_min_pu, n.storage_units_t.inflow, ): df.where(df > solve_opts["clip_p_max_pu"], other=0.0, inplace=True) @@ -391,11 +408,11 @@ def prepare_network( if foresight == "perfect": n = add_land_use_constraint_perfect(n) if snakemake.params["sector"]["limit_max_growth"]["enable"]: - n = add_max_growth(n, config) + n = add_max_growth(n) if n.stores.carrier.eq("co2 sequestered").any(): limit = co2_sequestration_potential - add_co2_sequestration_limit(n, config, limit=limit) + add_co2_sequestration_limit(n, limit=limit) return n @@ -416,7 +433,7 @@ def add_CCL_constraints(n, config): Example ------- scenario: - opts: [Co2L-CCL-24H] + opts: [Co2L-CCL-24h] electricity: agg_p_nom_limits: data/agg_p_nom_minmax.csv """ @@ -461,7 +478,7 @@ def add_EQ_constraints(n, o, scaling=1e-1): Example ------- scenario: - opts: [Co2L-EQ0.7-24H] + opts: [Co2L-EQ0.7-24h] Require each country or node to on average produce a minimal share of its total electricity consumption itself. Example: EQ0.7c demands each country @@ -525,7 +542,7 @@ def add_BAU_constraints(n, config): Example ------- scenario: - opts: [Co2L-BAU-24H] + opts: [Co2L-BAU-24h] electricity: BAU_mincapacities: solar: 0 @@ -542,7 +559,7 @@ def add_BAU_constraints(n, config): ext_carrier_i = xr.DataArray(ext_i.carrier.rename_axis("Generator-ext")) lhs = p_nom.groupby(ext_carrier_i).sum() index = mincaps.index.intersection(lhs.indexes["carrier"]) - rhs = mincaps[index].rename_axis("carrier") + rhs = mincaps[lhs.indexes["carrier"]].rename_axis("carrier") n.model.add_constraints(lhs >= rhs, name="bau_mincaps") @@ -562,7 +579,7 @@ def add_SAFE_constraints(n, config): config.yaml requires to specify opts: scenario: - opts: [Co2L-SAFE-24H] + opts: [Co2L-SAFE-24h] electricity: SAFE_reservemargin: 0.1 Which sets a reserve margin of 10% above the peak demand. 
@@ -570,7 +587,7 @@ def add_SAFE_constraints(n, config): peakdemand = n.loads_t.p_set.sum(axis=1).max() margin = 1.0 + config["electricity"]["SAFE_reservemargin"] reserve_margin = peakdemand * margin - conventional_carriers = config["electricity"]["conventional_carriers"] + conventional_carriers = config["electricity"]["conventional_carriers"] # noqa: F841 ext_gens_i = n.generators.query( "carrier in @conventional_carriers & p_nom_extendable" ).index @@ -688,11 +705,11 @@ def add_battery_constraints(n): def add_lossy_bidirectional_link_constraints(n): - if not n.links.p_nom_extendable.any() or not "reversed" in n.links.columns: + if not n.links.p_nom_extendable.any() or "reversed" not in n.links.columns: return n.links["reversed"] = n.links.reversed.fillna(0).astype(bool) - carriers = n.links.loc[n.links.reversed, "carrier"].unique() + carriers = n.links.loc[n.links.reversed, "carrier"].unique() # noqa: F841 forward_i = n.links.query( "carrier in @carriers and ~reversed and p_nom_extendable" @@ -701,9 +718,11 @@ def add_lossy_bidirectional_link_constraints(n): def get_backward_i(forward_i): return pd.Index( [ - re.sub(r"-(\d{4})$", r"-reversed-\1", s) - if re.search(r"-\d{4}$", s) - else s + "-reversed" + ( + re.sub(r"-(\d{4})$", r"-reversed-\1", s) + if re.search(r"-\d{4}$", s) + else s + "-reversed" + ) for s in forward_i ] ) @@ -795,6 +814,29 @@ def add_pipe_retrofit_constraint(n): n.model.add_constraints(lhs == rhs, name="Link-pipe_retrofit") +def add_co2_atmosphere_constraint(n, snapshots): + glcs = n.global_constraints[n.global_constraints.type == "co2_atmosphere"] + + if glcs.empty: + return + for name, glc in glcs.iterrows(): + carattr = glc.carrier_attribute + emissions = n.carriers.query(f"{carattr} != 0")[carattr] + + if emissions.empty: + continue + + # stores + n.stores["carrier"] = n.stores.bus.map(n.buses.carrier) + stores = n.stores.query("carrier in @emissions.index and not e_cyclic") + if not stores.empty: + last_i = snapshots[-1] + lhs = n.model["Store-e"].loc[last_i, stores.index] + rhs = glc.constant + + n.model.add_constraints(lhs <= rhs, name=f"GlobalConstraint-{name}") + + def extra_functionality(n, snapshots): """ Collects supplementary constraints which will be passed to @@ -804,30 +846,20 @@ def extra_functionality(n, snapshots): location to add them. The arguments ``opts`` and ``snakemake.config`` are expected to be attached to the network. 
""" - opts = n.opts config = n.config constraints = config["solving"].get("constraints", {}) - if ( - "BAU" in opts or constraints.get("BAU", False) - ) and n.generators.p_nom_extendable.any(): + if constraints["BAU"] and n.generators.p_nom_extendable.any(): add_BAU_constraints(n, config) - if ( - "SAFE" in opts or constraints.get("SAFE", False) - ) and n.generators.p_nom_extendable.any(): + if constraints["SAFE"] and n.generators.p_nom_extendable.any(): add_SAFE_constraints(n, config) - if ( - "CCL" in opts or constraints.get("CCL", False) - ) and n.generators.p_nom_extendable.any(): + if constraints["CCL"] and n.generators.p_nom_extendable.any(): add_CCL_constraints(n, config) reserve = config["electricity"].get("operational_reserve", {}) if reserve.get("activate"): add_operational_reserve_margin(n, snapshots, config) - EQ_config = constraints.get("EQ", False) - EQ_wildcard = get_opt(opts, r"^EQ+[0-9]*\.?[0-9]+(c|)") - EQ_o = EQ_wildcard or EQ_config - if EQ_o: + if EQ_o := constraints["EQ"]: add_EQ_constraints(n, EQ_o.replace("EQ", "")) add_battery_constraints(n) @@ -837,6 +869,8 @@ def extra_functionality(n, snapshots): add_carbon_constraint(n, snapshots) add_carbon_budget_constraint(n, snapshots) add_retrofit_gas_boiler_constraint(n, snapshots) + else: + add_co2_atmosphere_constraint(n, snapshots) if snakemake.params.custom_extra_functionality: source_path = snakemake.params.custom_extra_functionality @@ -848,7 +882,7 @@ def extra_functionality(n, snapshots): custom_extra_functionality(n, snapshots, snakemake) -def solve_network(n, config, solving, opts="", **kwargs): +def solve_network(n, config, solving, **kwargs): set_of_options = solving["solver"]["options"] cf_solving = solving["options"] @@ -863,6 +897,7 @@ def solve_network(n, config, solving, opts="", **kwargs): "linearized_unit_commitment", False ) kwargs["assign_all_duals"] = cf_solving.get("assign_all_duals", False) + kwargs["io_api"] = cf_solving.get("io_api", None) if kwargs["solver_name"] == "gurobi": logging.getLogger("gurobipy").setLevel(logging.CRITICAL) @@ -875,7 +910,6 @@ def solve_network(n, config, solving, opts="", **kwargs): # add to network for extra_functionality n.config = config - n.opts = opts if rolling_horizon: kwargs["horizon"] = cf_solving.get("horizon", 365) @@ -898,7 +932,7 @@ def solve_network(n, config, solving, opts="", **kwargs): ) if "infeasible" in condition: labels = n.model.compute_infeasibilities() - logger.info("Labels:\n" + labels) + logger.info(f"Labels:\n{labels}") n.model.print_infeasibilities() raise RuntimeError("Solving status 'infeasible'") @@ -910,25 +944,19 @@ if __name__ == "__main__": from _helpers import mock_snakemake snakemake = mock_snakemake( - "solve_sector_network_perfect", + "solve_sector_network", configfiles="../config/test/config.perfect.yaml", simpl="", opts="", - clusters="5", - ll="v1.5", - sector_opts="8760H-T-H-B-I-A-solar+p3-dist1", + clusters="37", + ll="v1.0", + sector_opts="CO2L0-1H-T-H-B-I-A-dist1", planning_horizons="2030", ) configure_logging(snakemake) - if "sector_opts" in snakemake.wildcards.keys(): - update_config_with_sector_opts( - snakemake.config, snakemake.wildcards.sector_opts - ) + set_scenario_config(snakemake) + update_config_from_wildcards(snakemake.config, snakemake.wildcards) - opts = snakemake.wildcards.opts - if "sector_opts" in snakemake.wildcards.keys(): - opts += "-" + snakemake.wildcards.sector_opts - opts = [o for o in opts.split("-") if o != ""] solve_opts = snakemake.params.solving["options"] np.random.seed(solve_opts.get("seed", 
123)) @@ -951,11 +979,19 @@ if __name__ == "__main__": n, config=snakemake.config, solving=snakemake.params.solving, - opts=opts, log_fn=snakemake.log.solver, ) logger.info(f"Maximum memory usage: {mem.mem_usage}") n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards))) - n.export_to_netcdf(snakemake.output[0]) + n.export_to_netcdf(snakemake.output.network) + + with open(snakemake.output.config, "w") as file: + yaml.dump( + n.meta, + file, + default_flow_style=False, + allow_unicode=True, + sort_keys=False, + ) diff --git a/scripts/solve_operations_network.py b/scripts/solve_operations_network.py index dca49d02..bd4ce7aa 100644 --- a/scripts/solve_operations_network.py +++ b/scripts/solve_operations_network.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -12,7 +12,11 @@ import logging import numpy as np import pypsa -from _helpers import configure_logging, update_config_with_sector_opts +from _helpers import ( + configure_logging, + set_scenario_config, + update_config_from_wildcards, +) from solve_network import prepare_network, solve_network logger = logging.getLogger(__name__) @@ -34,10 +38,9 @@ if __name__ == "__main__": ) configure_logging(snakemake) - update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts) + set_scenario_config(snakemake) + update_config_from_wildcards(snakemake.config, snakemake.wildcards) - opts = f"{snakemake.wildcards.opts}-{snakemake.wildcards.sector_opts}".split("-") - opts = [o for o in opts if o != ""] solve_opts = snakemake.params.options np.random.seed(solve_opts.get("seed", 123)) @@ -46,9 +49,7 @@ if __name__ == "__main__": n.optimize.fix_optimal_capacities() n = prepare_network(n, solve_opts, config=snakemake.config) - n = solve_network( - n, config=snakemake.config, opts=opts, log_fn=snakemake.log.solver - ) + n = solve_network(n, config=snakemake.config, log_fn=snakemake.log.solver) n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards))) n.export_to_netcdf(snakemake.output[0]) diff --git a/test.sh b/test.sh new file mode 100755 index 00000000..a3dfb65f --- /dev/null +++ b/test.sh @@ -0,0 +1,13 @@ +# SPDX-FileCopyrightText: : 2021-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: CC0-1.0 + +set -x && \ + +snakemake -call solve_elec_networks --configfile config/test/config.electricity.yaml --rerun-triggers=mtime && \ +snakemake -call all --configfile config/test/config.overnight.yaml --rerun-triggers=mtime && \ +snakemake -call all --configfile config/test/config.myopic.yaml --rerun-triggers=mtime && \ +snakemake -call all --configfile config/test/config.perfect.yaml --rerun-triggers=mtime && \ +snakemake -call all --configfile config/test/config.scenarios.yaml --rerun-triggers=mtime -n && \ + +set +x
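
Note on the constraint handling changes in ``scripts/solve_network.py`` above: ``extra_functionality`` now reads activation settings directly from ``config["solving"]["constraints"]`` (keys ``BAU``, ``SAFE``, ``CCL`` and ``EQ``) instead of parsing them out of the ``opts``/``sector_opts`` wildcards. A minimal sketch of the corresponding config block follows — the key names are taken from the code above, while the values shown are only illustrative and not part of this patch:

.. code:: yaml

    solving:
      constraints:
        CCL: false
        BAU: false
        SAFE: false
        EQ: false  # or e.g. "EQ0.7c" to require each country to cover, on average, 70% of its own consumption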