diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 0b78b5b6..3f1edbd8 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -6,3 +6,4 @@ 5d1ef8a64055a039aa4a0834d2d26fe7752fe9a0 92080b1cd2ca5f123158571481722767b99c2b27 13769f90af4500948b0376d57df4cceaa13e78b5 +9865a970893d9e515786f33c629b14f71645bf1e diff --git a/.gitattributes b/.gitattributes index 3f5e771d..b82aaff1 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index c0fb745d..c17c0425 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2021-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2021-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 @@ -19,7 +19,7 @@ on: - cron: "0 5 * * TUE" env: - DATA_CACHE_NUMBER: 2 + DATA_CACHE_NUMBER: 1 jobs: build: @@ -32,7 +32,14 @@ jobs: - ubuntu-latest - macos-latest - windows-latest - + inhouse: + - stable + - master + exclude: + - os: macos-latest + inhouse: master + - os: windows-latest + inhouse: master runs-on: ${{ matrix.os }} defaults: @@ -46,16 +53,6 @@ jobs: run: | echo -ne "url: ${CDSAPI_URL}\nkey: ${CDSAPI_TOKEN}\n" > ~/.cdsapirc - - name: Add solver to environment - run: | - echo -e "- glpk\n- ipopt<3.13.3" >> envs/environment.yaml - if: ${{ matrix.os }} == 'windows-latest' - - - name: Add solver to environment - run: | - echo -e "- glpk\n- ipopt" >> envs/environment.yaml - if: ${{ matrix.os }} != 'windows-latest' - - name: Setup micromamba uses: mamba-org/setup-micromamba@v1 with: @@ -66,6 +63,11 @@ jobs: cache-environment: true cache-downloads: true + - name: Install inhouse packages + run: | + pip install git+https://github.com/PyPSA/atlite.git@master 
git+https://github.com/PyPSA/powerplantmatching.git@master git+https://github.com/PyPSA/linopy.git@master + if: ${{ matrix.inhouse }} == 'master' + - name: Set cache dates run: | echo "WEEK=$(date +'%Y%U')" >> $GITHUB_ENV @@ -79,14 +81,10 @@ jobs: key: data-cutouts-${{ env.WEEK }}-${{ env.DATA_CACHE_NUMBER }} - name: Test snakemake workflow - run: | - snakemake -call solve_elec_networks --configfile config/test/config.electricity.yaml --rerun-triggers=mtime - snakemake -call all --configfile config/test/config.overnight.yaml --rerun-triggers=mtime - snakemake -call all --configfile config/test/config.myopic.yaml --rerun-triggers=mtime - snakemake -call all --configfile config/test/config.perfect.yaml --rerun-triggers=mtime + run: ./test.sh - name: Upload artifacts - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4.3.0 with: name: resources-results path: | @@ -94,3 +92,4 @@ jobs: results if-no-files-found: warn retention-days: 1 + if: matrix.os == 'ubuntu' && matrix.inhouse == 'stable' diff --git a/.gitignore b/.gitignore index c9d2e171..3336fca7 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 @@ -20,10 +20,18 @@ gurobi.log /notebooks /data /cutouts - +/tmp doc/_build +/scripts/old +/scripts/create_scenarios.py +/config/create_scenarios.py + +config/config.yaml +config/scenarios.yaml + config.yaml +config/config.yaml dconf /data/links_p_nom.csv @@ -53,25 +61,15 @@ d1gam3xoknrgr2.cloudfront.net/ *.nc *~ -/scripts/old *.pyc -/cutouts -/tmp -/pypsa *.xlsx -config.yaml - -doc/_build - *.xls *.geojson *.ipynb -data/costs_* - merger-todos.md diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7b9009c3..28d0278a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -50,8 +50,8 @@ repos: - id: blackdoc # Formatting with "black" coding style -- repo: 
https://github.com/psf/black - rev: 23.12.1 +- repo: https://github.com/psf/black-pre-commit-mirror + rev: 24.2.0 hooks: # Format Python files - id: black @@ -74,7 +74,7 @@ repos: # Format Snakemake rule / workflow files - repo: https://github.com/snakemake/snakefmt - rev: v0.8.5 + rev: v0.10.0 hooks: - id: snakefmt @@ -87,6 +87,6 @@ repos: # Check for FSFE REUSE compliance (licensing) - repo: https://github.com/fsfe/reuse-tool - rev: v2.1.0 + rev: v3.0.1 hooks: - id: reuse diff --git a/.readthedocs.yml b/.readthedocs.yml index 30684052..ca388d80 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 diff --git a/.reuse/dep5 b/.reuse/dep5 index cd8e2660..27edd808 100644 --- a/.reuse/dep5 +++ b/.reuse/dep5 @@ -4,33 +4,33 @@ Upstream-Contact: Tom Brown Source: https://github.com/pypsa/pypsa-eur Files: doc/img/* -Copyright: 2019-2023 The PyPSA-Eur Authors +Copyright: 2019-2024 The PyPSA-Eur Authors License: CC-BY-4.0 Files: doc/data.csv -Copyright: 2019-2023 The PyPSA-Eur Authors +Copyright: 2019-2024 The PyPSA-Eur Authors License: CC-BY-4.0 Files: doc/configtables/* -Copyright: 2019-2023 The PyPSA-Eur Authors +Copyright: 2019-2024 The PyPSA-Eur Authors License: CC-BY-4.0 Files: data/* -Copyright: 2017-2023 The PyPSA-Eur Authors +Copyright: 2017-2024 The PyPSA-Eur Authors License: CC-BY-4.0 Files: .github/* -Copyright: 2019-2023 The PyPSA-Eur Authors +Copyright: 2019-2024 The PyPSA-Eur Authors License: CC0-1.0 Files: matplotlibrc -Copyright: 2017-2023 The PyPSA-Eur Authors +Copyright: 2017-2024 The PyPSA-Eur Authors License: CC0-1.0 Files: borg-it -Copyright: 2017-2023 The PyPSA-Eur Authors +Copyright: 2017-2024 The PyPSA-Eur Authors License: CC0-1.0 Files: graphics/* -Copyright: 2017-2023 The PyPSA-Eur Authors +Copyright: 2017-2024 The PyPSA-Eur Authors License: CC-BY-4.0 diff --git a/.sync-send 
b/.sync-send index 72252956..483c7a99 100644 --- a/.sync-send +++ b/.sync-send @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2021-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2021-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 diff --git a/CITATION.cff b/CITATION.cff index c80b73ef..af26fd53 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -6,7 +6,7 @@ cff-version: 1.1.0 message: "If you use this package, please cite it in the following way." title: "PyPSA-Eur: An open sector-coupled optimisation model of the European energy system" repository: https://github.com/pypsa/pypsa-eur -version: 0.8.1 +version: 0.10.0 license: MIT authors: - family-names: Brown diff --git a/LICENSES/MIT.txt b/LICENSES/MIT.txt index 87f6d959..baf15333 100644 --- a/LICENSES/MIT.txt +++ b/LICENSES/MIT.txt @@ -1,6 +1,6 @@ MIT License -Copyright 2017-2023 The PyPSA-Eur Authors +Copyright 2017-2024 The PyPSA-Eur Authors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in diff --git a/README.md b/README.md index 4a58d75c..b4c03574 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ diff --git a/Snakefile b/Snakefile index 468d06f2..c2d71cfd 100644 --- a/Snakefile +++ b/Snakefile @@ -1,32 +1,51 @@ -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT from os.path import normpath, exists from shutil import copyfile, move, rmtree +from pathlib import Path +import yaml from snakemake.utils import min_version min_version("8") +from scripts._helpers import path_provider -if not exists("config/config.yaml"): - copyfile("config/config.default.yaml", "config/config.yaml") +default_files = { + "config/config.default.yaml": "config/config.yaml", + "config/scenarios.template.yaml": "config/scenarios.yaml", +} +for template, target in default_files.items(): + target = 
os.path.join(workflow.current_basedir, target) + template = os.path.join(workflow.current_basedir, template) + if not exists(target) and exists(template): + copyfile(template, target) +configfile: "config/config.default.yaml" configfile: "config/config.yaml" -COSTS = f"data/costs_{config['costs']['year']}.csv" -ATLITE_NPROCESSES = config["atlite"].get("nprocesses", 4) +run = config["run"] +scenarios = run.get("scenarios", {}) +if run["name"] and scenarios.get("enable"): + fn = Path(scenarios["file"]) + scenarios = yaml.safe_load(fn.read_text()) + RDIR = "{run}/" + if run["name"] == "all": + config["run"]["name"] = list(scenarios.keys()) +elif run["name"]: + RDIR = run["name"] + "/" +else: + RDIR = "" -run = config.get("run", {}) -RDIR = run["name"] + "/" if run.get("name") else "" -CDIR = RDIR if not run.get("shared_cutouts") else "" +logs = path_provider("logs/", RDIR, run["shared_resources"]) +benchmarks = path_provider("benchmarks/", RDIR, run["shared_resources"]) +resources = path_provider("resources/", RDIR, run["shared_resources"]) -LOGS = "logs/" + RDIR -BENCHMARKS = "benchmarks/" + RDIR -RESOURCES = "resources/" + RDIR if not run.get("shared_resources") else "resources/" +CDIR = "" if run["shared_cutouts"] else RDIR RESULTS = "results/" + RDIR @@ -69,10 +88,19 @@ if config["foresight"] == "perfect": rule all: input: - RESULTS + "graphs/costs.pdf", + expand(RESULTS + "graphs/costs.pdf", run=config["run"]["name"]), default_target: True +rule create_scenarios: + output: + config["run"]["scenarios"]["file"], + conda: + "envs/retrieve.yaml" + script: + "config/create_scenarios.py" + + rule purge: run: import builtins @@ -93,9 +121,9 @@ rule dag: message: "Creating DAG of workflow." 
output: - dot=RESOURCES + "dag.dot", - pdf=RESOURCES + "dag.pdf", - png=RESOURCES + "dag.png", + dot=resources("dag.dot"), + pdf=resources("dag.pdf"), + png=resources("dag.png"), conda: "envs/environment.yaml" shell: @@ -121,6 +149,7 @@ rule sync: shell: """ rsync -uvarh --ignore-missing-args --files-from=.sync-send . {params.cluster} + rsync -uvarh --no-g {params.cluster}/resources . || echo "No resources directory, skipping rsync" rsync -uvarh --no-g {params.cluster}/results . || echo "No results directory, skipping rsync" rsync -uvarh --no-g {params.cluster}/logs . || echo "No logs directory, skipping rsync" """ diff --git a/config/config.default.yaml b/config/config.default.yaml index a6df173b..e7bfe18f 100644 --- a/config/config.default.yaml +++ b/config/config.default.yaml @@ -1,9 +1,9 @@ -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#top-level-configuration -version: 0.8.1 +version: 0.10.0 tutorial: false logging: @@ -21,6 +21,9 @@ remote: # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#run run: name: "" + scenarios: + enable: false + file: config/scenarios.yaml disable_progressbar: false shared_resources: false shared_cutouts: true @@ -44,7 +47,7 @@ scenario: opts: - '' sector_opts: - - Co2L0-3H-T-H-B-I-A-solar+p3-dist1 + - Co2L0-3H-T-H-B-I-A-dist1 planning_horizons: # - 2020 # - 2030 @@ -74,6 +77,7 @@ enable: retrieve_natura_raster: true custom_busmap: false + # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#co2-budget co2_budget: 2020: 0.701 @@ -87,7 +91,9 @@ co2_budget: # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#electricity electricity: voltages: [220., 300., 380., 500., 750.] 
+ gaslimit_enable: false gaslimit: false + co2limit_enable: false co2limit: 7.75e+7 co2base: 1.487e+9 agg_p_nom_limits: data/agg_p_nom_minmax.csv @@ -108,8 +114,9 @@ electricity: Store: [battery, H2] Link: [] # H2 pipeline - powerplants_filter: (DateOut >= 2022 or DateOut != DateOut) + powerplants_filter: (DateOut >= 2023 or DateOut != DateOut) and not (Country == 'Germany' and Fueltype == 'Nuclear') custom_powerplants: false + everywhere_powerplants: [nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass] conventional_carriers: [nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass] renewable_carriers: [solar, onwind, offwind-ac, offwind-dc, hydro] @@ -124,6 +131,10 @@ electricity: Onshore: [onwind] PV: [solar] + autarky: + enable: false + by_country: false + # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#atlite atlite: default_cutout: europe-2013-era5 @@ -158,45 +169,51 @@ renewable: resource: method: wind turbine: Vestas_V112_3MW + add_cutout_windspeed: true capacity_per_sqkm: 3 # correction_factor: 0.93 corine: grid_codes: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 32] distance: 1000 distance_grid_codes: [1, 2, 3, 4, 5, 6] + luisa: false + # grid_codes: [1111, 1121, 1122, 1123, 1130, 1210, 1221, 1222, 1230, 1241, 1242] + # distance: 1000 + # distance_grid_codes: [1111, 1121, 1122, 1123, 1130, 1210, 1221, 1222, 1230, 1241, 1242] natura: true excluder_resolution: 100 - potential: simple # or conservative clip_p_max_pu: 1.e-2 offwind-ac: cutout: europe-2013-era5 resource: method: wind - turbine: NREL_ReferenceTurbine_5MW_offshore + turbine: NREL_ReferenceTurbine_2020ATB_5.5MW + add_cutout_windspeed: true capacity_per_sqkm: 2 correction_factor: 0.8855 corine: [44, 255] + luisa: false # [0, 5230] natura: true ship_threshold: 400 max_depth: 50 max_shore_distance: 30000 excluder_resolution: 200 - potential: simple # or conservative clip_p_max_pu: 1.e-2 offwind-dc: cutout: europe-2013-era5 
resource: method: wind - turbine: NREL_ReferenceTurbine_5MW_offshore + turbine: NREL_ReferenceTurbine_2020ATB_5.5MW + add_cutout_windspeed: true capacity_per_sqkm: 2 correction_factor: 0.8855 corine: [44, 255] + luisa: false # [0, 5230] natura: true ship_threshold: 400 max_depth: 50 min_shore_distance: 30000 excluder_resolution: 200 - potential: simple # or conservative clip_p_max_pu: 1.e-2 solar: cutout: europe-2013-sarah @@ -206,12 +223,12 @@ renewable: orientation: slope: 35. azimuth: 180. - capacity_per_sqkm: 1.7 + capacity_per_sqkm: 5.1 # correction_factor: 0.854337 corine: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 26, 31, 32] + luisa: false # [1111, 1121, 1122, 1123, 1130, 1210, 1221, 1222, 1230, 1241, 1242, 1310, 1320, 1330, 1410, 1421, 1422, 2110, 2120, 2130, 2210, 2220, 2230, 2310, 2410, 2420, 3210, 3320, 3330] natura: true excluder_resolution: 100 - potential: simple # or conservative clip_p_max_pu: 1.e-2 hydro: cutout: europe-2013-era5 @@ -239,7 +256,7 @@ lines: 750.: "Al/St 560/50 4-bundle 750.0" s_max_pu: 0.7 s_nom_max: .inf - max_extension: .inf + max_extension: 20000 #MW length_factor: 1.25 reconnect_crimea: true under_construction: 'zero' # 'zero': set capacity to zero, 'remove': remove, 'keep': with full capacity @@ -254,7 +271,7 @@ lines: links: p_max_pu: 1.0 p_nom_max: .inf - max_extension: .inf + max_extension: 30000 #MW include_tyndp: true under_construction: 'zero' # 'zero': set capacity to zero, 'remove': remove, 'keep': with full capacity @@ -264,7 +281,7 @@ transformers: s_nom: 2000. 
type: '' -# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#load +# docs-load in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#load load: power_statistics: true interpolate_limit: 3 @@ -289,6 +306,7 @@ pypsa_eur: - offwind-dc - solar - ror + - nuclear StorageUnit: - PHS - hydro @@ -296,9 +314,8 @@ pypsa_eur: # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#energy energy: - energy_totals_year: 2011 + energy_totals_year: 2019 base_emissions_year: 1990 - eurostat_report_year: 2016 emissions: CO2 # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#biomass @@ -336,9 +353,10 @@ solar_thermal: # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#existing-capacities existing_capacities: - grouping_years_power: [1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2020, 2025, 2030] + grouping_years_power: [1960, 1965, 1970, 1975, 1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2020, 2025, 2030] grouping_years_heat: [1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2019] # these should not extend 2020 threshold_capacity: 10 + default_heating_lifetime: 20 conventional_carriers: - lignite - coal @@ -347,15 +365,23 @@ existing_capacities: # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#sector sector: + transport: true + heating: true + biomass: true + industry: true + agriculture: true district_heating: potential: 0.6 progress: 2020: 0.0 + 2025: 0.15 2030: 0.3 + 2035: 0.45 2040: 0.6 + 2045: 0.8 2050: 1.0 district_heating_loss: 0.15 - cluster_heat_buses: false + cluster_heat_buses: true bev_dsm_restriction_value: 0.75 bev_dsm_restriction_time: 7 transport_heating_deadband_upper: 20. 
@@ -375,18 +401,27 @@ sector: v2g: true land_transport_fuel_cell_share: 2020: 0 - 2030: 0.05 - 2040: 0.1 - 2050: 0.15 + 2025: 0 + 2030: 0 + 2035: 0 + 2040: 0 + 2045: 0 + 2050: 0 land_transport_electric_share: 2020: 0 - 2030: 0.25 - 2040: 0.6 - 2050: 0.85 + 2025: 0.15 + 2030: 0.3 + 2035: 0.45 + 2040: 0.7 + 2045: 0.85 + 2050: 1 land_transport_ice_share: 2020: 1 + 2025: 0.85 2030: 0.7 + 2035: 0.55 2040: 0.3 + 2045: 0.15 2050: 0 transport_fuel_cell_efficiency: 0.5 transport_internal_combustion_efficiency: 0.3 @@ -400,18 +435,27 @@ sector: shipping_hydrogen_liquefaction: false shipping_hydrogen_share: 2020: 0 + 2025: 0 2030: 0 + 2035: 0 2040: 0 + 2045: 0 2050: 0 shipping_methanol_share: 2020: 0 + 2025: 0.15 2030: 0.3 + 2035: 0.5 2040: 0.7 + 2045: 0.85 2050: 1 shipping_oil_share: 2020: 1 + 2025: 0.85 2030: 0.7 + 2035: 0.5 2040: 0.3 + 2045: 0.15 2050: 0 shipping_methanol_efficiency: 0.46 shipping_oil_efficiency: 0.40 @@ -440,15 +484,16 @@ sector: decentral: 3 central: 180 boilers: true + resistive_heaters: true oil_boilers: false biomass_boiler: true + overdimension_individual_heating: 1.1 #to cover demand peaks bigger than data chp: true micro_chp: false solar_thermal: true solar_cf_correction: 0.788457 # = >>> 1/1.2683 marginal_cost_storage: 0. 
#1e-4 methanation: true - helmeth: false coal_cc: false dac: true co2_vent: false @@ -458,6 +503,9 @@ sector: hydrogen_turbine: false SMR: true SMR_cc: true + regional_methanol_demand: false + regional_oil_demand: false + regional_coal_demand: false regional_co2_sequestration_potential: enable: false attribute: 'conservative estimate Mt' @@ -470,6 +518,7 @@ sector: co2_sequestration_lifetime: 50 co2_spatial: false co2network: false + co2_network_cost_factor: 1 cc_fraction: 0.9 hydrogen_underground_storage: true hydrogen_underground_storage_locations: @@ -477,14 +526,29 @@ sector: - nearshore # within 50 km of sea # - offshore ammonia: false - min_part_load_fischer_tropsch: 0.9 - min_part_load_methanolisation: 0.5 + min_part_load_fischer_tropsch: 0.7 + min_part_load_methanolisation: 0.3 + min_part_load_methanation: 0.3 use_fischer_tropsch_waste_heat: true + use_haber_bosch_waste_heat: true + use_methanolisation_waste_heat: true + use_methanation_waste_heat: true use_fuel_cell_waste_heat: true - use_electrolysis_waste_heat: false + use_electrolysis_waste_heat: true + electricity_transmission_grid: true electricity_distribution_grid: true electricity_distribution_grid_cost_factor: 1.0 electricity_grid_connection: true + transmission_efficiency: + DC: + efficiency_static: 0.98 + efficiency_per_1000km: 0.977 + H2 pipeline: + efficiency_per_1000km: 1 # 0.982 + compression_per_1000km: 0.018 + gas pipeline: + efficiency_per_1000km: 1 #0.977 + compression_per_1000km: 0.01 H2_network: true gas_network: false H2_retrofit: false @@ -494,6 +558,7 @@ sector: gas_distribution_grid_cost_factor: 1.0 biomass_spatial: false biomass_transport: false + biogas_upgrading_cc: false conventional_generation: OCGT: gas biomass_to_liquid: false @@ -544,14 +609,48 @@ industry: MWh_NH3_per_tNH3: 5.166 MWh_CH4_per_tNH3_SMR: 10.8 MWh_elec_per_tNH3_SMR: 0.7 - MWh_H2_per_tNH3_electrolysis: 6.5 - MWh_elec_per_tNH3_electrolysis: 1.17 + MWh_H2_per_tNH3_electrolysis: 5.93 + 
MWh_elec_per_tNH3_electrolysis: 0.2473 MWh_NH3_per_MWh_H2_cracker: 1.46 # https://github.com/euronion/trace/blob/44a5ff8401762edbef80eff9cfe5a47c8d3c8be4/data/efficiencies.csv NH3_process_emissions: 24.5 petrochemical_process_emissions: 25.5 - HVC_primary_fraction: 1. - HVC_mechanical_recycling_fraction: 0. - HVC_chemical_recycling_fraction: 0. + #HVC primary/recycling based on values used in Neumann et al https://doi.org/10.1016/j.joule.2023.06.016, linearly interpolated between 2020 and 2050 + #2020 recycling rates based on Agora https://static.agora-energiewende.de/fileadmin/Projekte/2021/2021_02_EU_CEAP/A-EW_254_Mobilising-circular-economy_study_WEB.pdf + #fractions refer to the total primary HVC production in 2020 + #assumes 6.7 Mtplastics produced from recycling in 2020 + HVC_primary_fraction: + 2020: 1.0 + 2025: 0.9 + 2030: 0.8 + 2035: 0.7 + 2040: 0.6 + 2045: 0.5 + 2050: 0.4 + HVC_mechanical_recycling_fraction: + 2020: 0.12 + 2025: 0.15 + 2030: 0.18 + 2035: 0.21 + 2040: 0.24 + 2045: 0.27 + 2050: 0.30 + HVC_chemical_recycling_fraction: + 2020: 0.0 + 2025: 0.0 + 2030: 0.04 + 2035: 0.08 + 2040: 0.12 + 2045: 0.16 + 2050: 0.20 + sector_ratios_fraction_future: + 2020: 0.0 + 2025: 0.1 + 2030: 0.3 + 2035: 0.5 + 2040: 0.7 + 2045: 0.9 + 2050: 1.0 + basic_chemicals_without_NH3_production_today: 69. #Mt/a, = 86 Mtethylene-equiv - 17 MtNH3 HVC_production_today: 52. MWh_elec_per_tHVC_mechanical_recycling: 0.547 MWh_elec_per_tHVC_chemical_recycling: 6.9 @@ -568,7 +667,7 @@ industry: # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#costs costs: year: 2030 - version: v0.6.0 + version: v0.8.1 rooftop_share: 0.14 # based on the potentials, assuming (0.1 kW/m2 and 10 m2/person) social_discountrate: 0.02 fill_values: @@ -594,7 +693,9 @@ costs: battery: 0. battery inverter: 0. emission_prices: + enable: false co2: 0. 
+ co2_monthly_prices: false # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#clustering clustering: @@ -616,6 +717,14 @@ clustering: committable: any ramp_limit_up: max ramp_limit_down: max + temporal: + resolution_elec: false + resolution_sector: false + +# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#adjustments +adjustments: + electricity: false + sector: false # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#solving solving: @@ -627,14 +736,22 @@ solving: skip_iterations: true rolling_horizon: false seed: 123 + custom_extra_functionality: "../data/custom_extra_functionality.py" + # io_api: "direct" # Increases performance but only supported for the highs and gurobi solvers # options that go into the optimize function track_iterations: false min_iterations: 4 max_iterations: 6 - transmission_losses: 0 + transmission_losses: 2 linearized_unit_commitment: true horizon: 365 + constraints: + CCL: false + EQ: false + BAU: false + SAFE: false + solver: name: gurobi options: gurobi-default @@ -689,6 +806,10 @@ solving: solutiontype: 2 # non basic solution, ie no crossover barrier.convergetol: 1.e-5 feasopt.tolerance: 1.e-6 + copt-default: + Threads: 8 + LpMethod: 2 + Crossover: 0 cbc-default: {} # Used in CI glpk-default: {} # Used in CI @@ -702,6 +823,13 @@ plotting: color_geomap: ocean: white land: white + projection: + name: "EqualEarth" + # See https://scitools.org.uk/cartopy/docs/latest/reference/projections.html for alternatives, for example: + # name: "LambertConformal" + # central_longitude: 10. + # central_latitude: 50. + # standard_parallels: [35, 65] eu_node_location: x: -5.5 y: 46. 
@@ -748,7 +876,6 @@ plotting: hydroelectricity: '#298c81' PHS: '#51dbcc' hydro+PHS: "#08ad97" - wave: '#a7d4cf' # solar solar: "#f9d002" solar PV: "#f9d002" @@ -775,6 +902,7 @@ plotting: fossil gas: '#e05b09' natural gas: '#e05b09' biogas to gas: '#e36311' + biogas to gas CC: '#e51245' CCGT: '#a85522' CCGT marginal: '#a85522' allam: '#B98F76' @@ -876,6 +1004,7 @@ plotting: # heat demand Heat load: '#cc1f1f' heat: '#cc1f1f' + heat vent: '#aa3344' heat demand: '#cc1f1f' rural heat: '#ff5c5c' residential rural heat: '#ff7c7c' @@ -895,9 +1024,11 @@ plotting: air heat pump: '#36eb41' residential urban decentral air heat pump: '#48f74f' services urban decentral air heat pump: '#5af95d' + services rural air heat pump: '#5af95d' urban central air heat pump: '#6cfb6b' ground heat pump: '#2fb537' residential rural ground heat pump: '#48f74f' + residential rural air heat pump: '#48f74f' services rural ground heat pump: '#5af95d' Ambient: '#98eb9d' CHP: '#8a5751' @@ -945,7 +1076,6 @@ plotting: Sabatier: '#9850ad' methanation: '#c44ce6' methane: '#c44ce6' - helmeth: '#e899ff' # synfuels Fischer-Tropsch: '#25c49a' liquid: '#25c49a' @@ -960,6 +1090,7 @@ plotting: CO2 sequestration: '#f29dae' DAC: '#ff5270' co2 stored: '#f2385a' + co2 sequestered: '#f2682f' co2: '#f29dae' co2 vent: '#ffd4dc' CO2 pipeline: '#f5627f' diff --git a/config/config.entsoe-all.yaml b/config/config.entsoe-all.yaml index dd19d2c7..40e3c0a5 100644 --- a/config/config.entsoe-all.yaml +++ b/config/config.entsoe-all.yaml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 diff --git a/config/config.perfect.yaml b/config/config.perfect.yaml index f355763c..7bfdbdd2 100644 --- a/config/config.perfect.yaml +++ b/config/config.perfect.yaml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # 
SPDX-License-Identifier: CC0-1.0 run: @@ -19,13 +19,16 @@ scenario: opts: - '' sector_opts: - - 1p5-4380H-T-H-B-I-A-solar+p3-dist1 - - 1p7-4380H-T-H-B-I-A-solar+p3-dist1 - - 2p0-4380H-T-H-B-I-A-solar+p3-dist1 + - 1p5-4380H-T-H-B-I-A-dist1 + - 1p7-4380H-T-H-B-I-A-dist1 + - 2p0-4380H-T-H-B-I-A-dist1 planning_horizons: - 2020 + - 2025 - 2030 + - 2035 - 2040 + - 2045 - 2050 diff --git a/config/config.validation.yaml b/config/config.validation.yaml index 5bcd5c31..c8fb2fa5 100644 --- a/config/config.validation.yaml +++ b/config/config.validation.yaml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 run: diff --git a/config/create_scenarios.py b/config/create_scenarios.py new file mode 100644 index 00000000..cccc29bc --- /dev/null +++ b/config/create_scenarios.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2023-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT + +# This script helps to generate a scenarios.yaml file for PyPSA-Eur. +# You can modify the template to your needs and define all possible combinations of config values that should be considered. + +if "snakemake" in globals(): + filename = snakemake.output[0] +else: + filename = "../config/scenarios.yaml" + +import itertools + +# Insert your config values that should be altered in the template. +# Change `config_section` and `config_section2` to the actual config sections. +template = """ +scenario{scenario_number}: + config_section: + config_key: {config_value} + config_section2: + config_key2: {config_value2} +""" + +# Define all possible combinations of config values. +# This must define all config values that are used in the template. 
+config_values = dict(config_value=["true", "false"], config_value2=[1, 2, 3, 4]) + +combinations = [ + dict(zip(config_values.keys(), values)) + for values in itertools.product(*config_values.values()) +] + +with open(filename, "w") as f: + for i, config in enumerate(combinations): + f.write(template.format(scenario_number=i, **config)) diff --git a/config/scenarios.template.yaml b/config/scenarios.template.yaml new file mode 100644 index 00000000..0eba9d75 --- /dev/null +++ b/config/scenarios.template.yaml @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT + +# This file is used to define the scenarios that are run by snakemake. Each entry on the first level is a scenario. Each scenario can contain configuration overrides with respect to the config/config.yaml settings. +# +# Example +# +# custom-scenario: # name of the scenario +# electricity: +# renewable_carriers: [wind, solar] # override the list of renewable carriers + +normal: + electricity: + renewable_carriers: + - solar + - onwind + - offwind-ac + - offwind-dc + - hydro + +no-offwind: + electricity: + renewable_carriers: + - solar + - onwind + - hydro diff --git a/config/test/config.electricity.yaml b/config/test/config.electricity.yaml index b750bf62..57964415 100644 --- a/config/test/config.electricity.yaml +++ b/config/test/config.electricity.yaml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 @@ -8,14 +8,14 @@ tutorial: true run: name: "test-elec" # use this to keep track of runs with different settings disable_progressbar: true - shared_resources: true + shared_resources: "test" shared_cutouts: true scenario: clusters: - 5 opts: - - Co2L-24H + - Co2L-24h countries: ['BE'] diff --git a/config/test/config.myopic.yaml b/config/test/config.myopic.yaml index d566c6cb..5abae36d 100644 --- 
a/config/test/config.myopic.yaml +++ b/config/test/config.myopic.yaml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 @@ -7,7 +7,7 @@ tutorial: true run: name: "test-sector-myopic" disable_progressbar: true - shared_resources: true + shared_resources: "test" shared_cutouts: true foresight: myopic @@ -18,7 +18,7 @@ scenario: clusters: - 5 sector_opts: - - 24H-T-H-B-I-A-solar+p3-dist1 + - 24h-T-H-B-I-A-dist1 planning_horizons: - 2030 - 2040 diff --git a/config/test/config.overnight.yaml b/config/test/config.overnight.yaml index a2a0f5a4..7fb53e42 100644 --- a/config/test/config.overnight.yaml +++ b/config/test/config.overnight.yaml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 @@ -7,7 +7,7 @@ tutorial: true run: name: "test-sector-overnight" disable_progressbar: true - shared_resources: true + shared_resources: "test" shared_cutouts: true @@ -17,7 +17,7 @@ scenario: clusters: - 5 sector_opts: - - CO2L0-24H-T-H-B-I-A-solar+p3-dist1 + - CO2L0-24h-T-H-B-I-A-dist1 planning_horizons: - 2030 diff --git a/config/test/config.perfect.yaml b/config/test/config.perfect.yaml index 49886b26..5d77c9c5 100644 --- a/config/test/config.perfect.yaml +++ b/config/test/config.perfect.yaml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 @@ -7,7 +7,7 @@ tutorial: true run: name: "test-sector-perfect" disable_progressbar: true - shared_resources: true + shared_resources: "test" shared_cutouts: true foresight: perfect @@ -18,7 +18,7 @@ scenario: clusters: - 5 sector_opts: - - 8760H-T-H-B-I-A-solar+p3-dist1 + - 8760h-T-H-B-I-A-dist1 planning_horizons: - 2030 - 2040 diff --git a/config/test/config.scenarios.yaml 
b/config/test/config.scenarios.yaml new file mode 100644 index 00000000..8ecbb91b --- /dev/null +++ b/config/test/config.scenarios.yaml @@ -0,0 +1,60 @@ +# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: CC0-1.0 + +tutorial: true + +run: + name: + - test-elec-no-offshore-wind + - test-elec-no-onshore-wind + scenarios: + enable: true + file: "config/test/scenarios.yaml" + disable_progressbar: true + shared_resources: base + shared_cutouts: true + +scenario: + clusters: + - 5 + opts: + - Co2L-24H + +countries: ['BE'] + +snapshots: + start: "2013-03-01" + end: "2013-03-08" + +electricity: + extendable_carriers: + Generator: [OCGT] + StorageUnit: [battery, H2] + Store: [] + +atlite: + default_cutout: be-03-2013-era5 + cutouts: + be-03-2013-era5: + module: era5 + x: [4., 15.] + y: [46., 56.] + time: ["2013-03-01", "2013-03-08"] + +renewable: + onwind: + cutout: be-03-2013-era5 + offwind-ac: + cutout: be-03-2013-era5 + max_depth: false + offwind-dc: + cutout: be-03-2013-era5 + max_depth: false + solar: + cutout: be-03-2013-era5 + +solving: + solver: + name: glpk + options: "glpk-default" diff --git a/config/test/scenarios.yaml b/config/test/scenarios.yaml new file mode 100644 index 00000000..962cc91e --- /dev/null +++ b/config/test/scenarios.yaml @@ -0,0 +1,11 @@ +# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: CC0-1.0 + +test-elec-no-offshore-wind: + electricity: + renewable_carriers: [solar, onwind] + +test-elec-no-onshore-wind: + electricity: + renewable_carriers: [solar, offwind-ac, offwind-dc] diff --git a/data/custom_extra_functionality.py b/data/custom_extra_functionality.py new file mode 100644 index 00000000..e7a9df0f --- /dev/null +++ b/data/custom_extra_functionality.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2023- The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT + + +def custom_extra_functionality(n, snapshots, snakemake): + """ + Add 
custom extra functionality constraints. + """ + pass diff --git a/data/switzerland-new_format-all_years.csv b/data/switzerland-new_format-all_years.csv new file mode 100644 index 00000000..d083e8a8 --- /dev/null +++ b/data/switzerland-new_format-all_years.csv @@ -0,0 +1,25 @@ +country,item,2010,2011,2012,2013,2014,2015,2016,2017,2018,2019,2020,2021,2022 +CH,total residential,268.2,223.4,243.4,261.3,214.2,229.1,241.2,236.5,223.7,226.5,219.1,241.2,211.3 +CH,total residential space,192.2,149,168.1,185.5,139.7,154.4,167.3,161.5,147.2,150.4,140.2,166.2,131.9 +CH,total residential water,32.2,31.6,31.9,32.2,31.7,31.9,31.8,31.8,31.8,31.7,33.3,32.5,32.5 +CH,total residential cooking,9.3,9.3,9.3,9.4,9.5,9.6,9.9,10,10.1,10.2,10.5,10.3,10.3 +CH,electricity residential,67.9,63.7,65.7,67.6,63,64.4,69.7,69.2,67.7,68.1,68.7,70.8,66.8 +CH,electricity residential space,15.9,12.8,14.3,15.8,12.3,13.5,15.8,15.6,14.7,15.3,14.8,17.8,14.8 +CH,electricity residential water,8.8,8.5,8.5,8.6,8.5,8.6,8.9,9,9.2,9.3,9.7,9.5,9.5 +CH,electricity residential cooking,4.9,4.9,4.9,4.9,5,5,5,5.1,5.1,5.1,5.4,5.2,5.3 +CH,total services,145.9,127.4,136.7,144,124.5,132.5,150.5,147.7,141.5,143.1,129.7,144.2,122.5 +CH,total services space,80,62.2,70.8,77.4,58.3,64.3,77,74.4,68.2,69.8,64.3,75.7,58.7 +CH,total services water,10.1,10,10.1,10.1,10,10,11.4,11.3,11.2,11.1,9.7,10.4,12 +CH,total services cooking,2.5,2.4,2.3,2.3,2.4,2.3,3.1,3.1,3.2,3.3,2.1,2.6,3.2 +CH,electricity services,60.5,59.2,60.3,61.4,60.3,62.6,65.9,65.7,65.5,65.6,58.8,61.6,61.6 +CH,electricity services space,4,3.2,3.8,4.2,3.3,3.6,2.7,2.5,2.3,2.3,2.2,2.5,2.5 +CH,electricity services water,0.7,0.7,0.7,0.7,0.7,0.7,1.2,1.1,1.1,1.1,0.9,1,1 +CH,electricity services cooking,2.5,2.4,2.3,2.3,2.4,2.3,3.1,3.1,3.1,3.2,3.3,2.1,3.2 +CH,total rail,11.5,11.1,11.2,11.4,11.1,11.4,11.6,11.4,11.2,11,10.2,10.6,10.8 +CH,total road,199.4,200.4,200.4,201.2,202,203.1,203.9,203.7,202.6,200.5,182.6,188.3,193.3 +CH,electricity 
road,0,0,0,0,0,0,0.1,0.2,0.3,0.4,0.5,0.8,1.3 +CH,electricity rail,11.5,11.1,11.2,11.4,11.1,11.4,11.5,11.3,11.1,11,10.1,10.6,10.7 +CH,total domestic aviation,3.3,3.2,3.4,3.4,3.5,3.5,3.6,3.1,3.1,2.9,2.5,2.8,3 +CH,total international aviation,58,62,63.5,64.2,64.5,66.8,70.6,72.8,77.2,78.2,28.2,31.2,56.8 +CH,total domestic navigation,1.6,1.6,1.6,1.6,1.6,1.6,1.4,1.4,1.4,1.4,1.4,1.4,1.4 +CH,total international navigation,0,0,0,0,0,0,0,0,0,0,0,0,0 diff --git a/doc/Makefile b/doc/Makefile index a2ae2428..9eea4532 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT diff --git a/doc/conf.py b/doc/conf.py index 1ddae466..afa01d3a 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -72,7 +72,7 @@ master_doc = "index" # General information about the project. project = "PyPSA-Eur" -copyright = "2017-2023 Tom Brown (KIT, TUB, FIAS), Jonas Hoersch (KIT, FIAS), Fabian Hofmann (TUB, FIAS), Fabian Neumann (TUB, KIT), Marta Victoria (Aarhus University), Lisa Zeyen (KIT, TUB)" +copyright = "2017-2024 Tom Brown (KIT, TUB, FIAS), Jonas Hoersch (KIT, FIAS), Fabian Hofmann (TUB, FIAS), Fabian Neumann (TUB, KIT), Marta Victoria (Aarhus University), Lisa Zeyen (KIT, TUB)" author = "Tom Brown (KIT, TUB, FIAS), Jonas Hoersch (KIT, FIAS), Fabian Hofmann (TUB, FIAS), Fabian Neumann (TUB, KIT), Marta Victoria (Aarhus University), Lisa Zeyen (KIT, TUB)" # The version info for the project you're documenting, acts as replacement for @@ -80,9 +80,9 @@ author = "Tom Brown (KIT, TUB, FIAS), Jonas Hoersch (KIT, FIAS), Fabian Hofmann # built documents. # # The short X.Y version. -version = "0.8" +version = "0.10" # The full version, including alpha/beta/rc tags. -release = "0.8.1" +release = "0.10.0" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/doc/configtables/adjustments.csv b/doc/configtables/adjustments.csv new file mode 100644 index 00000000..52617352 --- /dev/null +++ b/doc/configtables/adjustments.csv @@ -0,0 +1,8 @@ +,Unit,Values,Description +adjustments,,, +-- electricity,bool or dict,,"Parameter adjustments for capital cost, marginal cost, and maximum capacities of carriers. Applied in :mod:`prepare_network.`" +-- -- {attr},,,"Attribute can be ``e_nom_opt``, ``p_nom_opt``, ``marginal_cost`` or ``capital_cost``" +-- -- -- {carrier},float,per-unit,"Any carrier of the network to which parameter adjustment factor should be applied." +-- sector,bool or dict,,"Parameter adjustments for capital cost, marginal cost, and maximum capacities of carriers. Applied in :mod:`prepare_sector_network.`" +-- -- {attr},,,"Attribute can be ``e_nom_opt``, ``p_nom_opt``, ``marginal_cost`` or ``capital_cost``" +-- -- -- {carrier},float,per-unit,"Any carrier of the network to which parameter adjustment factor should be applied." diff --git a/doc/configtables/clustering.csv b/doc/configtables/clustering.csv index e831ca84..65411738 100644 --- a/doc/configtables/clustering.csv +++ b/doc/configtables/clustering.csv @@ -17,3 +17,6 @@ aggregation_strategies,,, -- -- {key},str,"{key} can be any of the component of the generator (str). It’s value can be any that can be converted to pandas.Series using getattr(). For example one of {min, max, sum}.","Aggregates the component according to the given strategy. For example, if sum, then all values within each cluster are summed to represent the new generator." -- buses,,, -- -- {key},str,"{key} can be any of the component of the bus (str). It’s value can be any that can be converted to pandas.Series using getattr(). For example one of {min, max, sum}.","Aggregates the component according to the given strategy. For example, if sum, then all values within each cluster are summed to represent the new bus." 
+temporal,,,Options for temporal resolution +-- resolution_elec,--,"{false,``nH``; i.e. ``2H``-``6H``}","Resample the time-resolution by averaging over every ``n`` snapshots in :mod:`prepare_network`. **Warning:** This option should currently only be used with electricity-only networks, not for sector-coupled networks." +-- resolution_sector,--,"{false,``nH``; i.e. ``2H``-``6H``}","Resample the time-resolution by averaging over every ``n`` snapshots in :mod:`prepare_sector_network`." diff --git a/doc/configtables/costs.csv b/doc/configtables/costs.csv index 9797d77e..b69c0bf9 100644 --- a/doc/configtables/costs.csv +++ b/doc/configtables/costs.csv @@ -1,9 +1,12 @@ -,Unit,Values,Description -year,--,"YYYY; e.g. '2030'","Year for which to retrieve cost assumptions of ``resources/costs.csv``." -version,--,"vX.X.X; e.g. 'v0.5.0'","Version of ``technology-data`` repository to use." -rooftop_share,--,float,"Share of rooftop PV when calculating capital cost of solar (joint rooftop and utility-scale PV)." -fill_values,--,float,"Default values if not specified for a technology in ``resources/costs.csv``." -capital_cost,EUR/MW,"Keys should be in the 'technology' column of ``resources/costs.csv``. Values can be any float.","For the given technologies, assumptions about their capital investment costs are set to the corresponding value. Optional; overwrites cost assumptions from ``resources/costs.csv``." -marginal_cost,EUR/MWh,"Keys should be in the 'technology' column of ``resources/costs.csv``. Values can be any float.","For the given technologies, assumptions about their marginal operating costs are set to the corresponding value. Optional; overwrites cost assumptions from ``resources/costs.csv``." -emission_prices,,,"Specify exogenous prices for emission types listed in ``network.carriers`` to marginal costs." --- co2,EUR/t,float,"Exogenous price of carbon-dioxide added to the marginal costs of fossil-fuelled generators according to their carbon intensity. 
Added through the keyword ``Ep`` in the ``{opts}`` wildcard only in the rule :mod:`prepare_network``." +,Unit,Values,Description +year,--,YYYY; e.g. '2030',Year for which to retrieve cost assumptions of ``resources/costs.csv``. +version,--,vX.X.X; e.g. 'v0.5.0',Version of ``technology-data`` repository to use. +rooftop_share,--,float,Share of rooftop PV when calculating capital cost of solar (joint rooftop and utility-scale PV). +social_discountrate,p.u.,float,Social discount rate to compare costs in different investment periods. 0.02 corresponds to a social discount rate of 2%. +fill_values,--,float,Default values if not specified for a technology in ``resources/costs.csv``. +capital_cost,EUR/MW,Keys should be in the 'technology' column of ``resources/costs.csv``. Values can be any float.,"For the given technologies, assumptions about their capital investment costs are set to the corresponding value. Optional; overwrites cost assumptions from ``resources/costs.csv``." +marginal_cost,EUR/MWh,Keys should be in the 'technology' column of ``resources/costs.csv``. Values can be any float.,"For the given technologies, assumptions about their marginal operating costs are set to the corresponding value. Optional; overwrites cost assumptions from ``resources/costs.csv``." +emission_prices,,,Specify exogenous prices for emission types listed in ``network.carriers`` to marginal costs. +-- enable,bool,true or false,Add cost for a carbon-dioxide price configured in ``costs: emission_prices: co2`` to ``marginal_cost`` of generators (other emission types listed in ``network.carriers`` possible as well) +-- co2,EUR/t,float,Exogenous price of carbon-dioxide added to the marginal costs of fossil-fuelled generators according to their carbon intensity. Added through the keyword ``Ep`` in the ``{opts}`` wildcard only in the rule :mod:`prepare_network``. 
+-- co2_monthly_price,bool,true or false,Add monthly cost for a carbon-dioxide price based on historical values built by the rule ``build_monthly_prices`` diff --git a/doc/configtables/electricity.csv b/doc/configtables/electricity.csv index 4c04fee6..22a22d57 100644 --- a/doc/configtables/electricity.csv +++ b/doc/configtables/electricity.csv @@ -1,6 +1,8 @@ ,Unit,Values,Description voltages,kV,"Any subset of {220., 300., 380.}",Voltage levels to consider +gaslimit_enable,bool,true or false,Add an overall absolute gas limit configured in ``electricity: gaslimit``. gaslimit,MWhth,float or false,Global gas usage limit +co2limit_enable,bool,true or false,Add an overall absolute carbon-dioxide emissions limit configured in ``electricity: co2limit``. co2limit,:math:`t_{CO_2-eq}/a`,float,Cap on total annual system carbon dioxide emissions co2base,:math:`t_{CO_2-eq}/a`,float,Reference value of total annual system carbon dioxide emissions if relative emission reduction target is specified in ``{opts}`` wildcard. agg_p_nom_limits,file,path,Reference to ``.csv`` file specifying per carrier generator nominal capacity constraints for individual countries if ``'CCL'`` is in ``{opts}`` wildcard. Defaults to ``data/agg_p_nom_minmax.csv``. @@ -22,6 +24,8 @@ powerplants_filter,--,"use `pandas.query `_ strings here, e.g. ``Country in ['Germany']``",Filter query for the custom powerplant database. ,,, +everywhere_powerplants,--,"Any subset of {nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass}","List of conventional power plants to add to every node in the model with zero initial capacity. To be used in combination with ``extendable_carriers`` to allow for building conventional powerplants irrespective of existing locations." +,,, conventional_carriers,--,"Any subset of {nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass}","List of conventional power plants to include in the model from ``resources/powerplants.csv``. 
If an included carrier is also listed in ``extendable_carriers``, the capacity is taken as a lower bound." ,,, renewable_carriers,--,"Any subset of {solar, onwind, offwind-ac, offwind-dc, hydro}",List of renewable generators to include in the model. @@ -34,3 +38,6 @@ estimate_renewable_capacities,,, -- -- Offshore,--,"Any subset of {offwind-ac, offwind-dc}","List of PyPSA-Eur carriers that is considered as (IRENA, OPSD) onshore technology." -- -- Offshore,--,{onwind},"List of PyPSA-Eur carriers that is considered as (IRENA, OPSD) offshore technology." -- -- PV,--,{solar},"List of PyPSA-Eur carriers that is considered as (IRENA, OPSD) PV technology." +autarky,,, +-- enable,bool,true or false,Require each node to be autarkic by removing all lines and links. +-- by_country,bool,true or false,Require each country to be autarkic by removing all cross-border lines and links. ``electricity: autarky`` must be enabled. diff --git a/doc/configtables/energy.csv b/doc/configtables/energy.csv index 8718d75e..3d13b9c3 100644 --- a/doc/configtables/energy.csv +++ b/doc/configtables/energy.csv @@ -1,7 +1,4 @@ ,Unit,Values,Description energy_totals_year ,--,"{1990,1995,2000,2005,2010,2011,…} ",The year for the sector energy use. The year must be avaliable in the Eurostat report base_emissions_year ,--,"YYYY; e.g. 1990","The base year for the sector emissions. See `European Environment Agency (EEA) `_." - -eurostat_report_year ,--,"{2016,2017,2018}","The publication year of the Eurostat report. 2016 includes Bosnia and Herzegovina, 2017 does not" - emissions ,--,"{CO2, All greenhouse gases - (CO2 equivalent)}","Specify which sectoral emissions are taken into account. Data derived from EEA. Currently only CO2 is implemented." 
diff --git a/doc/configtables/existing_capacities.csv b/doc/configtables/existing_capacities.csv index 87519193..eacae35b 100644 --- a/doc/configtables/existing_capacities.csv +++ b/doc/configtables/existing_capacities.csv @@ -3,4 +3,5 @@ grouping_years_power ,--,A list of years,Intervals to group existing capacities grouping_years_heat ,--,A list of years below 2020,Intervals to group existing capacities for heat threshold_capacity ,MW,float,Capacities generators and links of below threshold are removed during add_existing_capacities +default_heating_lifetime ,years,int,Default lifetime for heating technologies conventional_carriers ,--,"Any subset of {uranium, coal, lignite, oil} ",List of conventional power plants to include in the sectoral network diff --git a/doc/configtables/industry.csv b/doc/configtables/industry.csv index fc1b3f0f..d1b560ed 100644 --- a/doc/configtables/industry.csv +++ b/doc/configtables/industry.csv @@ -17,6 +17,8 @@ HVC_primary_fraction,--,float,The fraction of high value chemicals (HVC) produce HVC_mechanical_recycling _fraction,--,float,The fraction of high value chemicals (HVC) produced using mechanical recycling HVC_chemical_recycling _fraction,--,float,The fraction of high value chemicals (HVC) produced using chemical recycling ,,, +sector_ratios_fraction_future,--,Dictionary with planning horizons as keys.,The fraction of total progress in fuel and process switching achieved in the industry sector. +basic_chemicals_without_NH3_production_today,Mt/a,float,"The amount of basic chemicals produced without ammonia (= 86 Mtethylene-equiv - 17 MtNH3)." HVC_production_today,MtHVC/a,float,"The amount of high value chemicals (HVC) produced. This includes ethylene, propylene and BTX. From `DECHEMA (2017) `_, Figure 16, page 107" Mwh_elec_per_tHVC _mechanical_recycling,MWh/tHVC,float,"The energy amount of electricity needed to produce a ton of high value chemical (HVC) using mechanical recycling. 
From SI of `Meys et al (2020) `_, Table S5, for HDPE, PP, PS, PET. LDPE would be 0.756." Mwh_elec_per_tHVC _chemical_recycling,MWh/tHVC,float,"The energy amount of electricity needed to produce a ton of high value chemical (HVC) using chemical recycling. The default value is based on pyrolysis and electric steam cracking. From `Material Economics (2019) `_, page 125" diff --git a/doc/configtables/licenses-sector.csv b/doc/configtables/licenses-sector.csv index d65d3b36..7f20b5a6 100644 --- a/doc/configtables/licenses-sector.csv +++ b/doc/configtables/licenses-sector.csv @@ -9,9 +9,8 @@ Swiss energy statistics from Swiss Federal Office of Energy,switzerland-sfoe/,un BASt emobility statistics,emobility/,unknown,http://www.bast.de/DE/Verkehrstechnik/Fachthemen/v2-verkehrszaehlung/Stundenwerte.html?nn=626916 BDEW heating profile,heat_load_profile_BDEW.csv,unknown,https://github.com/oemof/demandlib heating profiles for Aarhus,heat_load_profile_DK_AdamJensen.csv,unknown,Adam Jensen MA thesis at Aarhus University -George Lavidas wind/wave costs,WindWaveWEC_GLTB.xlsx,unknown,George Lavidas co2 budgets,co2_budget.csv,CC BY 4.0,https://arxiv.org/abs/2004.11009 -existing heating potentials,existing_infrastructure/existing_heating_raw.csv,unknown,https://ec.europa.eu/energy/studies/mapping-and-analyses-current-and-future-2020-2030-heatingcooling-fuel-deployment_en?redir=1 +existing heating potentials,existing_infrastructure/existing_heating_raw.csv,unknown,https://energy.ec.europa.eu/publications/mapping-and-analyses-current-and-future-2020-2030-heatingcooling-fuel-deployment-fossilrenewables-1_en IRENA existing VRE capacities,existing_infrastructure/{solar|onwind|offwind}_capcity_IRENA.csv,unknown,https://www.irena.org/Statistics/Download-Data USGS ammonia production,myb1-2017-nitro.xls,unknown,https://www.usgs.gov/centers/nmic/nitrogen-statistics-and-information hydrogen salt cavern potentials,h2_salt_caverns_GWh_per_sqkm.geojson,CC BY 
4.0,https://doi.org/10.1016/j.ijhydene.2019.12.161 https://doi.org/10.20944/preprints201910.0187.v1 diff --git a/doc/configtables/load.csv b/doc/configtables/load.csv index 6e98f881..ac666947 100644 --- a/doc/configtables/load.csv +++ b/doc/configtables/load.csv @@ -1,5 +1,4 @@ ,Unit,Values,Description -power_statistics,bool,"{true, false}",Whether to load the electricity consumption data of the ENTSOE power statistics (only for files from 2019 and before) or from the ENTSOE transparency data (only has load data from 2015 onwards). interpolate_limit,hours,integer,"Maximum gap size (consecutive nans) which interpolated linearly." time_shift_for_large_gaps,string,string,"Periods which are used for copying time-slices in order to fill large gaps of nans. Have to be valid ``pandas`` period strings." manual_adjustments,bool,"{true, false}","Whether to adjust the load data manually according to the function in :func:`manual_adjustment`." diff --git a/doc/configtables/offwind-ac.csv b/doc/configtables/offwind-ac.csv index 6b756799..b2533f04 100644 --- a/doc/configtables/offwind-ac.csv +++ b/doc/configtables/offwind-ac.csv @@ -2,15 +2,15 @@ cutout,--,"Should be a folder listed in the configuration ``atlite: cutouts:`` (e.g. 'europe-2013-era5') or reference an existing folder in the directory ``cutouts``. Source module must be ERA5.","Specifies the directory where the relevant weather data ist stored." resource,,, -- method,--,"Must be 'wind'","A superordinate technology type." --- turbine,--,"One of turbine types included in `atlite `_","Specifies the turbine type and its characteristic power curve." +-- turbine,--,"One of turbine types included in `atlite `_. Can be a string or a dictionary with years as keys which denote the year another turbine model becomes available.","Specifies the turbine type and its characteristic power curve." capacity_per_sqkm,:math:`MW/km^2`,float,"Allowable density of wind turbine placement." 
correction_factor,--,float,"Correction factor for capacity factor time series." excluder_resolution,m,float,"Resolution on which to perform geographical elibility analysis." corine,--,"Any *realistic* subset of the `CORINE Land Cover code list `_","Specifies areas according to CORINE Land Cover codes which are generally eligible for AC-connected offshore wind turbine placement." +luisa,--,"Any subset of the `LUISA Base Map codes in Annex 1 `_","Specifies areas according to the LUISA Base Map codes which are generally eligible for AC-connected offshore wind turbine placement." natura,bool,"{true, false}","Switch to exclude `Natura 2000 `_ natural protection areas. Area is excluded if ``true``." ship_threshold,--,float,"Ship density threshold from which areas are excluded." max_depth,m,float,"Maximum sea water depth at which wind turbines can be build. Maritime areas with deeper waters are excluded in the process of calculating the AC-connected offshore wind potential." min_shore_distance,m,float,"Minimum distance to the shore below which wind turbines cannot be build. Such areas close to the shore are excluded in the process of calculating the AC-connected offshore wind potential." max_shore_distance,m,float,"Maximum distance to the shore above which wind turbines cannot be build. Such areas close to the shore are excluded in the process of calculating the AC-connected offshore wind potential." -potential,--,"One of {'simple', 'conservative'}","Method to compute the maximal installable potential for a node; confer :ref:`renewableprofiles`" clip_p_max_pu,p.u.,float,"To avoid too small values in the renewables` per-unit availability time series values below this threshold are set to zero." 
diff --git a/doc/configtables/offwind-dc.csv b/doc/configtables/offwind-dc.csv index 1f72228a..7c537543 100644 --- a/doc/configtables/offwind-dc.csv +++ b/doc/configtables/offwind-dc.csv @@ -2,15 +2,15 @@ cutout,--,"Should be a folder listed in the configuration ``atlite: cutouts:`` (e.g. 'europe-2013-era5') or reference an existing folder in the directory ``cutouts``. Source module must be ERA5.","Specifies the directory where the relevant weather data ist stored." resource,,, -- method,--,"Must be 'wind'","A superordinate technology type." --- turbine,--,"One of turbine types included in `atlite `__","Specifies the turbine type and its characteristic power curve." +-- turbine,--,"One of turbine types included in `atlite `_. Can be a string or a dictionary with years as keys which denote the year another turbine model becomes available.","Specifies the turbine type and its characteristic power curve." capacity_per_sqkm,:math:`MW/km^2`,float,"Allowable density of wind turbine placement." correction_factor,--,float,"Correction factor for capacity factor time series." excluder_resolution,m,float,"Resolution on which to perform geographical elibility analysis." corine,--,"Any *realistic* subset of the `CORINE Land Cover code list `_","Specifies areas according to CORINE Land Cover codes which are generally eligible for AC-connected offshore wind turbine placement." +luisa,--,"Any subset of the `LUISA Base Map codes in Annex 1 `_","Specifies areas according to the LUISA Base Map codes which are generally eligible for DC-connected offshore wind turbine placement." natura,bool,"{true, false}","Switch to exclude `Natura 2000 `_ natural protection areas. Area is excluded if ``true``." ship_threshold,--,float,"Ship density threshold from which areas are excluded." max_depth,m,float,"Maximum sea water depth at which wind turbines can be build. Maritime areas with deeper waters are excluded in the process of calculating the AC-connected offshore wind potential." 
min_shore_distance,m,float,"Minimum distance to the shore below which wind turbines cannot be build." max_shore_distance,m,float,"Maximum distance to the shore above which wind turbines cannot be build." -potential,--,"One of {'simple', 'conservative'}","Method to compute the maximal installable potential for a node; confer :ref:`renewableprofiles`" clip_p_max_pu,p.u.,float,"To avoid too small values in the renewables` per-unit availability time series values below this threshold are set to zero." diff --git a/doc/configtables/onwind.csv b/doc/configtables/onwind.csv index ba9482e5..3b09214b 100644 --- a/doc/configtables/onwind.csv +++ b/doc/configtables/onwind.csv @@ -2,14 +2,17 @@ cutout,--,"Should be a folder listed in the configuration ``atlite: cutouts:`` (e.g. 'europe-2013-era5') or reference an existing folder in the directory ``cutouts``. Source module must be ERA5.","Specifies the directory where the relevant weather data ist stored." resource,,, -- method,--,"Must be 'wind'","A superordinate technology type." --- turbine,--,"One of turbine types included in `atlite `__","Specifies the turbine type and its characteristic power curve." +-- turbine,--,"One of turbine types included in `atlite `_. Can be a string or a dictionary with years as keys which denote the year another turbine model becomes available.","Specifies the turbine type and its characteristic power curve." capacity_per_sqkm,:math:`MW/km^2`,float,"Allowable density of wind turbine placement." corine,,, -- grid_codes,--,"Any subset of the `CORINE Land Cover code list `_","Specifies areas according to CORINE Land Cover codes which are generally eligible for wind turbine placement." -- distance,m,float,"Distance to keep from areas specified in ``distance_grid_codes``" -- distance_grid_codes,--,"Any subset of the `CORINE Land Cover code list `_","Specifies areas according to CORINE Land Cover codes to which wind turbines must maintain a distance specified in the setting ``distance``." 
+luisa,,, +-- grid_codes,--,"Any subset of the `LUISA Base Map codes in Annex 1 `_","Specifies areas according to the LUISA Base Map codes which are generally eligible for wind turbine placement." +-- distance,m,float,"Distance to keep from areas specified in ``distance_grid_codes``" +-- distance_grid_codes,--,"Any subset of the `LUISA Base Map codes in Annex 1 `_","Specifies areas according to the LUISA Base Map codes to which wind turbines must maintain a distance specified in the setting ``distance``." natura,bool,"{true, false}","Switch to exclude `Natura 2000 `_ natural protection areas. Area is excluded if ``true``." -potential,--,"One of {'simple', 'conservative'}","Method to compute the maximal installable potential for a node; confer :ref:`renewableprofiles`" clip_p_max_pu,p.u.,float,"To avoid too small values in the renewables` per-unit availability time series values below this threshold are set to zero." correction_factor,--,float,"Correction factor for capacity factor time series." excluder_resolution,m,float,"Resolution on which to perform geographical elibility analysis." diff --git a/doc/configtables/opts.csv b/doc/configtables/opts.csv index 8c8a706f..b133c718 100644 --- a/doc/configtables/opts.csv +++ b/doc/configtables/opts.csv @@ -1,13 +1,13 @@ -Trigger, Description, Definition, Status -``nH``; i.e. ``2H``-``6H``, Resample the time-resolution by averaging over every ``n`` snapshots, ``prepare_network``: `average_every_nhours() `_ and its `caller `__), In active use -``nSEG``; e.g. ``4380SEG``, "Apply time series segmentation with `tsam `_ package to ``n`` adjacent snapshots of varying lengths based on capacity factors of varying renewables, hydro inflow and load.", ``prepare_network``: apply_time_segmentation(), In active use -``Co2L``, Add an overall absolute carbon-dioxide emissions limit configured in ``electricity: co2limit``. 
If a float is appended an overall emission limit relative to the emission level given in ``electricity: co2base`` is added (e.g. ``Co2L0.05`` limits emissisions to 5% of what is given in ``electricity: co2base``), ``prepare_network``: `add_co2limit() `_ and its `caller `__, In active use -``Ep``, Add cost for a carbon-dioxide price configured in ``costs: emission_prices: co2`` to ``marginal_cost`` of generators (other emission types listed in ``network.carriers`` possible as well), ``prepare_network``: `add_emission_prices() `_ and its `caller `__, In active use -``Ept``, Add monthly cost for a carbon-dioxide price based on historical values built by the rule ``build_monthly_prices``, In active use -``CCL``, Add minimum and maximum levels of generator nominal capacity per carrier for individual countries. These can be specified in the file linked at ``electricity: agg_p_nom_limits`` in the configuration. File defaults to ``data/agg_p_nom_minmax.csv``., ``solve_network``, In active use -``EQ``, "Require each country or node to on average produce a minimal share of its total consumption itself. Example: ``EQ0.5c`` demands each country to produce on average at least 50% of its consumption; ``EQ0.5`` demands each node to produce on average at least 50% of its consumption.", ``solve_network``, In active use -``ATK``, "Require each node to be autarkic. Example: ``ATK`` removes all lines and links. ``ATKc`` removes all cross-border lines and links.", ``prepare_network``, In active use -``BAU``, Add a per-``carrier`` minimal overall capacity; i.e. at least ``40GW`` of ``OCGT`` in Europe; configured in ``electricity: BAU_mincapacities``, ``solve_network``: `add_opts_constraints() `__, Untested -``SAFE``, Add a capacity reserve margin of a certain fraction above the peak demand to which renewable generators and storage do *not* contribute. 
Ignores network., ``solve_network`` `add_opts_constraints() `__, Untested -``carrier+{c|p|m}factor``,"Alter the capital cost (``c``), installable potential (``p``) or marginal costs (``m``) of a carrier by a factor. Example: ``solar+c0.5`` reduces the capital cost of solar to 50\% of original values.", ``prepare_network``, In active use -``CH4L``,"Add an overall absolute gas limit. If configured in ``electricity: gaslimit`` it is given in MWh thermal, if a float is appended, the overall gaslimit is assumed to be given in TWh thermal (e.g. ``CH4L200`` limits gas dispatch to 200 TWh termal)", ``prepare_network``: ``add_gaslimit()``, In active use +Trigger, Description, Definition, Status +``nH``; i.e. ``2H``-``6H``, Resample the time-resolution by averaging over every ``n`` snapshots, ``prepare_network``: `average_every_nhours() `_ and its `caller `__), In active use +``nSEG``; e.g. ``4380SEG``,"Apply time series segmentation with `tsam `_ package to ``n`` adjacent snapshots of varying lengths based on capacity factors of varying renewables, hydro inflow and load.", ``prepare_network``: apply_time_segmentation(), In active use +``Co2L``,Add an overall absolute carbon-dioxide emissions limit configured in ``electricity: co2limit``. If a float is appended an overall emission limit relative to the emission level given in ``electricity: co2base`` is added (e.g. 
``Co2L0.05`` limits emissions to 5% of what is given in ``electricity: co2base``), ``prepare_network``: `add_co2limit() `_ and its `caller `__, In active use +``Ep``,Add cost for a carbon-dioxide price configured in ``costs: emission_prices: co2`` to ``marginal_cost`` of generators (other emission types listed in ``network.carriers`` possible as well), ``prepare_network``: `add_emission_prices() `_ and its `caller `__, In active use +``Ept``,Add monthly cost for a carbon-dioxide price based on historical values built by the rule ``build_monthly_prices``, In active use, +``CCL``,Add minimum and maximum levels of generator nominal capacity per carrier for individual countries. These can be specified in the file linked at ``electricity: agg_p_nom_limits`` in the configuration. File defaults to ``data/agg_p_nom_minmax.csv``., ``solve_network``, In active use +``EQ``,Require each country or node to on average produce a minimal share of its total consumption itself. Example: ``EQ0.5c`` demands each country to produce on average at least 50% of its consumption; ``EQ0.5`` demands each node to produce on average at least 50% of its consumption., ``solve_network``, In active use +``ATK``,Require each node to be autarkic. Example: ``ATK`` removes all lines and links. ``ATKc`` removes all cross-border lines and links., ``prepare_network``, In active use +``BAU``,Add a per-``carrier`` minimal overall capacity; i.e. at least ``40GW`` of ``OCGT`` in Europe; configured in ``electricity: BAU_mincapacities``, ``solve_network``: `add_opts_constraints() `__, Untested +``SAFE``,Add a capacity reserve margin of a certain fraction above the peak demand to which renewable generators and storage do *not* contribute. Ignores network., ``solve_network`` `add_opts_constraints() `__, Untested +``carrier+{c|p|m}factor``,"Alter the capital cost (``c``), installable potential (``p``) or marginal costs (``m``) of a carrier by a factor. 
Example: ``solar+c0.5`` reduces the capital cost of solar to 50\% of original values.", ``prepare_network``, In active use +``CH4L``,"Add an overall absolute gas limit. If configured in ``electricity: gaslimit`` it is given in MWh thermal, if a float is appended, the overall gaslimit is assumed to be given in TWh thermal (e.g. ``CH4L200`` limits gas dispatch to 200 TWh thermal)", ``prepare_network``: ``add_gaslimit()``, In active use diff --git a/doc/configtables/plotting.csv b/doc/configtables/plotting.csv index ed5d9c9f..82fc203c 100644 --- a/doc/configtables/plotting.csv +++ b/doc/configtables/plotting.csv @@ -1,6 +1,9 @@ ,Unit,Values,Description map,,, -- boundaries,°,"[x1,x2,y1,y2]",Boundaries of the map plots in degrees latitude (y) and longitude (x) +projection,,,, +-- name,--,"Valid Cartopy projection name","See https://scitools.org.uk/cartopy/docs/latest/reference/projections.html for list of available projections." +-- args,--,--,"Other entries under 'projection' are passed as keyword arguments to the projection constructor, e.g. ``central_longitude: 10.``." costs_max,bn Euro,float,Upper y-axis limit in cost bar plots. costs_threshold,bn Euro,float,Threshold below which technologies will not be shown in cost bar plots. energy_max,TWh,float,Upper y-axis limit in energy bar plots. diff --git a/doc/configtables/run.csv b/doc/configtables/run.csv index 90cf65ad..2835a324 100644 --- a/doc/configtables/run.csv +++ b/doc/configtables/run.csv @@ -1,5 +1,8 @@ ,Unit,Values,Description -name,--,"any string","Specify a name for your run. Results will be stored under this name." -disable_progrssbar,bool,"{true, false}","Switch to select whether progressbar should be disabled." -shared_resources,bool,"{true, false}","Switch to select whether resources should be shared across runs." +name,--,str/list,"Specify a name for your run. Results will be stored under this name. 
If ``scenario: enable:`` is set to ``true``, the name must contain a subset of scenario names defined in ``scenario: file:``. If the name is 'all', all defined scenarios will be run." +scenarios,,, +-- enable,bool,"{true, false}","Switch to select whether workflow should generate scenarios based on ``file``." +-- file,str,,"Path to the scenario yaml file. The scenario file contains config overrides for each scenario. In order to be taken into account, ``run: scenarios`` has to be set to ``true`` and ``run: name`` has to be a subset of top level keys given in the scenario file. In order to automatically create a `scenario.yaml` file based on a combination of settings, alter and use the ``config/create_scenarios.py`` script in the ``config`` directory." +disable_progressbar,bool,"{true, false}","Switch to select whether progressbar should be disabled." +shared_resources,bool/str,,"Switch to select whether resources should be shared across runs. If a string is passed, this is used as a subdirectory name for shared resources. If set to 'base', only resources before creating the elec.nc file are shared." shared_cutouts,bool,"{true, false}","Switch to select whether cutouts should be shared across runs." 
diff --git a/doc/configtables/sector-opts.csv b/doc/configtables/sector-opts.csv index ea39c3b0..fc9e8c10 100644 --- a/doc/configtables/sector-opts.csv +++ b/doc/configtables/sector-opts.csv @@ -7,5 +7,5 @@ Trigger, Description, Definition, Status ``B``,Add biomass,,In active use ``I``,Add industry sector,,In active use ``A``,Add agriculture sector,,In active use -``dist``+``n``,Add distribution grid with investment costs of ``n`` times costs in ``data/costs_{cost_year}.csv``,,In active use +``dist``+``n``,Add distribution grid with investment costs of ``n`` times costs in ``resources/costs_{cost_year}.csv``,,In active use ``seq``+``n``,Sets the CO2 sequestration potential to ``n`` Mt CO2 per year,,In active use diff --git a/doc/configtables/sector.csv b/doc/configtables/sector.csv index 856ea074..1f8bb030 100644 --- a/doc/configtables/sector.csv +++ b/doc/configtables/sector.csv @@ -1,4 +1,9 @@ ,Unit,Values,Description +transport,--,"{true, false}",Flag to include transport sector. +heating,--,"{true, false}",Flag to include heating sector. +biomass,--,"{true, false}",Flag to include biomass sector. +industry,--,"{true, false}",Flag to include industry sector. +agriculture,--,"{true, false}",Flag to include agriculture sector. district_heating,--,,`prepare_sector_network.py `_ -- potential,--,float,maximum fraction of urban demand which can be supplied by district heating -- progress,--,Dictionary with planning horizons as keys., Increase of today's district heating demand to potential maximum district heating share. Progress = 0 means today's district heating share. Progress = 1 means maximum fraction of urban demand is supplied by district heating @@ -62,16 +67,17 @@ tes,--,"{true, false}",Add option for storing thermal energy in large water pits tes_tau,,,The time constant used to calculate the decay of thermal energy in thermal energy storage (TES): 1- :math:`e^{-1/24τ}`. 
-- decentral,days,float,The time constant in decentralized thermal energy storage (TES) -- central,days,float,The time constant in centralized thermal energy storage (TES) -boilers,--,"{true, false}",Add option for transforming electricity into heat using resistive heater +boilers,--,"{true, false}",Add option for transforming gas into heat using gas boilers +resistive_heaters,--,"{true, false}",Add option for transforming electricity into heat using resistive heaters (independently from gas boilers) oil_boilers,--,"{true, false}",Add option for transforming oil into heat using boilers biomass_boiler,--,"{true, false}",Add option for transforming biomass into heat using boilers +overdimension_individual_heating,--,"float",Add option for overdimensioning individual heating systems by a certain factor. This allows them to cover heat demand peaks e.g. 10% higher than those in the data with a setting of 1.1. chp,--,"{true, false}",Add option for using Combined Heat and Power (CHP) micro_chp,--,"{true, false}",Add option for using Combined Heat and Power (CHP) for decentral areas. solar_thermal,--,"{true, false}",Add option for using solar thermal to generate heat. solar_cf_correction,--,float,The correction factor for the value provided by the solar thermal profile calculations marginal_cost_storage,currency/MWh ,float,The marginal cost of discharging batteries in distributed grids methanation,--,"{true, false}",Add option for transforming hydrogen and CO2 into methane using methanation. -helmeth,--,"{true, false}",Add option for transforming power into gas using HELMETH (Integrated High-Temperature ELectrolysis and METHanation for Effective Power to Gas Conversion) coal_cc,--,"{true, false}",Add option for coal CHPs with carbon capture dac,--,"{true, false}",Add option for Direct Air Capture (DAC) co2_vent,--,"{true, false}",Add option for vent out CO2 from storages to the atmosphere. 
@@ -80,6 +86,8 @@ hydrogen_fuel_cell,--,"{true, false}",Add option to include hydrogen fuel cell f hydrogen_turbine,--,"{true, false}",Add option to include hydrogen turbine for re-electrification. Assuming OCGT technology costs SMR,--,"{true, false}",Add option for transforming natural gas into hydrogen and CO2 using Steam Methane Reforming (SMR) SMR CC,--,"{true, false}",Add option for transforming natural gas into hydrogen and CO2 using Steam Methane Reforming (SMR) and Carbon Capture (CC) +regional_methanol_demand,--,"{true, false}",Spatially resolve methanol demand. Set to true if regional CO2 constraints needed. +regional_oil_demand,--,"{true, false}",Spatially resolve oil demand. Set to true if regional CO2 constraints needed. regional_co2 _sequestration_potential,,, -- enable,--,"{true, false}",Add option for regionally-resolved geological carbon dioxide sequestration potentials based on `CO2StoP `_. -- attribute,--,string,Name of the attribute for the sequestration potential @@ -89,9 +97,11 @@ regional_co2 _sequestration_potential,,, -- years_of_storage,years,float,The years until potential exhausted at optimised annual rate co2_sequestration_potential,MtCO2/a,float,The potential of sequestering CO2 in Europe per year co2_sequestration_cost,currency/tCO2,float,The cost of sequestering a ton of CO2 +co2_sequestration_lifetime,years,int,The lifetime of a CO2 sequestration site co2_spatial,--,"{true, false}","Add option to spatially resolve carrier representing stored carbon dioxide. This allows for more detailed modelling of CCUTS, e.g. regarding the capturing of industrial process emissions, usage as feedstock for electrofuels, transport of carbon dioxide, and geological sequestration sites." 
,,, co2network,--,"{true, false}",Add option for planning a new carbon dioxide transmission network +co2_network_cost_factor,p.u.,float,The cost factor for the capital cost of the carbon dioxide transmission network ,,, cc_fraction,--,float,The default fraction of CO2 captured with post-combustion capture hydrogen_underground _storage,--,"{true, false}",Add options for storing hydrogen underground. Storage potential depends regionally. @@ -104,10 +114,16 @@ min_part_load _methanolisation,per unit of p_nom ,float,The minimum unit dispatc use_fischer_tropsch _waste_heat,--,"{true, false}",Add option for using waste heat of Fischer Tropsch in district heating networks use_fuel_cell_waste_heat,--,"{true, false}",Add option for using waste heat of fuel cells in district heating networks use_electrolysis_waste _heat,--,"{true, false}",Add option for using waste heat of electrolysis in district heating networks +electricity_transmission _grid,--,"{true, false}",Switch for enabling/disabling the electricity transmission grid. electricity_distribution _grid,--,"{true, false}",Add a simplified representation of the exchange capacity between transmission and distribution grid level through a link. electricity_distribution _grid_cost_factor,,,Multiplies the investment cost of the electricity distribution grid ,,, electricity_grid _connection,--,"{true, false}",Add the cost of electricity grid connection for onshore wind and solar +transmission_efficiency,,,Section to specify transmission losses or compression energy demands of bidirectional links. Splits them into two capacity-linked unidirectional links. +-- {carrier},--,str,The carrier of the link. +-- -- efficiency_static,p.u.,float,Length-independent transmission efficiency. +-- -- efficiency_per_1000km,p.u. per 1000 km,float,Length-dependent transmission efficiency ($\eta^{\text{length}}$) +-- -- compression_per_1000km,p.u. 
per 1000 km,float,Length-dependent electricity demand for compression ($\eta \cdot \text{length}$) implemented as multi-link to local electricity bus. H2_network,--,"{true, false}",Add option for new hydrogen pipelines gas_network,--,"{true, false}","Add existing natural gas infrastructure, incl. LNG terminals, production and entry-points. The existing gas network is added with a lossless transport model. A length-weighted `k-edge augmentation algorithm `_ can be run to add new candidate gas pipelines such that all regions of the model can be connected to the gas network. When activated, all the gas demands are regionally disaggregated as well." H2_retrofit,--,"{true, false}",Add option for retrofiting existing pipelines to transport hydrogen. @@ -118,6 +134,14 @@ gas_distribution_grid _cost_factor,,,Multiplier for the investment cost of the g ,,, biomass_spatial,--,"{true, false}",Add option for resolving biomass demand regionally biomass_transport,--,"{true, false}",Add option for transporting solid biomass between nodes +biogas_upgrading_cc,--,"{true, false}",Add option to capture CO2 from biomass upgrading conventional_generation,,,Add a more detailed description of conventional carriers. Any power generation requires the consumption of fuel from nodes representing that fuel. biomass_to_liquid,--,"{true, false}",Add option for transforming solid biomass into liquid fuel with the same properties as oil biosng,--,"{true, false}",Add option for transforming solid biomass into synthesis gas with the same properties as natural gas +limit_max_growth,,, +-- enable,--,"{true, false}",Add option to limit the maximum growth of a carrier +-- factor,p.u.,float,The maximum growth factor of a carrier (e.g. 
1.3 allows 30% larger than max historic growth) +-- max_growth,,, +-- -- {carrier},GW,float,The historic maximum growth of a carrier +-- max_relative_growth, +-- -- {carrier},p.u.,float,The historic maximum relative growth of a carrier diff --git a/doc/configtables/snapshots.csv b/doc/configtables/snapshots.csv index d60c78dc..4be0439b 100644 --- a/doc/configtables/snapshots.csv +++ b/doc/configtables/snapshots.csv @@ -1,4 +1,4 @@ -,Unit,Values,Description -start,--,"str or datetime-like; e.g. YYYY-MM-DD","Left bound of date range" -end,--,"str or datetime-like; e.g. YYYY-MM-DD","Right bound of date range" -inclusive,--,"One of {'neither', 'both', ‘left’, ‘right’}","Make the time interval closed to the ``left``, ``right``, or both sides ``both`` or neither side ``None``." +,Unit,Values,Description +start,--,str or datetime-like; e.g. YYYY-MM-DD,Left bound of date range +end,--,str or datetime-like; e.g. YYYY-MM-DD,Right bound of date range +inclusive,--,"One of {'neither', 'both', ‘left’, ‘right’}","Make the time interval closed to the ``left``, ``right``, or both sides ``both`` or neither side ``None``." diff --git a/doc/configtables/solar.csv b/doc/configtables/solar.csv index 803445d5..18587694 100644 --- a/doc/configtables/solar.csv +++ b/doc/configtables/solar.csv @@ -2,14 +2,14 @@ cutout,--,"Should be a folder listed in the configuration ``atlite: cutouts:`` (e.g. 'europe-2013-era5') or reference an existing folder in the directory ``cutouts``. Source module can be ERA5 or SARAH-2.","Specifies the directory where the relevant weather data ist stored that is specified at ``atlite/cutouts`` configuration. Both ``sarah`` and ``era5`` work." resource,,, -- method,--,"Must be 'pv'","A superordinate technology type." --- panel,--,"One of {'Csi', 'CdTe', 'KANENA'} as defined in `atlite `__","Specifies the solar panel technology and its characteristic attributes." +-- panel,--,"One of {'Csi', 'CdTe', 'KANENA'} as defined in `atlite `_ . 
Can be a string or a dictionary with years as keys which denote the year another turbine model becomes available.","Specifies the solar panel technology and its characteristic attributes." -- orientation,,, -- -- slope,°,"Realistically any angle in [0., 90.]","Specifies the tilt angle (or slope) of the solar panel. A slope of zero corresponds to the face of the panel aiming directly overhead. A positive tilt angle steers the panel towards the equator." -- -- azimuth,°,"Any angle in [0., 360.]","Specifies the `azimuth `_ orientation of the solar panel. South corresponds to 180.°." capacity_per_sqkm,:math:`MW/km^2`,float,"Allowable density of solar panel placement." correction_factor,--,float,"A correction factor for the capacity factor (availability) time series." corine,--,"Any subset of the `CORINE Land Cover code list `_","Specifies areas according to CORINE Land Cover codes which are generally eligible for solar panel placement." +luisa,--,"Any subset of the `LUISA Base Map codes in Annex 1 `_","Specifies areas according to the LUISA Base Map codes which are generally eligible for solar panel placement." natura,bool,"{true, false}","Switch to exclude `Natura 2000 `_ natural protection areas. Area is excluded if ``true``." -potential,--,"One of {'simple', 'conservative'}","Method to compute the maximal installable potential for a node; confer :ref:`renewableprofiles`" clip_p_max_pu,p.u.,float,"To avoid too small values in the renewables` per-unit availability time series values below this threshold are set to zero." excluder_resolution,m,float,"Resolution on which to perform geographical elibility analysis." diff --git a/doc/configtables/solving.csv b/doc/configtables/solving.csv index 45d50d84..7189399b 100644 --- a/doc/configtables/solving.csv +++ b/doc/configtables/solving.csv @@ -6,12 +6,19 @@ options,,, -- skip_iterations,bool,"{'true','false'}","Skip iterating, do not update impedances of branches. Defaults to true." 
-- rolling_horizon,bool,"{'true','false'}","Whether to optimize the network in a rolling horizon manner, where the snapshot range is split into slices of size `horizon` which are solved consecutively." -- seed,--,int,Random seed for increased deterministic behaviour. +-- custom_extra_functionality,--,str,Path to a Python file with custom extra functionality code to be injected into the solving rules of the workflow relative to ``rules`` directory. +-- io_api,string,"{'lp','mps','direct'}",Passed to linopy and determines the API used to communicate with the solver. With the ``'lp'`` and ``'mps'`` options linopy passes a file to the solver; with the ``'direct'`` option (only supported for HIGHS and Gurobi) linopy uses an in-memory python API resulting in better performance. -- track_iterations,bool,"{'true','false'}",Flag whether to store the intermediate branch capacities and objective function values are recorded for each iteration in ``network.lines['s_nom_opt_X']`` (where ``X`` labels the iteration) -- min_iterations,--,int,Minimum number of solving iterations in between which resistance and reactence (``x/r``) are updated for branches according to ``s_nom_opt`` of the previous run. -- max_iterations,--,int,Maximum number of solving iterations in between which resistance and reactence (``x/r``) are updated for branches according to ``s_nom_opt`` of the previous run. -- transmission_losses,int,[0-9],"Add piecewise linear approximation of transmission losses based on n tangents. Defaults to 0, which means losses are ignored." -- linearized_unit_commitment,bool,"{'true','false'}",Whether to optimise using the linearized unit commitment formulation. -- horizon,--,int,Number of snapshots to consider in each iteration. Defaults to 100. +constraints ,,, +-- CCL,bool,"{'true','false'}",Add minimum and maximum levels of generator nominal capacity per carrier for individual countries. 
These can be specified in the file linked at ``electricity: agg_p_nom_limits`` in the configuration. File defaults to ``data/agg_p_nom_minmax.csv``. +-- EQ,bool/string,"{'false',`n(c| )``; i.e. ``0.5``-``0.7c``}",Require each country or node to on average produce a minimal share of its total consumption itself. Example: ``EQ0.5c`` demands each country to produce on average at least 50% of its consumption; ``EQ0.5`` demands each node to produce on average at least 50% of its consumption. +-- BAU,bool,"{'true','false'}",Add a per-``carrier`` minimal overall capacity; i.e. at least ``40GW`` of ``OCGT`` in Europe; configured in ``electricity: BAU_mincapacities`` +-- SAFE,bool,"{'true','false'}",Add a capacity reserve margin of a certain fraction above the peak demand to which renewable generators and storage do *not* contribute. Ignores network. solver,,, -- name,--,"One of {'gurobi', 'cplex', 'cbc', 'glpk', 'ipopt'}; potentially more possible",Solver to use for optimisation problems in the workflow; e.g. clustering and linear optimal power flow. -- options,--,Key listed under ``solver_options``.,Link to specific parameter settings. diff --git a/doc/configuration.rst b/doc/configuration.rst index ceda1141..dae91380 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -9,7 +9,7 @@ Configuration ########################################## -PyPSA-Eur has several configuration options which are documented in this section and are collected in a ``config/config.yaml`` file located in the root directory. Users should copy the provided default configuration (``config/config.default.yaml``) and amend their own modifications and assumptions in the user-specific configuration file (``config/config.yaml``); confer installation instructions at :ref:`defaultconfig`. 
+PyPSA-Eur has several configuration options which are documented in this section and are collected in a ``config/config.yaml`` file. This file defines deviations from the default configuration (``config/config.default.yaml``); confer installation instructions at :ref:`defaultconfig`. .. _toplevel_cf: @@ -383,7 +383,7 @@ overwrite the existing values. .. literalinclude:: ../config/config.default.yaml :language: yaml - :start-after: type: + :start-after: # docs-load :end-before: # docs .. csv-table:: @@ -561,6 +561,21 @@ The list of available biomass is given by the category in `ENSPRESO_BIOMASS `_ and then -saved to a file ``data/costs_{year}.csv``. The ``config/config.yaml`` provides options +saved to a file ``resources/costs_{year}.csv``. The ``config/config.yaml`` provides options to choose a reference year and use a specific version of the repository. .. literalinclude:: ../config/config.default.yaml @@ -50,7 +50,7 @@ Modifying Assumptions Some cost assumptions (e.g. marginal cost and capital cost) can be directly set in the ``config/config.yaml`` (cf. Section :ref:`costs_cf` in :ref:`config`). To change cost assumptions in more detail, make a copy of -``data/costs_{year}.csv`` and reference the new cost file in the ``Snakefile``: +``resources/costs_{year}.csv`` and reference the new cost file in the ``Snakefile``: .. literalinclude:: ../Snakefile :start-at: COSTS diff --git a/doc/foresight.rst b/doc/foresight.rst index dd1e0ecc..c749c84c 100644 --- a/doc/foresight.rst +++ b/doc/foresight.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2021-2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2021-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -41,7 +41,7 @@ Perfect foresight scenarios .. warning:: - Perfect foresight is currently implemented as a first test version. + Perfect foresight is currently implemented as an experimental test version. 
For running perfect foresight scenarios, you can adjust the ``config/config.perfect.yaml``: diff --git a/doc/img/intro-workflow.png b/doc/img/intro-workflow.png index da2c06d8..27b5a389 100644 Binary files a/doc/img/intro-workflow.png and b/doc/img/intro-workflow.png differ diff --git a/doc/index.rst b/doc/index.rst index d30dd8b9..acff820b 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -35,6 +35,8 @@ PyPSA-Eur: A Sector-Coupled Open Optimisation Model of the European Energy Syste :target: https://stackoverflow.com/questions/tagged/pypsa :alt: Stackoverflow +| + PyPSA-Eur is an open model dataset of the European energy system at the transmission network level that covers the full ENTSO-E area. It covers demand and supply for all energy sectors. From version v0.8.0, PyPSA-Eur includes all @@ -116,7 +118,7 @@ of the individual parts. topics we are working on. Please feel free to help or make suggestions. This project is currently maintained by the `Department of Digital -Transformation in Energy Systems `_ at the +Transformation in Energy Systems `_ at the `Technische Universität Berlin `_. Previous versions were developed within the `IAI `_ at the `Karlsruhe Institute of Technology (KIT) `_ which was funded by @@ -209,24 +211,6 @@ If you want to cite a specific PyPSA-Eur version, each release of PyPSA-Eur is s :target: https://doi.org/10.5281/zenodo.3520874 -Pre-Built Networks as a Dataset -=============================== - -There are pre-built networks available as a dataset on Zenodo as well for every release of PyPSA-Eur. - -.. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.3601881.svg - :target: https://doi.org/10.5281/zenodo.3601881 - -The included ``.nc`` files are PyPSA network files which can be imported with PyPSA via: - -.. 
code:: python - - import pypsa - - filename = "elec_s_1024_ec.nc" # example - n = pypsa.Network(filename) - - Operating Systems ================= diff --git a/doc/installation.rst b/doc/installation.rst index 01fdafeb..fbabfd15 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -31,7 +31,7 @@ Install Python Dependencies PyPSA-Eur relies on a set of other Python packages to function. We recommend using the package manager `mamba `_ to install them and manage your environments. -For instructions for your operating system follow the ``mamba`` `installation guide `_. +For instructions for your operating system follow the ``mamba`` `installation guide `_. You can also use ``conda`` equivalently. The package requirements are curated in the `envs/environment.yaml `_ file. @@ -79,31 +79,9 @@ Nevertheless, you can still use open-source solvers for smaller problems. `Instructions how to install a solver in the documentation of PyPSA `_ .. note:: - The rules :mod:`cluster_network` and :mod:`simplify_network` solve a quadratic optimisation problem for clustering. - The open-source solvers Cbc and GlPK cannot handle this. A fallback to Ipopt is implemented in this case, but requires - it to be installed. For an open-source solver setup install in your ``conda`` environment on OSX/Linux - - .. code:: bash - - mamba activate pypsa-eur - mamba install -c conda-forge ipopt coincbc - - and on Windows - - .. code:: bash - - mamba activate pypsa-eur - mamba install -c conda-forge ipopt glpk - - For HiGHS, run - - .. code:: bash - - mamba activate pypsa-eur - mamba install -c conda-forge ipopt - pip install highspy - - For Gurobi, run + The rules :mod:`cluster_network` and :mod:`simplify_network` solve a mixed-integer quadratic optimisation problem for clustering. 
+ The open-source solvers HiGHS, Cbc and GlPK cannot handle this. A fallback to SCIP is implemented in this case. + For an open-source solver setup install in your ``conda`` environment on OSX/Linux. To install the default solver Gurobi, run .. code:: bash @@ -118,11 +96,10 @@ Nevertheless, you can still use open-source solvers for smaller problems. Handling Configuration Files ============================ -PyPSA-Eur has several configuration options that must be specified in a -``config/config.yaml`` file located in the root directory. An example configuration -``config/config.default.yaml`` is maintained in the repository, which will be used to -automatically create your customisable ``config/config.yaml`` on first use. More -details on the configuration options are in :ref:`config`. +PyPSA-Eur has several configuration options that users can specify in a +``config/config.yaml`` file. The default configuration +``config/config.default.yaml`` is maintained in the repository. More details on +the configuration options are in :ref:`config`. You can also use ``snakemake`` to specify another file, e.g. ``config/config.mymodifications.yaml``, to update the settings of the ``config/config.yaml``. @@ -130,8 +107,3 @@ You can also use ``snakemake`` to specify another file, e.g. .. code:: bash .../pypsa-eur % snakemake -call --configfile config/config.mymodifications.yaml - -.. warning:: - Users are advised to regularly check their own ``config/config.yaml`` against changes - in the ``config/config.default.yaml`` when pulling a new version from the remote - repository. diff --git a/doc/introduction.rst b/doc/introduction.rst index df060723..d271391c 100644 --- a/doc/introduction.rst +++ b/doc/introduction.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -74,7 +74,7 @@ what data to retrieve and what files to produce. 
Details are explained in :ref:`wildcards` and :ref:`scenario`. The model also has several further configuration options collected in the -``config/config.yaml`` file located in the root directory, which that are not part of +``config/config.default.yaml`` file located in the root directory, which that are not part of the scenarios. Options are explained in :ref:`config`. Folder Structure @@ -89,8 +89,8 @@ Folder Structure - ``results``: Stores the solved PyPSA network data, summary files and plots. - ``logs``: Stores log files. - ``benchmarks``: Stores ``snakemake`` benchmarks. -- ``test``: Includes the test configuration files used for continuous integration. - ``doc``: Includes the documentation of PyPSA-Eur. +- ``graphics``: Includes some graphics for the documentation of PyPSA-Eur. System Requirements =================== diff --git a/doc/licenses.rst b/doc/licenses.rst index beb6f5b8..74640ea5 100644 --- a/doc/licenses.rst +++ b/doc/licenses.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2023-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 diff --git a/doc/limitations.rst b/doc/limitations.rst index a67fad0c..2aa8ecfe 100644 --- a/doc/limitations.rst +++ b/doc/limitations.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 diff --git a/doc/make.bat b/doc/make.bat index 3037f934..d64ffdc8 100644 --- a/doc/make.bat +++ b/doc/make.bat @@ -1,4 +1,4 @@ -REM SPDX-FileCopyrightText: 2019-2023 The PyPSA-Eur Authors +REM SPDX-FileCopyrightText: 2019-2024 The PyPSA-Eur Authors REM SPDX-License-Identifier: MIT @ECHO OFF diff --git a/doc/plotting.rst b/doc/plotting.rst index 895eab3b..a5229d8d 100644 --- a/doc/plotting.rst +++ b/doc/plotting.rst @@ -1,5 +1,5 @@ .. 
- SPDX-FileCopyrightText: 2019-2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -22,7 +22,22 @@ Rule ``plot_summary`` .. _map_plot: -Rule ``plot_network`` -======================== +Rule ``plot_power_network`` +=========================== -.. automodule:: plot_network +.. automodule:: plot_power_network + +Rule ``plot_power_network_perfect`` +=================================== + +.. automodule:: plot_power_network_perfect + +Rule ``plot_hydrogen_network`` +============================== + +.. automodule:: plot_hydrogen_network + +Rule ``plot_gas_network`` +========================= + +.. automodule:: plot_gas_network diff --git a/doc/preparation.rst b/doc/preparation.rst index 5cdc8031..bb55ba6b 100644 --- a/doc/preparation.rst +++ b/doc/preparation.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -94,6 +94,13 @@ Rule ``build_electricity_demand`` .. automodule:: build_electricity_demand +.. _monthlyprices: + +Rule ``build_monthly_prices`` +============================= + +.. automodule:: build_monthly_prices + .. _ship: Rule ``build_ship_raster`` @@ -102,6 +109,12 @@ Rule ``build_ship_raster`` .. automodule:: build_ship_raster +.. _availabilitymatrixmdua: + +Rule ``determine_availability_matrix_MD_UA`` +============================================ + +.. automodule:: determine_availability_matrix_MD_UA .. 
_renewableprofiles: diff --git a/doc/publications.bib b/doc/publications.bib index 4be6676a..5e1ee364 100644 --- a/doc/publications.bib +++ b/doc/publications.bib @@ -1,5 +1,5 @@ @Comment{ -SPDX-FileCopyrightText: 2023 The PyPSA-Eur Authors +SPDX-FileCopyrightText: 2023-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC0-1.0 } diff --git a/doc/publications.rst b/doc/publications.rst index c824873e..f6d7986b 100644 --- a/doc/publications.rst +++ b/doc/publications.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2023-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 diff --git a/doc/release_notes.rst b/doc/release_notes.rst index d7931f0e..4500cbc9 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -10,39 +10,322 @@ Release Notes Upcoming Release ================ -* Pin ``snakemake`` version to below 8.0.0, as the new version is not yet - supported by ``pypsa-eur``. +* Corrected a bug leading to power plants operating after their DateOut + (https://github.com/PyPSA/pypsa-eur/pull/958). Added additional grouping years + before 1980. -* Updated Global Energy Monitor LNG terminal data to March 2023 version. +* The Eurostat data was updated to the 2023 version in :mod:`build_energy_totals`. -* For industry distribution, use EPRTR as fallback if ETS data is not available. +* The latest `Swiss energy totals + `_ + have been updated to the 2023 version. -* The minimum capacity for renewable generators when using the myopic option has been fixed. +* The JRC-IDEES data is only available until 2015. For energy totals years (``energy: energy_totals_year``) after + 2015, the data scaled using the ratio of Eurostat data reported for the energy + totals year and 2015. 
-* Files downloaded from zenodo are now write-protected to prevent accidental re-download. +* The default energy totals year (``energy: energy_totals_year``) was updated to 2019. -* Files extracted from sector-coupled data bundle have been moved from ``data/`` to ``data/sector-bundle``. +* Upgrade default techno-economic assumptions to ``technology-data`` v0.8.1. -* New feature multi-decade optimisation with perfect foresight. +* Linearly interpolate missing investment periods in year-dependent + configuration options. -* It is now possible to specify years for biomass potentials which do not exist - in the JRC-ENSPRESO database, e.g. 2037. These are linearly interpolated. +* Added new scenario management that supports the simultaneous execution of + multiple scenarios with a single ``snakemake`` call. For this purpose, a + ``scenarios.yaml`` file is introduced which contains customizable scenario + names with configuration overrides. To enable it, set the ``run: scenarios: + true`` and define the list of scenario names to run under ``run: name:`` in + the configuration file. The latter must be a subset of toplevel keys in the + scenario file. -* In pathway mode, the biomass potential is linked to the investment year. + - To get started, a scenarios template file ``config/scenarios.template.yaml`` + is included in the repository, which is copied to ``config/scenarios.yaml`` + on first use. -* Rule ``purge`` now initiates a dialog to confirm if purge is desired. + - The scenario file can be changed via ``run: scenarios: file:``. -* Rule ``retrieve_irena`` get updated values for renewables capacities. + - If scenario management is activated with ``run: scenarios: enable: true``, a + new wildcard ``{run}`` is introduced. This means that the configuration + settings may depend on the new ``{run}`` wildcard. Therefore, a new + ``config_provider()`` function is used in the ``Snakefile`` and ``.smk`` + files, which takes wildcard values into account. 
The calls to the ``config`` + object have been reduced in ``.smk`` files since there is no awareness of + wildcard values outside rule definitions. -* Rule ``retrieve_wdpa`` updated to not only check for current and previous, but also potentially next months dataset availability. + - The scenario files can also be programmatically created using the template + script ``config/create_scenarios.py``. This script can be run with + ``snakemake -j1 create_scenarios`` and creates the scenarios file referenced + under ``run: scenarios: file:``. -* Split configuration to enable SMR and SMR CC. + - The setting ``run: name: all`` will run all scenarios in + ``config/scenarios.yaml``. Otherwise, it will run those passed as list in + ``run: name:`` as long as ``run: scenarios: enable: true``. -* The configuration setting for country focus weights when clustering the - network has been moved from ``focus_weights:`` to ``clustering: - focus_weights:``. Backwards compatibility to old config files is maintained. + - The setting ``run: shared_resources:`` indicates via a boolean whether the + resources should be encapsulated by the ``run: name:``. The special setting + ``run: shared_resources: base`` shares resources until ``add_electricity`` + that do not contain wildcards other than ``{"technology", "year", + "scope"}``. -* The ``mock_snakemake`` function can now be used with a Snakefile from a different directory using the new ``root_dir`` argument. + - Added new configuration options for all ``{opts}`` and ``{sector_opts}`` + wildcard values to create a unique configuration file (``config.yaml``) per + PyPSA network file. This is done with the help of a new function + ``update_config_from_wildcards()`` which parses configuration settings from + wildcards and updates the ``snakemake.config`` object. These updated + configuration settings are used in the scripts rather than directly parsed + values from ``snakemake.wildcards``. 
+ + - The cost data was moved from ``data/costs_{year}.csv`` to + ``resources/costs_{year}.csv`` since it depends on configuration settings. + The ``retrieve_cost_data`` rule was changed to calling a Python script. + + - Moved time clustering settings to ``clustering: temporal:`` from + ``snapshots:`` so that the latter is only used to define the + ``pandas.DatetimeIndex`` which simplifies the scenario management. + + - Collection rules get a new wildcard ``run=config["run"]["name"]`` so they + can collect outputs across different scenarios. + + - **Warning:** One caveat remains for the scenario management with myopic or + perfect foresight pathway optimisation. The first investment period must be + shared across all scenarios. The reason is that the ``wildcard_constraints`` + defined for the rule ``add_existing_baseyear`` do not accept wildcard-aware + input functions (cf. + `https://github.com/snakemake/snakemake/issues/2703`_). + +* The outputs of the rule ``retrieve_gas_infrastructure_data`` no longer + marked as ``protected()`` as the download size is small. + +* Bugfix: allow modelling sector-coupled landlocked regions. (Fixed handling of offshore wind.) + +PyPSA-Eur 0.10.0 (19th February 2024) +===================================== + +**New Features** + +* Improved representation of industry transition pathways. A new script was + added to interpolate industry sector ratios from today's status quo to future + systems (i.e. specific emissions and demands for energy and feedstocks). For + each country we gradually switch industry processes from today's specific + energy carrier usage per ton material output to the best-in-class energy + consumption of tomorrow. This is done on a per-country basis. The ratio of + today to tomorrow's energy consumption is set with the ``industry: + sector_ratios_fraction_future:`` parameter + (https://github.com/PyPSA/pypsa-eur/pull/929). + +* Add new default to overdimension heating in individual buildings. 
This allows + them to cover heat demand peaks e.g. 10% higher than those in the data. The + disadvantage of manipulating the costs is that the capacity is then not quite + right. This way at least the costs are right + (https://github.com/PyPSA/pypsa-eur/pull/918). + +* Allow industrial coal demand to be regional so its emissions can be included + in regional emission limits (https://github.com/PyPSA/pypsa-eur/pull/923). + +* Add option to set a default heating lifetime for existing heating + (``existing_capacities: default_heating_lifetime:``) + (https://github.com/PyPSA/pypsa-eur/pull/918). + +* Added option to specify turbine and solar panel models for specific years as a + dictionary (e.g. ``renewable: onwind: resource: turbine:``). The years will be + interpreted as years from when the corresponding turbine model substitutes + the previous model for new installations. This will only have an effect on + workflows with foresight ``"myopic"`` and still needs to be added for the foresight + option ``"perfect"`` (https://github.com/PyPSA/pypsa-eur/pull/912). + +* New configuration option ``everywhere_powerplants`` to build conventional + powerplants everywhere, irrespective of existing powerplants locations, in the + network (https://github.com/PyPSA/pypsa-eur/pull/850). + +* Add the option to customise map projection in plotting config under + ``plotting: projection: name`` (https://github.com/PyPSA/pypsa-eur/pull/898). + +* Add support for the linopy ``io_api`` option under ``solving: options: + io_api:``. Set to ``"direct"`` to increase model reading and writing + performance for the highs and gurobi solvers on slow file systems + (https://github.com/PyPSA/pypsa-eur/pull/892). + +* It is now possible to determine the directory for shared resources by setting + `shared_resources` to a string (https://github.com/PyPSA/pypsa-eur/pull/906). + +* Improve ``mock_snakemake()`` for usage in Snakemake modules + (https://github.com/PyPSA/pypsa-eur/pull/869). 
+ +**Breaking Changes** + +* Remove long-deprecated function ``attach_extendable_generators`` in + :mod:`add_electricity`. + +* Remove option for wave energy as technology data is not maintained. + +* The order of buses (bus0, bus1, ...) for DAC components has changed to meet + the convention of the other components. Therefore, `bus0` refers to the + electricity bus (input), `bus1` to the heat bus (input), 'bus2' to the CO2 + atmosphere bus (input), and `bus3` to the CO2 storage bus (output) + (https://github.com/PyPSA/pypsa-eur/pull/901). + +**Changes** + +* Upgrade default techno-economic assumptions to ``technology-data`` v0.8.0. + +* Update hydrogen pipeline losses to latest data from Danish Energy Agency + (https://github.com/PyPSA/pypsa-eur/pull/933). + +* Move building of daily heat profile to its own rule + :mod:`build_hourly_heat_demand` from :mod:`prepare_sector_network` + (https://github.com/PyPSA/pypsa-eur/pull/884). + +* In :mod:`build_energy_totals`, district heating shares are now reported in a + separate file (https://github.com/PyPSA/pypsa-eur/pull/884). + +* Move calculation of district heating share to its own rule + :mod:`build_district_heat_share` + (https://github.com/PyPSA/pypsa-eur/pull/884). + +* Move building of distribution of existing heating to own rule + :mod:`build_existing_heating_distribution`. This makes the distribution of + existing heating to urban/rural, residential/services and spatially more + transparent (https://github.com/PyPSA/pypsa-eur/pull/884). + +* Default settings for recycling rates and primary product shares of high-value + chemicals have been set in accordance with the values used in `Neumann et al. + (2023) `_ linearly interpolated + between 2020 and 2050. The recycling rates are based on data from `Agora + Energiewende (2021) + `_. + +* Air-sourced heat pumps can now also be built in rural areas. 
Previously, only + ground-sourced heat pumps were considered for this category + (https://github.com/PyPSA/pypsa-eur/pull/890). + +* The default configuration ``config/config.default.yaml`` is now automatically + used as a base configuration file. The file ``config/config.yaml`` can now be + used to only define deviations from the default configuration. The + ``config/config.default.yaml`` is still copied into ``config/config.yaml`` on + first usage (https://github.com/PyPSA/pypsa-eur/pull/925). + +* Regions are assigned to all buses with unique coordinates in the network with + a preference given to substations. Previously, only substations had assigned + regions, but this could lead to issues when a high spatial resolution was + applied (https://github.com/PyPSA/pypsa-eur/pull/922). + +* Define global constraint for CO2 emissions on the final state of charge of the + CO2 atmosphere store. This gives a more sparse constraint that should improve + the performance of the solving process + (https://github.com/PyPSA/pypsa-eur/pull/862). + +* Switched the energy totals year from 2011 to 2013 to comply with the assumed + default weather year (https://github.com/PyPSA/pypsa-eur/pull/934). + +* Cluster residential and services heat buses by default. Can be disabled with + ``cluster_heat_buses: false`` (https://github.com/PyPSA/pypsa-eur/pull/877). + +* The rule ``plot_network`` has been split into separate rules for plotting + electricity, hydrogen and gas networks + (https://github.com/PyPSA/pypsa-eur/pull/900). + +* To determine the optimal topology to meet the number of clusters, the workflow + used pyomo in combination with ``ipopt`` or ``gurobi``. This dependency has + been replaced by using ``linopy`` in combination with ``scipopt`` or + ``gurobi``. The environment file has been updated accordingly + (https://github.com/PyPSA/pypsa-eur/pull/903). + +* The ``highs`` solver was added to the default environment file. 
+ +* New default solver settings for COPT solver + (https://github.com/PyPSA/pypsa-eur/pull/882). + +* Data retrieval rules now use their own minimal conda environment. This can + avoid unnecessary reruns of the workflow + (https://github.com/PyPSA/pypsa-eur/pull/888). + +* Merged two OPSD time series data versions into such that the option ``load: + power_statistics:`` becomes superfluous and was hence removed + (https://github.com/PyPSA/pypsa-eur/pull/924). + +* The filtering of power plants in the ``config.default.yaml`` has been updated + regarding phased-out power plants in 2023. + +* Include all countries in ammonia production resource. This is so that the full + EU28 ammonia demand can be correctly subtracted in the rule + :mod:`build_industry_sector_ratios` + (https://github.com/PyPSA/pypsa-eur/pull/931). + +* Correctly source the existing heating technologies for buildings since the + source URL has changed. It represents the year 2012 and is only for buildings, + not district heating (https://github.com/PyPSA/pypsa-eur/pull/918). + +* Add warning when BEV availability weekly profile has negative values in + `build_transport_demand` (https://github.com/PyPSA/pypsa-eur/pull/858). + +* Time series clipping for very small values was added for Links + (https://github.com/PyPSA/pypsa-eur/pull/870). + +* A ``test.sh`` script was added to the repository to run the tests locally. + +* The CI now tests additionally against ``master`` versions of PyPSA, atlite and + powerplantmatching (https://github.com/PyPSA/pypsa-eur/pull/904). + +* A function ``sanitize_locations()`` was added to improve the coverage of the + ``location`` attribute of network components. + +**Bugs and Compatibility** + +* Bugfix: Do not reduce district heat share when building population-weighted + energy statistics. Previously the district heating share was being multiplied + by the population weighting, reducing the DH share with multiple nodes + (https://github.com/PyPSA/pypsa-eur/pull/884). 
+ +* Bugfix: The industry coal emissions for industry were not properly tracked + (https://github.com/PyPSA/pypsa-eur/pull/923). + +* Bugfix: Correct units of subtracted chlorine and methanol demand in + :mod:`build_industry_sector_ratios` + (https://github.com/PyPSA/pypsa-eur/pull/930). + +* Various minor bugfixes to the perfect foresight workflow, though perfect + foresight must still be considered experimental + (https://github.com/PyPSA/pypsa-eur/pull/910). + +* Fix plotting of retrofitted hydrogen pipelines with myopic pathway + optimisation (https://github.com/PyPSA/pypsa-eur/pull/937). + +* Bugfix: Correct technology keys for the electricity production plotting to + work out the box. + +* Bugfix: Assure entering of code block which corrects Norwegian heat demand + (https://github.com/PyPSA/pypsa-eur/pull/870). + +* Stacktrace of uncaught exceptions should now be correctly included inside log + files (via `configure_logging(..)`) + (https://github.com/PyPSA/pypsa-eur/pull/875). + +* Bugfix: Correctly read out number of solver threads from configuration file + (https://github.com/PyPSA/pypsa-eur/pull/889). + +* Made copying default config file compatible with snakemake module + (https://github.com/PyPSA/pypsa-eur/pull/894). + +* Compatibility with ``pandas=2.2`` + (https://github.com/PyPSA/pypsa-eur/pull/861). + +Special thanks for this release to Koen van Greevenbroek (`@koen-vg +`_) for various new features, bugfixes and taking +care of deprecations. + + +PyPSA-Eur 0.9.0 (5th January 2024) +================================== + +**New Features** + +* Add option to specify losses for bidirectional links, e.g. pipelines or HVDC + links, in configuration file under ``sector: transmission_efficiency:``. Users + can specify static or length-dependent values as well as a length-dependent + electricity demand for compression, which is implemented as a multi-link to + the local electricity buses. 
The bidirectional links will then be split into + two unidirectional links with linked capacities (https://github.com/PyPSA/pypsa-eur/pull/739). * Merged option to extend geographical scope to Ukraine and Moldova. These countries are excluded by default and is currently constrained to power-sector @@ -52,14 +335,264 @@ Upcoming Release Moldova). Moldova can currently only be included in conjunction with Ukraine due to the absence of demand data. The Crimean power system is manually reconnected to the main Ukrainian grid with the configuration option - `reconnect_crimea`. + `reconnect_crimea` (https://github.com/PyPSA/pypsa-eur/pull/321). +* New experimental support for multi-decade optimisation with perfect foresight + (``foresight: perfect``). Maximum growth rates for carriers, global carbon + budget constraints and emission constraints for particular investment periods. + +* Add option to reference an additional source file where users can specify + custom ``extra_functionality`` constraints in the configuration file. The + default setting points to an empty hull at + ``data/custom_extra_functionality.py`` (https://github.com/PyPSA/pypsa-eur/pull/824). + +* Add locations, capacities and costs of existing gas storage using Global + Energy Monitor's `Europe Gas Tracker + `_ + (https://github.com/PyPSA/pypsa-eur/pull/835). + +* Add option to use `LUISA Base Map + `_ 50m land + coverage dataset for land eligibility analysis in + :mod:`build_renewable_profiles`. Settings are analogous to the CORINE dataset + but with the key ``luisa:`` in the configuration file. To leverage the + dataset's full advantages, set the excluder resolution to 50m + (``excluder_resolution: 50``). For land category codes, see `Annex 1 of the + technical documentation + `_ + (https://github.com/PyPSA/pypsa-eur/pull/842). + +* Add option to capture CO2 contained in biogas when upgrading (``sector: + biogas_to_gas_cc``) (https://github.com/PyPSA/pypsa-eur/pull/615). 
+ +* If load shedding is activated, it is now applied to all carriers, not only + electricity (https://github.com/PyPSA/pypsa-eur/pull/784). + +* Add option for heat vents in district heating (``sector: + central_heat_vent:``). The combination of must-run conditions for some + power-to-X processes, waste heat usage enabled and decreasing heating demand, + can lead to infeasibilities in pathway optimisation for some investment + periods since larger Fischer-Tropsch capacities are needed in early years but + the waste heat exceeds the heat demand in later investment periods. + (https://github.com/PyPSA/pypsa-eur/pull/791). + +* Allow possibility to go from copperplated to regionally resolved methanol and + oil demand with switches ``sector: regional_methanol_demand: true`` and + ``sector: regional_oil_demand: true``. This allows nodal/regional CO2 + constraints to be applied (https://github.com/PyPSA/pypsa-eur/pull/827). + +* Allow retrofitting of existing gas boilers to hydrogen boilers in pathway + optimisation. + +* Add option to add time-varying CO2 emission prices (electricity-only, ``costs: + emission_prices: co2_monthly_prices: true``). This is linked to the new + ``{opts}`` wildcard option ``Ept``. + +* Network clustering can now consider efficiency classes when aggregating + carriers. The option ``clustering: consider_efficiency_classes:`` aggregates + each carriers into the top 10-quantile (high), the bottom 90-quantile (low), + and everything in between (medium). + +* Added option ``conventional: dynamic_fuel_price:`` to consider the monthly + fluctuating fuel prices for conventional generators. Refer to the CSV file + ``data/validation/monthly_fuel_price.csv``. + +* For hydro-electricity, add switches ``flatten_dispatch`` to consider an upper + limit for the hydro dispatch. The limit is given by the average capacity + factor plus the buffer given in ``flatten_dispatch_buffer``. 
+ +* Extend options for waste heat usage from Haber-Bosch, methanolisation and + methanation (https://github.com/PyPSA/pypsa-eur/pull/834). + +* Add new ``sector_opts`` wildcard option "nowasteheat" to disable all waste + heat usage (https://github.com/PyPSA/pypsa-eur/pull/834). + +* Add new rule ``retrieve_irena`` to automatically retrieve up-to-date values + for existing renewables capacities (https://github.com/PyPSA/pypsa-eur/pull/756). + +* Print Irreducible Infeasible Subset (IIS) if model is infeasible. Only for + solvers with IIS support (https://github.com/PyPSA/pypsa-eur/pull/841). + +* More wildcard options now have a corresponding config entry. If the wildcard + is given, then its value is used. If the wildcard is not given but the options + in config are enabled, then the value from config is used. If neither is + given, the options are skipped (https://github.com/PyPSA/pypsa-eur/pull/827). + +* Validate downloads from Zenodo using MD5 checksums. This identifies corrupted + or incomplete downloads (https://github.com/PyPSA/pypsa-eur/pull/821). + +* Add rule ``sync`` to synchronise with a remote machine using the ``rsync`` + library. Configuration settings are found under ``remote:``. + +**Breaking Changes** + +* Remove all negative loads on the ``co2 atmosphere`` bus representing emissions + for e.g. fixed fossil demands for transport oil. Instead these are handled + more transparently with a fixed transport oil demand and a link taking care of + the emissions to the ``co2 atmosphere`` bus. This is also a preparation for + endogenous transport optimisation, where demand will be subject to + optimisation (e.g. fuel switching in the transport sector) + (https://github.com/PyPSA/pypsa-eur/pull/827). + +* Process emissions from steam crackers (i.e. naphtha processing for HVC) are + now piped from the consumption link to the process emissions bus where the + model can decide about carbon capture. 
Previously the process emissions for + naphtha were a fixed load (https://github.com/PyPSA/pypsa-eur/pull/827). + +* Distinguish between stored and sequestered CO2. Stored CO2 is stored + overground in tanks and can be used for CCU (e.g. methanolisation). + Sequestered CO2 is stored underground and can no longer be used for CCU. This + distinction is made because storage in tanks is more expensive than + underground storage. The link that connects stored and sequestered CO2 is + unidirectional (https://github.com/PyPSA/pypsa-eur/pull/844). + +* Files extracted from sector-coupled data bundle have been moved from ``data/`` + to ``data/sector-bundle``. + +* Split configuration to enable SMR and SMR CC (``sector: smr:`` and ``sector: + smr_cc:``) (https://github.com/PyPSA/pypsa-eur/pull/757). + +* Add separate option to add resistive heaters to the technology choices + (``sector: resistive_heaters:``). Previously they were always added when + boilers were added (https://github.com/PyPSA/pypsa-eur/pull/808). + +* Remove HELMETH option (``sector: helmeth:``). + +* Remove "conservative" renewable potentials estimation option + (https://github.com/PyPSA/pypsa-eur/pull/838). + +* With this release we stop posting updates to the network pre-builts. + +**Changes** + +* Updated Global Energy Monitor LNG terminal data to March 2023 version + (https://github.com/PyPSA/pypsa-eur/pull/707). + +* For industry distribution, use EPRTR as fallback if ETS data is not available + (https://github.com/PyPSA/pypsa-eur/pull/721). + +* It is now possible to specify years for biomass potentials which do not exist + in the JRC-ENSPRESO database, e.g. 2037. These are linearly interpolated + (https://github.com/PyPSA/pypsa-eur/pull/744). + +* In pathway mode, the biomass potential is linked to the investment year + (https://github.com/PyPSA/pypsa-eur/pull/744). + +* Increase allowed deployment density of solar to 5.1 MW/sqkm by default. 
+ +* Default to full electrification of land transport by 2050. + +* Provide exogenous transition settings in 5-year steps. + +* Default to approximating transmission losses in HVAC lines + (``transmission_losses: 2``). + +* Use electrolysis waste heat by default. + +* Set minimum part loads for PtX processes to 30% for methanolisation and + methanation, and to 70% for Fischer-Tropsch synthesis. + +* Add VOM as marginal cost to PtX processes + (https://github.com/PyPSA/pypsa-eur/pull/830). + +* Add pelletizing costs for biomass boilers (https://github.com/PyPSA/pypsa-eur/pull/833). + +* Update default offshore wind turbine model to "NREL Reference 2020 ATB 5.5 MW" + (https://github.com/PyPSA/pypsa-eur/pull/832). + +* Switch to using hydrogen and electricity inputs for Haber-Bosch from + https://github.com/PyPSA/technology-data (https://github.com/PyPSA/pypsa-eur/pull/831). + +* The configuration setting for country focus weights when clustering the + network has been moved from ``focus_weights:`` to ``clustering: + focus_weights:``. Backwards compatibility to old config files is maintained + (https://github.com/PyPSA/pypsa-eur/pull/794). + +* The ``mock_snakemake`` function can now be used with a Snakefile from a + different directory using the new ``root_dir`` argument + (https://github.com/PyPSA/pypsa-eur/pull/771). + +* Rule ``purge`` now initiates a dialog to confirm if purge is desired + (https://github.com/PyPSA/pypsa-eur/pull/745). + +* Files downloaded from zenodo are now write-protected to prevent accidental + re-download (https://github.com/PyPSA/pypsa-eur/pull/730). + +* Performance improvements for rule ``build_ship_raster`` + (https://github.com/PyPSA/pypsa-eur/pull/845). + +* Improve time logging in :mod:`build_renewable_profiles` + (https://github.com/PyPSA/pypsa-eur/pull/837). + +* In myopic pathway optimisation, disable power grid expansion if line volume + already hit (https://github.com/PyPSA/pypsa-eur/pull/840). 
+ +* JRC-ENSPRESO data is now downloaded from a Zenodo mirror because the link was + unreliable (https://github.com/PyPSA/pypsa-eur/pull/801). + +* Add focus weights option for clustering to documentation + (https://github.com/PyPSA/pypsa-eur/pull/781). + +* Add proxy for biomass transport costs if no explicit biomass transport network + is considered (https://github.com/PyPSA/pypsa-eur/pull/711). **Bugs and Compatibility** -* A bug preventing custom powerplants specified in ``data/custom_powerplants.csv`` was fixed. (https://github.com/PyPSA/pypsa-eur/pull/732) -* Fix nodal fraction in ``add_existing_year`` when using distributed generators -* Fix typo in buses definition for oil boilers in ``add_industry`` in ``prepare_sector_network`` +* The minimum PyPSA version is now 0.26.1. + +* Update to ``tsam>=0.2.3`` for performance improvements in temporal clustering. + +* Pin ``snakemake`` version to below 8.0.0, as the new version is not yet + supported. The next release will switch to the requirement ``snakemake>=8``. + +* Bugfix: Add coke and coal demand for integrated steelworks + (https://github.com/PyPSA/pypsa-eur/pull/718). + +* Bugfix: Make :mod:`build_renewable_profiles` consider subsets of cutout time + scope (https://github.com/PyPSA/pypsa-eur/pull/709). + +* Bugfix: In :mod:`simplify network`, remove 'underground' column to avoid + consense error (https://github.com/PyPSA/pypsa-eur/pull/714). + +* Bugfix: Fix in :mod:`add_existing_baseyear` to account for the case when there + is no rural heating demand for some nodes in network + (https://github.com/PyPSA/pypsa-eur/pull/706). + +* Bugfix: The unit of the capital cost of Haber-Bosch plants was corrected + (https://github.com/PyPSA/pypsa-eur/pull/829). + +* The minimum capacity for renewable generators when using the myopic option has + been fixed (https://github.com/PyPSA/pypsa-eur/pull/728). + +* Compatibility for running with single node and single country + (https://github.com/PyPSA/pypsa-eur/pull/839). 
+ +* A bug preventing the addition of custom powerplants specified in + ``data/custom_powerplants.csv`` was fixed. + (https://github.com/PyPSA/pypsa-eur/pull/732) + +* Fix nodal fraction in :mod:`add_existing_year` when using distributed + generators (https://github.com/PyPSA/pypsa-eur/pull/798). + +* Bugfix: District heating without progress caused division by zero + (https://github.com/PyPSA/pypsa-eur/pull/796). + +* Bugfix: Drop duplicates in :mod:`build_industrial_distribution_keys`, which + can occur through the geopandas ``.sjoin()`` function if a point is located on + a border (https://github.com/PyPSA/pypsa-eur/pull/726). + +* For network clustering fall back to ``ipopt`` when ``highs`` is designated + solver (https://github.com/PyPSA/pypsa-eur/pull/795). + +* Fix typo in buses definition for oil boilers in ``add_industry`` in + :mod:`prepare_sector_network` (https://github.com/PyPSA/pypsa-eur/pull/812). + +* Resolve code issues for endogenous building retrofitting. Select correct + sector names, address deprecations, distinguish between district heating, + decentral heating in urban areas or rural areas for floor area calculations + (https://github.com/PyPSA/pypsa-eur/pull/808). + +* Addressed various deprecations. PyPSA-Eur 0.8.1 (27th July 2023) @@ -185,6 +718,8 @@ PyPSA-Eur 0.8.1 (27th July 2023) (https://github.com/PyPSA/pypsa-eur/pull/672) +* Addressed deprecation warnings for ``pandas=2.0``. ``pandas=2.0`` is now minimum requirement. + PyPSA-Eur 0.8.0 (18th March 2023) ================================= @@ -1446,8 +1981,4 @@ Release Process * Make a `GitHub release `_, which automatically triggers archiving to the `zenodo code repository `_ with `MIT license `_. -* Create pre-built networks for ``config.default.yaml`` by running ``snakemake -call prepare_sector_networks``. - -* Upload pre-built networks to `zenodo data repository `_ with `CC BY 4.0 `_ license. - * Send announcement on the `PyPSA mailing list `_. 
diff --git a/doc/requirements.txt b/doc/requirements.txt index 3e760c81..a1cd0a5c 100644 --- a/doc/requirements.txt +++ b/doc/requirements.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2019-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2019-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 diff --git a/doc/retrieve.rst b/doc/retrieve.rst index 06a07441..f9d6e2a7 100644 --- a/doc/retrieve.rst +++ b/doc/retrieve.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -91,7 +91,7 @@ None. **Outputs** -- ``resources/load_raw.csv`` +- ``data/electricity_demand_raw.csv`` Rule ``retrieve_cost_data`` diff --git a/doc/sector.rst b/doc/sector.rst index 303e7ed2..84a5fdc9 100644 --- a/doc/sector.rst +++ b/doc/sector.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2023-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -20,6 +20,12 @@ Rule ``add_existing_baseyear`` .. automodule:: add_existing_baseyear +Rule ``build_existing_heating_distribution`` +============================================================================== + +.. automodule:: build_existing_heating_distribution + + Rule ``build_ammonia_production`` ============================================================================== @@ -60,10 +66,20 @@ Rule ``build_gas_network`` .. automodule:: build_gas_network -Rule ``build_heat_demand`` +Rule ``build_daily_heat_demand`` ============================================================================== -.. automodule:: build_heat_demand +.. automodule:: build_daily_heat_demand + +Rule ``build_hourly_heat_demand`` +============================================================================== + +.. 
automodule:: build_hourly_heat_demand + +Rule ``build_district_heat_share`` +============================================================================== + +.. automodule:: build_district_heat_share Rule ``build_industrial_distribution_key`` ============================================================================== diff --git a/doc/simplification.rst b/doc/simplification.rst index 21f61de7..2272505d 100644 --- a/doc/simplification.rst +++ b/doc/simplification.rst @@ -1,7 +1,7 @@ .. - SPDX-FileCopyrightText: 2019-2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 diff --git a/doc/solving.rst b/doc/solving.rst index 21cc5c25..a8020d29 100644 --- a/doc/solving.rst +++ b/doc/solving.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 diff --git a/doc/spatial_resolution.rst b/doc/spatial_resolution.rst index 0293a5ce..a408f464 100644 --- a/doc/spatial_resolution.rst +++ b/doc/spatial_resolution.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2021-2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2021-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -45,7 +45,7 @@ Here are some examples of how spatial resolution is set for different sectors in • CO2: It can be modeled as a single node for Europe or it can be nodally resolved with CO2 transport pipelines if activated in the `config `_. It should mentioned that in single node mode a transport and storage cost is added for sequestered CO2, the cost of which can be adjusted in the `config `_. -• Liquid hydrocarbons: Modeled as a single node for Europe, since transport costs for liquids are low and no bottlenecks are expected. +• Carbonaceous fuels: Modeled as a single node for Europe by default, since transport costs for liquids are low and no bottlenecks are expected. 
Can be regionally resolved in configuration. **Electricity distribution network** diff --git a/doc/supply_demand.rst b/doc/supply_demand.rst index b043268b..8f6edcad 100644 --- a/doc/supply_demand.rst +++ b/doc/supply_demand.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2021-2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2021-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 diff --git a/doc/support.rst b/doc/support.rst index 1d512d59..1a3e6d08 100644 --- a/doc/support.rst +++ b/doc/support.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 diff --git a/doc/tutorial.rst b/doc/tutorial.rst index e58ad123..4f272292 100644 --- a/doc/tutorial.rst +++ b/doc/tutorial.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -133,82 +133,89 @@ This triggers a workflow of multiple preceding jobs that depend on each rule's i graph[bgcolor=white, margin=0]; node[shape=box, style=rounded, fontname=sans, fontsize=10, penwidth=2]; edge[penwidth=2, color=grey]; - 0[label = "solve_network", color = "0.33 0.6 0.85", style="rounded"]; - 1[label = "prepare_network\nll: copt\nopts: Co2L-24H", color = "0.03 0.6 0.85", style="rounded"]; - 2[label = "add_extra_components", color = "0.45 0.6 0.85", style="rounded"]; - 3[label = "cluster_network\nclusters: 6", color = "0.46 0.6 0.85", style="rounded"]; - 4[label = "simplify_network\nsimpl: ", color = "0.52 0.6 0.85", style="rounded"]; - 5[label = "add_electricity", color = "0.55 0.6 0.85", style="rounded"]; - 6[label = "build_renewable_profiles\ntechnology: solar", color = "0.15 0.6 0.85", style="rounded"]; - 7[label = "base_network", color = "0.37 0.6 0.85", style="rounded,dashed"]; - 8[label = "build_shapes", color = "0.07 0.6 0.85", style="rounded,dashed"]; - 9[label = 
"retrieve_databundle", color = "0.60 0.6 0.85", style="rounded"]; - 10[label = "retrieve_natura_raster", color = "0.42 0.6 0.85", style="rounded"]; - 11[label = "build_bus_regions", color = "0.09 0.6 0.85", style="rounded,dashed"]; - 12[label = "build_renewable_profiles\ntechnology: onwind", color = "0.15 0.6 0.85", style="rounded"]; - 13[label = "build_renewable_profiles\ntechnology: offwind-ac", color = "0.15 0.6 0.85", style="rounded"]; - 14[label = "build_ship_raster", color = "0.02 0.6 0.85", style="rounded"]; - 15[label = "retrieve_ship_raster", color = "0.40 0.6 0.85", style="rounded"]; - 16[label = "build_renewable_profiles\ntechnology: offwind-dc", color = "0.15 0.6 0.85", style="rounded"]; - 17[label = "build_line_rating", color = "0.32 0.6 0.85", style="rounded"]; - 18[label = "retrieve_cost_data\nyear: 2030", color = "0.50 0.6 0.85", style="rounded"]; - 19[label = "build_powerplants", color = "0.64 0.6 0.85", style="rounded,dashed"]; - 20[label = "build_electricity_demand", color = "0.13 0.6 0.85", style="rounded,dashed"]; - 21[label = "retrieve_electricity_demand", color = "0.31 0.6 0.85", style="rounded"]; - 22[label = "copy_config", color = "0.23 0.6 0.85", style="rounded"]; + 0[label = "solve_network", color = "0.39 0.6 0.85", style="rounded"]; + 1[label = "prepare_network\nll: copt\nopts: Co2L-24H", color = "0.29 0.6 0.85", style="rounded"]; + 2[label = "add_extra_components", color = "0.28 0.6 0.85", style="rounded"]; + 3[label = "cluster_network\nclusters: 6", color = "0.19 0.6 0.85", style="rounded"]; + 4[label = "simplify_network\nsimpl: ", color = "0.01 0.6 0.85", style="rounded"]; + 5[label = "add_electricity", color = "0.49 0.6 0.85", style="rounded"]; + 6[label = "build_renewable_profiles\ntechnology: solar", color = "0.21 0.6 0.85", style="rounded"]; + 7[label = "base_network", color = "0.27 0.6 0.85", style="rounded"]; + 8[label = "build_shapes", color = "0.26 0.6 0.85", style="rounded"]; + 9[label = "retrieve_databundle", color = "0.59 
0.6 0.85", style="rounded"]; + 10[label = "retrieve_natura_raster", color = "0.47 0.6 0.85", style="rounded"]; + 11[label = "build_bus_regions", color = "0.13 0.6 0.85", style="rounded"]; + 12[label = "retrieve_cutout\ncutout: be-03-2013-era5", color = "0.36 0.6 0.85", style="rounded,dashed"]; + 13[label = "build_renewable_profiles\ntechnology: onwind", color = "0.21 0.6 0.85", style="rounded"]; + 14[label = "build_renewable_profiles\ntechnology: offwind-ac", color = "0.21 0.6 0.85", style="rounded"]; + 15[label = "build_ship_raster", color = "0.00 0.6 0.85", style="rounded"]; + 16[label = "retrieve_ship_raster", color = "0.51 0.6 0.85", style="rounded,dashed"]; + 17[label = "build_renewable_profiles\ntechnology: offwind-dc", color = "0.21 0.6 0.85", style="rounded"]; + 18[label = "build_line_rating", color = "0.05 0.6 0.85", style="rounded"]; + 19[label = "retrieve_cost_data\nyear: 2030", color = "0.15 0.6 0.85", style="rounded"]; + 20[label = "build_powerplants", color = "0.54 0.6 0.85", style="rounded"]; + 21[label = "build_electricity_demand", color = "0.52 0.6 0.85", style="rounded"]; + 22[label = "retrieve_electricity_demand", color = "0.22 0.6 0.85", style="rounded"]; + 23[label = "copy_config", color = "0.44 0.6 0.85", style="rounded"]; 1 -> 0 - 22 -> 0 + 23 -> 0 2 -> 1 - 18 -> 1 + 19 -> 1 3 -> 2 - 18 -> 2 + 19 -> 2 4 -> 3 - 18 -> 3 + 19 -> 3 5 -> 4 - 18 -> 4 + 19 -> 4 11 -> 4 6 -> 5 - 12 -> 5 13 -> 5 - 16 -> 5 - 7 -> 5 + 14 -> 5 17 -> 5 + 7 -> 5 18 -> 5 - 11 -> 5 19 -> 5 - 9 -> 5 + 11 -> 5 20 -> 5 + 9 -> 5 + 21 -> 5 8 -> 5 7 -> 6 9 -> 6 10 -> 6 8 -> 6 11 -> 6 + 12 -> 6 8 -> 7 9 -> 8 8 -> 11 7 -> 11 - 7 -> 12 - 9 -> 12 - 10 -> 12 - 8 -> 12 - 11 -> 12 7 -> 13 9 -> 13 10 -> 13 - 14 -> 13 8 -> 13 11 -> 13 + 12 -> 13 + 7 -> 14 + 9 -> 14 + 10 -> 14 15 -> 14 - 7 -> 16 - 9 -> 16 - 10 -> 16 - 14 -> 16 - 8 -> 16 - 11 -> 16 + 8 -> 14 + 11 -> 14 + 12 -> 14 + 16 -> 15 + 12 -> 15 7 -> 17 - 7 -> 19 - 21 -> 20 + 9 -> 17 + 10 -> 17 + 15 -> 17 + 8 -> 17 + 11 -> 17 + 12 -> 
17 + 7 -> 18 + 12 -> 18 + 7 -> 20 + 22 -> 21 } | @@ -218,26 +225,29 @@ In the terminal, this will show up as a list of jobs to be run: .. code:: bash Building DAG of jobs... - job count min threads max threads - ------------------------ ------- ------------- ------------- - add_electricity 1 1 1 - add_extra_components 1 1 1 - base_network 1 1 1 - build_bus_regions 1 1 1 - build_hydro_profile 1 1 1 - build_electricity_demand 1 1 1 - build_powerplants 1 1 1 - build_renewable_profiles 4 1 1 - build_shapes 1 1 1 - build_ship_raster 1 1 1 - cluster_network 1 1 1 - prepare_network 1 1 1 - retrieve_cost_data 1 1 1 - retrieve_databundle 1 1 1 - retrieve_natura_raster 1 1 1 - simplify_network 1 1 1 - solve_network 1 1 1 - total 20 1 1 + Job stats: + job count + --------------------------- ------- + add_electricity 1 + add_extra_components 1 + base_network 1 + build_bus_regions 1 + build_electricity_demand 1 + build_line_rating 1 + build_powerplants 1 + build_renewable_profiles 4 + build_shapes 1 + build_ship_raster 1 + cluster_network 1 + copy_config 1 + prepare_network 1 + retrieve_cost_data 1 + retrieve_databundle 1 + retrieve_electricity_demand 1 + retrieve_natura_raster 1 + simplify_network 1 + solve_network 1 + total 22 ``snakemake`` then runs these jobs in the correct order. @@ -246,16 +256,16 @@ A job (here ``simplify_network``) will display its attributes and normally some .. 
code:: bash - [Mon Jan 1 00:00:00 2023] + [Mon Feb 19 17:06:17 2024] rule simplify_network: - input: networks/elec.nc, resources/costs.csv, resources/regions_onshore.geojson, resources/regions_offshore.geojson - output: networks/elec_s.nc, resources/regions_onshore_elec_s.geojson, resources/regions_offshore_elec_s.geojson, resources/busmap_elec_s.csv, resources/connection_costs_s.csv - log: logs/simplify_network/elec_s.log + input: resources/test/networks/elec.nc, data/costs_2030.csv, resources/test/regions_onshore.geojson, resources/test/regions_offshore.geojson + output: resources/test/networks/elec_s.nc, resources/test/regions_onshore_elec_s.geojson, resources/test/regions_offshore_elec_s.geojson, resources/test/busmap_elec_s.csv, resources/test/connection_costs_s.csv + log: logs/test-elec/simplify_network/elec_s.log jobid: 4 - benchmark: benchmarks/simplify_network/elec_s - reason: Missing output files: resources/busmap_elec_s.csv, resources/regions_onshore_elec_s.geojson, networks/elec_s.nc, resources/regions_offshore_elec_s.geojson; Input files updated by another job: resources/regions_offshore.geojson, resources/regions_onshore.geojson, resources/costs.csv, networks/elec.nc + benchmark: benchmarks/test-elec/simplify_network/elec_s + reason: Missing output files: resources/test/regions_offshore_elec_s.geojson, resources/test/busmap_elec_s.csv, resources/test/regions_onshore_elec_s.geojson, resources/test/networks/elec_s.nc; Input files updated by another job: resources/test/regions_offshore.geojson, resources/test/networks/elec.nc, resources/test/regions_onshore.geojson, data/costs_2030.csv wildcards: simpl= - resources: tmpdir=/tmp, mem_mb=4000, mem_mib=3815 + resources: tmpdir=/tmp, mem_mb=12000, mem_mib=11445 Once the whole worktree is finished, it should state so in the terminal. diff --git a/doc/tutorial_sector.rst b/doc/tutorial_sector.rst index 53a60353..a1556150 100644 --- a/doc/tutorial_sector.rst +++ b/doc/tutorial_sector.rst @@ -1,5 +1,5 @@ .. 
- SPDX-FileCopyrightText: 2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2023-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -61,46 +61,69 @@ To run an overnight / greenfiled scenario with the specifications above, run snakemake -call all --configfile config/test/config.overnight.yaml -which will result in the following *additional* jobs ``snakemake`` wants to run -on top of those already included in the electricity-only tutorial: +which will result in the following jobs ``snakemake`` wants to run, some of +which were already included in the electricity-only tutorial: .. code:: bash - job count min threads max threads - ------------------------------------------------ ------- ------------- ------------- - all 1 1 1 - build_ammonia_production 1 1 1 - build_biomass_potentials 1 1 1 - build_clustered_population_layouts 1 1 1 - build_cop_profiles 1 1 1 - build_gas_input_locations 1 1 1 - build_gas_network 1 1 1 - build_heat_demands 3 1 1 - build_industrial_distribution_key 1 1 1 - build_industrial_energy_demand_per_country_today 1 1 1 - build_industrial_energy_demand_per_node 1 1 1 - build_industrial_energy_demand_per_node_today 1 1 1 - build_industrial_production_per_country 1 1 1 - build_industrial_production_per_country_tomorrow 1 1 1 - build_industrial_production_per_node 1 1 1 - build_industry_sector_ratios 1 1 1 - build_population_weighted_energy_totals 1 1 1 - build_salt_cavern_potentials 1 1 1 - build_shipping_demand 1 1 1 - build_simplified_population_layouts 1 1 1 - build_solar_thermal_profiles 3 1 1 - build_temperature_profiles 3 1 1 - build_transport_demand 1 1 1 - cluster_gas_network 1 1 1 - cluster_network 1 1 1 - copy_config 1 1 1 - make_summary 1 1 1 - plot_network 1 1 1 - plot_summary 1 1 1 - prepare_sector_network 1 1 1 - retrieve_gas_infrastructure_data 1 1 1 - retrieve_sector_databundle 1 1 1 - solve_sector_network 1 1 1 + job count + ------------------------------------------------ ------- + add_electricity 1 + 
add_extra_components 1 + all 1 + base_network 1 + build_ammonia_production 1 + build_biomass_potentials 1 + build_bus_regions 1 + build_clustered_population_layouts 1 + build_cop_profiles 1 + build_daily_heat_demand 1 + build_district_heat_share 1 + build_electricity_demand 1 + build_energy_totals 1 + build_gas_input_locations 1 + build_gas_network 1 + build_hourly_heat_demand 1 + build_industrial_distribution_key 1 + build_industrial_energy_demand_per_country_today 1 + build_industrial_energy_demand_per_node 1 + build_industrial_energy_demand_per_node_today 1 + build_industrial_production_per_country 1 + build_industrial_production_per_country_tomorrow 1 + build_industrial_production_per_node 1 + build_industry_sector_ratios 1 + build_industry_sector_ratios_intermediate 1 + build_population_layouts 1 + build_population_weighted_energy_totals 1 + build_powerplants 1 + build_renewable_profiles 4 + build_salt_cavern_potentials 1 + build_shapes 1 + build_ship_raster 1 + build_shipping_demand 1 + build_simplified_population_layouts 1 + build_temperature_profiles 3 + build_transport_demand 1 + cluster_gas_network 1 + cluster_network 1 + copy_config 1 + make_summary 1 + plot_gas_network 1 + plot_hydrogen_network 1 + plot_power_network 1 + plot_power_network_clustered 1 + plot_summary 1 + prepare_network 1 + prepare_sector_network 1 + retrieve_cost_data 1 + retrieve_databundle 1 + retrieve_electricity_demand 1 + retrieve_gas_infrastructure_data 1 + retrieve_natura_raster 1 + retrieve_sector_databundle 1 + simplify_network 1 + solve_sector_network 1 + total 60 This covers the retrieval of additional raw data from online resources and preprocessing data about the transport, industry, and heating sectors as well as @@ -119,161 +142,234 @@ successfully. 
graph[bgcolor=white, margin=0]; node[shape=box, style=rounded, fontname=sans, fontsize=10, penwidth=2]; edge[penwidth=2, color=grey]; - 0[label = "all", color = "0.51 0.6 0.85", style="rounded"]; - 1[label = "plot_summary", color = "0.54 0.6 0.85", style="rounded"]; - 2[label = "make_summary", color = "0.44 0.6 0.85", style="rounded"]; - 3[label = "solve_sector_network", color = "0.46 0.6 0.85", style="rounded"]; - 4[label = "prepare_sector_network", color = "0.09 0.6 0.85", style="rounded"]; - 5[label = "cluster_gas_network", color = "0.38 0.6 0.85", style="rounded"]; - 6[label = "build_gas_network", color = "0.00 0.6 0.85", style="rounded"]; - 7[label = "retrieve_gas_infrastructure_data", color = "0.33 0.6 0.85", style="rounded"]; - 8[label = "cluster_network", color = "0.26 0.6 0.85", style="rounded"]; - 9[label = "simplify_network", color = "0.03 0.6 0.85", style="rounded"]; - 10[label = "add_electricity", color = "0.25 0.6 0.85", style="rounded"]; - 11[label = "build_renewable_profiles", color = "0.07 0.6 0.85", style="rounded"]; - 12[label = "base_network", color = "0.16 0.6 0.85", style="rounded"]; - 13[label = "build_shapes", color = "0.65 0.6 0.85", style="rounded"]; - 14[label = "retrieve_databundle", color = "0.20 0.6 0.85", style="rounded"]; - 15[label = "retrieve_natura_raster", color = "0.10 0.6 0.85", style="rounded"]; - 16[label = "build_bus_regions", color = "0.11 0.6 0.85", style="rounded"]; - 17[label = "build_ship_raster", color = "0.56 0.6 0.85", style="rounded"]; - 18[label = "retrieve_ship_raster", color = "0.15 0.6 0.85", style="rounded"]; - 19[label = "retrieve_cost_data", color = "0.50 0.6 0.85", style="rounded"]; - 20[label = "build_powerplants", color = "0.49 0.6 0.85", style="rounded"]; - 21[label = "build_electricity_demand", color = "0.39 0.6 0.85", style="rounded"]; - 22[label = "retrieve_electricity_demand", color = "0.05 0.6 0.85", style="rounded"]; - 23[label = "build_gas_input_locations", color = "0.45 0.6 0.85", 
style="rounded"]; - 24[label = "prepare_network", color = "0.31 0.6 0.85", style="rounded"]; - 25[label = "add_extra_components", color = "0.23 0.6 0.85", style="rounded"]; - 26[label = "build_energy_totals", color = "0.19 0.6 0.85", style="rounded"]; - 27[label = "build_population_weighted_energy_totals", color = "0.27 0.6 0.85", style="rounded"]; - 28[label = "build_clustered_population_layouts", color = "0.64 0.6 0.85", style="rounded"]; - 29[label = "build_population_layouts", color = "0.43 0.6 0.85", style="rounded"]; - 30[label = "build_shipping_demand", color = "0.57 0.6 0.85", style="rounded"]; - 31[label = "build_transport_demand", color = "0.53 0.6 0.85", style="rounded"]; - 32[label = "build_temperature_profiles", color = "0.58 0.6 0.85", style="rounded"]; - 33[label = "build_biomass_potentials", color = "0.30 0.6 0.85", style="rounded"]; - 34[label = "build_salt_cavern_potentials", color = "0.47 0.6 0.85", style="rounded"]; - 35[label = "build_simplified_population_layouts", color = "0.32 0.6 0.85", style="rounded"]; - 36[label = "build_industrial_energy_demand_per_node", color = "0.14 0.6 0.85", style="rounded"]; - 37[label = "build_industry_sector_ratios", color = "0.18 0.6 0.85", style="rounded"]; - 38[label = "build_ammonia_production", color = "0.48 0.6 0.85", style="rounded"]; - 39[label = "build_industrial_production_per_node", color = "0.12 0.6 0.85", style="rounded"]; - 40[label = "build_industrial_distribution_key", color = "0.61 0.6 0.85", style="rounded"]; - 41[label = "build_industrial_production_per_country_tomorrow", color = "0.22 0.6 0.85", style="rounded"]; - 42[label = "build_industrial_production_per_country", color = "0.59 0.6 0.85", style="rounded"]; - 43[label = "build_industrial_energy_demand_per_node_today", color = "0.62 0.6 0.85", style="rounded"]; - 44[label = "build_industrial_energy_demand_per_country_today", color = "0.41 0.6 0.85", style="rounded"]; - 45[label = "build_heat_demands", color = "0.08 0.6 0.85", 
style="rounded"]; - 46[label = "build_cop_profiles", color = "0.52 0.6 0.85", style="rounded"]; - 47[label = "build_solar_thermal_profiles", color = "0.17 0.6 0.85", style="rounded"]; - 48[label = "copy_config", color = "0.40 0.6 0.85", style="rounded"]; - 49[label = "plot_network", color = "0.60 0.6 0.85", style="rounded"]; - 1 -> 0 - 2 -> 1 - 49 -> 2 - 19 -> 2 - 3 -> 2 - 48 -> 3 - 4 -> 3 - 19 -> 3 - 9 -> 4 - 11 -> 4 - 45 -> 4 - 36 -> 4 - 47 -> 4 - 26 -> 4 - 27 -> 4 - 8 -> 4 - 33 -> 4 - 24 -> 4 - 35 -> 4 - 5 -> 4 - 23 -> 4 - 34 -> 4 - 19 -> 4 - 31 -> 4 - 46 -> 4 - 30 -> 4 - 32 -> 4 - 28 -> 4 - 6 -> 5 - 8 -> 5 - 7 -> 6 - 19 -> 8 - 9 -> 8 - 19 -> 9 - 10 -> 9 - 16 -> 9 - 14 -> 10 - 21 -> 10 - 20 -> 10 - 19 -> 10 - 11 -> 10 - 16 -> 10 - 13 -> 10 - 12 -> 10 - 14 -> 11 - 17 -> 11 - 15 -> 11 - 16 -> 11 - 12 -> 11 - 13 -> 11 - 13 -> 12 - 14 -> 13 - 12 -> 16 - 13 -> 16 - 18 -> 17 - 12 -> 20 - 22 -> 21 - 8 -> 23 - 7 -> 23 - 25 -> 24 - 19 -> 24 - 19 -> 25 - 8 -> 25 - 13 -> 26 - 28 -> 27 - 26 -> 27 - 8 -> 28 - 29 -> 28 - 13 -> 29 - 13 -> 30 - 8 -> 30 - 26 -> 30 - 32 -> 31 - 28 -> 31 - 27 -> 31 - 26 -> 31 - 8 -> 32 - 29 -> 32 - 13 -> 33 - 14 -> 33 - 8 -> 33 - 8 -> 34 - 9 -> 35 - 29 -> 35 - 37 -> 36 - 39 -> 36 - 43 -> 36 - 38 -> 37 - 41 -> 39 - 40 -> 39 - 28 -> 40 - 8 -> 40 - 42 -> 41 - 38 -> 42 - 44 -> 43 - 40 -> 43 - 38 -> 44 - 42 -> 44 - 8 -> 45 - 29 -> 45 - 32 -> 46 - 8 -> 47 - 29 -> 47 - 8 -> 49 - 3 -> 49 + 0[label = "all", color = "0.55 0.6 0.85", style="rounded"]; + 1[label = "plot_summary", color = "0.31 0.6 0.85", style="rounded"]; + 2[label = "make_summary", color = "0.37 0.6 0.85", style="rounded"]; + 3[label = "plot_power_network_clustered", color = "0.50 0.6 0.85", style="rounded"]; + 4[label = "cluster_network\nclusters: 5", color = "0.62 0.6 0.85", style="rounded"]; + 5[label = "simplify_network\nsimpl: ", color = "0.18 0.6 0.85", style="rounded"]; + 6[label = "add_electricity", color = "0.33 0.6 0.85", style="rounded"]; + 7[label = 
"build_renewable_profiles\ntechnology: solar", color = "0.20 0.6 0.85", style="rounded"]; + 8[label = "base_network", color = "0.31 0.6 0.85", style="rounded"]; + 9[label = "build_shapes", color = "0.36 0.6 0.85", style="rounded"]; + 10[label = "retrieve_databundle", color = "0.29 0.6 0.85", style="rounded"]; + 11[label = "retrieve_natura_raster", color = "0.01 0.6 0.85", style="rounded"]; + 12[label = "build_bus_regions", color = "0.10 0.6 0.85", style="rounded"]; + 13[label = "retrieve_cutout\ncutout: be-03-2013-era5", color = "0.37 0.6 0.85", style="rounded,dashed"]; + 14[label = "build_renewable_profiles\ntechnology: onwind", color = "0.20 0.6 0.85", style="rounded"]; + 15[label = "build_renewable_profiles\ntechnology: offwind-ac", color = "0.20 0.6 0.85", style="rounded"]; + 16[label = "build_ship_raster", color = "0.64 0.6 0.85", style="rounded"]; + 17[label = "retrieve_ship_raster", color = "0.64 0.6 0.85", style="rounded,dashed"]; + 18[label = "build_renewable_profiles\ntechnology: offwind-dc", color = "0.20 0.6 0.85", style="rounded"]; + 19[label = "retrieve_cost_data\nyear: 2030", color = "0.12 0.6 0.85", style="rounded"]; + 20[label = "build_powerplants", color = "0.23 0.6 0.85", style="rounded"]; + 21[label = "build_electricity_demand", color = "0.54 0.6 0.85", style="rounded"]; + 22[label = "retrieve_electricity_demand", color = "0.07 0.6 0.85", style="rounded"]; + 23[label = "solve_sector_network", color = "0.41 0.6 0.85", style="rounded"]; + 24[label = "prepare_sector_network\nsector_opts: CO2L0-24h-T-H-B-I-A-dist1", color = "0.22 0.6 0.85", style="rounded"]; + 25[label = "cluster_gas_network", color = "0.24 0.6 0.85", style="rounded"]; + 26[label = "build_gas_network", color = "0.10 0.6 0.85", style="rounded"]; + 27[label = "retrieve_gas_infrastructure_data", color = "0.17 0.6 0.85", style="rounded"]; + 28[label = "build_gas_input_locations", color = "0.16 0.6 0.85", style="rounded"]; + 29[label = "prepare_network\nll: v1.5\nopts: ", color = "0.49 
0.6 0.85", style="rounded"]; + 30[label = "add_extra_components", color = "0.14 0.6 0.85", style="rounded"]; + 31[label = "build_energy_totals", color = "0.39 0.6 0.85", style="rounded"]; + 32[label = "retrieve_sector_databundle", color = "0.58 0.6 0.85", style="rounded"]; + 33[label = "build_population_weighted_energy_totals", color = "0.56 0.6 0.85", style="rounded"]; + 34[label = "build_clustered_population_layouts", color = "0.49 0.6 0.85", style="rounded"]; + 35[label = "build_population_layouts", color = "0.06 0.6 0.85", style="rounded"]; + 36[label = "build_shipping_demand", color = "0.47 0.6 0.85", style="rounded"]; + 37[label = "build_transport_demand", color = "0.45 0.6 0.85", style="rounded"]; + 38[label = "build_temperature_profiles\nscope: total", color = "0.04 0.6 0.85", style="rounded"]; + 39[label = "build_biomass_potentials\nplanning_horizons: 2030", color = "0.11 0.6 0.85", style="rounded"]; + 40[label = "build_salt_cavern_potentials", color = "0.15 0.6 0.85", style="rounded"]; + 41[label = "build_simplified_population_layouts", color = "0.46 0.6 0.85", style="rounded"]; + 42[label = "build_industrial_energy_demand_per_node", color = "0.63 0.6 0.85", style="rounded"]; + 43[label = "build_industry_sector_ratios_intermediate\nplanning_horizons: 2030", color = "0.07 0.6 0.85", style="rounded"]; + 44[label = "build_industry_sector_ratios", color = "0.59 0.6 0.85", style="rounded"]; + 45[label = "build_ammonia_production", color = "0.04 0.6 0.85", style="rounded"]; + 46[label = "build_industrial_energy_demand_per_country_today", color = "0.44 0.6 0.85", style="rounded"]; + 47[label = "build_industrial_production_per_country", color = "0.34 0.6 0.85", style="rounded"]; + 48[label = "build_industrial_production_per_node", color = "0.26 0.6 0.85", style="rounded"]; + 49[label = "build_industrial_distribution_key", color = "0.13 0.6 0.85", style="rounded"]; + 50[label = "build_industrial_production_per_country_tomorrow\nplanning_horizons: 2030", color = 
"0.32 0.6 0.85", style="rounded"]; + 51[label = "build_industrial_energy_demand_per_node_today", color = "0.48 0.6 0.85", style="rounded"]; + 52[label = "build_hourly_heat_demand", color = "0.28 0.6 0.85", style="rounded"]; + 53[label = "build_daily_heat_demand\nscope: total", color = "0.28 0.6 0.85", style="rounded"]; + 54[label = "build_district_heat_share\nplanning_horizons: 2030", color = "0.52 0.6 0.85", style="rounded"]; + 55[label = "build_temperature_profiles\nscope: rural", color = "0.04 0.6 0.85", style="rounded"]; + 56[label = "build_temperature_profiles\nscope: urban", color = "0.04 0.6 0.85", style="rounded"]; + 57[label = "build_cop_profiles", color = "0.38 0.6 0.85", style="rounded"]; + 58[label = "copy_config", color = "0.19 0.6 0.85", style="rounded"]; + 59[label = "plot_power_network", color = "0.60 0.6 0.85", style="rounded"]; + 60[label = "plot_hydrogen_network", color = "0.27 0.6 0.85", style="rounded"]; + 61[label = "plot_gas_network", color = "0.08 0.6 0.85", style="rounded"]; + 1 -> 0 + 2 -> 1 + 32 -> 1 + 3 -> 2 + 23 -> 2 + 19 -> 2 + 59 -> 2 + 60 -> 2 + 61 -> 2 + 4 -> 3 + 5 -> 4 + 19 -> 4 + 6 -> 5 + 19 -> 5 + 12 -> 5 + 7 -> 6 + 14 -> 6 + 15 -> 6 + 18 -> 6 + 8 -> 6 + 19 -> 6 + 12 -> 6 + 20 -> 6 + 10 -> 6 + 21 -> 6 + 9 -> 6 + 8 -> 7 + 10 -> 7 + 11 -> 7 + 9 -> 7 + 12 -> 7 + 13 -> 7 + 9 -> 8 + 10 -> 9 + 9 -> 12 + 8 -> 12 + 8 -> 14 + 10 -> 14 + 11 -> 14 + 9 -> 14 + 12 -> 14 + 13 -> 14 + 8 -> 15 + 10 -> 15 + 11 -> 15 + 16 -> 15 + 9 -> 15 + 12 -> 15 + 13 -> 15 + 17 -> 16 + 13 -> 16 + 8 -> 18 + 10 -> 18 + 11 -> 18 + 16 -> 18 + 9 -> 18 + 12 -> 18 + 13 -> 18 + 8 -> 20 + 22 -> 21 + 24 -> 23 + 58 -> 23 + 25 -> 24 + 28 -> 24 + 29 -> 24 + 31 -> 24 + 32 -> 24 + 33 -> 24 + 36 -> 24 + 37 -> 24 + 39 -> 24 + 19 -> 24 + 15 -> 24 + 18 -> 24 + 40 -> 24 + 5 -> 24 + 4 -> 24 + 34 -> 24 + 41 -> 24 + 42 -> 24 + 52 -> 24 + 54 -> 24 + 38 -> 24 + 55 -> 24 + 56 -> 24 + 57 -> 24 + 26 -> 25 + 4 -> 25 + 27 -> 26 + 27 -> 28 + 4 -> 28 + 30 -> 29 + 19 -> 29 + 4 -> 30 + 19 -> 30 
+ 9 -> 31 + 32 -> 31 + 31 -> 33 + 34 -> 33 + 35 -> 34 + 4 -> 34 + 13 -> 34 + 9 -> 35 + 13 -> 35 + 9 -> 36 + 4 -> 36 + 31 -> 36 + 34 -> 37 + 33 -> 37 + 31 -> 37 + 32 -> 37 + 38 -> 37 + 35 -> 38 + 4 -> 38 + 13 -> 38 + 32 -> 39 + 4 -> 39 + 10 -> 39 + 9 -> 39 + 32 -> 40 + 4 -> 40 + 35 -> 41 + 5 -> 41 + 13 -> 41 + 43 -> 42 + 48 -> 42 + 51 -> 42 + 44 -> 43 + 46 -> 43 + 47 -> 43 + 45 -> 44 + 32 -> 44 + 32 -> 45 + 32 -> 46 + 47 -> 46 + 45 -> 47 + 32 -> 47 + 49 -> 48 + 50 -> 48 + 4 -> 49 + 34 -> 49 + 32 -> 49 + 47 -> 50 + 49 -> 51 + 46 -> 51 + 53 -> 52 + 35 -> 53 + 4 -> 53 + 13 -> 53 + 31 -> 54 + 34 -> 54 + 35 -> 55 + 4 -> 55 + 13 -> 55 + 35 -> 56 + 4 -> 56 + 13 -> 56 + 38 -> 57 + 55 -> 57 + 56 -> 57 + 23 -> 59 + 4 -> 59 + 23 -> 60 + 4 -> 60 + 23 -> 61 + 4 -> 61 } | @@ -320,23 +416,10 @@ To run a myopic foresight scenario with the specifications above, run snakemake -call all --configfile config/test/config.myopic.yaml -which will result in the following *additional* jobs ``snakemake`` wants to run: - -.. code:: bash - - job count min threads max threads - ------------------------------------------------ ------- ------------- ------------- - all 1 1 1 - add_brownfield 2 1 1 - add_existing_baseyear 1 1 1 - plot_network 3 1 1 - plot_summary 1 1 1 - prepare_sector_network 3 1 1 - solve_sector_network_myopic 3 1 1 - -which translates to the following workflow diagram which nicely outlines -how the sequential pathway optimisation with myopic foresight is -implemented in the workflow: +which will result in additional jobs ``snakemake`` wants to run, which +translates to the following workflow diagram which nicely outlines how the +sequential pathway optimisation with myopic foresight is implemented in the +workflow: .. 
graphviz:: :class: full-width @@ -346,164 +429,386 @@ implemented in the workflow: graph[bgcolor=white, margin=0]; node[shape=box, style=rounded, fontname=sans, fontsize=10, penwidth=2]; edge[penwidth=2, color=grey]; - 0[label = "all", color = "0.38 0.6 0.85", style="rounded"]; - 1[label = "plot_summary", color = "0.61 0.6 0.85", style="rounded"]; - 2[label = "make_summary", color = "0.51 0.6 0.85", style="rounded"]; - 3[label = "solve_sector_network_myopic", color = "0.32 0.6 0.85", style="rounded"]; - 4[label = "add_existing_baseyear", color = "0.20 0.6 0.85", style="rounded"]; - 5[label = "prepare_sector_network", color = "0.14 0.6 0.85", style="rounded"]; - 6[label = "prepare_network", color = "0.06 0.6 0.85", style="rounded"]; - 7[label = "add_extra_components", color = "0.00 0.6 0.85", style="rounded"]; - 8[label = "cluster_network", color = "0.18 0.6 0.85", style="rounded"]; - 9[label = "simplify_network", color = "0.30 0.6 0.85", style="rounded"]; - 10[label = "add_electricity", color = "0.24 0.6 0.85", style="rounded"]; - 11[label = "build_renewable_profiles", color = "0.40 0.6 0.85", style="rounded"]; - 12[label = "base_network", color = "0.11 0.6 0.85", style="rounded"]; - 13[label = "build_shapes", color = "0.29 0.6 0.85", style="rounded"]; - 14[label = "retrieve_databundle", color = "0.58 0.6 0.85", style="rounded"]; - 15[label = "retrieve_natura_raster", color = "0.39 0.6 0.85", style="rounded"]; - 16[label = "build_bus_regions", color = "0.60 0.6 0.85", style="rounded"]; - 17[label = "build_ship_raster", color = "0.65 0.6 0.85", style="rounded"]; - 18[label = "retrieve_ship_raster", color = "0.09 0.6 0.85", style="rounded"]; - 19[label = "retrieve_cost_data", color = "0.04 0.6 0.85", style="rounded"]; - 20[label = "build_powerplants", color = "0.28 0.6 0.85", style="rounded"]; - 21[label = "build_electricity_demand", color = "0.46 0.6 0.85", style="rounded"]; - 22[label = "retrieve_electricity_demand", color = "0.44 0.6 0.85", style="rounded"]; - 
23[label = "build_energy_totals", color = "0.53 0.6 0.85", style="rounded"]; - 24[label = "build_population_weighted_energy_totals", color = "0.03 0.6 0.85", style="rounded"]; - 25[label = "build_clustered_population_layouts", color = "0.34 0.6 0.85", style="rounded"]; - 26[label = "build_population_layouts", color = "0.63 0.6 0.85", style="rounded"]; - 27[label = "build_shipping_demand", color = "0.05 0.6 0.85", style="rounded"]; - 28[label = "build_transport_demand", color = "0.52 0.6 0.85", style="rounded"]; - 29[label = "build_temperature_profiles", color = "0.16 0.6 0.85", style="rounded"]; - 30[label = "build_biomass_potentials", color = "0.47 0.6 0.85", style="rounded"]; - 31[label = "build_salt_cavern_potentials", color = "0.48 0.6 0.85", style="rounded"]; - 32[label = "build_simplified_population_layouts", color = "0.08 0.6 0.85", style="rounded"]; - 33[label = "build_industrial_energy_demand_per_node", color = "0.22 0.6 0.85", style="rounded"]; - 34[label = "build_industry_sector_ratios", color = "0.56 0.6 0.85", style="rounded"]; - 35[label = "build_ammonia_production", color = "0.57 0.6 0.85", style="rounded"]; - 36[label = "build_industrial_production_per_node", color = "0.66 0.6 0.85", style="rounded"]; - 37[label = "build_industrial_distribution_key", color = "0.41 0.6 0.85", style="rounded"]; - 38[label = "build_industrial_production_per_country_tomorrow", color = "0.54 0.6 0.85", style="rounded"]; - 39[label = "build_industrial_production_per_country", color = "0.10 0.6 0.85", style="rounded"]; - 40[label = "build_industrial_energy_demand_per_node_today", color = "0.55 0.6 0.85", style="rounded"]; - 41[label = "build_industrial_energy_demand_per_country_today", color = "0.35 0.6 0.85", style="rounded"]; - 42[label = "build_heat_demands", color = "0.49 0.6 0.85", style="rounded"]; - 43[label = "build_cop_profiles", color = "0.01 0.6 0.85", style="rounded"]; - 44[label = "build_solar_thermal_profiles", color = "0.45 0.6 0.85", style="rounded"]; - 
45[label = "copy_config", color = "0.33 0.6 0.85", style="rounded"]; - 46[label = "add_brownfield", color = "0.59 0.6 0.85", style="rounded"]; - 47[label = "plot_network", color = "0.15 0.6 0.85", style="rounded"]; - 1 -> 0 - 2 -> 1 - 3 -> 2 - 19 -> 2 - 47 -> 2 - 46 -> 3 - 19 -> 3 - 4 -> 3 - 45 -> 3 - 43 -> 4 - 19 -> 4 - 20 -> 4 - 9 -> 4 - 5 -> 4 - 25 -> 4 - 8 -> 4 - 28 -> 5 - 23 -> 5 - 11 -> 5 - 33 -> 5 - 24 -> 5 - 43 -> 5 - 19 -> 5 - 27 -> 5 - 6 -> 5 - 31 -> 5 - 32 -> 5 - 44 -> 5 - 9 -> 5 - 30 -> 5 - 25 -> 5 - 29 -> 5 - 42 -> 5 - 8 -> 5 - 7 -> 6 - 19 -> 6 - 19 -> 7 - 8 -> 7 - 9 -> 8 - 19 -> 8 - 10 -> 9 - 19 -> 9 - 16 -> 9 - 11 -> 10 - 19 -> 10 - 14 -> 10 - 20 -> 10 - 12 -> 10 - 21 -> 10 - 16 -> 10 - 13 -> 10 - 15 -> 11 - 14 -> 11 - 13 -> 11 - 12 -> 11 - 16 -> 11 - 17 -> 11 - 13 -> 12 - 14 -> 13 - 13 -> 16 - 12 -> 16 - 18 -> 17 - 12 -> 20 - 22 -> 21 - 13 -> 23 - 25 -> 24 - 23 -> 24 - 8 -> 25 - 26 -> 25 - 13 -> 26 - 13 -> 27 - 23 -> 27 - 8 -> 27 - 24 -> 28 - 25 -> 28 - 29 -> 28 - 23 -> 28 - 8 -> 29 - 26 -> 29 - 13 -> 30 - 14 -> 30 - 8 -> 30 - 8 -> 31 - 9 -> 32 - 26 -> 32 - 34 -> 33 - 36 -> 33 - 40 -> 33 - 35 -> 34 - 37 -> 36 - 38 -> 36 - 25 -> 37 - 8 -> 37 - 39 -> 38 - 35 -> 39 - 41 -> 40 - 37 -> 40 - 39 -> 41 - 35 -> 41 - 8 -> 42 - 26 -> 42 - 29 -> 43 - 8 -> 44 - 26 -> 44 - 3 -> 46 - 19 -> 46 - 5 -> 46 - 43 -> 46 - 3 -> 47 - 8 -> 47 + 0[label = "all", color = "0.46 0.6 0.85", style="rounded"]; + 1[label = "plot_summary", color = "0.40 0.6 0.85", style="rounded"]; + 2[label = "make_summary", color = "0.59 0.6 0.85", style="rounded"]; + 3[label = "plot_power_network_clustered", color = "0.17 0.6 0.85", style="rounded"]; + 4[label = "cluster_network\nclusters: 5", color = "0.49 0.6 0.85", style="rounded"]; + 5[label = "simplify_network\nsimpl: ", color = "0.16 0.6 0.85", style="rounded"]; + 6[label = "add_electricity", color = "0.32 0.6 0.85", style="rounded"]; + 7[label = "build_renewable_profiles\ntechnology: solar", color = "0.63 0.6 0.85", style="rounded"]; + 
8[label = "base_network", color = "0.12 0.6 0.85", style="rounded"]; + 9[label = "build_shapes", color = "0.23 0.6 0.85", style="rounded"]; + 10[label = "retrieve_databundle", color = "0.61 0.6 0.85", style="rounded"]; + 11[label = "retrieve_natura_raster", color = "0.50 0.6 0.85", style="rounded"]; + 12[label = "build_bus_regions", color = "0.51 0.6 0.85", style="rounded"]; + 13[label = "retrieve_cutout\ncutout: be-03-2013-era5", color = "0.37 0.6 0.85", style="rounded,dashed"]; + 14[label = "build_renewable_profiles\ntechnology: onwind", color = "0.63 0.6 0.85", style="rounded"]; + 15[label = "build_renewable_profiles\ntechnology: offwind-ac", color = "0.63 0.6 0.85", style="rounded"]; + 16[label = "build_ship_raster", color = "0.24 0.6 0.85", style="rounded"]; + 17[label = "retrieve_ship_raster", color = "0.14 0.6 0.85", style="rounded,dashed"]; + 18[label = "build_renewable_profiles\ntechnology: offwind-dc", color = "0.63 0.6 0.85", style="rounded"]; + 19[label = "retrieve_cost_data\nyear: 2030", color = "0.04 0.6 0.85", style="rounded"]; + 20[label = "build_powerplants", color = "0.58 0.6 0.85", style="rounded"]; + 21[label = "build_electricity_demand", color = "0.04 0.6 0.85", style="rounded"]; + 22[label = "retrieve_electricity_demand", color = "0.62 0.6 0.85", style="rounded"]; + 23[label = "solve_sector_network_myopic", color = "0.30 0.6 0.85", style="rounded"]; + 24[label = "add_existing_baseyear", color = "0.34 0.6 0.85", style="rounded"]; + 25[label = "prepare_sector_network\nsector_opts: 24h-T-H-B-I-A-dist1", color = "0.42 0.6 0.85", style="rounded"]; + 26[label = "cluster_gas_network", color = "0.39 0.6 0.85", style="rounded"]; + 27[label = "build_gas_network", color = "0.59 0.6 0.85", style="rounded"]; + 28[label = "retrieve_gas_infrastructure_data", color = "0.15 0.6 0.85", style="rounded"]; + 29[label = "build_gas_input_locations", color = "0.07 0.6 0.85", style="rounded"]; + 30[label = "prepare_network\nll: v1.5\nopts: ", color = "0.56 0.6 0.85", 
style="rounded"]; + 31[label = "add_extra_components", color = "0.11 0.6 0.85", style="rounded"]; + 32[label = "build_energy_totals", color = "0.18 0.6 0.85", style="rounded"]; + 33[label = "retrieve_sector_databundle", color = "0.06 0.6 0.85", style="rounded"]; + 34[label = "build_population_weighted_energy_totals", color = "0.03 0.6 0.85", style="rounded"]; + 35[label = "build_clustered_population_layouts", color = "0.25 0.6 0.85", style="rounded"]; + 36[label = "build_population_layouts", color = "0.57 0.6 0.85", style="rounded"]; + 37[label = "build_shipping_demand", color = "0.45 0.6 0.85", style="rounded"]; + 38[label = "build_transport_demand", color = "0.18 0.6 0.85", style="rounded"]; + 39[label = "build_temperature_profiles\nscope: total", color = "0.54 0.6 0.85", style="rounded"]; + 40[label = "build_biomass_potentials\nplanning_horizons: 2030", color = "0.41 0.6 0.85", style="rounded"]; + 41[label = "build_salt_cavern_potentials", color = "0.02 0.6 0.85", style="rounded"]; + 42[label = "build_simplified_population_layouts", color = "0.15 0.6 0.85", style="rounded"]; + 43[label = "build_industrial_energy_demand_per_node", color = "0.47 0.6 0.85", style="rounded"]; + 44[label = "build_industry_sector_ratios_intermediate\nplanning_horizons: 2030", color = "0.31 0.6 0.85", style="rounded"]; + 45[label = "build_industry_sector_ratios", color = "0.48 0.6 0.85", style="rounded"]; + 46[label = "build_ammonia_production", color = "0.00 0.6 0.85", style="rounded"]; + 47[label = "build_industrial_energy_demand_per_country_today", color = "0.32 0.6 0.85", style="rounded"]; + 48[label = "build_industrial_production_per_country", color = "0.60 0.6 0.85", style="rounded"]; + 49[label = "build_industrial_production_per_node", color = "0.05 0.6 0.85", style="rounded"]; + 50[label = "build_industrial_distribution_key", color = "0.21 0.6 0.85", style="rounded"]; + 51[label = "build_industrial_production_per_country_tomorrow\nplanning_horizons: 2030", color = "0.33 0.6 
0.85", style="rounded"]; + 52[label = "build_industrial_energy_demand_per_node_today", color = "0.62 0.6 0.85", style="rounded"]; + 53[label = "build_hourly_heat_demand", color = "0.28 0.6 0.85", style="rounded"]; + 54[label = "build_daily_heat_demand\nscope: total", color = "0.22 0.6 0.85", style="rounded"]; + 55[label = "build_district_heat_share\nplanning_horizons: 2030", color = "0.21 0.6 0.85", style="rounded"]; + 56[label = "build_temperature_profiles\nscope: rural", color = "0.54 0.6 0.85", style="rounded"]; + 57[label = "build_temperature_profiles\nscope: urban", color = "0.54 0.6 0.85", style="rounded"]; + 58[label = "build_cop_profiles", color = "0.52 0.6 0.85", style="rounded"]; + 59[label = "build_existing_heating_distribution", color = "0.09 0.6 0.85", style="rounded"]; + 60[label = "copy_config", color = "0.42 0.6 0.85", style="rounded"]; + 61[label = "solve_sector_network_myopic", color = "0.30 0.6 0.85", style="rounded"]; + 62[label = "add_brownfield", color = "0.10 0.6 0.85", style="rounded"]; + 63[label = "prepare_sector_network\nsector_opts: 24h-T-H-B-I-A-dist1", color = "0.42 0.6 0.85", style="rounded"]; + 64[label = "build_biomass_potentials\nplanning_horizons: 2040", color = "0.41 0.6 0.85", style="rounded"]; + 65[label = "retrieve_cost_data\nyear: 2040", color = "0.04 0.6 0.85", style="rounded"]; + 66[label = "build_industrial_energy_demand_per_node", color = "0.47 0.6 0.85", style="rounded"]; + 67[label = "build_industry_sector_ratios_intermediate\nplanning_horizons: 2040", color = "0.31 0.6 0.85", style="rounded"]; + 68[label = "build_industrial_production_per_node", color = "0.05 0.6 0.85", style="rounded"]; + 69[label = "build_industrial_production_per_country_tomorrow\nplanning_horizons: 2040", color = "0.33 0.6 0.85", style="rounded"]; + 70[label = "build_district_heat_share\nplanning_horizons: 2040", color = "0.21 0.6 0.85", style="rounded"]; + 71[label = "solve_sector_network_myopic", color = "0.30 0.6 0.85", style="rounded"]; + 
72[label = "add_brownfield", color = "0.10 0.6 0.85", style="rounded"]; + 73[label = "prepare_sector_network\nsector_opts: 24h-T-H-B-I-A-dist1", color = "0.42 0.6 0.85", style="rounded"]; + 74[label = "build_biomass_potentials\nplanning_horizons: 2050", color = "0.41 0.6 0.85", style="rounded"]; + 75[label = "retrieve_cost_data\nyear: 2050", color = "0.04 0.6 0.85", style="rounded"]; + 76[label = "build_industrial_energy_demand_per_node", color = "0.47 0.6 0.85", style="rounded"]; + 77[label = "build_industry_sector_ratios_intermediate\nplanning_horizons: 2050", color = "0.31 0.6 0.85", style="rounded"]; + 78[label = "build_industrial_production_per_node", color = "0.05 0.6 0.85", style="rounded"]; + 79[label = "build_industrial_production_per_country_tomorrow\nplanning_horizons: 2050", color = "0.33 0.6 0.85", style="rounded"]; + 80[label = "build_district_heat_share\nplanning_horizons: 2050", color = "0.21 0.6 0.85", style="rounded"]; + 81[label = "plot_power_network", color = "0.48 0.6 0.85", style="rounded"]; + 82[label = "plot_power_network", color = "0.48 0.6 0.85", style="rounded"]; + 83[label = "plot_power_network", color = "0.48 0.6 0.85", style="rounded"]; + 84[label = "plot_hydrogen_network", color = "0.37 0.6 0.85", style="rounded"]; + 85[label = "plot_hydrogen_network", color = "0.37 0.6 0.85", style="rounded"]; + 86[label = "plot_hydrogen_network", color = "0.37 0.6 0.85", style="rounded"]; + 1 -> 0 + 2 -> 1 + 33 -> 1 + 3 -> 2 + 23 -> 2 + 61 -> 2 + 71 -> 2 + 19 -> 2 + 81 -> 2 + 82 -> 2 + 83 -> 2 + 84 -> 2 + 85 -> 2 + 86 -> 2 + 4 -> 3 + 5 -> 4 + 19 -> 4 + 6 -> 5 + 19 -> 5 + 12 -> 5 + 7 -> 6 + 14 -> 6 + 15 -> 6 + 18 -> 6 + 8 -> 6 + 19 -> 6 + 12 -> 6 + 20 -> 6 + 10 -> 6 + 21 -> 6 + 9 -> 6 + 8 -> 7 + 10 -> 7 + 11 -> 7 + 9 -> 7 + 12 -> 7 + 13 -> 7 + 9 -> 8 + 10 -> 9 + 9 -> 12 + 8 -> 12 + 8 -> 14 + 10 -> 14 + 11 -> 14 + 9 -> 14 + 12 -> 14 + 13 -> 14 + 8 -> 15 + 10 -> 15 + 11 -> 15 + 16 -> 15 + 9 -> 15 + 12 -> 15 + 13 -> 15 + 17 -> 16 + 13 -> 16 + 8 -> 18 + 
10 -> 18 + 11 -> 18 + 16 -> 18 + 9 -> 18 + 12 -> 18 + 13 -> 18 + 8 -> 20 + 22 -> 21 + 24 -> 23 + 19 -> 23 + 60 -> 23 + 25 -> 24 + 20 -> 24 + 5 -> 24 + 4 -> 24 + 35 -> 24 + 19 -> 24 + 58 -> 24 + 59 -> 24 + 26 -> 25 + 29 -> 25 + 30 -> 25 + 32 -> 25 + 33 -> 25 + 34 -> 25 + 37 -> 25 + 38 -> 25 + 40 -> 25 + 19 -> 25 + 15 -> 25 + 18 -> 25 + 41 -> 25 + 5 -> 25 + 4 -> 25 + 35 -> 25 + 42 -> 25 + 43 -> 25 + 53 -> 25 + 55 -> 25 + 39 -> 25 + 56 -> 25 + 57 -> 25 + 58 -> 25 + 27 -> 26 + 4 -> 26 + 28 -> 27 + 28 -> 29 + 4 -> 29 + 31 -> 30 + 19 -> 30 + 4 -> 31 + 19 -> 31 + 9 -> 32 + 33 -> 32 + 32 -> 34 + 35 -> 34 + 36 -> 35 + 4 -> 35 + 13 -> 35 + 9 -> 36 + 13 -> 36 + 9 -> 37 + 4 -> 37 + 32 -> 37 + 35 -> 38 + 34 -> 38 + 32 -> 38 + 33 -> 38 + 39 -> 38 + 36 -> 39 + 4 -> 39 + 13 -> 39 + 33 -> 40 + 4 -> 40 + 10 -> 40 + 9 -> 40 + 33 -> 41 + 4 -> 41 + 36 -> 42 + 5 -> 42 + 13 -> 42 + 44 -> 43 + 49 -> 43 + 52 -> 43 + 45 -> 44 + 47 -> 44 + 48 -> 44 + 46 -> 45 + 33 -> 45 + 33 -> 46 + 33 -> 47 + 48 -> 47 + 46 -> 48 + 33 -> 48 + 50 -> 49 + 51 -> 49 + 4 -> 50 + 35 -> 50 + 33 -> 50 + 48 -> 51 + 50 -> 52 + 47 -> 52 + 54 -> 53 + 36 -> 54 + 4 -> 54 + 13 -> 54 + 32 -> 55 + 35 -> 55 + 36 -> 56 + 4 -> 56 + 13 -> 56 + 36 -> 57 + 4 -> 57 + 13 -> 57 + 39 -> 58 + 56 -> 58 + 57 -> 58 + 35 -> 59 + 34 -> 59 + 55 -> 59 + 62 -> 61 + 65 -> 61 + 60 -> 61 + 7 -> 62 + 14 -> 62 + 15 -> 62 + 18 -> 62 + 5 -> 62 + 4 -> 62 + 63 -> 62 + 23 -> 62 + 65 -> 62 + 58 -> 62 + 26 -> 63 + 29 -> 63 + 30 -> 63 + 32 -> 63 + 33 -> 63 + 34 -> 63 + 37 -> 63 + 38 -> 63 + 64 -> 63 + 65 -> 63 + 15 -> 63 + 18 -> 63 + 41 -> 63 + 5 -> 63 + 4 -> 63 + 35 -> 63 + 42 -> 63 + 66 -> 63 + 53 -> 63 + 70 -> 63 + 39 -> 63 + 56 -> 63 + 57 -> 63 + 58 -> 63 + 33 -> 64 + 4 -> 64 + 10 -> 64 + 9 -> 64 + 67 -> 66 + 68 -> 66 + 52 -> 66 + 45 -> 67 + 47 -> 67 + 48 -> 67 + 50 -> 68 + 69 -> 68 + 48 -> 69 + 32 -> 70 + 35 -> 70 + 72 -> 71 + 75 -> 71 + 60 -> 71 + 7 -> 72 + 14 -> 72 + 15 -> 72 + 18 -> 72 + 5 -> 72 + 4 -> 72 + 73 -> 72 + 61 -> 72 + 75 -> 72 + 58 -> 72 
+ 26 -> 73 + 29 -> 73 + 30 -> 73 + 32 -> 73 + 33 -> 73 + 34 -> 73 + 37 -> 73 + 38 -> 73 + 74 -> 73 + 75 -> 73 + 15 -> 73 + 18 -> 73 + 41 -> 73 + 5 -> 73 + 4 -> 73 + 35 -> 73 + 42 -> 73 + 76 -> 73 + 53 -> 73 + 80 -> 73 + 39 -> 73 + 56 -> 73 + 57 -> 73 + 58 -> 73 + 33 -> 74 + 4 -> 74 + 10 -> 74 + 9 -> 74 + 77 -> 76 + 78 -> 76 + 52 -> 76 + 45 -> 77 + 47 -> 77 + 48 -> 77 + 50 -> 78 + 79 -> 78 + 48 -> 79 + 32 -> 80 + 35 -> 80 + 23 -> 81 + 4 -> 81 + 61 -> 82 + 4 -> 82 + 71 -> 83 + 4 -> 83 + 23 -> 84 + 4 -> 84 + 61 -> 85 + 4 -> 85 + 71 -> 86 + 4 -> 86 } | diff --git a/doc/validation.rst b/doc/validation.rst index 7049e3de..e538717c 100644 --- a/doc/validation.rst +++ b/doc/validation.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 diff --git a/doc/wildcards.rst b/doc/wildcards.rst index 75eec192..f86ff311 100644 --- a/doc/wildcards.rst +++ b/doc/wildcards.rst @@ -1,5 +1,5 @@ .. 
- SPDX-FileCopyrightText: 2019-2023 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2024 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 diff --git a/envs/environment.fixed.yaml b/envs/environment.fixed.yaml index ca2ae848..8bbd70bf 100644 --- a/envs/environment.fixed.yaml +++ b/envs/environment.fixed.yaml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 @@ -12,94 +12,90 @@ dependencies: - _libgcc_mutex=0.1 - _openmp_mutex=4.5 - affine=2.4.0 -- alsa-lib=1.2.9 +- alsa-lib=1.2.10 - ampl-mp=3.1.0 - amply=0.1.6 -- anyio=3.7.1 - appdirs=1.4.4 -- argon2-cffi=21.3.0 -- argon2-cffi-bindings=21.2.0 -- asttokens=2.2.1 -- async-lru=2.0.3 +- asttokens=2.4.1 - atk-1.0=2.38.0 -- atlite=0.2.11 +- atlite=0.2.12 - attr=2.5.1 -- attrs=23.1.0 -- aws-c-auth=0.7.0 -- aws-c-cal=0.6.0 -- aws-c-common=0.8.23 +- attrs=23.2.0 +- aws-c-auth=0.7.15 +- aws-c-cal=0.6.9 +- aws-c-common=0.9.12 - aws-c-compression=0.2.17 -- aws-c-event-stream=0.3.1 -- aws-c-http=0.7.11 -- aws-c-io=0.13.28 -- aws-c-mqtt=0.8.14 -- aws-c-s3=0.3.13 -- aws-c-sdkutils=0.1.11 -- aws-checksums=0.1.16 -- aws-crt-cpp=0.20.3 -- aws-sdk-cpp=1.10.57 -- babel=2.12.1 -- backcall=0.2.0 -- backports=1.0 -- backports.functools_lru_cache=1.6.5 -- beautifulsoup4=4.12.2 -- bleach=6.0.0 -- blosc=1.21.4 -- bokeh=3.2.1 -- boost-cpp=1.78.0 +- aws-c-event-stream=0.4.1 +- aws-c-http=0.8.0 +- aws-c-io=0.14.3 +- aws-c-mqtt=0.10.1 +- aws-c-s3=0.5.0 +- aws-c-sdkutils=0.1.14 +- aws-checksums=0.1.17 +- aws-crt-cpp=0.26.1 +- aws-sdk-cpp=1.11.242 +- azure-core-cpp=1.10.3 +- azure-storage-blobs-cpp=12.10.0 +- azure-storage-common-cpp=12.5.0 +- beautifulsoup4=4.12.3 +- blosc=1.21.5 +- bokeh=3.3.4 - bottleneck=1.3.7 -- branca=0.6.0 -- brotli=1.0.9 -- brotli-bin=1.0.9 -- brotli-python=1.0.9 +- branca=0.7.1 +- brotli=1.1.0 +- brotli-bin=1.1.0 +- brotli-python=1.1.0 - bzip2=1.0.8 -- c-ares=1.19.1 -- c-blosc2=2.10.0 
-- ca-certificates=2023.7.22 -- cairo=1.16.0 -- cartopy=0.21.1 +- c-ares=1.26.0 +- c-blosc2=2.13.2 +- ca-certificates=2024.2.2 +- cairo=1.18.0 +- cartopy=0.22.0 - cdsapi=0.6.1 -- certifi=2023.7.22 -- cffi=1.15.1 -- cfitsio=4.2.0 -- cftime=1.6.2 -- charset-normalizer=3.2.0 -- click=8.1.6 +- certifi=2024.2.2 +- cffi=1.16.0 +- cfgv=3.3.1 +- cfitsio=4.3.1 +- cftime=1.6.3 +- charset-normalizer=3.3.2 +- click=8.1.7 - click-plugins=1.1.1 - cligj=0.7.2 -- cloudpickle=2.2.1 +- cloudpickle=3.0.0 +- coin-or-cbc=2.10.10 +- coin-or-cgl=0.60.7 +- coin-or-clp=1.17.8 +- coin-or-osi=0.108.8 +- coin-or-utils=2.11.9 +- coincbc=2.10.10 - colorama=0.4.6 -- comm=0.1.3 - configargparse=1.7 - connection_pool=0.0.3 -- contourpy=1.1.0 -- country_converter=1.0.0 -- curl=8.2.0 -- cycler=0.11.0 -- cytoolz=0.12.2 -- dask=2023.7.1 -- dask-core=2023.7.1 +- contourpy=1.2.0 +- country_converter=1.2 +- cppad=20240000.2 +- cycler=0.12.1 +- cytoolz=0.12.3 +- dask=2024.2.0 +- dask-core=2024.2.0 - datrie=0.8.2 - dbus=1.13.6 -- debugpy=1.6.7 - decorator=5.1.1 -- defusedxml=0.7.1 - deprecation=2.1.0 - descartes=1.1.0 -- distributed=2023.7.1 -- distro=1.8.0 +- distlib=0.3.8 +- distributed=2024.2.0 +- distro=1.9.0 - docutils=0.20.1 - dpath=2.1.6 -- entrypoints=0.4 -- entsoe-py=0.5.10 +- entsoe-py=0.6.6 - et_xmlfile=1.1.0 -- exceptiongroup=1.1.2 -- executing=1.2.0 +- exceptiongroup=1.2.0 +- executing=2.0.1 - expat=2.5.0 -- filelock=3.12.2 -- fiona=1.9.4 -- flit-core=3.9.0 -- folium=0.14.0 +- filelock=3.13.1 +- fiona=1.9.5 +- folium=0.15.1 - font-ttf-dejavu-sans-mono=2.37 - font-ttf-inconsolata=3.000 - font-ttf-source-code-pro=2.038 @@ -107,366 +103,344 @@ dependencies: - fontconfig=2.14.2 - fonts-conda-ecosystem=1 - fonts-conda-forge=1 -- fonttools=4.41.1 +- fonttools=4.49.0 - freetype=2.12.1 -- freexl=1.0.6 +- freexl=2.0.0 - fribidi=1.0.10 -- fsspec=2023.6.0 -- gdal=3.7.0 +- fsspec=2024.2.0 +- gdal=3.8.4 - gdk-pixbuf=2.42.10 - geographiclib=1.52 -- geojson-rewind=1.0.2 -- geopandas=0.13.2 -- 
geopandas-base=0.13.2 -- geopy=2.3.0 -- geos=3.11.2 +- geojson-rewind=1.1.0 +- geopandas=0.14.3 +- geopandas-base=0.14.3 +- geopy=2.4.1 +- geos=3.12.1 - geotiff=1.7.1 - gettext=0.21.1 - gflags=2.2.2 - giflib=5.2.1 -- gitdb=4.0.10 -- gitpython=3.1.32 -- glib=2.76.4 -- glib-tools=2.76.4 +- gitdb=4.0.11 +- gitpython=3.1.42 +- glib=2.78.4 +- glib-tools=2.78.4 - glog=0.6.0 -- gmp=6.2.1 +- glpk=5.0 +- gmp=6.3.0 - graphite2=1.3.13 -- graphviz=8.1.0 -- gst-plugins-base=1.22.5 -- gstreamer=1.22.5 +- graphviz=9.0.0 +- gst-plugins-base=1.22.9 +- gstreamer=1.22.9 - gtk2=2.24.33 - gts=0.7.6 -- harfbuzz=7.3.0 +- harfbuzz=8.3.0 - hdf4=4.2.15 -- hdf5=1.14.1 +- hdf5=1.14.3 - humanfriendly=10.0 -- icu=72.1 -- idna=3.4 -- importlib-metadata=6.8.0 -- importlib_metadata=6.8.0 -- importlib_resources=6.0.0 +- icu=73.2 +- identify=2.5.35 +- idna=3.6 +- importlib-metadata=7.0.1 +- importlib_metadata=7.0.1 +- importlib_resources=6.1.1 - iniconfig=2.0.0 -- ipopt=3.14.12 -- ipykernel=6.24.0 -- ipython=8.14.0 -- ipython_genutils=0.2.0 -- ipywidgets=8.0.7 -- jedi=0.18.2 -- jinja2=3.1.2 -- joblib=1.3.0 -- json-c=0.16 -- json5=0.9.14 -- jsonschema=4.18.4 -- jsonschema-specifications=2023.7.1 -- jupyter=1.0.0 -- jupyter-lsp=2.2.0 -- jupyter_client=8.3.0 -- jupyter_console=6.6.3 -- jupyter_core=5.3.1 -- jupyter_events=0.6.3 -- jupyter_server=2.7.0 -- jupyter_server_terminals=0.4.4 -- jupyterlab=4.0.3 -- jupyterlab_pygments=0.2.2 -- jupyterlab_server=2.24.0 -- jupyterlab_widgets=3.0.8 -- kealib=1.5.1 +- ipopt=3.14.14 +- ipython=8.21.0 +- jedi=0.19.1 +- jinja2=3.1.3 +- joblib=1.3.2 +- json-c=0.17 +- jsonschema=4.21.1 +- jsonschema-specifications=2023.12.1 +- jupyter_core=5.7.1 +- kealib=1.5.3 - keyutils=1.6.1 -- kiwisolver=1.4.4 -- krb5=1.21.1 +- kiwisolver=1.4.5 +- krb5=1.21.2 - lame=3.100 -- lcms2=2.15 +- lcms2=2.16 - ld_impl_linux-64=2.40 - lerc=4.0.0 -- libabseil=20230125.3 -- libaec=1.0.6 -- libarchive=3.6.2 -- libarrow=12.0.1 +- libabseil=20230802.1 +- libaec=1.1.2 +- libarchive=3.7.2 +- 
libarrow=15.0.0 +- libarrow-acero=15.0.0 +- libarrow-dataset=15.0.0 +- libarrow-flight=15.0.0 +- libarrow-flight-sql=15.0.0 +- libarrow-gandiva=15.0.0 +- libarrow-substrait=15.0.0 - libblas=3.9.0 -- libbrotlicommon=1.0.9 -- libbrotlidec=1.0.9 -- libbrotlienc=1.0.9 -- libcap=2.67 +- libboost-headers=1.84.0 +- libbrotlicommon=1.1.0 +- libbrotlidec=1.1.0 +- libbrotlienc=1.1.0 +- libcap=2.69 - libcblas=3.9.0 - libclang=15.0.7 - libclang13=15.0.7 - libcrc32c=1.1.2 - libcups=2.3.3 -- libcurl=8.2.0 -- libdeflate=1.18 +- libcurl=8.5.0 +- libdeflate=1.19 - libedit=3.1.20191231 - libev=4.33 - libevent=2.1.12 - libexpat=2.5.0 - libffi=3.4.2 - libflac=1.4.3 -- libgcc-ng=13.1.0 -- libgcrypt=1.10.1 +- libgcc-ng=13.2.0 +- libgcrypt=1.10.3 - libgd=2.3.3 -- libgdal=3.7.0 -- libgfortran-ng=13.1.0 -- libgfortran5=13.1.0 -- libglib=2.76.4 -- libgomp=13.1.0 +- libgdal=3.8.4 +- libgfortran-ng=13.2.0 +- libgfortran5=13.2.0 +- libglib=2.78.4 +- libgomp=13.2.0 - libgoogle-cloud=2.12.0 - libgpg-error=1.47 -- libgrpc=1.56.2 +- libgrpc=1.60.1 +- libhwloc=2.9.3 - libiconv=1.17 -- libjpeg-turbo=2.1.5.1 +- libjpeg-turbo=3.0.0 - libkml=1.3.0 - liblapack=3.9.0 - liblapacke=3.9.0 - libllvm15=15.0.7 - libnetcdf=4.9.2 -- libnghttp2=1.52.0 -- libnsl=2.0.0 +- libnghttp2=1.58.0 +- libnl=3.9.0 +- libnsl=2.0.1 - libnuma=2.0.16 - libogg=1.3.4 -- libopenblas=0.3.23 +- libopenblas=0.3.26 - libopus=1.3.1 -- libpng=1.6.39 -- libpq=15.3 -- libprotobuf=4.23.3 -- librsvg=2.56.1 +- libparquet=15.0.0 +- libpng=1.6.42 +- libpq=16.2 +- libprotobuf=4.25.1 +- libre2-11=2023.06.02 +- librsvg=2.56.3 - librttopo=1.1.0 -- libsndfile=1.2.0 -- libsodium=1.0.18 +- libscotch=7.0.4 +- libsndfile=1.2.2 - libspatialindex=1.9.3 -- libspatialite=5.0.1 -- libsqlite=3.42.0 +- libspatialite=5.1.0 +- libspral=2023.09.07 +- libsqlite=3.45.1 - libssh2=1.11.0 -- libstdcxx-ng=13.1.0 -- libsystemd0=253 -- libthrift=0.18.1 -- libtiff=4.5.1 -- libtool=2.4.7 +- libstdcxx-ng=13.2.0 +- libsystemd0=255 +- libthrift=0.19.0 +- libtiff=4.6.0 - 
libutf8proc=2.8.0 - libuuid=2.38.1 - libvorbis=1.3.7 -- libwebp=1.3.1 -- libwebp-base=1.3.1 +- libwebp=1.3.2 +- libwebp-base=1.3.2 - libxcb=1.15 -- libxkbcommon=1.5.0 -- libxml2=2.11.4 -- libxslt=1.1.37 -- libzip=1.9.2 +- libxcrypt=4.4.36 +- libxkbcommon=1.6.0 +- libxml2=2.12.5 +- libxslt=1.1.39 +- libzip=1.10.1 - libzlib=1.2.13 +- linopy=0.3.4 - locket=1.0.0 -- lxml=4.9.3 -- lz4=4.3.2 +- lxml=5.1.0 +- lz4=4.3.3 - lz4-c=1.9.4 - lzo=2.10 -- mapclassify=2.5.0 -- markupsafe=2.1.3 -- matplotlib=3.5.3 -- matplotlib-base=3.5.3 +- mapclassify=2.6.1 +- markupsafe=2.1.5 +- matplotlib=3.8.3 +- matplotlib-base=3.8.3 - matplotlib-inline=0.1.6 - memory_profiler=0.61.0 -- metis=5.1.1 -- mistune=3.0.0 -- mpg123=1.31.3 -- msgpack-python=1.0.5 -- mumps-include=5.2.1 -- mumps-seq=5.2.1 -- munch=4.0.0 +- metis=5.1.0 +- minizip=4.0.4 +- mpg123=1.32.4 +- msgpack-python=1.0.7 +- mumps-include=5.6.2 +- mumps-seq=5.6.2 - munkres=1.1.4 - mysql-common=8.0.33 - mysql-libs=8.0.33 -- nbclient=0.8.0 -- nbconvert=7.7.2 -- nbconvert-core=7.7.2 -- nbconvert-pandoc=7.7.2 -- nbformat=5.9.1 +- nbformat=5.9.2 - ncurses=6.4 -- nest-asyncio=1.5.6 -- netcdf4=1.6.4 -- networkx=3.1 +- netcdf4=1.6.5 +- networkx=3.2.1 +- nodeenv=1.8.0 - nomkl=1.0 -- notebook=7.0.0 -- notebook-shim=0.2.3 - nspr=4.35 -- nss=3.89 -- numexpr=2.8.4 -- numpy=1.25.1 -- openjdk=17.0.3 +- nss=3.98 +- numexpr=2.9.0 +- numpy=1.26.4 +- openjdk=21.0.2 - openjpeg=2.5.0 - openpyxl=3.1.2 -- openssl=3.1.1 -- orc=1.9.0 -- overrides=7.3.1 -- packaging=23.1 -- pandas=2.0.3 -- pandoc=3.1.3 -- pandocfilters=1.5.0 +- openssl=3.2.1 +- orc=1.9.2 +- packaging=23.2 +- pandas=2.2.0 - pango=1.50.14 - parso=0.8.3 -- partd=1.4.0 -- patsy=0.5.3 -- pcre2=10.40 -- pexpect=4.8.0 +- partd=1.4.1 +- patsy=0.5.6 +- pcre2=10.42 +- pexpect=4.9.0 - pickleshare=0.7.5 -- pillow=10.0.0 -- pip=23.2.1 -- pixman=0.40.0 +- pillow=10.2.0 +- pip=24.0 +- pixman=0.43.2 - pkgutil-resolve-name=1.3.10 -- plac=1.3.5 -- platformdirs=3.9.1 -- pluggy=1.2.0 +- plac=1.4.2 +- 
platformdirs=4.2.0 +- pluggy=1.4.0 - ply=3.11 -- pooch=1.7.0 -- poppler=23.05.0 +- poppler=24.02.0 - poppler-data=0.4.12 -- postgresql=15.3 -- powerplantmatching=0.5.7 -- progressbar2=4.2.0 -- proj=9.2.1 -- prometheus_client=0.17.1 -- prompt-toolkit=3.0.39 -- prompt_toolkit=3.0.39 -- psutil=5.9.5 +- postgresql=16.2 +- powerplantmatching=0.5.11 +- pre-commit=3.6.2 +- progressbar2=4.3.2 +- proj=9.3.1 +- prompt-toolkit=3.0.42 +- psutil=5.9.8 - pthread-stubs=0.4 - ptyprocess=0.7.0 - pulp=2.7.0 - pulseaudio-client=16.1 - pure_eval=0.2.2 - py-cpuinfo=9.0.0 -- pyarrow=12.0.1 +- pyarrow=15.0.0 +- pyarrow-hotfix=0.6 - pycountry=22.3.5 - pycparser=2.21 -- pygments=2.15.1 +- pygments=2.17.2 - pyomo=6.6.1 -- pyparsing=3.1.0 -- pyproj=3.6.0 -- pyqt=5.15.7 -- pyqt5-sip=12.11.0 +- pyparsing=3.1.1 +- pyproj=3.6.1 +- pypsa=0.27.0 +- pyqt=5.15.9 +- pyqt5-sip=12.12.2 +- pyscipopt=4.4.0 - pyshp=2.3.1 - pysocks=1.7.1 -- pytables=3.8.0 -- pytest=7.4.0 -- python=3.10.12 +- pytables=3.9.2 +- pytest=8.0.0 +- python=3.11.8 - python-dateutil=2.8.2 -- python-fastjsonschema=2.18.0 -- python-json-logger=2.0.7 -- python-tzdata=2023.3 -- python-utils=3.7.0 -- python_abi=3.10 -- pytz=2023.3 +- python-fastjsonschema=2.19.1 +- python-tzdata=2024.1 +- python-utils=3.8.2 +- python_abi=3.11 +- pytz=2024.1 - pyxlsb=1.0.10 -- pyyaml=6.0 -- pyzmq=25.1.0 +- pyyaml=6.0.1 - qt-main=5.15.8 -- qtconsole=5.4.3 -- qtconsole-base=5.4.3 -- qtpy=2.3.1 -- rasterio=1.3.8 -- rdma-core=28.9 -- re2=2023.03.02 +- rasterio=1.3.9 +- rdma-core=50.0 +- re2=2023.06.02 - readline=8.2 -- referencing=0.30.0 +- referencing=0.33.0 - requests=2.31.0 - reretry=0.11.8 -- rfc3339-validator=0.1.4 -- rfc3986-validator=0.1.1 -- rioxarray=0.14.1 -- rpds-py=0.9.2 -- rtree=1.0.1 -- s2n=1.3.46 -- scikit-learn=1.3.0 -- scipy=1.11.1 -- scotch=6.0.9 -- seaborn=0.12.2 -- seaborn-base=0.12.2 -- send2trash=1.8.2 -- setuptools=68.0.0 -- setuptools-scm=7.1.0 -- setuptools_scm=7.1.0 -- shapely=2.0.1 -- sip=6.7.10 +- rioxarray=0.15.1 +- rpds-py=0.18.0 
+- rtree=1.2.0 +- s2n=1.4.3 +- scikit-learn=1.4.1.post1 +- scip=8.1.0 +- scipy=1.12.0 +- scotch=7.0.4 +- seaborn=0.13.2 +- seaborn-base=0.13.2 +- setuptools=69.1.0 +- setuptools-scm=8.0.4 +- setuptools_scm=8.0.4 +- shapely=2.0.2 +- sip=6.7.12 - six=1.16.0 -- smart_open=6.3.0 -- smmap=3.0.5 -- snakemake-minimal=7.30.2 +- smart_open=6.4.0 +- smmap=5.0.0 +- snakemake-minimal=7.32.4 - snappy=1.1.10 -- sniffio=1.3.0 - snuggs=1.4.7 - sortedcontainers=2.4.0 -- soupsieve=2.3.2.post1 -- sqlite=3.42.0 +- soupsieve=2.5 +- sqlite=3.45.1 - stack_data=0.6.2 -- statsmodels=0.14.0 +- statsmodels=0.14.1 - stopit=1.1.2 -- tabula-py=2.6.0 +- tabula-py=2.7.0 - tabulate=0.9.0 -- tblib=1.7.0 -- terminado=0.17.1 -- threadpoolctl=3.2.0 -- throttler=1.2.1 -- tiledb=2.13.2 -- tinycss2=1.2.1 -- tk=8.6.12 +- tbb=2021.11.0 +- tblib=3.0.0 +- threadpoolctl=3.3.0 +- throttler=1.2.2 +- tiledb=2.20.0 +- tk=8.6.13 - toml=0.10.2 - tomli=2.0.1 -- toolz=0.12.0 +- toolz=0.12.1 - toposort=1.10 -- tornado=6.3.2 -- tqdm=4.65.0 -- traitlets=5.9.0 -- typing-extensions=4.7.1 -- typing_extensions=4.7.1 -- typing_utils=0.1.0 -- tzcode=2023c -- tzdata=2023c -- ucx=1.14.1 -- unicodedata2=15.0.0 -- unidecode=1.3.6 -- unixodbc=2.3.10 -- urllib3=2.0.4 -- wcwidth=0.2.6 -- webencodings=0.5.1 -- websocket-client=1.6.1 -- wheel=0.41.0 -- widgetsnbextension=4.0.8 -- wrapt=1.15.0 -- xarray=2023.7.0 +- tornado=6.3.3 +- tqdm=4.66.2 +- traitlets=5.14.1 +- typing-extensions=4.9.0 +- typing_extensions=4.9.0 +- tzcode=2024a +- tzdata=2024a +- ucx=1.15.0 +- ukkonen=1.0.1 +- unidecode=1.3.8 +- unixodbc=2.3.12 +- uriparser=0.9.7 +- urllib3=2.2.1 +- validators=0.22.0 +- virtualenv=20.25.0 +- wcwidth=0.2.13 +- wheel=0.42.0 +- wrapt=1.16.0 +- xarray=2024.2.0 - xcb-util=0.4.0 - xcb-util-image=0.4.0 - xcb-util-keysyms=0.4.0 - xcb-util-renderutil=0.3.9 - xcb-util-wm=0.4.1 -- xerces-c=3.2.4 -- xkeyboard-config=2.39 +- xerces-c=3.2.5 +- xkeyboard-config=2.41 - xlrd=2.0.1 - xorg-fixesproto=5.0 - xorg-inputproto=2.3.2 - xorg-kbproto=1.0.7 - 
xorg-libice=1.1.1 - xorg-libsm=1.2.4 -- xorg-libx11=1.8.6 +- xorg-libx11=1.8.7 - xorg-libxau=1.0.11 - xorg-libxdmcp=1.1.3 - xorg-libxext=1.3.4 - xorg-libxfixes=5.0.3 - xorg-libxi=1.7.10 - xorg-libxrender=0.9.11 +- xorg-libxt=1.3.0 - xorg-libxtst=1.2.3 - xorg-recordproto=1.14.2 - xorg-renderproto=0.11.1 - xorg-xextproto=7.3.0 - xorg-xf86vidmodeproto=2.3.1 - xorg-xproto=7.0.31 -- xyzservices=2023.7.0 +- xyzservices=2023.10.1 - xz=5.2.6 - yaml=0.2.5 -- yte=1.5.1 -- zeromq=4.3.4 +- yte=1.5.4 - zict=3.0.0 -- zipp=3.16.2 +- zipp=3.17.0 - zlib=1.2.13 - zlib-ng=2.0.7 -- zstd=1.5.2 +- zstd=1.5.5 - pip: - - gurobipy==10.0.2 - - linopy==0.2.2 - - pypsa==0.25.1 - - tsam==2.3.0 - - validators==0.20.0 + - highspy==1.5.3 + - tsam==2.3.1 diff --git a/envs/environment.yaml b/envs/environment.yaml index 3b85f7a9..316a662c 100644 --- a/envs/environment.yaml +++ b/envs/environment.yaml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -25,7 +25,7 @@ dependencies: - yaml - pytables - lxml -- powerplantmatching>=0.5.5 +- powerplantmatching>=0.5.5,!=0.5.9 - numpy - pandas>=2.1 - geopandas>=0.11.0 @@ -34,8 +34,9 @@ dependencies: - netcdf4 - networkx - scipy +- glpk - shapely>=2.0 -- pyomo +- pyscipopt - matplotlib - proj - fiona @@ -46,7 +47,7 @@ dependencies: - tabula-py - pyxlsb - graphviz -- ipopt +- pre-commit # Keep in conda environment when calling ipython - ipython @@ -61,3 +62,4 @@ dependencies: - tsam>=2.3.1 - snakemake-storage-plugin-http - snakemake-executor-plugin-slurm + - highspy diff --git a/envs/retrieve.yaml b/envs/retrieve.yaml new file mode 100644 index 00000000..b5db795d --- /dev/null +++ b/envs/retrieve.yaml @@ -0,0 +1,13 @@ +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT + +name: pypsa-eur-retrieve +channels: +- conda-forge +- bioconda +dependencies: +- python>=3.8 +- 
snakemake-minimal>=7.7.0,<8.0.0 +- pandas>=2.1 +- tqdm diff --git a/graphics/workflow.png b/graphics/workflow.png index f60f3462..a7fbc5ad 100644 Binary files a/graphics/workflow.png and b/graphics/workflow.png differ diff --git a/matplotlibrc b/matplotlibrc index f00ed5cd..bf667fb1 100644 --- a/matplotlibrc +++ b/matplotlibrc @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 font.family: sans-serif diff --git a/rules/build_electricity.smk b/rules/build_electricity.smk index 6308552f..24f328eb 100644 --- a/rules/build_electricity.smk +++ b/rules/build_electricity.smk @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2023-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -8,7 +8,7 @@ if config["enable"].get("prepare_links_p_nom", False): output: "data/links_p_nom.csv", log: - LOGS + "prepare_links_p_nom.log", + logs("prepare_links_p_nom.log"), threads: 1 resources: mem_mb=1500, @@ -20,15 +20,15 @@ if config["enable"].get("prepare_links_p_nom", False): rule build_electricity_demand: params: - snapshots=config["snapshots"], - countries=config["countries"], - load=config["load"], + snapshots=config_provider("snapshots"), + countries=config_provider("countries"), + load=config_provider("load"), input: - ancient(RESOURCES + "load_raw.csv"), + ancient("data/electricity_demand_raw.csv"), output: - RESOURCES + "load.csv", + resources("electricity_demand.csv"), log: - LOGS + "build_electricity_demand.log", + logs("build_electricity_demand.log"), resources: mem_mb=5000, conda: @@ -39,16 +39,17 @@ rule build_electricity_demand: rule build_powerplants: params: - powerplants_filter=config["electricity"]["powerplants_filter"], - custom_powerplants=config["electricity"]["custom_powerplants"], - countries=config["countries"], + powerplants_filter=config_provider("electricity", 
"powerplants_filter"), + custom_powerplants=config_provider("electricity", "custom_powerplants"), + everywhere_powerplants=config_provider("electricity", "everywhere_powerplants"), + countries=config_provider("countries"), input: - base_network=RESOURCES + "networks/base.nc", + base_network=resources("networks/base.nc"), custom_powerplants="data/custom_powerplants.csv", output: - RESOURCES + "powerplants.csv", + resources("powerplants.csv"), log: - LOGS + "build_powerplants.log", + logs("build_powerplants.log"), threads: 1 resources: mem_mb=5000, @@ -60,11 +61,11 @@ rule build_powerplants: rule base_network: params: - countries=config["countries"], - snapshots=config["snapshots"], - lines=config["lines"], - links=config["links"], - transformers=config["transformers"], + countries=config_provider("countries"), + snapshots=config_provider("snapshots"), + lines=config_provider("lines"), + links=config_provider("links"), + transformers=config_provider("transformers"), input: eg_buses="data/entsoegridkit/buses.csv", eg_lines="data/entsoegridkit/lines.csv", @@ -74,15 +75,15 @@ rule base_network: parameter_corrections="data/parameter_corrections.yaml", links_p_nom="data/links_p_nom.csv", links_tyndp="data/links_tyndp.csv", - country_shapes=RESOURCES + "country_shapes.geojson", - offshore_shapes=RESOURCES + "offshore_shapes.geojson", - europe_shape=RESOURCES + "europe_shape.geojson", + country_shapes=resources("country_shapes.geojson"), + offshore_shapes=resources("offshore_shapes.geojson"), + europe_shape=resources("europe_shape.geojson"), output: - RESOURCES + "networks/base.nc", + resources("networks/base.nc"), log: - LOGS + "base_network.log", + logs("base_network.log"), benchmark: - BENCHMARKS + "base_network" + benchmarks("base_network") threads: 1 resources: mem_mb=1500, @@ -94,7 +95,7 @@ rule base_network: rule build_shapes: params: - countries=config["countries"], + countries=config_provider("countries"), input: 
naturalearth=ancient("data/bundle/naturalearth/ne_10m_admin_0_countries.shp"), eez=ancient("data/bundle/eez/World_EEZ_v8_2014.shp"), @@ -104,12 +105,12 @@ rule build_shapes: ch_cantons=ancient("data/bundle/ch_cantons.csv"), ch_popgdp=ancient("data/bundle/je-e-21.03.02.xls"), output: - country_shapes=RESOURCES + "country_shapes.geojson", - offshore_shapes=RESOURCES + "offshore_shapes.geojson", - europe_shape=RESOURCES + "europe_shape.geojson", - nuts3_shapes=RESOURCES + "nuts3_shapes.geojson", + country_shapes=resources("country_shapes.geojson"), + offshore_shapes=resources("offshore_shapes.geojson"), + europe_shape=resources("europe_shape.geojson"), + nuts3_shapes=resources("nuts3_shapes.geojson"), log: - LOGS + "build_shapes.log", + logs("build_shapes.log"), threads: 1 resources: mem_mb=1500, @@ -121,16 +122,16 @@ rule build_shapes: rule build_bus_regions: params: - countries=config["countries"], + countries=config_provider("countries"), input: - country_shapes=RESOURCES + "country_shapes.geojson", - offshore_shapes=RESOURCES + "offshore_shapes.geojson", - base_network=RESOURCES + "networks/base.nc", + country_shapes=resources("country_shapes.geojson"), + offshore_shapes=resources("offshore_shapes.geojson"), + base_network=resources("networks/base.nc"), output: - regions_onshore=RESOURCES + "regions_onshore.geojson", - regions_offshore=RESOURCES + "regions_offshore.geojson", + regions_onshore=resources("regions_onshore.geojson"), + regions_offshore=resources("regions_offshore.geojson"), log: - LOGS + "build_bus_regions.log", + logs("build_bus_regions.log"), threads: 1 resources: mem_mb=1000, @@ -144,20 +145,20 @@ if config["enable"].get("build_cutout", False): rule build_cutout: params: - snapshots=config["snapshots"], - cutouts=config["atlite"]["cutouts"], + snapshots=config_provider("snapshots"), + cutouts=config_provider("atlite", "cutouts"), input: - regions_onshore=RESOURCES + "regions_onshore.geojson", - regions_offshore=RESOURCES + 
"regions_offshore.geojson", + regions_onshore=resources("regions_onshore.geojson"), + regions_offshore=resources("regions_offshore.geojson"), output: protected("cutouts/" + CDIR + "{cutout}.nc"), log: - "logs/" + CDIR + "build_cutout/{cutout}.log", + logs(CDIR + "build_cutout/{cutout}.log"), benchmark: "benchmarks/" + CDIR + "build_cutout_{cutout}" - threads: ATLITE_NPROCESSES + threads: config["atlite"].get("nprocesses", 4) resources: - mem_mb=ATLITE_NPROCESSES * 1000, + mem_mb=config["atlite"].get("nprocesses", 4) * 1000, conda: "../envs/environment.yaml" script: @@ -169,13 +170,15 @@ if config["enable"].get("build_natura_raster", False): rule build_natura_raster: input: natura=ancient("data/bundle/natura/Natura2000_end2015.shp"), - cutouts=expand("cutouts/" + CDIR + "{cutouts}.nc", **config["atlite"]), + cutouts=lambda w: expand( + "cutouts/" + CDIR + "{cutouts}.nc", **config_provider("atlite")(w) + ), output: - RESOURCES + "natura.tiff", + resources("natura.tiff"), resources: mem_mb=5000, log: - LOGS + "build_natura_raster.log", + logs("build_natura_raster.log"), conda: "../envs/environment.yaml" script: @@ -185,21 +188,21 @@ if config["enable"].get("build_natura_raster", False): rule build_ship_raster: input: ship_density="data/shipdensity_global.zip", - cutouts=expand( + cutouts=lambda w: expand( "cutouts/" + CDIR + "{cutout}.nc", cutout=[ - config["renewable"][k]["cutout"] - for k in config["electricity"]["renewable_carriers"] + config_provider("renewable", k, "cutout")(w) + for k in config_provider("electricity", "renewable_carriers")(w) ], ), output: - RESOURCES + "shipdensity_raster.tif", + resources("shipdensity_raster.tif"), log: - LOGS + "build_ship_raster.log", + logs("build_ship_raster.log"), resources: mem_mb=5000, benchmark: - BENCHMARKS + "build_ship_raster" + benchmarks("build_ship_raster") conda: "../envs/environment.yaml" script: @@ -213,33 +216,33 @@ rule determine_availability_matrix_MD_UA: wdpa_marine="data/WDPA_WDOECM_marine.gpkg", 
gebco=lambda w: ( "data/bundle/GEBCO_2014_2D.nc" - if "max_depth" in config["renewable"][w.technology].keys() + if config_provider("renewable", w.technology)(w).get("max_depth") else [] ), ship_density=lambda w: ( - RESOURCES + "shipdensity_raster.tif" - if "ship_threshold" in config["renewable"][w.technology].keys() + resources("shipdensity_raster.tif") + if "ship_threshold" in config_provider("renewable", w.technology)(w).keys() else [] ), - country_shapes=RESOURCES + "country_shapes.geojson", - offshore_shapes=RESOURCES + "offshore_shapes.geojson", + country_shapes=resources("country_shapes.geojson"), + offshore_shapes=resources("offshore_shapes.geojson"), regions=lambda w: ( - RESOURCES + "regions_onshore.geojson" + resources("regions_onshore.geojson") if w.technology in ("onwind", "solar") - else RESOURCES + "regions_offshore.geojson" + else resources("regions_offshore.geojson") ), cutout=lambda w: "cutouts/" + CDIR - + config["renewable"][w.technology]["cutout"] + + config_provider("renewable", w.technology, "cutout")(w) + ".nc", output: - availability_matrix=RESOURCES + "availability_matrix_MD-UA_{technology}.nc", - availability_map=RESOURCES + "availability_matrix_MD-UA_{technology}.png", + availability_matrix=resources("availability_matrix_MD-UA_{technology}.nc"), + availability_map=resources("availability_matrix_MD-UA_{technology}.png"), log: - LOGS + "determine_availability_matrix_MD_UA_{technology}.log", - threads: ATLITE_NPROCESSES + logs("determine_availability_matrix_MD_UA_{technology}.log"), + threads: config["atlite"].get("nprocesses", 4) resources: - mem_mb=ATLITE_NPROCESSES * 5000, + mem_mb=config["atlite"].get("nprocesses", 4) * 5000, conda: "../envs/environment.yaml" script: @@ -247,59 +250,67 @@ rule determine_availability_matrix_MD_UA: # Optional input when having Ukraine (UA) or Moldova (MD) in the countries list -if {"UA", "MD"}.intersection(set(config["countries"])): - opt = { - "availability_matrix_MD_UA": RESOURCES - + 
"availability_matrix_MD-UA_{technology}.nc" - } -else: - opt = {} +def input_ua_md_availability_matrix(w): + countries = set(config_provider("countries")(w)) + if {"UA", "MD"}.intersection(countries): + return { + "availability_matrix_MD_UA": resources( + "availability_matrix_MD-UA_{technology}.nc" + ) + } + return {} rule build_renewable_profiles: params: - renewable=config["renewable"], + snapshots=config_provider("snapshots"), + renewable=config_provider("renewable"), input: - **opt, - base_network=RESOURCES + "networks/base.nc", + unpack(input_ua_md_availability_matrix), + base_network=resources("networks/base.nc"), corine=ancient("data/bundle/corine/g250_clc06_V18_5.tif"), natura=lambda w: ( - RESOURCES + "natura.tiff" - if config["renewable"][w.technology]["natura"] + resources("natura.tiff") + if config_provider("renewable", w.technology, "natura")(w) + else [] + ), + luisa=lambda w: ( + "data/LUISA_basemap_020321_50m.tif" + if config_provider("renewable", w.technology, "luisa")(w) else [] ), gebco=ancient( lambda w: ( "data/bundle/GEBCO_2014_2D.nc" - if config["renewable"][w.technology].get("max_depth") + if config_provider("renewable", w.technology)(w).get("max_depth") else [] ) ), ship_density=lambda w: ( - RESOURCES + "shipdensity_raster.tif" - if config["renewable"][w.technology].get("ship_threshold", False) + resources("shipdensity_raster.tif") + if "ship_threshold" in config_provider("renewable", w.technology)(w).keys() else [] ), - country_shapes=RESOURCES + "country_shapes.geojson", - offshore_shapes=RESOURCES + "offshore_shapes.geojson", + country_shapes=resources("country_shapes.geojson"), + offshore_shapes=resources("offshore_shapes.geojson"), regions=lambda w: ( - RESOURCES + "regions_onshore.geojson" + resources("regions_onshore.geojson") if w.technology in ("onwind", "solar") - else RESOURCES + "regions_offshore.geojson" + else resources("regions_offshore.geojson") ), cutout=lambda w: "cutouts/" + CDIR - + 
config["renewable"][w.technology]["cutout"] + + config_provider("renewable", w.technology, "cutout")(w) + ".nc", output: - profile=RESOURCES + "profile_{technology}.nc", + profile=resources("profile_{technology}.nc"), log: - LOGS + "build_renewable_profile_{technology}.log", + logs("build_renewable_profile_{technology}.log"), benchmark: - BENCHMARKS + "build_renewable_profiles_{technology}" - threads: ATLITE_NPROCESSES + benchmarks("build_renewable_profiles_{technology}") + threads: config["atlite"].get("nprocesses", 4) resources: - mem_mb=ATLITE_NPROCESSES * 5000, + mem_mb=config["atlite"].get("nprocesses", 4) * 5000, wildcard_constraints: technology="(?!hydro).*", # Any technology other than hydro conda: @@ -313,10 +324,10 @@ rule build_monthly_prices: co2_price_raw="data/validation/emission-spot-primary-market-auction-report-2019-data.xls", fuel_price_raw="data/validation/energy-price-trends-xlsx-5619002.xlsx", output: - co2_price=RESOURCES + "co2_price.csv", - fuel_price=RESOURCES + "monthly_fuel_price.csv", + co2_price=resources("co2_price.csv"), + fuel_price=resources("monthly_fuel_price.csv"), log: - LOGS + "build_monthly_prices.log", + logs("build_monthly_prices.log"), threads: 1 resources: mem_mb=5000, @@ -328,16 +339,19 @@ rule build_monthly_prices: rule build_hydro_profile: params: - hydro=config["renewable"]["hydro"], - countries=config["countries"], + hydro=config_provider("renewable", "hydro"), + countries=config_provider("countries"), input: - country_shapes=RESOURCES + "country_shapes.geojson", + country_shapes=resources("country_shapes.geojson"), eia_hydro_generation="data/eia_hydro_annual_generation.csv", - cutout=f"cutouts/" + CDIR + config["renewable"]["hydro"]["cutout"] + ".nc", + cutout=lambda w: f"cutouts/" + + CDIR + + config_provider("renewable", "hydro", "cutout")(w) + + ".nc", output: - RESOURCES + "profile_hydro.nc", + resources("profile_hydro.nc"), log: - LOGS + "build_hydro_profile.log", + logs("build_hydro_profile.log"), resources: 
mem_mb=5000, conda: @@ -346,73 +360,87 @@ rule build_hydro_profile: "../scripts/build_hydro_profile.py" -if config["lines"]["dynamic_line_rating"]["activate"]: +rule build_line_rating: + params: + snapshots=config_provider("snapshots"), + input: + base_network=resources("networks/base.nc"), + cutout=lambda w: "cutouts/" + + CDIR + + config_provider("lines", "dynamic_line_rating", "cutout")(w) + + ".nc", + output: + output=resources("networks/line_rating.nc"), + log: + logs("build_line_rating.log"), + benchmark: + benchmarks("build_line_rating") + threads: config["atlite"].get("nprocesses", 4) + resources: + mem_mb=config["atlite"].get("nprocesses", 4) * 1000, + conda: + "../envs/environment.yaml" + script: + "../scripts/build_line_rating.py" - rule build_line_rating: - input: - base_network=RESOURCES + "networks/base.nc", - cutout="cutouts/" - + CDIR - + config["lines"]["dynamic_line_rating"]["cutout"] - + ".nc", - output: - output=RESOURCES + "networks/line_rating.nc", - log: - LOGS + "build_line_rating.log", - benchmark: - BENCHMARKS + "build_line_rating" - threads: ATLITE_NPROCESSES - resources: - mem_mb=ATLITE_NPROCESSES * 1000, - conda: - "../envs/environment.yaml" - script: - "../scripts/build_line_rating.py" + +def input_profile_tech(w): + return { + f"profile_{tech}": resources(f"profile_{tech}.nc") + for tech in config_provider("electricity", "renewable_carriers")(w) + } + + +def input_conventional(w): + return { + f"conventional_{carrier}_{attr}": fn + for carrier, d in config_provider("conventional", default={None: {}})(w).items() + if carrier in config_provider("electricity", "conventional_carriers")(w) + for attr, fn in d.items() + if str(fn).startswith("data/") + } rule add_electricity: params: - length_factor=config["lines"]["length_factor"], - scaling_factor=config["load"]["scaling_factor"], - countries=config["countries"], - renewable=config["renewable"], - electricity=config["electricity"], - conventional=config["conventional"], - 
costs=config["costs"], + length_factor=config_provider("lines", "length_factor"), + scaling_factor=config_provider("load", "scaling_factor"), + countries=config_provider("countries"), + renewable=config_provider("renewable"), + electricity=config_provider("electricity"), + conventional=config_provider("conventional"), + costs=config_provider("costs"), input: - **{ - f"profile_{tech}": RESOURCES + f"profile_{tech}.nc" - for tech in config["electricity"]["renewable_carriers"] - }, - **{ - f"conventional_{carrier}_{attr}": fn - for carrier, d in config.get("conventional", {None: {}}).items() - if carrier in config["electricity"]["conventional_carriers"] - for attr, fn in d.items() - if str(fn).startswith("data/") - }, - base_network=RESOURCES + "networks/base.nc", - line_rating=RESOURCES + "networks/line_rating.nc" - if config["lines"]["dynamic_line_rating"]["activate"] - else RESOURCES + "networks/base.nc", - tech_costs=COSTS, - regions=RESOURCES + "regions_onshore.geojson", - powerplants=RESOURCES + "powerplants.csv", + unpack(input_profile_tech), + unpack(input_conventional), + base_network=resources("networks/base.nc"), + line_rating=lambda w: ( + resources("networks/line_rating.nc") + if config_provider("lines", "dynamic_line_rating", "activate")(w) + else resources("networks/base.nc") + ), + tech_costs=lambda w: resources( + f"costs_{config_provider('costs', 'year')(w)}.csv" + ), + regions=resources("regions_onshore.geojson"), + powerplants=resources("powerplants.csv"), hydro_capacities=ancient("data/bundle/hydro_capacities.csv"), geth_hydro_capacities="data/geth2015_hydro_capacities.csv", unit_commitment="data/unit_commitment.csv", - fuel_price=RESOURCES + "monthly_fuel_price.csv" - if config["conventional"]["dynamic_fuel_price"] - else [], - load=RESOURCES + "load.csv", - nuts3_shapes=RESOURCES + "nuts3_shapes.geojson", + fuel_price=lambda w: ( + resources("monthly_fuel_price.csv") + if config_provider("conventional", "dynamic_fuel_price")(w) + else [] + ), + 
load=resources("electricity_demand.csv"), + nuts3_shapes=resources("nuts3_shapes.geojson"), ua_md_gdp="data/GDP_PPP_30arcsec_v3_mapped_default.csv", output: - RESOURCES + "networks/elec.nc", + resources("networks/elec.nc"), log: - LOGS + "add_electricity.log", + logs("add_electricity.log"), benchmark: - BENCHMARKS + "add_electricity" + benchmarks("add_electricity") threads: 1 resources: mem_mb=10000, @@ -424,31 +452,33 @@ rule add_electricity: rule simplify_network: params: - simplify_network=config["clustering"]["simplify_network"], - aggregation_strategies=config["clustering"].get("aggregation_strategies", {}), - focus_weights=config["clustering"].get( - "focus_weights", config.get("focus_weights") + simplify_network=config_provider("clustering", "simplify_network"), + aggregation_strategies=config_provider( + "clustering", "aggregation_strategies", default={} ), - renewable_carriers=config["electricity"]["renewable_carriers"], - max_hours=config["electricity"]["max_hours"], - length_factor=config["lines"]["length_factor"], - p_max_pu=config["links"].get("p_max_pu", 1.0), - costs=config["costs"], + focus_weights=config_provider("clustering", "focus_weights", default=None), + renewable_carriers=config_provider("electricity", "renewable_carriers"), + max_hours=config_provider("electricity", "max_hours"), + length_factor=config_provider("lines", "length_factor"), + p_max_pu=config_provider("links", "p_max_pu", default=1.0), + costs=config_provider("costs"), input: - network=RESOURCES + "networks/elec.nc", - tech_costs=COSTS, - regions_onshore=RESOURCES + "regions_onshore.geojson", - regions_offshore=RESOURCES + "regions_offshore.geojson", + network=resources("networks/elec.nc"), + tech_costs=lambda w: resources( + f"costs_{config_provider('costs', 'year')(w)}.csv" + ), + regions_onshore=resources("regions_onshore.geojson"), + regions_offshore=resources("regions_offshore.geojson"), output: - network=RESOURCES + "networks/elec_s{simpl}.nc", - regions_onshore=RESOURCES 
+ "regions_onshore_elec_s{simpl}.geojson", - regions_offshore=RESOURCES + "regions_offshore_elec_s{simpl}.geojson", - busmap=RESOURCES + "busmap_elec_s{simpl}.csv", - connection_costs=RESOURCES + "connection_costs_s{simpl}.csv", + network=resources("networks/elec_s{simpl}.nc"), + regions_onshore=resources("regions_onshore_elec_s{simpl}.geojson"), + regions_offshore=resources("regions_offshore_elec_s{simpl}.geojson"), + busmap=resources("busmap_elec_s{simpl}.csv"), + connection_costs=resources("connection_costs_s{simpl}.csv"), log: - LOGS + "simplify_network/elec_s{simpl}.log", + logs("simplify_network/elec_s{simpl}.log"), benchmark: - BENCHMARKS + "simplify_network/elec_s{simpl}" + benchmarks("simplify_network/elec_s{simpl}") threads: 1 resources: mem_mb=12000, @@ -460,38 +490,42 @@ rule simplify_network: rule cluster_network: params: - cluster_network=config["clustering"]["cluster_network"], - aggregation_strategies=config["clustering"].get("aggregation_strategies", {}), - custom_busmap=config["enable"].get("custom_busmap", False), - focus_weights=config["clustering"].get( - "focus_weights", config.get("focus_weights") + cluster_network=config_provider("clustering", "cluster_network"), + aggregation_strategies=config_provider( + "clustering", "aggregation_strategies", default={} ), - renewable_carriers=config["electricity"]["renewable_carriers"], - conventional_carriers=config["electricity"].get("conventional_carriers", []), - max_hours=config["electricity"]["max_hours"], - length_factor=config["lines"]["length_factor"], - costs=config["costs"], + custom_busmap=config_provider("enable", "custom_busmap", default=False), + focus_weights=config_provider("clustering", "focus_weights", default=None), + renewable_carriers=config_provider("electricity", "renewable_carriers"), + conventional_carriers=config_provider( + "electricity", "conventional_carriers", default=[] + ), + max_hours=config_provider("electricity", "max_hours"), + length_factor=config_provider("lines", 
"length_factor"), + costs=config_provider("costs"), input: - network=RESOURCES + "networks/elec_s{simpl}.nc", - regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}.geojson", - regions_offshore=RESOURCES + "regions_offshore_elec_s{simpl}.geojson", - busmap=ancient(RESOURCES + "busmap_elec_s{simpl}.csv"), - custom_busmap=( + network=resources("networks/elec_s{simpl}.nc"), + regions_onshore=resources("regions_onshore_elec_s{simpl}.geojson"), + regions_offshore=resources("regions_offshore_elec_s{simpl}.geojson"), + busmap=ancient(resources("busmap_elec_s{simpl}.csv")), + custom_busmap=lambda w: ( "data/custom_busmap_elec_s{simpl}_{clusters}.csv" - if config["enable"].get("custom_busmap", False) + if config_provider("enable", "custom_busmap", default=False)(w) else [] ), - tech_costs=COSTS, + tech_costs=lambda w: resources( + f"costs_{config_provider('costs', 'year')(w)}.csv" + ), output: - network=RESOURCES + "networks/elec_s{simpl}_{clusters}.nc", - regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", - regions_offshore=RESOURCES + "regions_offshore_elec_s{simpl}_{clusters}.geojson", - busmap=RESOURCES + "busmap_elec_s{simpl}_{clusters}.csv", - linemap=RESOURCES + "linemap_elec_s{simpl}_{clusters}.csv", + network=resources("networks/elec_s{simpl}_{clusters}.nc"), + regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), + regions_offshore=resources("regions_offshore_elec_s{simpl}_{clusters}.geojson"), + busmap=resources("busmap_elec_s{simpl}_{clusters}.csv"), + linemap=resources("linemap_elec_s{simpl}_{clusters}.csv"), log: - LOGS + "cluster_network/elec_s{simpl}_{clusters}.log", + logs("cluster_network/elec_s{simpl}_{clusters}.log"), benchmark: - BENCHMARKS + "cluster_network/elec_s{simpl}_{clusters}" + benchmarks("cluster_network/elec_s{simpl}_{clusters}") threads: 1 resources: mem_mb=10000, @@ -503,18 +537,20 @@ rule cluster_network: rule add_extra_components: params: - 
extendable_carriers=config["electricity"]["extendable_carriers"], - max_hours=config["electricity"]["max_hours"], - costs=config["costs"], + extendable_carriers=config_provider("electricity", "extendable_carriers"), + max_hours=config_provider("electricity", "max_hours"), + costs=config_provider("costs"), input: - network=RESOURCES + "networks/elec_s{simpl}_{clusters}.nc", - tech_costs=COSTS, + network=resources("networks/elec_s{simpl}_{clusters}.nc"), + tech_costs=lambda w: resources( + f"costs_{config_provider('costs', 'year')(w)}.csv" + ), output: - RESOURCES + "networks/elec_s{simpl}_{clusters}_ec.nc", + resources("networks/elec_s{simpl}_{clusters}_ec.nc"), log: - LOGS + "add_extra_components/elec_s{simpl}_{clusters}.log", + logs("add_extra_components/elec_s{simpl}_{clusters}.log"), benchmark: - BENCHMARKS + "add_extra_components/elec_s{simpl}_{clusters}_ec" + benchmarks("add_extra_components/elec_s{simpl}_{clusters}_ec") threads: 1 resources: mem_mb=4000, @@ -526,23 +562,30 @@ rule add_extra_components: rule prepare_network: params: - links=config["links"], - lines=config["lines"], - co2base=config["electricity"]["co2base"], - co2limit=config["electricity"]["co2limit"], - gaslimit=config["electricity"].get("gaslimit"), - max_hours=config["electricity"]["max_hours"], - costs=config["costs"], + time_resolution=config_provider("clustering", "temporal", "resolution_elec"), + links=config_provider("links"), + lines=config_provider("lines"), + co2base=config_provider("electricity", "co2base"), + co2limit_enable=config_provider("electricity", "co2limit_enable", default=False), + co2limit=config_provider("electricity", "co2limit"), + gaslimit_enable=config_provider("electricity", "gaslimit_enable", default=False), + gaslimit=config_provider("electricity", "gaslimit"), + max_hours=config_provider("electricity", "max_hours"), + costs=config_provider("costs"), + adjustments=config_provider("adjustments", "electricity"), + autarky=config_provider("electricity", "autarky", 
default={}), input: - RESOURCES + "networks/elec_s{simpl}_{clusters}_ec.nc", - tech_costs=COSTS, - co2_price=lambda w: RESOURCES + "co2_price.csv" if "Ept" in w.opts else [], + resources("networks/elec_s{simpl}_{clusters}_ec.nc"), + tech_costs=lambda w: resources( + f"costs_{config_provider('costs', 'year')(w)}.csv" + ), + co2_price=lambda w: resources("co2_price.csv") if "Ept" in w.opts else [], output: - RESOURCES + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", + resources("networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"), log: - LOGS + "prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.log", + logs("prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.log"), benchmark: - (BENCHMARKS + "prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}") + (benchmarks("prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}")) threads: 1 resources: mem_mb=4000, diff --git a/rules/build_sector.smk b/rules/build_sector.smk index 0fea3c99..36e20590 100644 --- a/rules/build_sector.smk +++ b/rules/build_sector.smk @@ -1,23 +1,26 @@ -# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2023-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT rule build_population_layouts: input: - nuts3_shapes=RESOURCES + "nuts3_shapes.geojson", + nuts3_shapes=resources("nuts3_shapes.geojson"), urban_percent="data/urban_percent.csv", - cutout="cutouts/" + CDIR + config["atlite"]["default_cutout"] + ".nc", + cutout=lambda w: "cutouts/" + + CDIR + + config_provider("atlite", "default_cutout")(w) + + ".nc", output: - pop_layout_total=RESOURCES + "pop_layout_total.nc", - pop_layout_urban=RESOURCES + "pop_layout_urban.nc", - pop_layout_rural=RESOURCES + "pop_layout_rural.nc", + pop_layout_total=resources("pop_layout_total.nc"), + pop_layout_urban=resources("pop_layout_urban.nc"), + pop_layout_rural=resources("pop_layout_rural.nc"), log: - LOGS + "build_population_layouts.log", + logs("build_population_layouts.log"), resources: 
mem_mb=20000, benchmark: - BENCHMARKS + "build_population_layouts" + benchmarks("build_population_layouts") threads: 8 conda: "../envs/environment.yaml" @@ -27,19 +30,22 @@ rule build_population_layouts: rule build_clustered_population_layouts: input: - pop_layout_total=RESOURCES + "pop_layout_total.nc", - pop_layout_urban=RESOURCES + "pop_layout_urban.nc", - pop_layout_rural=RESOURCES + "pop_layout_rural.nc", - regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", - cutout="cutouts/" + CDIR + config["atlite"]["default_cutout"] + ".nc", + pop_layout_total=resources("pop_layout_total.nc"), + pop_layout_urban=resources("pop_layout_urban.nc"), + pop_layout_rural=resources("pop_layout_rural.nc"), + regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), + cutout=lambda w: "cutouts/" + + CDIR + + config_provider("atlite", "default_cutout")(w) + + ".nc", output: - clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv", + clustered_pop_layout=resources("pop_layout_elec_s{simpl}_{clusters}.csv"), log: - LOGS + "build_clustered_population_layouts_{simpl}_{clusters}.log", + logs("build_clustered_population_layouts_{simpl}_{clusters}.log"), resources: mem_mb=10000, benchmark: - BENCHMARKS + "build_clustered_population_layouts/s{simpl}_{clusters}" + benchmarks("build_clustered_population_layouts/s{simpl}_{clusters}") conda: "../envs/environment.yaml" script: @@ -48,136 +54,151 @@ rule build_clustered_population_layouts: rule build_simplified_population_layouts: input: - pop_layout_total=RESOURCES + "pop_layout_total.nc", - pop_layout_urban=RESOURCES + "pop_layout_urban.nc", - pop_layout_rural=RESOURCES + "pop_layout_rural.nc", - regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}.geojson", - cutout="cutouts/" + CDIR + config["atlite"]["default_cutout"] + ".nc", + pop_layout_total=resources("pop_layout_total.nc"), + pop_layout_urban=resources("pop_layout_urban.nc"), + 
pop_layout_rural=resources("pop_layout_rural.nc"), + regions_onshore=resources("regions_onshore_elec_s{simpl}.geojson"), + cutout=lambda w: "cutouts/" + + CDIR + + config_provider("atlite", "default_cutout")(w) + + ".nc", output: - clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}.csv", + clustered_pop_layout=resources("pop_layout_elec_s{simpl}.csv"), resources: mem_mb=10000, log: - LOGS + "build_simplified_population_layouts_{simpl}", + logs("build_simplified_population_layouts_{simpl}"), benchmark: - BENCHMARKS + "build_simplified_population_layouts/s{simpl}" + benchmarks("build_simplified_population_layouts/s{simpl}") conda: "../envs/environment.yaml" script: "../scripts/build_clustered_population_layouts.py" -if config["sector"]["gas_network"] or config["sector"]["H2_retrofit"]: - - rule build_gas_network: - input: - gas_network="data/gas_network/scigrid-gas/data/IGGIELGN_PipeSegments.geojson", - output: - cleaned_gas_network=RESOURCES + "gas_network.csv", - resources: - mem_mb=4000, - log: - LOGS + "build_gas_network.log", - conda: - "../envs/environment.yaml" - script: - "../scripts/build_gas_network.py" - - rule build_gas_input_locations: - input: - lng=storage( - "https://globalenergymonitor.org/wp-content/uploads/2023/07/Europe-Gas-Tracker-2023-03-v3.xlsx", - keep_local=True, - ), - entry="data/gas_network/scigrid-gas/data/IGGIELGN_BorderPoints.geojson", - production="data/gas_network/scigrid-gas/data/IGGIELGN_Productions.geojson", - regions_onshore=RESOURCES - + "regions_onshore_elec_s{simpl}_{clusters}.geojson", - regions_offshore=RESOURCES - + "regions_offshore_elec_s{simpl}_{clusters}.geojson", - output: - gas_input_nodes=RESOURCES - + "gas_input_locations_s{simpl}_{clusters}.geojson", - gas_input_nodes_simplified=RESOURCES - + "gas_input_locations_s{simpl}_{clusters}_simplified.csv", - resources: - mem_mb=2000, - log: - LOGS + "build_gas_input_locations_s{simpl}_{clusters}.log", - conda: - "../envs/environment.yaml" - script: - 
"../scripts/build_gas_input_locations.py" - - rule cluster_gas_network: - input: - cleaned_gas_network=RESOURCES + "gas_network.csv", - regions_onshore=RESOURCES - + "regions_onshore_elec_s{simpl}_{clusters}.geojson", - regions_offshore=RESOURCES - + "regions_offshore_elec_s{simpl}_{clusters}.geojson", - output: - clustered_gas_network=RESOURCES + "gas_network_elec_s{simpl}_{clusters}.csv", - resources: - mem_mb=4000, - log: - LOGS + "cluster_gas_network_s{simpl}_{clusters}.log", - conda: - "../envs/environment.yaml" - script: - "../scripts/cluster_gas_network.py" - - gas_infrastructure = { - **rules.cluster_gas_network.output, - **rules.build_gas_input_locations.output, - } - - -if not (config["sector"]["gas_network"] or config["sector"]["H2_retrofit"]): - # this is effecively an `else` statement which is however not liked by snakefmt - - gas_infrastructure = {} - - -rule build_heat_demands: - params: - snapshots=config["snapshots"], +rule build_gas_network: input: - pop_layout=RESOURCES + "pop_layout_{scope}.nc", - regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", - cutout="cutouts/" + CDIR + config["atlite"]["default_cutout"] + ".nc", + gas_network="data/gas_network/scigrid-gas/data/IGGIELGN_PipeSegments.geojson", output: - heat_demand=RESOURCES + "heat_demand_{scope}_elec_s{simpl}_{clusters}.nc", + cleaned_gas_network=resources("gas_network.csv"), + resources: + mem_mb=4000, + log: + logs("build_gas_network.log"), + conda: + "../envs/environment.yaml" + script: + "../scripts/build_gas_network.py" + + +rule build_gas_input_locations: + input: + gem=storage( + "https://globalenergymonitor.org/wp-content/uploads/2023/07/Europe-Gas-Tracker-2023-03-v3.xlsx", + keep_local=True, + ), + entry="data/gas_network/scigrid-gas/data/IGGIELGN_BorderPoints.geojson", + storage="data/gas_network/scigrid-gas/data/IGGIELGN_Storages.geojson", + regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), + 
regions_offshore=resources("regions_offshore_elec_s{simpl}_{clusters}.geojson"), + output: + gas_input_nodes=resources("gas_input_locations_s{simpl}_{clusters}.geojson"), + gas_input_nodes_simplified=resources( + "gas_input_locations_s{simpl}_{clusters}_simplified.csv" + ), + resources: + mem_mb=2000, + log: + logs("build_gas_input_locations_s{simpl}_{clusters}.log"), + conda: + "../envs/environment.yaml" + script: + "../scripts/build_gas_input_locations.py" + + +rule cluster_gas_network: + input: + cleaned_gas_network=resources("gas_network.csv"), + regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), + regions_offshore=resources("regions_offshore_elec_s{simpl}_{clusters}.geojson"), + output: + clustered_gas_network=resources("gas_network_elec_s{simpl}_{clusters}.csv"), + resources: + mem_mb=4000, + log: + logs("cluster_gas_network_s{simpl}_{clusters}.log"), + conda: + "../envs/environment.yaml" + script: + "../scripts/cluster_gas_network.py" + + +rule build_daily_heat_demand: + params: + snapshots=config_provider("snapshots"), + input: + pop_layout=resources("pop_layout_{scope}.nc"), + regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), + cutout=lambda w: "cutouts/" + + CDIR + + config_provider("atlite", "default_cutout")(w) + + ".nc", + output: + heat_demand=resources("daily_heat_demand_{scope}_elec_s{simpl}_{clusters}.nc"), resources: mem_mb=20000, threads: 8 log: - LOGS + "build_heat_demands_{scope}_{simpl}_{clusters}.loc", + logs("build_daily_heat_demand_{scope}_{simpl}_{clusters}.loc"), benchmark: - BENCHMARKS + "build_heat_demands/{scope}_s{simpl}_{clusters}" + benchmarks("build_daily_heat_demand/{scope}_s{simpl}_{clusters}") conda: "../envs/environment.yaml" script: - "../scripts/build_heat_demand.py" + "../scripts/build_daily_heat_demand.py" + + +rule build_hourly_heat_demand: + params: + snapshots=config_provider("snapshots"), + input: + heat_profile="data/heat_load_profile_BDEW.csv", + 
heat_demand=resources("daily_heat_demand_{scope}_elec_s{simpl}_{clusters}.nc"), + output: + heat_demand=resources("hourly_heat_demand_{scope}_elec_s{simpl}_{clusters}.nc"), + resources: + mem_mb=2000, + threads: 8 + log: + logs("build_hourly_heat_demand_{scope}_{simpl}_{clusters}.loc"), + benchmark: + benchmarks("build_hourly_heat_demand/{scope}_s{simpl}_{clusters}") + conda: + "../envs/environment.yaml" + script: + "../scripts/build_hourly_heat_demand.py" rule build_temperature_profiles: params: - snapshots=config["snapshots"], + snapshots=config_provider("snapshots"), input: - pop_layout=RESOURCES + "pop_layout_{scope}.nc", - regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", - cutout="cutouts/" + CDIR + config["atlite"]["default_cutout"] + ".nc", + pop_layout=resources("pop_layout_{scope}.nc"), + regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), + cutout=lambda w: "cutouts/" + + CDIR + + config_provider("atlite", "default_cutout")(w) + + ".nc", output: - temp_soil=RESOURCES + "temp_soil_{scope}_elec_s{simpl}_{clusters}.nc", - temp_air=RESOURCES + "temp_air_{scope}_elec_s{simpl}_{clusters}.nc", + temp_soil=resources("temp_soil_{scope}_elec_s{simpl}_{clusters}.nc"), + temp_air=resources("temp_air_{scope}_elec_s{simpl}_{clusters}.nc"), resources: mem_mb=20000, threads: 8 log: - LOGS + "build_temperature_profiles_{scope}_{simpl}_{clusters}.log", + logs("build_temperature_profiles_{scope}_{simpl}_{clusters}.log"), benchmark: - BENCHMARKS + "build_temperature_profiles/{scope}_s{simpl}_{clusters}" + benchmarks("build_temperature_profiles/{scope}_s{simpl}_{clusters}") conda: "../envs/environment.yaml" script: @@ -186,27 +207,27 @@ rule build_temperature_profiles: rule build_cop_profiles: params: - heat_pump_sink_T=config["sector"]["heat_pump_sink_T"], + heat_pump_sink_T=config_provider("sector", "heat_pump_sink_T"), input: - temp_soil_total=RESOURCES + "temp_soil_total_elec_s{simpl}_{clusters}.nc", - 
temp_soil_rural=RESOURCES + "temp_soil_rural_elec_s{simpl}_{clusters}.nc", - temp_soil_urban=RESOURCES + "temp_soil_urban_elec_s{simpl}_{clusters}.nc", - temp_air_total=RESOURCES + "temp_air_total_elec_s{simpl}_{clusters}.nc", - temp_air_rural=RESOURCES + "temp_air_rural_elec_s{simpl}_{clusters}.nc", - temp_air_urban=RESOURCES + "temp_air_urban_elec_s{simpl}_{clusters}.nc", + temp_soil_total=resources("temp_soil_total_elec_s{simpl}_{clusters}.nc"), + temp_soil_rural=resources("temp_soil_rural_elec_s{simpl}_{clusters}.nc"), + temp_soil_urban=resources("temp_soil_urban_elec_s{simpl}_{clusters}.nc"), + temp_air_total=resources("temp_air_total_elec_s{simpl}_{clusters}.nc"), + temp_air_rural=resources("temp_air_rural_elec_s{simpl}_{clusters}.nc"), + temp_air_urban=resources("temp_air_urban_elec_s{simpl}_{clusters}.nc"), output: - cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc", - cop_soil_rural=RESOURCES + "cop_soil_rural_elec_s{simpl}_{clusters}.nc", - cop_soil_urban=RESOURCES + "cop_soil_urban_elec_s{simpl}_{clusters}.nc", - cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc", - cop_air_rural=RESOURCES + "cop_air_rural_elec_s{simpl}_{clusters}.nc", - cop_air_urban=RESOURCES + "cop_air_urban_elec_s{simpl}_{clusters}.nc", + cop_soil_total=resources("cop_soil_total_elec_s{simpl}_{clusters}.nc"), + cop_soil_rural=resources("cop_soil_rural_elec_s{simpl}_{clusters}.nc"), + cop_soil_urban=resources("cop_soil_urban_elec_s{simpl}_{clusters}.nc"), + cop_air_total=resources("cop_air_total_elec_s{simpl}_{clusters}.nc"), + cop_air_rural=resources("cop_air_rural_elec_s{simpl}_{clusters}.nc"), + cop_air_urban=resources("cop_air_urban_elec_s{simpl}_{clusters}.nc"), resources: mem_mb=20000, log: - LOGS + "build_cop_profiles_s{simpl}_{clusters}.log", + logs("build_cop_profiles_s{simpl}_{clusters}.log"), benchmark: - BENCHMARKS + "build_cop_profiles/s{simpl}_{clusters}" + benchmarks("build_cop_profiles/s{simpl}_{clusters}") conda: 
"../envs/environment.yaml" script: @@ -215,21 +236,24 @@ rule build_cop_profiles: rule build_solar_thermal_profiles: params: - snapshots=config["snapshots"], - solar_thermal=config["solar_thermal"], + snapshots=config_provider("snapshots"), + solar_thermal=config_provider("solar_thermal"), input: - pop_layout=RESOURCES + "pop_layout_{scope}.nc", - regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", - cutout="cutouts/" + CDIR + config["atlite"]["default_cutout"] + ".nc", + pop_layout=resources("pop_layout_{scope}.nc"), + regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), + cutout=lambda w: "cutouts/" + + CDIR + + config_provider("atlite", "default_cutout")(w) + + ".nc", output: - solar_thermal=RESOURCES + "solar_thermal_{scope}_elec_s{simpl}_{clusters}.nc", + solar_thermal=resources("solar_thermal_{scope}_elec_s{simpl}_{clusters}.nc"), resources: mem_mb=20000, threads: 16 log: - LOGS + "build_solar_thermal_profiles_{scope}_s{simpl}_{clusters}.log", + logs("build_solar_thermal_profiles_{scope}_s{simpl}_{clusters}.log"), benchmark: - BENCHMARKS + "build_solar_thermal_profiles/{scope}_s{simpl}_{clusters}" + benchmarks("build_solar_thermal_profiles/{scope}_s{simpl}_{clusters}") conda: "../envs/environment.yaml" script: @@ -238,26 +262,27 @@ rule build_solar_thermal_profiles: rule build_energy_totals: params: - countries=config["countries"], - energy=config["energy"], + countries=config_provider("countries"), + energy=config_provider("energy"), input: - nuts3_shapes=RESOURCES + "nuts3_shapes.geojson", + nuts3_shapes=resources("nuts3_shapes.geojson"), co2="data/bundle-sector/eea/UNFCCC_v23.csv", - swiss="data/bundle-sector/switzerland-sfoe/switzerland-new_format.csv", + swiss="data/switzerland-new_format-all_years.csv", idees="data/bundle-sector/jrc-idees-2015", district_heat_share="data/district_heat_share.csv", - eurostat=input_eurostat, + eurostat="data/eurostat/eurostat-energy_balances-april_2023_edition", 
output: - energy_name=RESOURCES + "energy_totals.csv", - co2_name=RESOURCES + "co2_totals.csv", - transport_name=RESOURCES + "transport_data.csv", + energy_name=resources("energy_totals.csv"), + co2_name=resources("co2_totals.csv"), + transport_name=resources("transport_data.csv"), + district_heat_share=resources("district_heat_share.csv"), threads: 16 resources: mem_mb=10000, log: - LOGS + "build_energy_totals.log", + logs("build_energy_totals.log"), benchmark: - BENCHMARKS + "build_energy_totals" + benchmarks("build_energy_totals") conda: "../envs/environment.yaml" script: @@ -266,119 +291,102 @@ rule build_energy_totals: rule build_biomass_potentials: params: - biomass=config["biomass"], + biomass=config_provider("biomass"), input: enspreso_biomass=storage( "https://zenodo.org/records/10356004/files/ENSPRESO_BIOMASS.xlsx", keep_local=True, ), nuts2="data/bundle-sector/nuts/NUTS_RG_10M_2013_4326_LEVL_2.geojson", # https://gisco-services.ec.europa.eu/distribution/v2/nuts/download/#nuts21 - regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", + regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), nuts3_population=ancient("data/bundle/nama_10r_3popgdp.tsv.gz"), swiss_cantons=ancient("data/bundle/ch_cantons.csv"), swiss_population=ancient("data/bundle/je-e-21.03.02.xls"), - country_shapes=RESOURCES + "country_shapes.geojson", + country_shapes=resources("country_shapes.geojson"), output: - biomass_potentials_all=RESOURCES - + "biomass_potentials_all_s{simpl}_{clusters}_{planning_horizons}.csv", - biomass_potentials=RESOURCES - + "biomass_potentials_s{simpl}_{clusters}_{planning_horizons}.csv", + biomass_potentials_all=resources( + "biomass_potentials_all_s{simpl}_{clusters}_{planning_horizons}.csv" + ), + biomass_potentials=resources( + "biomass_potentials_s{simpl}_{clusters}_{planning_horizons}.csv" + ), threads: 1 resources: mem_mb=1000, log: - LOGS + 
"build_biomass_potentials_s{simpl}_{clusters}_{planning_horizons}.log", + logs("build_biomass_potentials_s{simpl}_{clusters}_{planning_horizons}.log"), benchmark: - BENCHMARKS + "build_biomass_potentials_s{simpl}_{clusters}_{planning_horizons}" + benchmarks("build_biomass_potentials_s{simpl}_{clusters}_{planning_horizons}") conda: "../envs/environment.yaml" script: "../scripts/build_biomass_potentials.py" -if config["sector"]["biomass_transport"] or config["sector"]["biomass_spatial"]: - - rule build_biomass_transport_costs: - input: - transport_cost_data=storage( - "https://publications.jrc.ec.europa.eu/repository/bitstream/JRC98626/biomass potentials in europe_web rev.pdf", - keep_local=True, - ), - output: - biomass_transport_costs=RESOURCES + "biomass_transport_costs.csv", - threads: 1 - resources: - mem_mb=1000, - log: - LOGS + "build_biomass_transport_costs.log", - benchmark: - BENCHMARKS + "build_biomass_transport_costs" - conda: - "../envs/environment.yaml" - script: - "../scripts/build_biomass_transport_costs.py" - - build_biomass_transport_costs_output = rules.build_biomass_transport_costs.output +rule build_biomass_transport_costs: + input: + transport_cost_data=HTTP.remote( + "publications.jrc.ec.europa.eu/repository/bitstream/JRC98626/biomass potentials in europe_web rev.pdf", + keep_local=True, + ), + output: + biomass_transport_costs=resources("biomass_transport_costs.csv"), + threads: 1 + resources: + mem_mb=1000, + log: + logs("build_biomass_transport_costs.log"), + benchmark: + benchmarks("build_biomass_transport_costs") + conda: + "../envs/environment.yaml" + script: + "../scripts/build_biomass_transport_costs.py" -if not (config["sector"]["biomass_transport"] or config["sector"]["biomass_spatial"]): - # this is effecively an `else` statement which is however not liked by snakefmt - build_biomass_transport_costs_output = {} - - -if config["sector"]["regional_co2_sequestration_potential"]["enable"]: - - rule build_sequestration_potentials: - 
params: - sequestration_potential=config["sector"][ - "regional_co2_sequestration_potential" - ], - input: - sequestration_potential=storage( - "https://raw.githubusercontent.com/ericzhou571/Co2Storage/main/resources/complete_map_2020_unit_Mt.geojson", - keep_local=True, - ), - regions_onshore=RESOURCES - + "regions_onshore_elec_s{simpl}_{clusters}.geojson", - regions_offshore=RESOURCES - + "regions_offshore_elec_s{simpl}_{clusters}.geojson", - output: - sequestration_potential=RESOURCES - + "co2_sequestration_potential_elec_s{simpl}_{clusters}.csv", - threads: 1 - resources: - mem_mb=4000, - log: - LOGS + "build_sequestration_potentials_s{simpl}_{clusters}.log", - benchmark: - BENCHMARKS + "build_sequestration_potentials_s{simpl}_{clusters}" - conda: - "../envs/environment.yaml" - script: - "../scripts/build_sequestration_potentials.py" - - build_sequestration_potentials_output = rules.build_sequestration_potentials.output - - -if not config["sector"]["regional_co2_sequestration_potential"]["enable"]: - # this is effecively an `else` statement which is however not liked by snakefmt - build_sequestration_potentials_output = {} +rule build_sequestration_potentials: + params: + sequestration_potential=config_provider( + "sector", "regional_co2_sequestration_potential" + ), + input: + sequestration_potential=HTTP.remote( + "https://raw.githubusercontent.com/ericzhou571/Co2Storage/main/resources/complete_map_2020_unit_Mt.geojson", + keep_local=True, + ), + regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), + regions_offshore=resources("regions_offshore_elec_s{simpl}_{clusters}.geojson"), + output: + sequestration_potential=resources( + "co2_sequestration_potential_elec_s{simpl}_{clusters}.csv" + ), + threads: 1 + resources: + mem_mb=4000, + log: + logs("build_sequestration_potentials_s{simpl}_{clusters}.log"), + benchmark: + benchmarks("build_sequestration_potentials_s{simpl}_{clusters}") + conda: + "../envs/environment.yaml" + script: + 
"../scripts/build_sequestration_potentials.py" rule build_salt_cavern_potentials: input: salt_caverns="data/bundle-sector/h2_salt_caverns_GWh_per_sqkm.geojson", - regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", - regions_offshore=RESOURCES + "regions_offshore_elec_s{simpl}_{clusters}.geojson", + regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), + regions_offshore=resources("regions_offshore_elec_s{simpl}_{clusters}.geojson"), output: - h2_cavern_potential=RESOURCES + "salt_cavern_potentials_s{simpl}_{clusters}.csv", + h2_cavern_potential=resources("salt_cavern_potentials_s{simpl}_{clusters}.csv"), threads: 1 resources: mem_mb=2000, log: - LOGS + "build_salt_cavern_potentials_s{simpl}_{clusters}.log", + logs("build_salt_cavern_potentials_s{simpl}_{clusters}.log"), benchmark: - BENCHMARKS + "build_salt_cavern_potentials_s{simpl}_{clusters}" + benchmarks("build_salt_cavern_potentials_s{simpl}_{clusters}") conda: "../envs/environment.yaml" script: @@ -386,19 +394,17 @@ rule build_salt_cavern_potentials: rule build_ammonia_production: - params: - countries=config["countries"], input: usgs="data/bundle-sector/myb1-2017-nitro.xls", output: - ammonia_production=RESOURCES + "ammonia_production.csv", + ammonia_production=resources("ammonia_production.csv"), threads: 1 resources: mem_mb=1000, log: - LOGS + "build_ammonia_production.log", + logs("build_ammonia_production.log"), benchmark: - BENCHMARKS + "build_ammonia_production" + benchmarks("build_ammonia_production") conda: "../envs/environment.yaml" script: @@ -407,44 +413,73 @@ rule build_ammonia_production: rule build_industry_sector_ratios: params: - industry=config["industry"], - ammonia=config["sector"].get("ammonia", False), + industry=config_provider("industry"), + ammonia=config_provider("sector", "ammonia", default=False), input: - ammonia_production=RESOURCES + "ammonia_production.csv", + ammonia_production=resources("ammonia_production.csv"), 
idees="data/bundle-sector/jrc-idees-2015", output: - industry_sector_ratios=RESOURCES + "industry_sector_ratios.csv", + industry_sector_ratios=resources("industry_sector_ratios.csv"), threads: 1 resources: mem_mb=1000, log: - LOGS + "build_industry_sector_ratios.log", + logs("build_industry_sector_ratios.log"), benchmark: - BENCHMARKS + "build_industry_sector_ratios" + benchmarks("build_industry_sector_ratios") conda: "../envs/environment.yaml" script: "../scripts/build_industry_sector_ratios.py" +rule build_industry_sector_ratios_intermediate: + params: + industry=config_provider("industry"), + input: + industry_sector_ratios=resources("industry_sector_ratios.csv"), + industrial_energy_demand_per_country_today=resources( + "industrial_energy_demand_per_country_today.csv" + ), + industrial_production_per_country=resources( + "industrial_production_per_country.csv" + ), + output: + industry_sector_ratios=resources( + "industry_sector_ratios_{planning_horizons}.csv" + ), + threads: 1 + resources: + mem_mb=1000, + log: + logs("build_industry_sector_ratios_{planning_horizons}.log"), + benchmark: + benchmarks("build_industry_sector_ratios_{planning_horizons}") + conda: + "../envs/environment.yaml" + script: + "../scripts/build_industry_sector_ratios_intermediate.py" + + rule build_industrial_production_per_country: params: - industry=config["industry"], - countries=config["countries"], + industry=config_provider("industry"), + countries=config_provider("countries"), input: - ammonia_production=RESOURCES + "ammonia_production.csv", + ammonia_production=resources("ammonia_production.csv"), jrc="data/bundle-sector/jrc-idees-2015", - eurostat="data/bundle-sector/eurostat-energy_balances-may_2018_edition", + eurostat="data/eurostat/eurostat-energy_balances-april_2023_edition", output: - industrial_production_per_country=RESOURCES - + "industrial_production_per_country.csv", + industrial_production_per_country=resources( + "industrial_production_per_country.csv" + ), threads: 
8 resources: mem_mb=1000, log: - LOGS + "build_industrial_production_per_country.log", + logs("build_industrial_production_per_country.log"), benchmark: - BENCHMARKS + "build_industrial_production_per_country" + benchmarks("build_industrial_production_per_country") conda: "../envs/environment.yaml" script: @@ -453,23 +488,25 @@ rule build_industrial_production_per_country: rule build_industrial_production_per_country_tomorrow: params: - industry=config["industry"], + industry=config_provider("industry"), input: - industrial_production_per_country=RESOURCES - + "industrial_production_per_country.csv", + industrial_production_per_country=resources( + "industrial_production_per_country.csv" + ), output: - industrial_production_per_country_tomorrow=RESOURCES - + "industrial_production_per_country_tomorrow_{planning_horizons}.csv", + industrial_production_per_country_tomorrow=resources( + "industrial_production_per_country_tomorrow_{planning_horizons}.csv" + ), threads: 1 resources: mem_mb=1000, log: - LOGS - + "build_industrial_production_per_country_tomorrow_{planning_horizons}.log", + logs("build_industrial_production_per_country_tomorrow_{planning_horizons}.log"), benchmark: ( - BENCHMARKS - + "build_industrial_production_per_country_tomorrow_{planning_horizons}" + benchmarks( + "build_industrial_production_per_country_tomorrow_{planning_horizons}" + ) ) conda: "../envs/environment.yaml" @@ -479,22 +516,25 @@ rule build_industrial_production_per_country_tomorrow: rule build_industrial_distribution_key: params: - hotmaps_locate_missing=config["industry"].get("hotmaps_locate_missing", False), - countries=config["countries"], + hotmaps_locate_missing=config_provider( + "industry", "hotmaps_locate_missing", default=False + ), + countries=config_provider("countries"), input: - regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", - clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv", + 
regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), + clustered_pop_layout=resources("pop_layout_elec_s{simpl}_{clusters}.csv"), hotmaps_industrial_database="data/bundle-sector/Industrial_Database.csv", output: - industrial_distribution_key=RESOURCES - + "industrial_distribution_key_elec_s{simpl}_{clusters}.csv", + industrial_distribution_key=resources( + "industrial_distribution_key_elec_s{simpl}_{clusters}.csv" + ), threads: 1 resources: mem_mb=1000, log: - LOGS + "build_industrial_distribution_key_s{simpl}_{clusters}.log", + logs("build_industrial_distribution_key_s{simpl}_{clusters}.log"), benchmark: - BENCHMARKS + "build_industrial_distribution_key/s{simpl}_{clusters}" + benchmarks("build_industrial_distribution_key/s{simpl}_{clusters}") conda: "../envs/environment.yaml" script: @@ -503,23 +543,28 @@ rule build_industrial_distribution_key: rule build_industrial_production_per_node: input: - industrial_distribution_key=RESOURCES - + "industrial_distribution_key_elec_s{simpl}_{clusters}.csv", - industrial_production_per_country_tomorrow=RESOURCES - + "industrial_production_per_country_tomorrow_{planning_horizons}.csv", + industrial_distribution_key=resources( + "industrial_distribution_key_elec_s{simpl}_{clusters}.csv" + ), + industrial_production_per_country_tomorrow=resources( + "industrial_production_per_country_tomorrow_{planning_horizons}.csv" + ), output: - industrial_production_per_node=RESOURCES - + "industrial_production_elec_s{simpl}_{clusters}_{planning_horizons}.csv", + industrial_production_per_node=resources( + "industrial_production_elec_s{simpl}_{clusters}_{planning_horizons}.csv" + ), threads: 1 resources: mem_mb=1000, log: - LOGS - + "build_industrial_production_per_node_s{simpl}_{clusters}_{planning_horizons}.log", + logs( + "build_industrial_production_per_node_s{simpl}_{clusters}_{planning_horizons}.log" + ), benchmark: ( - BENCHMARKS - + 
"build_industrial_production_per_node/s{simpl}_{clusters}_{planning_horizons}" + benchmarks( + "build_industrial_production_per_node/s{simpl}_{clusters}_{planning_horizons}" + ) ) conda: "../envs/environment.yaml" @@ -529,24 +574,31 @@ rule build_industrial_production_per_node: rule build_industrial_energy_demand_per_node: input: - industry_sector_ratios=RESOURCES + "industry_sector_ratios.csv", - industrial_production_per_node=RESOURCES - + "industrial_production_elec_s{simpl}_{clusters}_{planning_horizons}.csv", - industrial_energy_demand_per_node_today=RESOURCES - + "industrial_energy_demand_today_elec_s{simpl}_{clusters}.csv", + industry_sector_ratios=resources( + "industry_sector_ratios_{planning_horizons}.csv" + ), + industrial_production_per_node=resources( + "industrial_production_elec_s{simpl}_{clusters}_{planning_horizons}.csv" + ), + industrial_energy_demand_per_node_today=resources( + "industrial_energy_demand_today_elec_s{simpl}_{clusters}.csv" + ), output: - industrial_energy_demand_per_node=RESOURCES - + "industrial_energy_demand_elec_s{simpl}_{clusters}_{planning_horizons}.csv", + industrial_energy_demand_per_node=resources( + "industrial_energy_demand_elec_s{simpl}_{clusters}_{planning_horizons}.csv" + ), threads: 1 resources: mem_mb=1000, log: - LOGS - + "build_industrial_energy_demand_per_node_s{simpl}_{clusters}_{planning_horizons}.log", + logs( + "build_industrial_energy_demand_per_node_s{simpl}_{clusters}_{planning_horizons}.log" + ), benchmark: ( - BENCHMARKS - + "build_industrial_energy_demand_per_node/s{simpl}_{clusters}_{planning_horizons}" + benchmarks( + "build_industrial_energy_demand_per_node/s{simpl}_{clusters}_{planning_horizons}" + ) ) conda: "../envs/environment.yaml" @@ -556,23 +608,24 @@ rule build_industrial_energy_demand_per_node: rule build_industrial_energy_demand_per_country_today: params: - countries=config["countries"], - industry=config["industry"], + countries=config_provider("countries"), + 
industry=config_provider("industry"), input: jrc="data/bundle-sector/jrc-idees-2015", - ammonia_production=RESOURCES + "ammonia_production.csv", - industrial_production_per_country=RESOURCES - + "industrial_production_per_country.csv", + industrial_production_per_country=resources( + "industrial_production_per_country.csv" + ), output: - industrial_energy_demand_per_country_today=RESOURCES - + "industrial_energy_demand_per_country_today.csv", + industrial_energy_demand_per_country_today=resources( + "industrial_energy_demand_per_country_today.csv" + ), threads: 8 resources: mem_mb=1000, log: - LOGS + "build_industrial_energy_demand_per_country_today.log", + logs("build_industrial_energy_demand_per_country_today.log"), benchmark: - BENCHMARKS + "build_industrial_energy_demand_per_country_today" + benchmarks("build_industrial_energy_demand_per_country_today") conda: "../envs/environment.yaml" script: @@ -581,76 +634,70 @@ rule build_industrial_energy_demand_per_country_today: rule build_industrial_energy_demand_per_node_today: input: - industrial_distribution_key=RESOURCES - + "industrial_distribution_key_elec_s{simpl}_{clusters}.csv", - industrial_energy_demand_per_country_today=RESOURCES - + "industrial_energy_demand_per_country_today.csv", + industrial_distribution_key=resources( + "industrial_distribution_key_elec_s{simpl}_{clusters}.csv" + ), + industrial_energy_demand_per_country_today=resources( + "industrial_energy_demand_per_country_today.csv" + ), output: - industrial_energy_demand_per_node_today=RESOURCES - + "industrial_energy_demand_today_elec_s{simpl}_{clusters}.csv", + industrial_energy_demand_per_node_today=resources( + "industrial_energy_demand_today_elec_s{simpl}_{clusters}.csv" + ), threads: 1 resources: mem_mb=1000, log: - LOGS + "build_industrial_energy_demand_per_node_today_s{simpl}_{clusters}.log", + logs("build_industrial_energy_demand_per_node_today_s{simpl}_{clusters}.log"), benchmark: - BENCHMARKS + 
"build_industrial_energy_demand_per_node_today/s{simpl}_{clusters}" + benchmarks("build_industrial_energy_demand_per_node_today/s{simpl}_{clusters}") conda: "../envs/environment.yaml" script: "../scripts/build_industrial_energy_demand_per_node_today.py" -if config["sector"]["retrofitting"]["retro_endogen"]: - - rule build_retro_cost: - params: - retrofitting=config["sector"]["retrofitting"], - countries=config["countries"], - input: - building_stock="data/retro/data_building_stock.csv", - data_tabula="data/bundle-sector/retro/tabula-calculator-calcsetbuilding.csv", - air_temperature=RESOURCES + "temp_air_total_elec_s{simpl}_{clusters}.nc", - u_values_PL="data/retro/u_values_poland.csv", - tax_w="data/retro/electricity_taxes_eu.csv", - construction_index="data/retro/comparative_level_investment.csv", - floor_area_missing="data/retro/floor_area_missing.csv", - clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv", - cost_germany="data/retro/retro_cost_germany.csv", - window_assumptions="data/retro/window_assumptions.csv", - output: - retro_cost=RESOURCES + "retro_cost_elec_s{simpl}_{clusters}.csv", - floor_area=RESOURCES + "floor_area_elec_s{simpl}_{clusters}.csv", - resources: - mem_mb=1000, - log: - LOGS + "build_retro_cost_s{simpl}_{clusters}.log", - benchmark: - BENCHMARKS + "build_retro_cost/s{simpl}_{clusters}" - conda: - "../envs/environment.yaml" - script: - "../scripts/build_retro_cost.py" - - build_retro_cost_output = rules.build_retro_cost.output - - -if not config["sector"]["retrofitting"]["retro_endogen"]: - # this is effecively an `else` statement which is however not liked by snakefmt - build_retro_cost_output = {} +rule build_retro_cost: + params: + retrofitting=config_provider("sector", "retrofitting"), + countries=config_provider("countries"), + input: + building_stock="data/retro/data_building_stock.csv", + data_tabula="data/bundle-sector/retro/tabula-calculator-calcsetbuilding.csv", + 
air_temperature=resources("temp_air_total_elec_s{simpl}_{clusters}.nc"), + u_values_PL="data/retro/u_values_poland.csv", + tax_w="data/retro/electricity_taxes_eu.csv", + construction_index="data/retro/comparative_level_investment.csv", + floor_area_missing="data/retro/floor_area_missing.csv", + clustered_pop_layout=resources("pop_layout_elec_s{simpl}_{clusters}.csv"), + cost_germany="data/retro/retro_cost_germany.csv", + window_assumptions="data/retro/window_assumptions.csv", + output: + retro_cost=resources("retro_cost_elec_s{simpl}_{clusters}.csv"), + floor_area=resources("floor_area_elec_s{simpl}_{clusters}.csv"), + resources: + mem_mb=1000, + log: + logs("build_retro_cost_s{simpl}_{clusters}.log"), + benchmark: + benchmarks("build_retro_cost/s{simpl}_{clusters}") + conda: + "../envs/environment.yaml" + script: + "../scripts/build_retro_cost.py" rule build_population_weighted_energy_totals: input: - energy_totals=RESOURCES + "energy_totals.csv", - clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv", + energy_totals=resources("energy_totals.csv"), + clustered_pop_layout=resources("pop_layout_elec_s{simpl}_{clusters}.csv"), output: - RESOURCES + "pop_weighted_energy_totals_s{simpl}_{clusters}.csv", + resources("pop_weighted_energy_totals_s{simpl}_{clusters}.csv"), threads: 1 resources: mem_mb=2000, log: - LOGS + "build_population_weighted_energy_totals_s{simpl}_{clusters}.log", + logs("build_population_weighted_energy_totals_s{simpl}_{clusters}.log"), conda: "../envs/environment.yaml" script: @@ -660,16 +707,16 @@ rule build_population_weighted_energy_totals: rule build_shipping_demand: input: ports="data/attributed_ports.json", - scope=RESOURCES + "europe_shape.geojson", - regions=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", - demand=RESOURCES + "energy_totals.csv", + scope=resources("europe_shape.geojson"), + regions=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), + demand=resources("energy_totals.csv"), 
output: - RESOURCES + "shipping_demand_s{simpl}_{clusters}.csv", + resources("shipping_demand_s{simpl}_{clusters}.csv"), threads: 1 resources: mem_mb=2000, log: - LOGS + "build_shipping_demand_s{simpl}_{clusters}.log", + logs("build_shipping_demand_s{simpl}_{clusters}.log"), conda: "../envs/environment.yaml" script: @@ -678,110 +725,212 @@ rule build_shipping_demand: rule build_transport_demand: params: - snapshots=config["snapshots"], - sector=config["sector"], + snapshots=config_provider("snapshots"), + sector=config_provider("sector"), input: - clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv", - pop_weighted_energy_totals=RESOURCES - + "pop_weighted_energy_totals_s{simpl}_{clusters}.csv", - transport_data=RESOURCES + "transport_data.csv", + clustered_pop_layout=resources("pop_layout_elec_s{simpl}_{clusters}.csv"), + pop_weighted_energy_totals=resources( + "pop_weighted_energy_totals_s{simpl}_{clusters}.csv" + ), + transport_data=resources("transport_data.csv"), traffic_data_KFZ="data/bundle-sector/emobility/KFZ__count", traffic_data_Pkw="data/bundle-sector/emobility/Pkw__count", - temp_air_total=RESOURCES + "temp_air_total_elec_s{simpl}_{clusters}.nc", + temp_air_total=resources("temp_air_total_elec_s{simpl}_{clusters}.nc"), output: - transport_demand=RESOURCES + "transport_demand_s{simpl}_{clusters}.csv", - transport_data=RESOURCES + "transport_data_s{simpl}_{clusters}.csv", - avail_profile=RESOURCES + "avail_profile_s{simpl}_{clusters}.csv", - dsm_profile=RESOURCES + "dsm_profile_s{simpl}_{clusters}.csv", + transport_demand=resources("transport_demand_s{simpl}_{clusters}.csv"), + transport_data=resources("transport_data_s{simpl}_{clusters}.csv"), + avail_profile=resources("avail_profile_s{simpl}_{clusters}.csv"), + dsm_profile=resources("dsm_profile_s{simpl}_{clusters}.csv"), threads: 1 resources: mem_mb=2000, log: - LOGS + "build_transport_demand_s{simpl}_{clusters}.log", + logs("build_transport_demand_s{simpl}_{clusters}.log"), 
conda: "../envs/environment.yaml" script: "../scripts/build_transport_demand.py" +rule build_district_heat_share: + params: + sector=config_provider("sector"), + input: + district_heat_share=resources("district_heat_share.csv"), + clustered_pop_layout=resources("pop_layout_elec_s{simpl}_{clusters}.csv"), + output: + district_heat_share=resources( + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv" + ), + threads: 1 + resources: + mem_mb=1000, + log: + logs("build_district_heat_share_s{simpl}_{clusters}_{planning_horizons}.log"), + conda: + "../envs/environment.yaml" + script: + "../scripts/build_district_heat_share.py" + + +rule build_existing_heating_distribution: + params: + baseyear=config_provider("scenario", "planning_horizons", 0), + sector=config_provider("sector"), + existing_capacities=config_provider("existing_capacities"), + input: + existing_heating="data/existing_infrastructure/existing_heating_raw.csv", + clustered_pop_layout=resources("pop_layout_elec_s{simpl}_{clusters}.csv"), + clustered_pop_energy_layout=resources( + "pop_weighted_energy_totals_s{simpl}_{clusters}.csv" + ), + district_heat_share=resources( + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv" + ), + output: + existing_heating_distribution=resources( + "existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv" + ), + threads: 1 + resources: + mem_mb=2000, + log: + logs( + "build_existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.log" + ), + benchmark: + benchmarks( + "build_existing_heating_distribution/elec_s{simpl}_{clusters}_{planning_horizons}" + ) + conda: + "../envs/environment.yaml" + script: + "../scripts/build_existing_heating_distribution.py" + + +def input_profile_offwind(w): + return { + f"profile_{tech}": resources(f"profile_{tech}.nc") + for tech in ["offwind-ac", "offwind-dc"] + if (tech in config_provider("electricity", "renewable_carriers")(w)) + } + + rule prepare_sector_network: 
params: - co2_budget=config["co2_budget"], - conventional_carriers=config["existing_capacities"]["conventional_carriers"], - foresight=config["foresight"], - costs=config["costs"], - sector=config["sector"], - industry=config["industry"], - pypsa_eur=config["pypsa_eur"], - length_factor=config["lines"]["length_factor"], - planning_horizons=config["scenario"]["planning_horizons"], - countries=config["countries"], - emissions_scope=config["energy"]["emissions"], - eurostat_report_year=config["energy"]["eurostat_report_year"], + time_resolution=config_provider("clustering", "temporal", "resolution_sector"), + co2_budget=config_provider("co2_budget"), + conventional_carriers=config_provider( + "existing_capacities", "conventional_carriers" + ), + foresight=config_provider("foresight"), + costs=config_provider("costs"), + sector=config_provider("sector"), + industry=config_provider("industry"), + lines=config_provider("lines"), + pypsa_eur=config_provider("pypsa_eur"), + length_factor=config_provider("lines", "length_factor"), + planning_horizons=config_provider("scenario", "planning_horizons"), + countries=config_provider("countries"), + adjustments=config_provider("adjustments", "sector"), + emissions_scope=config_provider("energy", "emissions"), RDIR=RDIR, input: - **build_retro_cost_output, - **build_biomass_transport_costs_output, - **gas_infrastructure, - **build_sequestration_potentials_output, - network=RESOURCES + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", - energy_totals_name=RESOURCES + "energy_totals.csv", - eurostat=input_eurostat, - pop_weighted_energy_totals=RESOURCES - + "pop_weighted_energy_totals_s{simpl}_{clusters}.csv", - shipping_demand=RESOURCES + "shipping_demand_s{simpl}_{clusters}.csv", - transport_demand=RESOURCES + "transport_demand_s{simpl}_{clusters}.csv", - transport_data=RESOURCES + "transport_data_s{simpl}_{clusters}.csv", - avail_profile=RESOURCES + "avail_profile_s{simpl}_{clusters}.csv", - dsm_profile=RESOURCES + 
"dsm_profile_s{simpl}_{clusters}.csv", - co2_totals_name=RESOURCES + "co2_totals.csv", + unpack(input_profile_offwind), + **rules.cluster_gas_network.output, + **rules.build_gas_input_locations.output, + retro_cost=lambda w: ( + resources("retro_cost_elec_s{simpl}_{clusters}.csv") + if config_provider("sector", "retrofitting", "retro_endogen")(w) + else [] + ), + floor_area=lambda w: ( + resources("floor_area_elec_s{simpl}_{clusters}.csv") + if config_provider("sector", "retrofitting", "retro_endogen")(w) + else [] + ), + biomass_transport_costs=lambda w: ( + resources("biomass_transport_costs.csv") + if config_provider("sector", "biomass_transport")(w) + or config_provider("sector", "biomass_spatial")(w) + else [] + ), + sequestration_potential=lambda w: ( + resources("co2_sequestration_potential_elec_s{simpl}_{clusters}.csv") + if config_provider( + "sector", "regional_co2_sequestration_potential", "enable" + )(w) + else [] + ), + network=resources("networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"), + energy_totals_name=resources("energy_totals.csv"), + eurostat="data/eurostat/eurostat-energy_balances-april_2023_edition", + pop_weighted_energy_totals=resources( + "pop_weighted_energy_totals_s{simpl}_{clusters}.csv" + ), + shipping_demand=resources("shipping_demand_s{simpl}_{clusters}.csv"), + transport_demand=resources("transport_demand_s{simpl}_{clusters}.csv"), + transport_data=resources("transport_data_s{simpl}_{clusters}.csv"), + avail_profile=resources("avail_profile_s{simpl}_{clusters}.csv"), + dsm_profile=resources("dsm_profile_s{simpl}_{clusters}.csv"), + co2_totals_name=resources("co2_totals.csv"), co2="data/bundle-sector/eea/UNFCCC_v23.csv", - biomass_potentials=RESOURCES - + "biomass_potentials_s{simpl}_{clusters}_" - + "{}.csv".format(config["biomass"]["year"]) - if config["foresight"] == "overnight" - else RESOURCES - + "biomass_potentials_s{simpl}_{clusters}_{planning_horizons}.csv", - heat_profile="data/heat_load_profile_BDEW.csv", - 
costs="data/costs_{}.csv".format(config["costs"]["year"]) - if config["foresight"] == "overnight" - else "data/costs_{planning_horizons}.csv", - profile_offwind_ac=RESOURCES + "profile_offwind-ac.nc", - profile_offwind_dc=RESOURCES + "profile_offwind-dc.nc", - h2_cavern=RESOURCES + "salt_cavern_potentials_s{simpl}_{clusters}.csv", - busmap_s=RESOURCES + "busmap_elec_s{simpl}.csv", - busmap=RESOURCES + "busmap_elec_s{simpl}_{clusters}.csv", - clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv", - simplified_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}.csv", - industrial_demand=RESOURCES - + "industrial_energy_demand_elec_s{simpl}_{clusters}_{planning_horizons}.csv", - heat_demand_urban=RESOURCES + "heat_demand_urban_elec_s{simpl}_{clusters}.nc", - heat_demand_rural=RESOURCES + "heat_demand_rural_elec_s{simpl}_{clusters}.nc", - heat_demand_total=RESOURCES + "heat_demand_total_elec_s{simpl}_{clusters}.nc", - temp_soil_total=RESOURCES + "temp_soil_total_elec_s{simpl}_{clusters}.nc", - temp_soil_rural=RESOURCES + "temp_soil_rural_elec_s{simpl}_{clusters}.nc", - temp_soil_urban=RESOURCES + "temp_soil_urban_elec_s{simpl}_{clusters}.nc", - temp_air_total=RESOURCES + "temp_air_total_elec_s{simpl}_{clusters}.nc", - temp_air_rural=RESOURCES + "temp_air_rural_elec_s{simpl}_{clusters}.nc", - temp_air_urban=RESOURCES + "temp_air_urban_elec_s{simpl}_{clusters}.nc", - cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc", - cop_soil_rural=RESOURCES + "cop_soil_rural_elec_s{simpl}_{clusters}.nc", - cop_soil_urban=RESOURCES + "cop_soil_urban_elec_s{simpl}_{clusters}.nc", - cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc", - cop_air_rural=RESOURCES + "cop_air_rural_elec_s{simpl}_{clusters}.nc", - cop_air_urban=RESOURCES + "cop_air_urban_elec_s{simpl}_{clusters}.nc", - solar_thermal_total=RESOURCES - + "solar_thermal_total_elec_s{simpl}_{clusters}.nc" - if config["sector"]["solar_thermal"] - else [], - 
solar_thermal_urban=RESOURCES - + "solar_thermal_urban_elec_s{simpl}_{clusters}.nc" - if config["sector"]["solar_thermal"] - else [], - solar_thermal_rural=RESOURCES - + "solar_thermal_rural_elec_s{simpl}_{clusters}.nc" - if config["sector"]["solar_thermal"] - else [], + biomass_potentials=lambda w: ( + resources( + "biomass_potentials_s{simpl}_{clusters}_" + + "{}.csv".format(config_provider("biomass", "year")(w)) + ) + if config_provider("foresight")(w) == "overnight" + else resources( + "biomass_potentials_s{simpl}_{clusters}_{planning_horizons}.csv" + ) + ), + costs=lambda w: ( + resources("costs_{}.csv".format(config_provider("costs", "year")(w))) + if config_provider("foresight")(w) == "overnight" + else resources("costs_{planning_horizons}.csv") + ), + h2_cavern=resources("salt_cavern_potentials_s{simpl}_{clusters}.csv"), + busmap_s=resources("busmap_elec_s{simpl}.csv"), + busmap=resources("busmap_elec_s{simpl}_{clusters}.csv"), + clustered_pop_layout=resources("pop_layout_elec_s{simpl}_{clusters}.csv"), + simplified_pop_layout=resources("pop_layout_elec_s{simpl}.csv"), + industrial_demand=resources( + "industrial_energy_demand_elec_s{simpl}_{clusters}_{planning_horizons}.csv" + ), + hourly_heat_demand_total=resources( + "hourly_heat_demand_total_elec_s{simpl}_{clusters}.nc" + ), + district_heat_share=resources( + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv" + ), + temp_soil_total=resources("temp_soil_total_elec_s{simpl}_{clusters}.nc"), + temp_soil_rural=resources("temp_soil_rural_elec_s{simpl}_{clusters}.nc"), + temp_soil_urban=resources("temp_soil_urban_elec_s{simpl}_{clusters}.nc"), + temp_air_total=resources("temp_air_total_elec_s{simpl}_{clusters}.nc"), + temp_air_rural=resources("temp_air_rural_elec_s{simpl}_{clusters}.nc"), + temp_air_urban=resources("temp_air_urban_elec_s{simpl}_{clusters}.nc"), + cop_soil_total=resources("cop_soil_total_elec_s{simpl}_{clusters}.nc"), + 
cop_soil_rural=resources("cop_soil_rural_elec_s{simpl}_{clusters}.nc"), + cop_soil_urban=resources("cop_soil_urban_elec_s{simpl}_{clusters}.nc"), + cop_air_total=resources("cop_air_total_elec_s{simpl}_{clusters}.nc"), + cop_air_rural=resources("cop_air_rural_elec_s{simpl}_{clusters}.nc"), + cop_air_urban=resources("cop_air_urban_elec_s{simpl}_{clusters}.nc"), + solar_thermal_total=lambda w: ( + resources("solar_thermal_total_elec_s{simpl}_{clusters}.nc") + if config_provider("sector", "solar_thermal")(w) + else [] + ), + solar_thermal_urban=lambda w: ( + resources("solar_thermal_urban_elec_s{simpl}_{clusters}.nc") + if config_provider("sector", "solar_thermal")(w) + else [] + ), + solar_thermal_rural=lambda w: ( + resources("solar_thermal_rural_elec_s{simpl}_{clusters}.nc") + if config_provider("sector", "solar_thermal")(w) + else [] + ), output: RESULTS + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", @@ -789,12 +938,12 @@ rule prepare_sector_network: resources: mem_mb=2000, log: - LOGS - + "prepare_sector_network_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log", + RESULTS + + "logs/prepare_sector_network_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log", benchmark: ( - BENCHMARKS - + "prepare_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" + RESULTS + + "benchmarks/prepare_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" ) conda: "../envs/environment.yaml" diff --git a/rules/collect.smk b/rules/collect.smk index c9bb10ea..214b8102 100644 --- a/rules/collect.smk +++ b/rules/collect.smk @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2023-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -11,26 +11,32 @@ localrules: prepare_sector_networks, solve_elec_networks, solve_sector_networks, - plot_networks, rule cluster_networks: 
input: - expand(RESOURCES + "networks/elec_s{simpl}_{clusters}.nc", **config["scenario"]), + expand( + resources("networks/elec_s{simpl}_{clusters}.nc"), + **config["scenario"], + run=config["run"]["name"], + ), rule extra_components_networks: input: expand( - RESOURCES + "networks/elec_s{simpl}_{clusters}_ec.nc", **config["scenario"] + resources("networks/elec_s{simpl}_{clusters}_ec.nc"), + **config["scenario"], + run=config["run"]["name"], ), rule prepare_elec_networks: input: expand( - RESOURCES + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", - **config["scenario"] + resources("networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"), + **config["scenario"], + run=config["run"]["name"], ), @@ -39,7 +45,8 @@ rule prepare_sector_networks: expand( RESULTS + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", - **config["scenario"] + **config["scenario"], + run=config["run"]["name"], ), @@ -47,7 +54,8 @@ rule solve_elec_networks: input: expand( RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", - **config["scenario"] + **config["scenario"], + run=config["run"]["name"], ), @@ -56,25 +64,18 @@ rule solve_sector_networks: expand( RESULTS + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", - **config["scenario"] + **config["scenario"], + run=config["run"]["name"], ), rule solve_sector_networks_perfect: - input: - expand( - RESULTS - + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc", - **config["scenario"] - ), - - -rule plot_networks: input: expand( RESULTS + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf", - **config["scenario"] + **config["scenario"], + run=config["run"]["name"], ), @@ -83,11 +84,13 @@ rule validate_elec_networks: expand( RESULTS + "figures/.statistics_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}", - **config["scenario"] + **config["scenario"], + 
run=config["run"]["name"], ), expand( RESULTS + "figures/.validation_{kind}_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}", **config["scenario"], - kind=["production", "prices", "cross_border"] + run=config["run"]["name"], + kind=["production", "prices", "cross_border"], ), diff --git a/rules/common.smk b/rules/common.smk index d3416050..2b8495e1 100644 --- a/rules/common.smk +++ b/rules/common.smk @@ -1,7 +1,89 @@ -# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2023-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT +import copy +from functools import partial, lru_cache + +import os, sys, glob + +path = workflow.source_path("../scripts/_helpers.py") +sys.path.insert(0, os.path.dirname(path)) + +from _helpers import validate_checksum, update_config_from_wildcards +from snakemake.utils import update_config + + +def get_config(config, keys, default=None): + """Retrieve a nested value from a dictionary using a tuple of keys.""" + value = config + for key in keys: + if isinstance(value, list): + value = value[key] + else: + value = value.get(key, default) + if value == default: + return default + return value + + +def merge_configs(base_config, scenario_config): + """Merge base config with a specific scenario without modifying the original.""" + merged = copy.deepcopy(base_config) + update_config(merged, scenario_config) + return merged + + +@lru_cache +def scenario_config(scenario_name): + """Retrieve a scenario config based on the overrides from the scenario file.""" + return merge_configs(config, scenarios[scenario_name]) + + +def static_getter(wildcards, keys, default): + """Getter function for static config values.""" + config_with_wildcards = update_config_from_wildcards( + config, wildcards, inplace=False + ) + return get_config(config_with_wildcards, keys, default) + + +def dynamic_getter(wildcards, keys, default): + """Getter function for dynamic config values based on scenario.""" + if "run" not in 
wildcards.keys(): + return get_config(config, keys, default) + scenario_name = wildcards.run + if scenario_name not in scenarios: + raise ValueError( + f"Scenario {scenario_name} not found in file {config['run']['scenario']['file']}." + ) + config_with_scenario = scenario_config(scenario_name) + config_with_wildcards = update_config_from_wildcards( + config_with_scenario, wildcards, inplace=False + ) + return get_config(config_with_wildcards, keys, default) + + +def config_provider(*keys, default=None): + """Dynamically provide config values based on 'run' -> 'name'. + + Usage in Snakemake rules would look something like: + params: + my_param=config_provider("key1", "key2", default="some_default_value") + """ + # Using functools.partial to freeze certain arguments in our getter functions. + if config["run"].get("scenarios", {}).get("enable", False): + return partial(dynamic_getter, keys=keys, default=default) + else: + return partial(static_getter, keys=keys, default=default) + + +def solver_threads(w): + solver_options = config_provider("solving", "solver_options")(w) + option_set = config_provider("solving", "solver", "options")(w) + threads = solver_options[option_set].get("threads", 4) + return threads + def memory(w): factor = 3.0 @@ -23,6 +105,15 @@ def memory(w): return int(factor * (10000 + 195 * int(w.clusters))) +def input_custom_extra_functionality(w): + path = config_provider( + "solving", "options", "custom_extra_functionality", default=False + )(w) + if path: + return os.path.join(os.path.dirname(workflow.snakefile), path) + return [] + + # Check if the workflow has access to the internet by trying to access the HEAD of specified url def has_internet_access(url="www.zenodo.org") -> bool: import http.client as http_client @@ -39,16 +130,11 @@ def has_internet_access(url="www.zenodo.org") -> bool: conn.close() -def input_eurostat(w): - # 2016 includes BA, 2017 does not - report_year = config["energy"]["eurostat_report_year"] - return 
f"data/bundle-sector/eurostat-energy_balances-june_{report_year}_edition" - - -def solved_previous_horizon(wildcards): - planning_horizons = config["scenario"]["planning_horizons"] - i = planning_horizons.index(int(wildcards.planning_horizons)) +def solved_previous_horizon(w): + planning_horizons = config_provider("scenario", "planning_horizons")(w) + i = planning_horizons.index(int(w.planning_horizons)) planning_horizon_p = str(planning_horizons[i - 1]) + return ( RESULTS + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_" diff --git a/rules/postprocess.smk b/rules/postprocess.smk index 1ac4fec9..1b188829 100644 --- a/rules/postprocess.smk +++ b/rules/postprocess.smk @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2023-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -9,60 +9,132 @@ localrules: if config["foresight"] != "perfect": - rule plot_network: + rule plot_power_network_clustered: params: - foresight=config["foresight"], - plotting=config["plotting"], + plotting=config_provider("plotting"), + input: + network=resources("networks/elec_s{simpl}_{clusters}.nc"), + regions_onshore=resources( + "regions_onshore_elec_s{simpl}_{clusters}.geojson" + ), + output: + map=resources("maps/power-network-s{simpl}-{clusters}.pdf"), + threads: 1 + resources: + mem_mb=4000, + benchmark: + benchmarks("plot_power_network_clustered/elec_s{simpl}_{clusters}") + conda: + "../envs/environment.yaml" + script: + "../scripts/plot_power_network_clustered.py" + + rule plot_power_network: + params: + plotting=config_provider("plotting"), input: network=RESULTS + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", - regions=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", + regions=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), output: map=RESULTS + 
"maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf", - today=RESULTS - + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}-today.pdf", threads: 2 resources: mem_mb=10000, + log: + RESULTS + + "logs/plot_power_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log", benchmark: ( - BENCHMARKS - + "plot_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" + RESULTS + + "benchmarksplot_power_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" ) conda: "../envs/environment.yaml" script: - "../scripts/plot_network.py" + "../scripts/plot_power_network.py" + + rule plot_hydrogen_network: + params: + plotting=config_provider("plotting"), + foresight=config_provider("foresight"), + input: + network=RESULTS + + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", + regions=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), + output: + map=RESULTS + + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-h2_network_{planning_horizons}.pdf", + threads: 2 + resources: + mem_mb=10000, + log: + RESULTS + + "logs/plot_hydrogen_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log", + benchmark: + ( + RESULTS + + "benchmarks/plot_hydrogen_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" + ) + conda: + "../envs/environment.yaml" + script: + "../scripts/plot_hydrogen_network.py" + + rule plot_gas_network: + params: + plotting=config_provider("plotting"), + input: + network=RESULTS + + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", + regions=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), + output: + map=RESULTS + + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-ch4_network_{planning_horizons}.pdf", + threads: 2 + resources: + mem_mb=10000, + log: + 
RESULTS + + "logs/plot_gas_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log", + benchmark: + ( + RESULTS + + "benchmarks/plot_gas_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" + ) + conda: + "../envs/environment.yaml" + script: + "../scripts/plot_gas_network.py" if config["foresight"] == "perfect": - rule plot_network: + def output_map_year(w): + return { + f"map_{year}": RESULTS + + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_" + + f"{year}.pdf" + for year in config_provider("scenario", "planning_horizons")(w) + } + + rule plot_power_network_perfect: params: - foresight=config["foresight"], - plotting=config["plotting"], + plotting=config_provider("plotting"), input: network=RESULTS + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc", - regions=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", + regions=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), output: - **{ - f"map_{year}": RESULTS - + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_" - + f"{year}.pdf" - for year in config["scenario"]["planning_horizons"] - }, + unpack(output_map_year), threads: 2 resources: mem_mb=10000, - benchmark: - BENCHMARKS - +"postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_benchmark" conda: "../envs/environment.yaml" script: - "../scripts/plot_network.py" + "../scripts/plot_power_network_perfect.py" rule copy_config: @@ -73,8 +145,6 @@ rule copy_config: threads: 1 resources: mem_mb=1000, - benchmark: - BENCHMARKS + "copy_config" conda: "../envs/environment.yaml" script: @@ -83,24 +153,57 @@ rule copy_config: rule make_summary: params: - foresight=config["foresight"], - costs=config["costs"], - snapshots=config["snapshots"], - scenario=config["scenario"], + foresight=config_provider("foresight"), + costs=config_provider("costs"), + 
snapshots=config_provider("snapshots"), + scenario=config_provider("scenario"), RDIR=RDIR, input: networks=expand( RESULTS + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", - **config["scenario"] + **config["scenario"], + allow_missing=True, ), - costs="data/costs_{}.csv".format(config["costs"]["year"]) - if config["foresight"] == "overnight" - else "data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0]), - plots=expand( + costs=lambda w: ( + resources("costs_{}.csv".format(config_provider("costs", "year")(w))) + if config_provider("foresight")(w) == "overnight" + else resources( + "costs_{}.csv".format( + config_provider("scenario", "planning_horizons", 0)(w) + ) + ) + ), + ac_plot=expand( + resources("maps/power-network-s{simpl}-{clusters}.pdf"), + **config["scenario"], + allow_missing=True, + ), + costs_plot=expand( RESULTS + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf", - **config["scenario"] + **config["scenario"], + allow_missing=True, + ), + h2_plot=lambda w: expand( + ( + RESULTS + + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-h2_network_{planning_horizons}.pdf" + if config_provider("sector", "H2_network")(w) + else [] + ), + **config["scenario"], + allow_missing=True, + ), + ch4_plot=lambda w: expand( + ( + RESULTS + + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-ch4_network_{planning_horizons}.pdf" + if config_provider("sector", "gas_network")(w) + else [] + ), + **config["scenario"], + allow_missing=True, ), output: nodal_costs=RESULTS + "csvs/nodal_costs.csv", @@ -122,9 +225,7 @@ rule make_summary: resources: mem_mb=10000, log: - LOGS + "make_summary.log", - benchmark: - BENCHMARKS + "make_summary" + RESULTS + "logs/make_summary.log", conda: "../envs/environment.yaml" script: @@ -133,18 +234,19 @@ rule make_summary: rule plot_summary: params: - countries=config["countries"], - 
planning_horizons=config["scenario"]["planning_horizons"], - sector_opts=config["scenario"]["sector_opts"], - emissions_scope=config["energy"]["emissions"], - eurostat_report_year=config["energy"]["eurostat_report_year"], - plotting=config["plotting"], + countries=config_provider("countries"), + planning_horizons=config_provider("scenario", "planning_horizons"), + emissions_scope=config_provider("energy", "emissions"), + plotting=config_provider("plotting"), + foresight=config_provider("foresight"), + co2_budget=config_provider("co2_budget"), + sector=config_provider("sector"), RDIR=RDIR, input: costs=RESULTS + "csvs/costs.csv", energy=RESULTS + "csvs/energy.csv", balances=RESULTS + "csvs/supply_energy.csv", - eurostat=input_eurostat, + eurostat="data/eurostat/eurostat-energy_balances-april_2023_edition", co2="data/bundle-sector/eea/UNFCCC_v23.csv", output: costs=RESULTS + "graphs/costs.pdf", @@ -154,9 +256,7 @@ rule plot_summary: resources: mem_mb=10000, log: - LOGS + "plot_summary.log", - benchmark: - BENCHMARKS + "plot_summary" + RESULTS + "logs/plot_summary.log", conda: "../envs/environment.yaml" script: @@ -178,7 +278,7 @@ STATISTICS_BARPLOTS = [ rule plot_elec_statistics: params: - plotting=config["plotting"], + plotting=config_provider("plotting"), barplots=STATISTICS_BARPLOTS, input: network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", diff --git a/rules/retrieve.smk b/rules/retrieve.smk index 13120640..beae4f9c 100644 --- a/rules/retrieve.smk +++ b/rules/retrieve.smk @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2023-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -32,12 +32,12 @@ if config["enable"]["retrieve"] and config["enable"].get("retrieve_databundle", output: protected(expand("data/bundle/{file}", file=datafiles)), log: - LOGS + "retrieve_databundle.log", + "logs/retrieve_databundle.log", resources: mem_mb=1000, retries: 2 conda: - "../envs/environment.yaml" 
+ "../envs/retrieve.yaml" script: "../scripts/retrieve_databundle.py" @@ -50,12 +50,12 @@ if config["enable"].get("retrieve_irena"): onwind="data/existing_infrastructure/onwind_capacity_IRENA.csv", solar="data/existing_infrastructure/solar_capacity_IRENA.csv", log: - LOGS + "retrieve_irena.log", + logs("retrieve_irena.log"), resources: mem_mb=1000, retries: 2 conda: - "../envs/environment.yaml" + "../envs/retrieve.yaml" script: "../scripts/retrieve_irena.py" @@ -76,28 +76,25 @@ if config["enable"]["retrieve"] and config["enable"].get("retrieve_cutout", True retries: 2 run: move(input[0], output[0]) + validate_checksum(output[0], input[0]) if config["enable"]["retrieve"] and config["enable"].get("retrieve_cost_data", True): rule retrieve_cost_data: - input: - storage( - "https://raw.githubusercontent.com/PyPSA/technology-data/{}/outputs/".format( - config["costs"]["version"] - ) - + "costs_{year}.csv", - keep_local=True, - ), + params: + version=config_provider("costs", "version"), output: - "data/costs_{year}.csv", + resources("costs_{year}.csv"), log: - LOGS + "retrieve_cost_data_{year}.log", + logs("retrieve_cost_data_{year}.log"), resources: mem_mb=1000, retries: 2 - run: - move(input[0], output[0]) + conda: + "../envs/retrieve.yaml" + script: + "../scripts/retrieve_cost_data.py" if config["enable"]["retrieve"] and config["enable"].get( @@ -111,14 +108,15 @@ if config["enable"]["retrieve"] and config["enable"].get( keep_local=True, ), output: - RESOURCES + "natura.tiff", + resources("natura.tiff"), log: - LOGS + "retrieve_natura_raster.log", + logs("retrieve_natura_raster.log"), resources: mem_mb=5000, retries: 2 run: move(input[0], output[0]) + validate_checksum(output[0], input[0]) if config["enable"]["retrieve"] and config["enable"].get( @@ -135,49 +133,45 @@ if config["enable"]["retrieve"] and config["enable"].get( "h2_salt_caverns_GWh_per_sqkm.geojson", ] - datafolders = [ - protected( - 
directory("data/bundle-sector/eurostat-energy_balances-june_2016_edition") - ), - protected( - directory("data/bundle-sector/eurostat-energy_balances-may_2018_edition") - ), - protected(directory("data/bundle-sector/jrc-idees-2015")), - ] - rule retrieve_sector_databundle: output: protected(expand("data/bundle-sector/{files}", files=datafiles)), - *datafolders, + protected(directory("data/bundle-sector/jrc-idees-2015")), log: - LOGS + "retrieve_sector_databundle.log", + "logs/retrieve_sector_databundle.log", retries: 2 conda: - "../envs/environment.yaml" + "../envs/retrieve.yaml" script: "../scripts/retrieve_sector_databundle.py" + rule retrieve_eurostat_data: + output: + directory("data/eurostat/eurostat-energy_balances-april_2023_edition"), + log: + "logs/retrieve_eurostat_data.log", + retries: 2 + script: + "../scripts/retrieve_eurostat_data.py" -if config["enable"]["retrieve"] and ( - config["sector"]["gas_network"] or config["sector"]["H2_retrofit"] -): + +if config["enable"]["retrieve"]: datafiles = [ "IGGIELGN_LNGs.geojson", "IGGIELGN_BorderPoints.geojson", "IGGIELGN_Productions.geojson", + "IGGIELGN_Storages.geojson", "IGGIELGN_PipeSegments.geojson", ] rule retrieve_gas_infrastructure_data: output: - protected( - expand("data/gas_network/scigrid-gas/data/{files}", files=datafiles) - ), + expand("data/gas_network/scigrid-gas/data/{files}", files=datafiles), log: - LOGS + "retrieve_gas_infrastructure_data.log", + "logs/retrieve_gas_infrastructure_data.log", retries: 2 conda: - "../envs/environment.yaml" + "../envs/retrieve.yaml" script: "../scripts/retrieve_gas_infrastructure_data.py" @@ -185,24 +179,19 @@ if config["enable"]["retrieve"] and ( if config["enable"]["retrieve"]: rule retrieve_electricity_demand: - input: - storage( - "https://data.open-power-system-data.org/time_series/{version}/time_series_60min_singleindex.csv".format( - version="2019-06-05" - if config["snapshots"]["end"] < "2019" - else "2020-10-06" - ), - keep_local=True, - ), + params: + 
versions=["2019-06-05", "2020-10-06"], output: - RESOURCES + "load_raw.csv", + "data/electricity_demand_raw.csv", log: - LOGS + "retrieve_electricity_demand.log", + "logs/retrieve_electricity_demand.log", resources: mem_mb=5000, retries: 2 - run: - move(input[0], output[0]) + conda: + "../envs/retrieve.yaml" + script: + "../scripts/retrieve_electricity_demand.py" if config["enable"]["retrieve"]: @@ -216,12 +205,13 @@ if config["enable"]["retrieve"]: output: protected("data/shipdensity_global.zip"), log: - LOGS + "retrieve_ship_raster.log", + "logs/retrieve_ship_raster.log", resources: mem_mb=5000, retries: 2 run: move(input[0], output[0]) + validate_checksum(output[0], input[0]) if config["enable"]["retrieve"]: @@ -237,6 +227,23 @@ if config["enable"]["retrieve"]: "data/Copernicus_LC100_global_v3.0.1_2019-nrt_Discrete-Classification-map_EPSG-4326.tif", run: move(input[0], output[0]) + validate_checksum(output[0], input[0]) + + +if config["enable"]["retrieve"]: + + # Downloading LUISA Base Map for land cover and land use: + # Website: https://ec.europa.eu/jrc/en/luisa + rule retrieve_luisa_land_cover: + input: + HTTP.remote( + "jeodpp.jrc.ec.europa.eu/ftp/jrc-opendata/LUISA/EUROPE/Basemaps/LandUse/2018/LATEST/LUISA_basemap_020321_50m.tif", + static=True, + ), + output: + "data/LUISA_basemap_020321_50m.tif", + run: + move(input[0], output[0]) if config["enable"]["retrieve"]: @@ -288,7 +295,7 @@ if config["enable"]["retrieve"]: layer_path = ( f"/vsizip/{params.folder}/WDPA_{bYYYY}_Public_shp_{i}.zip" ) - print(f"Adding layer {i+1} of 3 to combined output file.") + print(f"Adding layer {i + 1} of 3 to combined output file.") shell("ogr2ogr -f gpkg -update -append {output.gpkg} {layer_path}") rule download_wdpa_marine: @@ -311,7 +318,7 @@ if config["enable"]["retrieve"]: for i in range(3): # vsizip is special driver for directly working with zipped shapefiles in ogr2ogr layer_path = f"/vsizip/{params.folder}/WDPA_WDOECM_{bYYYY}_Public_marine_shp_{i}.zip" - 
print(f"Adding layer {i+1} of 3 to combined output file.") + print(f"Adding layer {i + 1} of 3 to combined output file.") shell("ogr2ogr -f gpkg -update -append {output.gpkg} {layer_path}") @@ -327,7 +334,7 @@ if config["enable"]["retrieve"]: output: "data/validation/emission-spot-primary-market-auction-report-2019-data.xls", log: - LOGS + "retrieve_monthly_co2_prices.log", + "logs/retrieve_monthly_co2_prices.log", resources: mem_mb=5000, retries: 2 @@ -341,11 +348,11 @@ if config["enable"]["retrieve"]: output: "data/validation/energy-price-trends-xlsx-5619002.xlsx", log: - LOGS + "retrieve_monthly_fuel_prices.log", + "logs/retrieve_monthly_fuel_prices.log", resources: mem_mb=5000, retries: 2 conda: - "../envs/environment.yaml" + "../envs/retrieve.yaml" script: "../scripts/retrieve_monthly_fuel_prices.py" diff --git a/rules/solve_electricity.smk b/rules/solve_electricity.smk index c396ebd5..d0cab34c 100644 --- a/rules/solve_electricity.smk +++ b/rules/solve_electricity.smk @@ -1,33 +1,35 @@ -# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2023-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT rule solve_network: params: - solving=config["solving"], - foresight=config["foresight"], - planning_horizons=config["scenario"]["planning_horizons"], - co2_sequestration_potential=config["sector"].get( - "co2_sequestration_potential", 200 + solving=config_provider("solving"), + foresight=config_provider("foresight"), + planning_horizons=config_provider("scenario", "planning_horizons"), + co2_sequestration_potential=config_provider( + "sector", "co2_sequestration_potential", default=200 ), + custom_extra_functionality=input_custom_extra_functionality, input: - network=RESOURCES + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", + network=resources("networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"), config=RESULTS + "config.yaml", output: network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", log: 
solver=normpath( - LOGS + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_solver.log" + RESULTS + + "logs/solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_solver.log" ), - python=LOGS - + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_python.log", + python=RESULTS + + "logs/solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_python.log", benchmark: - BENCHMARKS + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}" - threads: 4 + RESULTS + "benchmarks/solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}" + threads: solver_threads resources: mem_mb=memory, - walltime=config["solving"].get("walltime", "12:00:00"), + walltime=config_provider("solving", "walltime", default="12:00:00"), shadow: "minimal" conda: @@ -38,27 +40,27 @@ rule solve_network: rule solve_operations_network: params: - options=config["solving"]["options"], + options=config_provider("solving", "options"), input: network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", output: network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op.nc", log: solver=normpath( - LOGS - + "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_solver.log" + RESULTS + + "logs/solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_solver.log" ), - python=LOGS - + "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_python.log", + python=RESULTS + + "logs/solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_python.log", benchmark: ( - BENCHMARKS - + "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}" + RESULTS + + "benchmarks/solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}" ) threads: 4 resources: mem_mb=(lambda w: 10000 + 372 * int(w.clusters)), - walltime=config["solving"].get("walltime", "12:00:00"), + walltime=config_provider("solving", "walltime", default="12:00:00"), shadow: "minimal" conda: diff --git a/rules/solve_myopic.smk b/rules/solve_myopic.smk index 
8a93d24a..a081fbe6 100644 --- a/rules/solve_myopic.smk +++ b/rules/solve_myopic.smk @@ -1,25 +1,31 @@ -# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2023-4 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT rule add_existing_baseyear: params: - baseyear=config["scenario"]["planning_horizons"][0], - sector=config["sector"], - existing_capacities=config["existing_capacities"], - costs=config["costs"], + baseyear=config_provider("scenario", "planning_horizons", 0), + sector=config_provider("sector"), + existing_capacities=config_provider("existing_capacities"), + costs=config_provider("costs"), input: network=RESULTS + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", - powerplants=RESOURCES + "powerplants.csv", - busmap_s=RESOURCES + "busmap_elec_s{simpl}.csv", - busmap=RESOURCES + "busmap_elec_s{simpl}_{clusters}.csv", - clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv", - costs="data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0]), - cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc", - cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc", - existing_heating="data/existing_infrastructure/existing_heating_raw.csv", + powerplants=resources("powerplants.csv"), + busmap_s=resources("busmap_elec_s{simpl}.csv"), + busmap=resources("busmap_elec_s{simpl}_{clusters}.csv"), + clustered_pop_layout=resources("pop_layout_elec_s{simpl}_{clusters}.csv"), + costs=lambda w: resources( + "costs_{}.csv".format( + config_provider("scenario", "planning_horizons", 0)(w) + ) + ), + cop_soil_total=resources("cop_soil_total_elec_s{simpl}_{clusters}.nc"), + cop_air_total=resources("cop_air_total_elec_s{simpl}_{clusters}.nc"), + existing_heating_distribution=resources( + "existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv" + ), existing_solar="data/existing_infrastructure/solar_capacity_IRENA.csv", 
existing_onwind="data/existing_infrastructure/onwind_capacity_IRENA.csv", existing_offwind="data/existing_infrastructure/offwind_capacity_IRENA.csv", @@ -27,17 +33,20 @@ rule add_existing_baseyear: RESULTS + "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", wildcard_constraints: + # TODO: The first planning_horizon needs to be aligned across scenarios + # snakemake does not support passing functions to wildcard_constraints + # reference: https://github.com/snakemake/snakemake/issues/2703 planning_horizons=config["scenario"]["planning_horizons"][0], #only applies to baseyear threads: 1 resources: mem_mb=2000, log: - LOGS - + "add_existing_baseyear_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log", + RESULTS + + "logs/add_existing_baseyear_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log", benchmark: ( - BENCHMARKS - + "add_existing_baseyear/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" + RESULTS + + "benchmarks/add_existing_baseyear/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" ) conda: "../envs/environment.yaml" @@ -45,18 +54,33 @@ rule add_existing_baseyear: "../scripts/add_existing_baseyear.py" +def input_profile_tech_brownfield(w): + return { + f"profile_{tech}": resources(f"profile_{tech}.nc") + for tech in config_provider("electricity", "renewable_carriers")(w) + if tech != "hydro" + } + + rule add_brownfield: params: - H2_retrofit=config["sector"]["H2_retrofit"], - H2_retrofit_capacity_per_CH4=config["sector"]["H2_retrofit_capacity_per_CH4"], - threshold_capacity=config["existing_capacities"]["threshold_capacity"], + H2_retrofit=config_provider("sector", "H2_retrofit"), + H2_retrofit_capacity_per_CH4=config_provider( + "sector", "H2_retrofit_capacity_per_CH4" + ), + threshold_capacity=config_provider("existing_capacities", " threshold_capacity"), + snapshots=config_provider("snapshots"), + 
carriers=config_provider("electricity", "renewable_carriers"), input: + unpack(input_profile_tech_brownfield), + simplify_busmap=resources("busmap_elec_s{simpl}.csv"), + cluster_busmap=resources("busmap_elec_s{simpl}_{clusters}.csv"), network=RESULTS + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", network_p=solved_previous_horizon, #solved network at previous time step - costs="data/costs_{planning_horizons}.csv", - cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc", - cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc", + costs=resources("costs_{planning_horizons}.csv"), + cop_soil_total=resources("cop_soil_total_elec_s{simpl}_{clusters}.nc"), + cop_air_total=resources("cop_air_total_elec_s{simpl}_{clusters}.nc"), output: RESULTS + "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", @@ -64,12 +88,12 @@ rule add_brownfield: resources: mem_mb=10000, log: - LOGS - + "add_brownfield_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log", + RESULTS + + "logs/add_brownfield_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log", benchmark: ( - BENCHMARKS - + "add_brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" + RESULTS + + "benchmarks/add_brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" ) conda: "../envs/environment.yaml" @@ -82,16 +106,17 @@ ruleorder: add_existing_baseyear > add_brownfield rule solve_sector_network_myopic: params: - solving=config["solving"], - foresight=config["foresight"], - planning_horizons=config["scenario"]["planning_horizons"], - co2_sequestration_potential=config["sector"].get( - "co2_sequestration_potential", 200 + solving=config_provider("solving"), + foresight=config_provider("foresight"), + planning_horizons=config_provider("scenario", "planning_horizons"), + 
co2_sequestration_potential=config_provider( + "sector", "co2_sequestration_potential", default=200 ), + custom_extra_functionality=input_custom_extra_functionality, input: network=RESULTS + "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", - costs="data/costs_{planning_horizons}.csv", + costs=resources("costs_{planning_horizons}.csv"), config=RESULTS + "config.yaml", output: RESULTS @@ -99,18 +124,20 @@ rule solve_sector_network_myopic: shadow: "shallow" log: - solver=LOGS - + "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_solver.log", - python=LOGS - + "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log", - threads: 4 + solver=RESULTS + + "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_solver.log", + memory=RESULTS + + "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_memory.log", + python=RESULTS + + "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log", + threads: solver_threads resources: - mem_mb=config["solving"]["mem"], - walltime=config["solving"].get("walltime", "12:00:00"), + mem_mb=config_provider("solving", "mem"), + walltime=config_provider("solving", "walltime", default="12:00:00"), benchmark: ( - BENCHMARKS - + "solve_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" + RESULTS + + "benchmarks/solve_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" ) conda: "../envs/environment.yaml" diff --git a/rules/solve_overnight.smk b/rules/solve_overnight.smk index c7700760..b212d453 100644 --- a/rules/solve_overnight.smk +++ b/rules/solve_overnight.smk @@ -1,16 +1,17 @@ -# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2023-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT rule solve_sector_network: params: - solving=config["solving"], - 
foresight=config["foresight"], - planning_horizons=config["scenario"]["planning_horizons"], - co2_sequestration_potential=config["sector"].get( - "co2_sequestration_potential", 200 + solving=config_provider("solving"), + foresight=config_provider("foresight"), + planning_horizons=config_provider("scenario", "planning_horizons"), + co2_sequestration_potential=config_provider( + "sector", "co2_sequestration_potential", default=200 ), + custom_extra_functionality=input_custom_extra_functionality, input: network=RESULTS + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", @@ -21,19 +22,20 @@ rule solve_sector_network: shadow: "shallow" log: - solver=LOGS - + "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_solver.log", - python=LOGS - + "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log", - threads: config["solving"]["solver"].get("threads", 4) + solver=RESULTS + + "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_solver.log", + memory=RESULTS + + "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_memory.log", + python=RESULTS + + "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log", + threads: solver_threads resources: - mem_mb=config["solving"]["mem"], - walltime=config["solving"].get("walltime", "12:00:00"), + mem_mb=config_provider("solving", "mem"), + walltime=config_provider("solving", "walltime", default="12:00:00"), benchmark: ( RESULTS - + BENCHMARKS - + "solve_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" + + "benchmarks/solve_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" ) conda: "../envs/environment.yaml" diff --git a/rules/solve_perfect.smk b/rules/solve_perfect.smk index ef4e367d..a565d978 100644 --- a/rules/solve_perfect.smk +++ b/rules/solve_perfect.smk @@ -1,22 +1,29 @@ -# 
SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2023-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT rule add_existing_baseyear: params: - baseyear=config["scenario"]["planning_horizons"][0], - sector=config["sector"], - existing_capacities=config["existing_capacities"], - costs=config["costs"], + baseyear=config_provider("scenario", "planning_horizons", 0), + sector=config_provider("sector"), + existing_capacities=config_provider("existing_capacities"), + costs=config_provider("costs"), input: network=RESULTS + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", - powerplants=RESOURCES + "powerplants.csv", - busmap_s=RESOURCES + "busmap_elec_s{simpl}.csv", - busmap=RESOURCES + "busmap_elec_s{simpl}_{clusters}.csv", - clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv", - costs="data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0]), - cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc", - cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc", + powerplants=resources("powerplants.csv"), + busmap_s=resources("busmap_elec_s{simpl}.csv"), + busmap=resources("busmap_elec_s{simpl}_{clusters}.csv"), + clustered_pop_layout=resources("pop_layout_elec_s{simpl}_{clusters}.csv"), + costs=lambda w: resources( + "costs_{}.csv".format( + config_provider("scenario", "planning_horizons", 0)(w) + ) + ), + cop_soil_total=resources("cop_soil_total_elec_s{simpl}_{clusters}.nc"), + cop_air_total=resources("cop_air_total_elec_s{simpl}_{clusters}.nc"), + existing_heating_distribution=resources( + "existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv" + ), existing_heating="data/existing_infrastructure/existing_heating_raw.csv", existing_solar="data/existing_infrastructure/solar_capacity_IRENA.csv", existing_onwind="data/existing_infrastructure/onwind_capacity_IRENA.csv", @@ -30,12 +37,12 @@ rule 
add_existing_baseyear: resources: mem_mb=2000, log: - LOGS - + "add_existing_baseyear_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log", + logs( + "add_existing_baseyear_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log" + ), benchmark: - ( - BENCHMARKS - + "add_existing_baseyear/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" + benchmarks( + "add_existing_baseyear/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" ) conda: "../envs/environment.yaml" @@ -43,51 +50,28 @@ rule add_existing_baseyear: "../scripts/add_existing_baseyear.py" -rule add_brownfield: - params: - H2_retrofit=config["sector"]["H2_retrofit"], - H2_retrofit_capacity_per_CH4=config["sector"]["H2_retrofit_capacity_per_CH4"], - threshold_capacity=config["existing_capacities"]["threshold_capacity"], - input: - network=RESULTS - + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", - network_p=solved_previous_horizon, #solved network at previous time step - costs="data/costs_{planning_horizons}.csv", - cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc", - cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc", - output: - RESULTS - + "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", - threads: 4 - resources: - mem_mb=10000, - log: - LOGS - + "add_brownfield_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log", - benchmark: - ( - BENCHMARKS - + "add_brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" - ) - conda: - "../envs/environment.yaml" - script: - "../scripts/add_brownfield.py" +def input_network_year(w): + return { + f"network_{year}": RESULTS + + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}" + + f"_{year}.nc" + for year in config_provider("scenario", "planning_horizons")(w)[1:] + } rule 
prepare_perfect_foresight: + params: + costs=config_provider("costs"), + time_resolution=config_provider("clustering", "temporal", "sector"), input: - **{ - f"network_{year}": RESULTS - + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_" - + f"{year}.nc" - for year in config["scenario"]["planning_horizons"][1:] - }, + unpack(input_network_year), brownfield_network=lambda w: ( RESULTS + "prenetworks-brownfield/" + "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_" - + "{}.nc".format(str(config["scenario"]["planning_horizons"][0])) + + "{}.nc".format( + str(config_provider("scenario", "planning_horizons", 0)(w)) + ) ), output: RESULTS @@ -96,12 +80,12 @@ rule prepare_perfect_foresight: resources: mem_mb=10000, log: - LOGS - + "prepare_perfect_foresight{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}.log", + logs( + "prepare_perfect_foresight{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}.log" + ), benchmark: - ( - BENCHMARKS - + "prepare_perfect_foresight{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}" + benchmarks( + "prepare_perfect_foresight{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}" ) conda: "../envs/environment.yaml" @@ -111,24 +95,25 @@ rule prepare_perfect_foresight: rule solve_sector_network_perfect: params: - solving=config["solving"], - foresight=config["foresight"], - sector=config["sector"], - planning_horizons=config["scenario"]["planning_horizons"], - co2_sequestration_potential=config["sector"].get( - "co2_sequestration_potential", 200 + solving=config_provider("solving"), + foresight=config_provider("foresight"), + sector=config_provider("sector"), + planning_horizons=config_provider("scenario", "planning_horizons"), + co2_sequestration_potential=config_provider( + "sector", "co2_sequestration_potential", default=200 ), + custom_extra_functionality=input_custom_extra_functionality, input: network=RESULTS + "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc", - 
costs="data/costs_2030.csv", + costs=resources("costs_2030.csv"), config=RESULTS + "config.yaml", output: RESULTS + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc", - threads: 4 + threads: solver_threads resources: - mem_mb=config["solving"]["mem"], + mem_mb=config_provider("solving", "mem"), shadow: "shallow" log: @@ -140,8 +125,8 @@ rule solve_sector_network_perfect: + "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_memory.log", benchmark: ( - BENCHMARKS - + "solve_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years}" + RESULTS + + "benchmarks/solve_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years" ) conda: "../envs/environment.yaml" @@ -149,18 +134,22 @@ rule solve_sector_network_perfect: "../scripts/solve_network.py" +def input_networks_make_summary_perfect(w): + return { + f"networks_{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}": RESULTS + + f"postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc" + for simpl in config_provider("scenario", "simpl")(w) + for clusters in config_provider("scenario", "clusters")(w) + for opts in config_provider("scenario", "opts")(w) + for sector_opts in config_provider("scenario", "sector_opts")(w) + for ll in config_provider("scenario", "ll")(w) + } + + rule make_summary_perfect: input: - **{ - f"networks_{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}": RESULTS + f"postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc" - for simpl in config["scenario"]["simpl"] - for clusters in config["scenario"]["clusters"] - for opts in config["scenario"]["opts"] - for sector_opts in config["scenario"]["sector_opts"] - for ll in config["scenario"]["ll"] - }, - costs="data/costs_2020.csv", + unpack(input_networks_make_summary_perfect), + costs=resources("costs_2020.csv"), output: nodal_costs=RESULTS + "csvs/nodal_costs.csv", 
nodal_capacities=RESULTS + "csvs/nodal_capacities.csv", @@ -182,13 +171,10 @@ rule make_summary_perfect: resources: mem_mb=10000, log: - LOGS + "make_summary_perfect.log", + logs("make_summary_perfect.log"), benchmark: - (BENCHMARKS + "make_summary_perfect") + benchmarks("make_summary_perfect") conda: "../envs/environment.yaml" script: "../scripts/make_summary_perfect.py" - - -ruleorder: add_existing_baseyear > add_brownfield diff --git a/rules/validate.smk b/rules/validate.smk index cfb8c959..91fe6e91 100644 --- a/rules/validate.smk +++ b/rules/validate.smk @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2023-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -17,12 +17,12 @@ rule build_electricity_production: The data is used for validation of the optimization results. """ params: - snapshots=config["snapshots"], - countries=config["countries"], + snapshots=config_provider("snapshots"), + countries=config_provider("countries"), output: - RESOURCES + "historical_electricity_production.csv", + resources("historical_electricity_production.csv"), log: - LOGS + "build_electricity_production.log", + logs("build_electricity_production.log"), resources: mem_mb=5000, script: @@ -35,14 +35,14 @@ rule build_cross_border_flows: The data is used for validation of the optimization results. """ params: - snapshots=config["snapshots"], - countries=config["countries"], + snapshots=config_provider("snapshots"), + countries=config_provider("countries"), input: - network=RESOURCES + "networks/base.nc", + network=resources("networks/base.nc"), output: - RESOURCES + "historical_cross_border_flows.csv", + resources("historical_cross_border_flows.csv"), log: - LOGS + "build_cross_border_flows.log", + logs("build_cross_border_flows.log"), resources: mem_mb=5000, script: @@ -55,12 +55,12 @@ rule build_electricity_prices: The data is used for validation of the optimization results. 
""" params: - snapshots=config["snapshots"], - countries=config["countries"], + snapshots=config_provider("snapshots"), + countries=config_provider("countries"), output: - RESOURCES + "historical_electricity_prices.csv", + resources("historical_electricity_prices.csv"), log: - LOGS + "build_electricity_prices.log", + logs("build_electricity_prices.log"), resources: mem_mb=5000, script: @@ -70,7 +70,7 @@ rule build_electricity_prices: rule plot_validation_electricity_production: input: network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", - electricity_production=RESOURCES + "historical_electricity_production.csv", + electricity_production=resources("historical_electricity_production.csv"), output: **{ plot: RESULTS @@ -85,10 +85,10 @@ rule plot_validation_electricity_production: rule plot_validation_cross_border_flows: params: - countries=config["countries"], + countries=config_provider("countries"), input: network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", - cross_border_flows=RESOURCES + "historical_cross_border_flows.csv", + cross_border_flows=resources("historical_cross_border_flows.csv"), output: **{ plot: RESULTS @@ -104,7 +104,7 @@ rule plot_validation_cross_border_flows: rule plot_validation_electricity_prices: input: network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", - electricity_prices=RESOURCES + "historical_electricity_prices.csv", + electricity_prices=resources("historical_electricity_prices.csv"), output: **{ plot: RESULTS diff --git a/scripts/__init__.py b/scripts/__init__.py new file mode 100644 index 00000000..fc781c2f --- /dev/null +++ b/scripts/__init__.py @@ -0,0 +1,4 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT diff --git a/scripts/_benchmark.py b/scripts/_benchmark.py index 4e3413e9..58fc3d39 100644 --- a/scripts/_benchmark.py +++ b/scripts/_benchmark.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# 
SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -13,15 +13,15 @@ import os import sys import time +from memory_profiler import _get_memory, choose_backend + logger = logging.getLogger(__name__) # TODO: provide alternative when multiprocessing is not available try: from multiprocessing import Pipe, Process except ImportError: - from multiprocessing.dummy import Process, Pipe - -from memory_profiler import _get_memory, choose_backend + from multiprocessing.dummy import Pipe, Process # The memory logging facilities have been adapted from memory_profiler diff --git a/scripts/_helpers.py b/scripts/_helpers.py index c3066965..a1504c3c 100644 --- a/scripts/_helpers.py +++ b/scripts/_helpers.py @@ -1,19 +1,23 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT import contextlib +import copy +import hashlib import logging import os +import re import urllib +from functools import partial from pathlib import Path import pandas as pd import pytz +import requests import yaml -from pypsa.components import component_attrs, components -from pypsa.descriptors import Dict +from snakemake.utils import update_config from tqdm import tqdm logger = logging.getLogger(__name__) @@ -21,6 +25,106 @@ logger = logging.getLogger(__name__) REGION_COLS = ["geometry", "name", "x", "y", "country"] +def get_run_path(fn, dir, rdir, shared_resources): + """ + Dynamically provide paths based on shared resources and filename. + + Use this function for snakemake rule inputs or outputs that should be + optionally shared across runs or created individually for each run. + + Parameters + ---------- + fn : str + The filename for the path to be generated. + dir : str + The base directory. + rdir : str + Relative directory for non-shared resources. 
+ shared_resources : str or bool + Specifies which resources should be shared. + - If string is "base", special handling for shared "base" resources (see notes). + - If random string other than "base", this folder is used instead of the `rdir` keyword. + - If boolean, directly specifies if the resource is shared. + + Returns + ------- + str + Full path where the resource should be stored. + + Notes + ----- + Special case for "base" allows no wildcards other than "technology", "year" + and "scope" and excludes filenames starting with "networks/elec" or + "add_electricity". All other resources are shared. + """ + if shared_resources == "base": + pattern = r"\{([^{}]+)\}" + existing_wildcards = set(re.findall(pattern, fn)) + irrelevant_wildcards = {"technology", "year", "scope"} + no_relevant_wildcards = not existing_wildcards - irrelevant_wildcards + no_elec_rule = not fn.startswith("networks/elec") and not fn.startswith( + "add_electricity" + ) + is_shared = no_relevant_wildcards and no_elec_rule + elif isinstance(shared_resources, str): + rdir = shared_resources + "/" + is_shared = True + elif isinstance(shared_resources, bool): + is_shared = shared_resources + else: + raise ValueError( + "shared_resources must be a boolean, str, or 'base' for special handling." + ) + + if is_shared: + return f"{dir}{fn}" + else: + return f"{dir}{rdir}{fn}" + + +def path_provider(dir, rdir, shared_resources): + """ + Returns a partial function that dynamically provides paths based on shared + resources and the filename. + + Returns + ------- + partial function + A partial function that takes a filename as input and + returns the path to the file based on the shared_resources parameter. + """ + return partial(get_run_path, dir=dir, rdir=rdir, shared_resources=shared_resources) + + +def get_opt(opts, expr, flags=None): + """ + Return the first option matching the regular expression. + + The regular expression is case-insensitive by default. 
+ """ + if flags is None: + flags = re.IGNORECASE + for o in opts: + match = re.match(expr, o, flags=flags) + if match: + return match.group(0) + return None + + +def find_opt(opts, expr): + """ + Return if available the float after the expression. + """ + for o in opts: + if expr in o: + m = re.findall(r"m?\d+(?:[\.p]\d+)?", o) + if len(m) > 0: + return True, float(m[-1].replace("p", ".").replace("m", "-")) + else: + return True, None + return False, None + + # Define a context manager to temporarily mute print statements @contextlib.contextmanager def mute_print(): @@ -29,6 +133,21 @@ def mute_print(): yield +def set_scenario_config(snakemake): + scenario = snakemake.config["run"].get("scenarios", {}) + if scenario.get("enable") and "run" in snakemake.wildcards.keys(): + try: + with open(scenario["file"], "r") as f: + scenario_config = yaml.safe_load(f) + except FileNotFoundError: + # fallback for mock_snakemake + script_dir = Path(__file__).parent.resolve() + root_dir = script_dir.parent + with open(root_dir / scenario["file"], "r") as f: + scenario_config = yaml.safe_load(f) + update_config(snakemake.config, scenario_config[snakemake.wildcards.run]) + + def configure_logging(snakemake, skip_handlers=False): """ Configure the basic behaviour for the logging module. @@ -48,6 +167,7 @@ def configure_logging(snakemake, skip_handlers=False): Do (not) skip the default handlers created for redirecting output to STDERR and file. 
""" import logging + import sys kwargs = snakemake.config.get("logging", dict()).copy() kwargs.setdefault("level", "INFO") @@ -71,6 +191,16 @@ def configure_logging(snakemake, skip_handlers=False): ) logging.basicConfig(**kwargs) + # Setup a function to handle uncaught exceptions and include them with their stacktrace into logfiles + def handle_exception(exc_type, exc_value, exc_traceback): + # Log the exception + logger = logging.getLogger() + logger.error( + "Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback) + ) + + sys.excepthook = handle_exception + def update_p_nom_max(n): # if extendable carriers (solar/onwind/...) have capacity >= 0, @@ -191,7 +321,13 @@ def progress_retrieve(url, file, disable=False): urllib.request.urlretrieve(url, file, reporthook=update_to) -def mock_snakemake(rulename, root_dir=None, configfiles=[], **wildcards): +def mock_snakemake( + rulename, + root_dir=None, + configfiles=None, + submodule_dir="workflow/submodules/pypsa-eur", + **wildcards, +): """ This function is expected to be executed from the 'scripts'-directory of ' the snakemake project. It returns a snakemake.script.Snakemake object, @@ -207,6 +343,9 @@ def mock_snakemake(rulename, root_dir=None, configfiles=[], **wildcards): path to the root directory of the snakemake project configfiles: list, str list of configfiles to be used to update the config + submodule_dir: str, Path + in case PyPSA-Eur is used as a submodule, submodule_dir is + the path of pypsa-eur relative to the project directory. **wildcards: keyword arguments fixing the wildcards. Only necessary if wildcards are needed. 
@@ -233,7 +372,10 @@ def mock_snakemake(rulename, root_dir=None, configfiles=[], **wildcards): root_dir = Path(root_dir).resolve() user_in_script_dir = Path.cwd().resolve() == script_dir - if user_in_script_dir: + if str(submodule_dir) in __file__: + # the submodule_dir path is only need to locate the project dir + os.chdir(Path(__file__[: __file__.find(str(submodule_dir))])) + elif user_in_script_dir: os.chdir(root_dir) elif Path.cwd().resolve() != root_dir: raise RuntimeError( @@ -245,7 +387,9 @@ def mock_snakemake(rulename, root_dir=None, configfiles=[], **wildcards): if os.path.exists(p): snakefile = p break - if isinstance(configfiles, str): + if configfiles is None: + configfiles = [] + elif isinstance(configfiles, str): configfiles = [configfiles] resource_settings = ResourceSettings() @@ -277,7 +421,7 @@ def mock_snakemake(rulename, root_dir=None, configfiles=[], **wildcards): def make_accessable(*ios): for io in ios: - for i in range(len(io)): + for i, _ in enumerate(io): io[i] = os.path.abspath(io[i]) make_accessable(job.input, job.output, job.log) @@ -324,14 +468,259 @@ def generate_periodic_profiles(dt_index, nodes, weekly_profile, localize=None): return week_df -def parse(l): - return yaml.safe_load(l[0]) if len(l) == 1 else {l.pop(0): parse(l)} +def parse(infix): + """ + Recursively parse a chained wildcard expression into a dictionary or a YAML + object. + + Parameters + ---------- + list_to_parse : list + The list to parse. + + Returns + ------- + dict or YAML object + The parsed list. + """ + if len(infix) == 1: + return yaml.safe_load(infix[0]) + else: + return {infix.pop(0): parse(infix)} -def update_config_with_sector_opts(config, sector_opts): - from snakemake.utils import update_config +def update_config_from_wildcards(config, w, inplace=True): + """ + Parses configuration settings from wildcards and updates the config. 
+ """ - for o in sector_opts.split("-"): - if o.startswith("CF+"): - l = o.split("+")[1:] - update_config(config, parse(l)) + if not inplace: + config = copy.deepcopy(config) + + if w.get("opts"): + opts = w.opts.split("-") + + if nhours := get_opt(opts, r"^\d+(h|seg)$"): + config["clustering"]["temporal"]["resolution_elec"] = nhours + + co2l_enable, co2l_value = find_opt(opts, "Co2L") + if co2l_enable: + config["electricity"]["co2limit_enable"] = True + if co2l_value is not None: + config["electricity"]["co2limit"] = ( + co2l_value * config["electricity"]["co2base"] + ) + + gasl_enable, gasl_value = find_opt(opts, "CH4L") + if gasl_enable: + config["electricity"]["gaslimit_enable"] = True + if gasl_value is not None: + config["electricity"]["gaslimit"] = gasl_value * 1e6 + + if "Ept" in opts: + config["costs"]["emission_prices"]["co2_monthly_prices"] = True + + ep_enable, ep_value = find_opt(opts, "Ep") + if ep_enable: + config["costs"]["emission_prices"]["enable"] = True + if ep_value is not None: + config["costs"]["emission_prices"]["co2"] = ep_value + + if "ATK" in opts: + config["autarky"]["enable"] = True + if "ATKc" in opts: + config["autarky"]["by_country"] = True + + attr_lookup = { + "p": "p_nom_max", + "e": "e_nom_max", + "c": "capital_cost", + "m": "marginal_cost", + } + for o in opts: + flags = ["+e", "+p", "+m", "+c"] + if all(flag not in o for flag in flags): + continue + carrier, attr_factor = o.split("+") + attr = attr_lookup[attr_factor[0]] + factor = float(attr_factor[1:]) + if not isinstance(config["adjustments"]["electricity"], dict): + config["adjustments"]["electricity"] = dict() + update_config( + config["adjustments"]["electricity"], {attr: {carrier: factor}} + ) + + if w.get("sector_opts"): + opts = w.sector_opts.split("-") + + if "T" in opts: + config["sector"]["transport"] = True + + if "H" in opts: + config["sector"]["heating"] = True + + if "B" in opts: + config["sector"]["biomass"] = True + + if "I" in opts: + 
config["sector"]["industry"] = True + + if "A" in opts: + config["sector"]["agriculture"] = True + + if "CCL" in opts: + config["solving"]["constraints"]["CCL"] = True + + eq_value = get_opt(opts, r"^EQ+\d*\.?\d+(c|)") + for o in opts: + if eq_value is not None: + config["solving"]["constraints"]["EQ"] = eq_value + elif "EQ" in o: + config["solving"]["constraints"]["EQ"] = True + break + + if "BAU" in opts: + config["solving"]["constraints"]["BAU"] = True + + if "SAFE" in opts: + config["solving"]["constraints"]["SAFE"] = True + + if nhours := get_opt(opts, r"^\d+(h|sn|seg)$"): + config["clustering"]["temporal"]["resolution_sector"] = nhours + + if "decentral" in opts: + config["sector"]["electricity_transmission_grid"] = False + + if "noH2network" in opts: + config["sector"]["H2_network"] = False + + if "nowasteheat" in opts: + config["sector"]["use_fischer_tropsch_waste_heat"] = False + config["sector"]["use_methanolisation_waste_heat"] = False + config["sector"]["use_haber_bosch_waste_heat"] = False + config["sector"]["use_methanation_waste_heat"] = False + config["sector"]["use_fuel_cell_waste_heat"] = False + config["sector"]["use_electrolysis_waste_heat"] = False + + if "nodistrict" in opts: + config["sector"]["district_heating"]["progress"] = 0.0 + + dg_enable, dg_factor = find_opt(opts, "dist") + if dg_enable: + config["sector"]["electricity_distribution_grid"] = True + if dg_factor is not None: + config["sector"][ + "electricity_distribution_grid_cost_factor" + ] = dg_factor + + if "biomasstransport" in opts: + config["sector"]["biomass_transport"] = True + + _, maxext = find_opt(opts, "linemaxext") + if maxext is not None: + config["lines"]["max_extension"] = maxext * 1e3 + config["links"]["max_extension"] = maxext * 1e3 + + _, co2l_value = find_opt(opts, "Co2L") + if co2l_value is not None: + config["co2_budget"] = float(co2l_value) + + if co2_distribution := get_opt(opts, r"^(cb)\d+(\.\d+)?(ex|be)$"): + config["co2_budget"] = co2_distribution + + if 
co2_budget := get_opt(opts, r"^(cb)\d+(\.\d+)?$"): + config["co2_budget"] = float(co2_budget[2:]) + + attr_lookup = { + "p": "p_nom_max", + "e": "e_nom_max", + "c": "capital_cost", + "m": "marginal_cost", + } + for o in opts: + flags = ["+e", "+p", "+m", "+c"] + if all(flag not in o for flag in flags): + continue + carrier, attr_factor = o.split("+") + attr = attr_lookup[attr_factor[0]] + factor = float(attr_factor[1:]) + if not isinstance(config["adjustments"]["sector"], dict): + config["adjustments"]["sector"] = dict() + update_config(config["adjustments"]["sector"], {attr: {carrier: factor}}) + + _, sdr_value = find_opt(opts, "sdr") + if sdr_value is not None: + config["costs"]["social_discountrate"] = sdr_value / 100 + + _, seq_limit = find_opt(opts, "seq") + if seq_limit is not None: + config["sector"]["co2_sequestration_potential"] = seq_limit + + # any config option can be represented in wildcard + for o in opts: + if o.startswith("CF+"): + infix = o.split("+")[1:] + update_config(config, parse(infix)) + + if not inplace: + return config + + +def get_checksum_from_zenodo(file_url): + parts = file_url.split("/") + record_id = parts[parts.index("record") + 1] + filename = parts[-1] + + response = requests.get(f"https://zenodo.org/api/records/{record_id}", timeout=30) + response.raise_for_status() + data = response.json() + + for file in data["files"]: + if file["key"] == filename: + return file["checksum"] + return None + + +def validate_checksum(file_path, zenodo_url=None, checksum=None): + """ + Validate file checksum against provided or Zenodo-retrieved checksum. + Calculates the hash of a file using 64KB chunks. Compares it against a + given checksum or one from a Zenodo URL. + + Parameters + ---------- + file_path : str + Path to the file for checksum validation. + zenodo_url : str, optional + URL of the file on Zenodo to fetch the checksum. + checksum : str, optional + Checksum (format 'hash_type:checksum_value') for validation. 
+ + Raises + ------ + AssertionError + If the checksum does not match, or if neither `checksum` nor `zenodo_url` is provided. + + + Examples + -------- + >>> validate_checksum("/path/to/file", checksum="md5:abc123...") + >>> validate_checksum( + ... "/path/to/file", + ... zenodo_url="https://zenodo.org/record/12345/files/example.txt", + ... ) + + If the checksum is invalid, an AssertionError will be raised. + """ + assert checksum or zenodo_url, "Either checksum or zenodo_url must be provided" + if zenodo_url: + checksum = get_checksum_from_zenodo(zenodo_url) + hash_type, checksum = checksum.split(":") + hasher = hashlib.new(hash_type) + with open(file_path, "rb") as f: + for chunk in iter(lambda: f.read(65536), b""): # 64kb chunks + hasher.update(chunk) + calculated_checksum = hasher.hexdigest() + assert ( + calculated_checksum == checksum + ), "Checksum is invalid. This may be due to an incomplete download. Delete the file and re-execute the rule." diff --git a/scripts/add_brownfield.py b/scripts/add_brownfield.py index 74102580..1e175d87 100644 --- a/scripts/add_brownfield.py +++ b/scripts/add_brownfield.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -8,16 +8,20 @@ Prepares brownfield data from previous planning horizon. 
import logging -logger = logging.getLogger(__name__) - -import pandas as pd - -idx = pd.IndexSlice - import numpy as np +import pandas as pd import pypsa -from _helpers import update_config_with_sector_opts +import xarray as xr +from _helpers import ( + configure_logging, + set_scenario_config, + update_config_from_wildcards, +) from add_existing_baseyear import add_build_year_to_new_assets +from pypsa.clustering.spatial import normed_or_uniform + +logger = logging.getLogger(__name__) +idx = pd.IndexSlice def add_brownfield(n, n_p, year): @@ -120,6 +124,82 @@ def add_brownfield(n, n_p, year): n.links.loc[new_pipes, "p_nom_min"] = 0.0 +def disable_grid_expansion_if_LV_limit_hit(n): + if "lv_limit" not in n.global_constraints.index: + return + + total_expansion = ( + n.lines.eval("s_nom_min * length").sum() + + n.links.query("carrier == 'DC'").eval("p_nom_min * length").sum() + ).sum() + + lv_limit = n.global_constraints.at["lv_limit", "constant"] + + # allow small numerical differences + if lv_limit - total_expansion < 1: + logger.info("LV is already reached, disabling expansion and LV limit") + extendable_acs = n.lines.query("s_nom_extendable").index + n.lines.loc[extendable_acs, "s_nom_extendable"] = False + n.lines.loc[extendable_acs, "s_nom"] = n.lines.loc[extendable_acs, "s_nom_min"] + + extendable_dcs = n.links.query("carrier == 'DC' and p_nom_extendable").index + n.links.loc[extendable_dcs, "p_nom_extendable"] = False + n.links.loc[extendable_dcs, "p_nom"] = n.links.loc[extendable_dcs, "p_nom_min"] + + n.global_constraints.drop("lv_limit", inplace=True) + + +def adjust_renewable_profiles(n, input_profiles, params, year): + """ + Adjusts renewable profiles according to the renewable technology specified, + using the latest year below or equal to the selected year. 
+ """ + + # spatial clustering + cluster_busmap = pd.read_csv(snakemake.input.cluster_busmap, index_col=0).squeeze() + simplify_busmap = pd.read_csv( + snakemake.input.simplify_busmap, index_col=0 + ).squeeze() + clustermaps = simplify_busmap.map(cluster_busmap) + clustermaps.index = clustermaps.index.astype(str) + + # temporal clustering + dr = pd.date_range(**params["snapshots"], freq="h") + snapshotmaps = ( + pd.Series(dr, index=dr).where(lambda x: x.isin(n.snapshots), pd.NA).ffill() + ) + + for carrier in params["carriers"]: + if carrier == "hydro": + continue + with xr.open_dataset(getattr(input_profiles, "profile_" + carrier)) as ds: + if ds.indexes["bus"].empty or "year" not in ds.indexes: + continue + + closest_year = max( + (y for y in ds.year.values if y <= year), default=min(ds.year.values) + ) + + p_max_pu = ( + ds["profile"] + .sel(year=closest_year) + .transpose("time", "bus") + .to_pandas() + ) + + # spatial clustering + weight = ds["weight"].sel(year=closest_year).to_pandas() + weight = weight.groupby(clustermaps).transform(normed_or_uniform) + p_max_pu = (p_max_pu * weight).T.groupby(clustermaps).sum().T + p_max_pu.columns = p_max_pu.columns + f" {carrier}" + + # temporal_clustering + p_max_pu = p_max_pu.groupby(snapshotmaps).mean() + + # replace renewable time series + n.generators_t.p_max_pu.loc[:, p_max_pu.columns] = p_max_pu + + if __name__ == "__main__": if "snakemake" not in globals(): from _helpers import mock_snakemake @@ -130,13 +210,14 @@ if __name__ == "__main__": clusters="37", opts="", ll="v1.0", - sector_opts="168H-T-H-B-I-solar+p3-dist1", + sector_opts="168H-T-H-B-I-dist1", planning_horizons=2030, ) - logging.basicConfig(level=snakemake.config["logging"]["level"]) + configure_logging(snakemake) + set_scenario_config(snakemake) - update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts) + update_config_from_wildcards(snakemake.config, snakemake.wildcards) logger.info(f"Preparing brownfield from the file 
{snakemake.input.network_p}") @@ -144,11 +225,15 @@ if __name__ == "__main__": n = pypsa.Network(snakemake.input.network) + adjust_renewable_profiles(n, snakemake.input, snakemake.params, year) + add_build_year_to_new_assets(n, year) n_p = pypsa.Network(snakemake.input.network_p) add_brownfield(n, n_p, year) + disable_grid_expansion_if_LV_limit_hit(n) + n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards))) n.export_to_netcdf(snakemake.output[0]) diff --git a/scripts/add_electricity.py b/scripts/add_electricity.py index e626f456..a799caec 100755 --- a/scripts/add_electricity.py +++ b/scripts/add_electricity.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -52,7 +52,7 @@ Inputs :scale: 34 % - ``data/geth2015_hydro_capacities.csv``: alternative to capacities above; not currently used! -- ``resources/load.csv`` Hourly per-country load profiles. +- ``resources/electricity_demand.csv`` Hourly per-country electricity demand profiles. 
- ``resources/regions_onshore.geojson``: confer :ref:`busregions` - ``resources/nuts3_shapes.geojson``: confer :ref:`shapes` - ``resources/powerplants.csv``: confer :ref:`powerplants` @@ -93,7 +93,7 @@ import powerplantmatching as pm import pypsa import scipy.sparse as sparse import xarray as xr -from _helpers import configure_logging, update_p_nom_max +from _helpers import configure_logging, set_scenario_config, update_p_nom_max from powerplantmatching.export import map_country_bus from shapely.prepared import prep @@ -178,6 +178,15 @@ def sanitize_carriers(n, config): n.carriers["color"] = n.carriers.color.where(n.carriers.color != "", colors) +def sanitize_locations(n): + n.buses["x"] = n.buses.x.where(n.buses.x != 0, n.buses.location.map(n.buses.x)) + n.buses["y"] = n.buses.y.where(n.buses.y != 0, n.buses.location.map(n.buses.y)) + n.buses["country"] = n.buses.country.where( + n.buses.country.ne("") & n.buses.country.notnull(), + n.buses.location.map(n.buses.country), + ) + + def add_co2_emissions(n, costs, carriers): """ Add CO2 emissions to the network's carriers attribute. @@ -288,16 +297,16 @@ def attach_load(n, regions, load, nuts3_shapes, ua_md_gdp, countries, scaling=1. ua_md_gdp = pd.read_csv(ua_md_gdp, dtype={"name": "str"}).set_index("name") - logger.info(f"Load data scaled with scalling factor {scaling}.") + logger.info(f"Load data scaled by factor {scaling}.") opsd_load *= scaling nuts3 = gpd.read_file(nuts3_shapes).set_index("index") def upsample(cntry, group): - l = opsd_load[cntry] + load = opsd_load[cntry] if len(group) == 1: - return pd.DataFrame({group.index[0]: l}) + return pd.DataFrame({group.index[0]: load}) nuts3_cntry = nuts3.loc[nuts3.country == cntry] transfer = shapes_to_shapes(group, nuts3_cntry.geometry).T.tocsr() gdp_n = pd.Series( @@ -314,8 +323,8 @@ def attach_load(n, regions, load, nuts3_shapes, ua_md_gdp, countries, scaling=1. 
# overwrite factor because nuts3 provides no data for UA+MD factors = normed(ua_md_gdp.loc[group.index, "GDP_PPP"].squeeze()) return pd.DataFrame( - factors.values * l.values[:, np.newaxis], - index=l.index, + factors.values * load.values[:, np.newaxis], + index=load.index, columns=factors.index, ) @@ -327,7 +336,9 @@ def attach_load(n, regions, load, nuts3_shapes, ua_md_gdp, countries, scaling=1. axis=1, ) - n.madd("Load", substation_lv_i, bus=substation_lv_i, p_set=load) + n.madd( + "Load", substation_lv_i, bus=substation_lv_i, p_set=load + ) # carrier="electricity" def update_transmission_costs(n, costs, length_factor=1.0): @@ -374,6 +385,10 @@ def attach_wind_and_solar( if ds.indexes["bus"].empty: continue + # if-statement for compatibility with old profiles + if "year" in ds.indexes: + ds = ds.sel(year=ds.year.min(), drop=True) + supcar = car.split("-", 2)[0] if supcar == "offwind": underwater_fraction = ds["underwater_fraction"].to_pandas() @@ -504,8 +519,8 @@ def attach_conventional_generators( snakemake.input[f"conventional_{carrier}_{attr}"], index_col=0 ).iloc[:, 0] bus_values = n.buses.country.map(values) - n.generators[attr].update( - n.generators.loc[idx].bus.map(bus_values).dropna() + n.generators.update( + {attr: n.generators.loc[idx].bus.map(bus_values).dropna()} ) else: # Single value affecting all generators of technology k indiscriminantely of country @@ -622,7 +637,7 @@ def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **par hydro.max_hours > 0, hydro.country.map(max_hours_country) ).fillna(6) - if flatten_dispatch := params.get("flatten_dispatch", False): + if params.get("flatten_dispatch", False): buffer = params.get("flatten_dispatch_buffer", 0.2) average_capacity_factor = inflow_t[hydro.index].mean() / hydro["p_nom"] p_max_pu = (average_capacity_factor + buffer).clip(upper=1) @@ -647,77 +662,6 @@ def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **par ) -def attach_extendable_generators(n, 
costs, ppl, carriers): - logger.warning( - "The function `attach_extendable_generators` is deprecated in v0.5.0." - ) - add_missing_carriers(n, carriers) - add_co2_emissions(n, costs, carriers) - - for tech in carriers: - if tech.startswith("OCGT"): - ocgt = ( - ppl.query("carrier in ['OCGT', 'CCGT']") - .groupby("bus", as_index=False) - .first() - ) - n.madd( - "Generator", - ocgt.index, - suffix=" OCGT", - bus=ocgt["bus"], - carrier=tech, - p_nom_extendable=True, - p_nom=0.0, - capital_cost=costs.at["OCGT", "capital_cost"], - marginal_cost=costs.at["OCGT", "marginal_cost"], - efficiency=costs.at["OCGT", "efficiency"], - ) - - elif tech.startswith("CCGT"): - ccgt = ( - ppl.query("carrier in ['OCGT', 'CCGT']") - .groupby("bus", as_index=False) - .first() - ) - n.madd( - "Generator", - ccgt.index, - suffix=" CCGT", - bus=ccgt["bus"], - carrier=tech, - p_nom_extendable=True, - p_nom=0.0, - capital_cost=costs.at["CCGT", "capital_cost"], - marginal_cost=costs.at["CCGT", "marginal_cost"], - efficiency=costs.at["CCGT", "efficiency"], - ) - - elif tech.startswith("nuclear"): - nuclear = ( - ppl.query("carrier == 'nuclear'").groupby("bus", as_index=False).first() - ) - n.madd( - "Generator", - nuclear.index, - suffix=" nuclear", - bus=nuclear["bus"], - carrier=tech, - p_nom_extendable=True, - p_nom=0.0, - capital_cost=costs.at["nuclear", "capital_cost"], - marginal_cost=costs.at["nuclear", "marginal_cost"], - efficiency=costs.at["nuclear", "efficiency"], - ) - - else: - raise NotImplementedError( - "Adding extendable generators for carrier " - "'{tech}' is not implemented, yet. " - "Only OCGT, CCGT and nuclear are allowed at the moment." - ) - - def attach_OPSD_renewables(n: pypsa.Network, tech_map: Dict[str, List[str]]) -> None: """ Attach renewable capacities from the OPSD dataset to the network. 
@@ -749,8 +693,8 @@ def attach_OPSD_renewables(n: pypsa.Network, tech_map: Dict[str, List[str]]) -> caps = caps.groupby(["bus"]).Capacity.sum() caps = caps / gens_per_bus.reindex(caps.index, fill_value=1) - n.generators.p_nom.update(gens.bus.map(caps).dropna()) - n.generators.p_nom_min.update(gens.bus.map(caps).dropna()) + n.generators.update({"p_nom": gens.bus.map(caps).dropna()}) + n.generators.update({"p_nom_min": gens.bus.map(caps).dropna()}) def estimate_renewable_capacities( @@ -846,6 +790,7 @@ if __name__ == "__main__": snakemake = mock_snakemake("add_electricity") configure_logging(snakemake) + set_scenario_config(snakemake) params = snakemake.params diff --git a/scripts/add_existing_baseyear.py b/scripts/add_existing_baseyear.py index 1842166b..0bbe19f0 100644 --- a/scripts/add_existing_baseyear.py +++ b/scripts/add_existing_baseyear.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -8,25 +8,24 @@ horizon. 
""" import logging - -logger = logging.getLogger(__name__) - -import pandas as pd - -idx = pd.IndexSlice - from types import SimpleNamespace import country_converter as coco import numpy as np +import pandas as pd import pypsa import xarray as xr -from _helpers import update_config_with_sector_opts +from _helpers import ( + configure_logging, + set_scenario_config, + update_config_from_wildcards, +) from add_electricity import sanitize_carriers from prepare_sector_network import cluster_heat_buses, define_spatial, prepare_costs +logger = logging.getLogger(__name__) cc = coco.CountryConverter() - +idx = pd.IndexSlice spatial = SimpleNamespace() @@ -53,7 +52,7 @@ def add_build_year_to_new_assets(n, baseyear): "series" ) & n.component_attrs[c.name].status.str.contains("Input") for attr in n.component_attrs[c.name].index[selection]: - c.pnl[attr].rename(columns=rename, inplace=True) + c.pnl[attr] = c.pnl[attr].rename(columns=rename) def add_existing_renewables(df_agg): @@ -172,10 +171,6 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas phased_out = df_agg[df_agg["DateOut"] < baseyear].index df_agg.drop(phased_out, inplace=True) - # calculate remaining lifetime before phase-out (+1 because assuming - # phase out date at the end of the year) - df_agg["lifetime"] = df_agg.DateOut - df_agg.DateIn + 1 - # assign clustered bus busmap_s = pd.read_csv(snakemake.input.busmap_s, index_col=0).squeeze() busmap = pd.read_csv(snakemake.input.busmap, index_col=0).squeeze() @@ -196,6 +191,10 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas grouping_years, np.digitize(df_agg.DateIn, grouping_years, right=True) ) + # calculate (adjusted) remaining lifetime before phase-out (+1 because assuming + # phase out date at the end of the year) + df_agg["lifetime"] = df_agg.DateOut - df_agg["grouping_year"] + 1 + df = df_agg.pivot_table( index=["grouping_year", "Fueltype"], columns="cluster_bus", @@ -305,7 +304,7 @@ def 
add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas else: bus0 = vars(spatial)[carrier[generator]].nodes if "EU" not in vars(spatial)[carrier[generator]].locations: - bus0 = bus0.intersection(capacity.index + " gas") + bus0 = bus0.intersection(capacity.index + " " + carrier[generator]) # check for missing bus missing_bus = pd.Index(bus0).difference(n.buses.index) @@ -407,104 +406,23 @@ def add_heating_capacities_installed_before_baseyear( """ logger.debug(f"Adding heating capacities installed before {baseyear}") - # Add existing heating capacities, data comes from the study - # "Mapping and analyses of the current and future (2020 - 2030) - # heating/cooling fuel deployment (fossil/renewables) " - # https://ec.europa.eu/energy/studies/mapping-and-analyses-current-and-future-2020-2030-heatingcooling-fuel-deployment_en?redir=1 - # file: "WP2_DataAnnex_1_BuildingTechs_ForPublication_201603.xls" -> "existing_heating_raw.csv". - # TODO start from original file - - # retrieve existing heating capacities - techs = [ - "gas boiler", - "oil boiler", - "resistive heater", - "air heat pump", - "ground heat pump", - ] - df = pd.read_csv(snakemake.input.existing_heating, index_col=0, header=0) - - # data for Albania, Montenegro and Macedonia not included in database - df.loc["Albania"] = np.nan - df.loc["Montenegro"] = np.nan - df.loc["Macedonia"] = np.nan - - df.fillna(0.0, inplace=True) - - # convert GW to MW - df *= 1e3 - - df.index = cc.convert(df.index, to="iso2") - - # coal and oil boilers are assimilated to oil boilers - df["oil boiler"] = df["oil boiler"] + df["coal boiler"] - df.drop(["coal boiler"], axis=1, inplace=True) - - # distribute technologies to nodes by population - pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) - - nodal_df = df.loc[pop_layout.ct] - nodal_df.index = pop_layout.index - nodal_df = nodal_df.multiply(pop_layout.fraction, axis=0) - - # split existing capacities between residential and services 
- # proportional to energy demand - p_set_sum = n.loads_t.p_set.sum() - ratio_residential = pd.Series( - [ - ( - p_set_sum[f"{node} residential rural heat"] - / ( - p_set_sum[f"{node} residential rural heat"] - + p_set_sum[f"{node} services rural heat"] - ) - ) - # if rural heating demand for one of the nodes doesn't exist, - # then columns were dropped before and heating demand share should be 0.0 - if all( - f"{node} {service} rural heat" in p_set_sum.index - for service in ["residential", "services"] - ) - else 0.0 - for node in nodal_df.index - ], - index=nodal_df.index, + existing_heating = pd.read_csv( + snakemake.input.existing_heating_distribution, header=[0, 1], index_col=0 ) - for tech in techs: - nodal_df["residential " + tech] = nodal_df[tech] * ratio_residential - nodal_df["services " + tech] = nodal_df[tech] * (1 - ratio_residential) + techs = existing_heating.columns.get_level_values(1).unique() - names = [ - "residential rural", - "services rural", - "residential urban decentral", - "services urban decentral", - "urban central", - ] - - nodes = {} - p_nom = {} - for name in names: + for name in existing_heating.columns.get_level_values(0).unique(): name_type = "central" if name == "urban central" else "decentral" - nodes[name] = pd.Index( - [ - n.buses.at[index, "location"] - for index in n.buses.index[ - n.buses.index.str.contains(name) - & n.buses.index.str.contains("heat") - ] - ] - ) - heat_pump_type = "air" if "urban" in name else "ground" - heat_type = "residential" if "residential" in name else "services" - if name == "urban central": - p_nom[name] = nodal_df["air heat pump"][nodes[name]] + nodes = pd.Index(n.buses.location[n.buses.index.str.contains(f"{name} heat")]) + + if (name_type != "central") and options["electricity_distribution_grid"]: + nodes_elec = nodes + " low voltage" else: - p_nom[name] = nodal_df[f"{heat_type} {heat_pump_type} heat pump"][ - nodes[name] - ] + nodes_elec = nodes + + heat_pump_type = "air" if "urban" in name 
else "ground" # Add heat pumps costs_name = f"decentral {heat_pump_type}-sourced heat pump" @@ -512,7 +430,7 @@ def add_heating_capacities_installed_before_baseyear( cop = {"air": ashp_cop, "ground": gshp_cop} if time_dep_hp_cop: - efficiency = cop[heat_pump_type][nodes[name]] + efficiency = cop[heat_pump_type][nodes] else: efficiency = costs.at[costs_name, "efficiency"] @@ -520,32 +438,33 @@ def add_heating_capacities_installed_before_baseyear( if int(grouping_year) + default_lifetime <= int(baseyear): continue - # installation is assumed to be linear for the past 25 years (default lifetime) + # installation is assumed to be linear for the past default_lifetime years ratio = (int(grouping_year) - int(grouping_years[i - 1])) / default_lifetime n.madd( "Link", - nodes[name], + nodes, suffix=f" {name} {heat_pump_type} heat pump-{grouping_year}", - bus0=nodes[name], - bus1=nodes[name] + " " + name + " heat", + bus0=nodes_elec, + bus1=nodes + " " + name + " heat", carrier=f"{name} {heat_pump_type} heat pump", efficiency=efficiency, capital_cost=costs.at[costs_name, "efficiency"] * costs.at[costs_name, "fixed"], - p_nom=p_nom[name] * ratio / costs.at[costs_name, "efficiency"], + p_nom=existing_heating.loc[nodes, (name, f"{heat_pump_type} heat pump")] + * ratio + / costs.at[costs_name, "efficiency"], build_year=int(grouping_year), lifetime=costs.at[costs_name, "lifetime"], ) # add resistive heater, gas boilers and oil boilers - # (50% capacities to rural buses, 50% to urban buses) n.madd( "Link", - nodes[name], + nodes, suffix=f" {name} resistive heater-{grouping_year}", - bus0=nodes[name], - bus1=nodes[name] + " " + name + " heat", + bus0=nodes_elec, + bus1=nodes + " " + name + " heat", carrier=name + " resistive heater", efficiency=costs.at[f"{name_type} resistive heater", "efficiency"], capital_cost=( @@ -553,21 +472,20 @@ def add_heating_capacities_installed_before_baseyear( * costs.at[f"{name_type} resistive heater", "fixed"] ), p_nom=( - 0.5 - * 
nodal_df[f"{heat_type} resistive heater"][nodes[name]] + existing_heating.loc[nodes, (name, "resistive heater")] * ratio / costs.at[f"{name_type} resistive heater", "efficiency"] ), build_year=int(grouping_year), - lifetime=costs.at[costs_name, "lifetime"], + lifetime=costs.at[f"{name_type} resistive heater", "lifetime"], ) n.madd( "Link", - nodes[name], + nodes, suffix=f" {name} gas boiler-{grouping_year}", - bus0=spatial.gas.nodes, - bus1=nodes[name] + " " + name + " heat", + bus0="EU gas" if "EU gas" in spatial.gas.nodes else nodes + " gas", + bus1=nodes + " " + name + " heat", bus2="co2 atmosphere", carrier=name + " gas boiler", efficiency=costs.at[f"{name_type} gas boiler", "efficiency"], @@ -577,8 +495,7 @@ def add_heating_capacities_installed_before_baseyear( * costs.at[f"{name_type} gas boiler", "fixed"] ), p_nom=( - 0.5 - * nodal_df[f"{heat_type} gas boiler"][nodes[name]] + existing_heating.loc[nodes, (name, "gas boiler")] * ratio / costs.at[f"{name_type} gas boiler", "efficiency"] ), @@ -588,20 +505,21 @@ def add_heating_capacities_installed_before_baseyear( n.madd( "Link", - nodes[name], + nodes, suffix=f" {name} oil boiler-{grouping_year}", bus0=spatial.oil.nodes, - bus1=nodes[name] + " " + name + " heat", + bus1=nodes + " " + name + " heat", bus2="co2 atmosphere", carrier=name + " oil boiler", efficiency=costs.at["decentral oil boiler", "efficiency"], efficiency2=costs.at["oil", "CO2 intensity"], capital_cost=costs.at["decentral oil boiler", "efficiency"] * costs.at["decentral oil boiler", "fixed"], - p_nom=0.5 - * nodal_df[f"{heat_type} oil boiler"][nodes[name]] - * ratio - / costs.at["decentral oil boiler", "efficiency"], + p_nom=( + existing_heating.loc[nodes, (name, "oil boiler")] + * ratio + / costs.at["decentral oil boiler", "efficiency"] + ), build_year=int(grouping_year), lifetime=costs.at[f"{name_type} gas boiler", "lifetime"], ) @@ -627,11 +545,8 @@ def add_heating_capacities_installed_before_baseyear( ], ) - # drop assets which are at the 
end of their lifetime - links_i = n.links[(n.links.build_year + n.links.lifetime <= baseyear)].index - n.mremove("Link", links_i) - +# %% if __name__ == "__main__": if "snakemake" not in globals(): from _helpers import mock_snakemake @@ -643,16 +558,16 @@ if __name__ == "__main__": clusters="37", ll="v1.0", opts="", - sector_opts="1p7-4380H-T-H-B-I-A-solar+p3-dist1", + sector_opts="8760-T-H-B-I-A-dist1", planning_horizons=2020, ) - logging.basicConfig(level=snakemake.config["logging"]["level"]) + configure_logging(snakemake) + set_scenario_config(snakemake) - update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts) + update_config_from_wildcards(snakemake.config, snakemake.wildcards) options = snakemake.params.sector - opts = snakemake.wildcards.sector_opts.split("-") baseyear = snakemake.params.baseyear @@ -675,7 +590,7 @@ if __name__ == "__main__": n, grouping_years_power, costs, baseyear ) - if "H" in opts: + if options["heating"]: time_dep_hp_cop = options["time_dep_hp_cop"] ashp_cop = ( xr.open_dataarray(snakemake.input.cop_air_total) @@ -687,7 +602,9 @@ if __name__ == "__main__": .to_pandas() .reindex(index=n.snapshots) ) - default_lifetime = snakemake.params.costs["fill_values"]["lifetime"] + default_lifetime = snakemake.params.existing_capacities[ + "default_heating_lifetime" + ] add_heating_capacities_installed_before_baseyear( n, baseyear, diff --git a/scripts/add_extra_components.py b/scripts/add_extra_components.py index e00e1e5f..eb14436e 100644 --- a/scripts/add_extra_components.py +++ b/scripts/add_extra_components.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -55,8 +55,8 @@ import logging import numpy as np import pandas as pd import pypsa -from _helpers import configure_logging -from add_electricity import load_costs, sanitize_carriers +from _helpers import 
configure_logging, set_scenario_config +from add_electricity import load_costs, sanitize_carriers, sanitize_locations idx = pd.IndexSlice @@ -100,10 +100,9 @@ def attach_stores(n, costs, extendable_carriers): n.madd("Carrier", carriers) buses_i = n.buses.index - bus_sub_dict = {k: n.buses[k].values for k in ["x", "y", "country"]} if "H2" in carriers: - h2_buses_i = n.madd("Bus", buses_i + " H2", carrier="H2", **bus_sub_dict) + h2_buses_i = n.madd("Bus", buses_i + " H2", carrier="H2", location=buses_i) n.madd( "Store", @@ -143,7 +142,7 @@ def attach_stores(n, costs, extendable_carriers): if "battery" in carriers: b_buses_i = n.madd( - "Bus", buses_i + " battery", carrier="battery", **bus_sub_dict + "Bus", buses_i + " battery", carrier="battery", location=buses_i ) n.madd( @@ -231,6 +230,7 @@ if __name__ == "__main__": snakemake = mock_snakemake("add_extra_components", simpl="", clusters=5) configure_logging(snakemake) + set_scenario_config(snakemake) n = pypsa.Network(snakemake.input.network) extendable_carriers = snakemake.params.extendable_carriers @@ -246,6 +246,7 @@ if __name__ == "__main__": attach_hydrogen_pipelines(n, costs, extendable_carriers) sanitize_carriers(n, snakemake.config) + sanitize_locations(n) n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards))) n.export_to_netcdf(snakemake.output[0]) diff --git a/scripts/base_network.py b/scripts/base_network.py index eda29451..8e03ae35 100644 --- a/scripts/base_network.py +++ b/scripts/base_network.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -77,11 +77,14 @@ import shapely import shapely.prepared import shapely.wkt import yaml -from _helpers import configure_logging +from _helpers import configure_logging, set_scenario_config +from packaging.version import Version, parse from scipy import spatial from scipy.sparse import csgraph 
from shapely.geometry import LineString, Point +PD_GE_2_2 = parse(pd.__version__) >= Version("2.2") + logger = logging.getLogger(__name__) @@ -138,7 +141,9 @@ def _load_buses_from_eg(eg_buses, europe_shape, config_elec): ) buses["carrier"] = buses.pop("dc").map({True: "DC", False: "AC"}) - buses["under_construction"] = buses["under_construction"].fillna(False).astype(bool) + buses["under_construction"] = buses.under_construction.where( + lambda s: s.notnull(), False + ).astype(bool) # remove all buses outside of all countries including exclusive economic zones (offshore) europe_shape = gpd.read_file(europe_shape).loc[0, "geometry"] @@ -522,12 +527,13 @@ def _set_countries_and_substations(n, config, country_shapes, offshore_shapes): ) return pd.Series(key, index) + compat_kws = dict(include_groups=False) if PD_GE_2_2 else {} gb = buses.loc[substation_b].groupby( ["x", "y"], as_index=False, group_keys=False, sort=False ) - bus_map_low = gb.apply(prefer_voltage, "min") + bus_map_low = gb.apply(prefer_voltage, "min", **compat_kws) lv_b = (bus_map_low == bus_map_low.index).reindex(buses.index, fill_value=False) - bus_map_high = gb.apply(prefer_voltage, "max") + bus_map_high = gb.apply(prefer_voltage, "max", **compat_kws) hv_b = (bus_map_high == bus_map_high.index).reindex(buses.index, fill_value=False) onshore_b = pd.Series(False, buses.index) @@ -553,6 +559,7 @@ def _set_countries_and_substations(n, config, country_shapes, offshore_shapes): for b, df in product(("bus0", "bus1"), (n.lines, n.links)): has_connections_b |= ~df.groupby(b).under_construction.min() + buses["onshore_bus"] = onshore_b buses["substation_lv"] = ( lv_b & onshore_b & (~buses["under_construction"]) & has_connections_b ) @@ -560,7 +567,7 @@ def _set_countries_and_substations(n, config, country_shapes, offshore_shapes): ~buses["under_construction"] ) - c_nan_b = buses.country == "na" + c_nan_b = buses.country.fillna("na") == "na" if c_nan_b.sum() > 0: c_tag = _get_country(buses.loc[c_nan_b]) 
c_tag.loc[~c_tag.isin(countries)] = np.nan @@ -725,11 +732,12 @@ def base_network( transformers = _set_electrical_parameters_transformers(transformers, config) links = _set_electrical_parameters_links(links, config, links_p_nom) converters = _set_electrical_parameters_converters(converters, config) + snapshots = snakemake.params.snapshots n = pypsa.Network() n.name = "PyPSA-Eur" - n.set_snapshots(pd.date_range(freq="h", **config["snapshots"])) + n.set_snapshots(pd.date_range(freq="h", **snapshots)) n.madd("Carrier", ["AC", "DC"]) n.import_components_from_dataframe(buses, "Bus") @@ -761,6 +769,7 @@ if __name__ == "__main__": snakemake = mock_snakemake("base_network") configure_logging(snakemake) + set_scenario_config(snakemake) n = base_network( snakemake.input.eg_buses, diff --git a/scripts/build_ammonia_production.py b/scripts/build_ammonia_production.py index 1bcdf9ae..84d547da 100644 --- a/scripts/build_ammonia_production.py +++ b/scripts/build_ammonia_production.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -8,6 +8,7 @@ Build historical annual ammonia production per country in ktonNH3/a. 
import country_converter as coco import pandas as pd +from _helpers import set_scenario_config cc = coco.CountryConverter() @@ -18,6 +19,8 @@ if __name__ == "__main__": snakemake = mock_snakemake("build_ammonia_production") + set_scenario_config(snakemake) + ammonia = pd.read_excel( snakemake.input.usgs, sheet_name="T12", @@ -25,13 +28,14 @@ if __name__ == "__main__": header=0, index_col=0, skipfooter=19, + na_values=["--"], ) ammonia.index = cc.convert(ammonia.index, to="iso2") years = [str(i) for i in range(2013, 2018)] - countries = ammonia.index.intersection(snakemake.params.countries) - ammonia = ammonia.loc[countries, years].astype(float) + + ammonia = ammonia[years] # convert from ktonN to ktonNH3 ammonia *= 17 / 14 diff --git a/scripts/build_biomass_potentials.py b/scripts/build_biomass_potentials.py index d7c467cf..79e2c203 100644 --- a/scripts/build_biomass_potentials.py +++ b/scripts/build_biomass_potentials.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2021-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2021-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -9,13 +9,15 @@ using data from JRC ENSPRESO. 
import logging -logger = logging.getLogger(__name__) import geopandas as gpd import numpy as np import pandas as pd +logger = logging.getLogger(__name__) AVAILABLE_BIOMASS_YEARS = [2010, 2020, 2030, 2040, 2050] +from _helpers import configure_logging, set_scenario_config + def build_nuts_population_data(year=2013): pop = pd.read_csv( @@ -132,14 +134,14 @@ def disaggregate_nuts0(bio): pop = build_nuts_population_data() # get population in nuts2 - pop_nuts2 = pop.loc[pop.index.str.len() == 4] + pop_nuts2 = pop.loc[pop.index.str.len() == 4].copy() by_country = pop_nuts2.total.groupby(pop_nuts2.ct).sum() pop_nuts2["fraction"] = pop_nuts2.total / pop_nuts2.ct.map(by_country) # distribute nuts0 data to nuts2 by population bio_nodal = bio.loc[pop_nuts2.ct] bio_nodal.index = pop_nuts2.index - bio_nodal = bio_nodal.mul(pop_nuts2.fraction, axis=0) + bio_nodal = bio_nodal.mul(pop_nuts2.fraction, axis=0).astype(float) # update inplace bio.update(bio_nodal) @@ -221,6 +223,9 @@ if __name__ == "__main__": planning_horizons=2050, ) + configure_logging(snakemake) + set_scenario_config(snakemake) + overnight = snakemake.config["foresight"] == "overnight" params = snakemake.params.biomass investment_year = int(snakemake.wildcards.planning_horizons) diff --git a/scripts/build_biomass_transport_costs.py b/scripts/build_biomass_transport_costs.py index 9271b600..9c825c47 100644 --- a/scripts/build_biomass_transport_costs.py +++ b/scripts/build_biomass_transport_costs.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -80,4 +80,9 @@ def build_biomass_transport_costs(): if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake("build_biomass_transport_costs") + build_biomass_transport_costs() diff --git a/scripts/build_bus_regions.py b/scripts/build_bus_regions.py 
index a6500bb0..9d993c17 100644 --- a/scripts/build_bus_regions.py +++ b/scripts/build_bus_regions.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -47,7 +47,7 @@ import geopandas as gpd import numpy as np import pandas as pd import pypsa -from _helpers import REGION_COLS, configure_logging +from _helpers import REGION_COLS, configure_logging, set_scenario_config from scipy.spatial import Voronoi from shapely.geometry import Polygon @@ -115,6 +115,7 @@ if __name__ == "__main__": snakemake = mock_snakemake("build_bus_regions") configure_logging(snakemake) + set_scenario_config(snakemake) countries = snakemake.params.countries @@ -135,7 +136,13 @@ if __name__ == "__main__": c_b = n.buses.country == country onshore_shape = country_shapes[country] - onshore_locs = n.buses.loc[c_b & n.buses.substation_lv, ["x", "y"]] + onshore_locs = ( + n.buses.loc[c_b & n.buses.onshore_bus] + .sort_values( + by="substation_lv", ascending=False + ) # preference for substations + .drop_duplicates(subset=["x", "y"], keep="first")[["x", "y"]] + ) onshore_regions.append( gpd.GeoDataFrame( { diff --git a/scripts/build_clustered_population_layouts.py b/scripts/build_clustered_population_layouts.py index 083f3de4..2d9c6acb 100644 --- a/scripts/build_clustered_population_layouts.py +++ b/scripts/build_clustered_population_layouts.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -11,6 +11,7 @@ import atlite import geopandas as gpd import pandas as pd import xarray as xr +from _helpers import set_scenario_config if __name__ == "__main__": if "snakemake" not in globals(): @@ -22,16 +23,15 @@ if __name__ == "__main__": clusters=48, ) + set_scenario_config(snakemake) + cutout = 
atlite.Cutout(snakemake.input.cutout) clustered_regions = ( - gpd.read_file(snakemake.input.regions_onshore) - .set_index("name") - .buffer(0) - .squeeze() + gpd.read_file(snakemake.input.regions_onshore).set_index("name").buffer(0) ) - I = cutout.indicatormatrix(clustered_regions) + I = cutout.indicatormatrix(clustered_regions) # noqa: E741 pop = {} for item in ["total", "urban", "rural"]: diff --git a/scripts/build_cop_profiles.py b/scripts/build_cop_profiles.py index 4b1d952e..16e44c18 100644 --- a/scripts/build_cop_profiles.py +++ b/scripts/build_cop_profiles.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -14,6 +14,7 @@ https://doi.org/10.1039/C2EE22653G. """ import xarray as xr +from _helpers import set_scenario_config def coefficient_of_performance(delta_T, source="air"): @@ -35,6 +36,8 @@ if __name__ == "__main__": clusters=48, ) + set_scenario_config(snakemake) + for area in ["total", "urban", "rural"]: for source in ["air", "soil"]: source_T = xr.open_dataarray(snakemake.input[f"temp_{source}_{area}"]) diff --git a/scripts/build_cross_border_flows.py b/scripts/build_cross_border_flows.py index b9fc3fe8..d463d234 100644 --- a/scripts/build_cross_border_flows.py +++ b/scripts/build_cross_border_flows.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -8,7 +8,7 @@ import logging import pandas as pd import pypsa -from _helpers import configure_logging +from _helpers import configure_logging, set_scenario_config from entsoe import EntsoePandasClient from entsoe.exceptions import InvalidBusinessParameterError, NoMatchingDataError from requests import HTTPError @@ -21,6 +21,7 @@ if __name__ == "__main__": snakemake = 
mock_snakemake("build_cross_border_flows") configure_logging(snakemake) + set_scenario_config(snakemake) api_key = snakemake.config["private"]["keys"]["entsoe_api"] client = EntsoePandasClient(api_key=api_key) diff --git a/scripts/build_cutout.py b/scripts/build_cutout.py index 9a7f9e00..1edb18ce 100644 --- a/scripts/build_cutout.py +++ b/scripts/build_cutout.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -95,7 +95,7 @@ import logging import atlite import geopandas as gpd import pandas as pd -from _helpers import configure_logging +from _helpers import configure_logging, set_scenario_config logger = logging.getLogger(__name__) @@ -105,6 +105,7 @@ if __name__ == "__main__": snakemake = mock_snakemake("build_cutout", cutout="europe-2013-era5") configure_logging(snakemake) + set_scenario_config(snakemake) cutout_params = snakemake.params.cutouts[snakemake.wildcards.cutout] diff --git a/scripts/build_heat_demand.py b/scripts/build_daily_heat_demand.py similarity index 77% rename from scripts/build_heat_demand.py rename to scripts/build_daily_heat_demand.py index 73494260..0a36db7e 100644 --- a/scripts/build_heat_demand.py +++ b/scripts/build_daily_heat_demand.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -11,6 +11,7 @@ import geopandas as gpd import numpy as np import pandas as pd import xarray as xr +from _helpers import set_scenario_config from dask.distributed import Client, LocalCluster if __name__ == "__main__": @@ -18,10 +19,12 @@ if __name__ == "__main__": from _helpers import mock_snakemake snakemake = mock_snakemake( - "build_heat_demands", + "build_daily_heat_demands", + scope="total", simpl="", clusters=48, ) + set_scenario_config(snakemake) 
nprocesses = int(snakemake.threads) cluster = LocalCluster(n_workers=nprocesses, threads_per_worker=1) @@ -31,13 +34,10 @@ if __name__ == "__main__": cutout = atlite.Cutout(snakemake.input.cutout).sel(time=time) clustered_regions = ( - gpd.read_file(snakemake.input.regions_onshore) - .set_index("name") - .buffer(0) - .squeeze() + gpd.read_file(snakemake.input.regions_onshore).set_index("name").buffer(0) ) - I = cutout.indicatormatrix(clustered_regions) + I = cutout.indicatormatrix(clustered_regions) # noqa: E741 pop_layout = xr.open_dataarray(snakemake.input.pop_layout) diff --git a/scripts/build_district_heat_share.py b/scripts/build_district_heat_share.py new file mode 100644 index 00000000..46ada2f6 --- /dev/null +++ b/scripts/build_district_heat_share.py @@ -0,0 +1,84 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Build district heat shares at each node, depending on investment year. +""" + +import logging + +import pandas as pd +from _helpers import configure_logging, set_scenario_config +from prepare_sector_network import get + +logger = logging.getLogger(__name__) + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "build_district_heat_share", + simpl="", + clusters=48, + planning_horizons="2050", + ) + configure_logging(snakemake) + set_scenario_config(snakemake) + + investment_year = int(snakemake.wildcards.planning_horizons[-4:]) + + pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) + + district_heat_share = pd.read_csv(snakemake.input.district_heat_share, index_col=0)[ + "district heat share" + ] + + # make ct-based share nodal + district_heat_share = district_heat_share.loc[pop_layout.ct] + district_heat_share.index = pop_layout.index + + # total urban population per country + ct_urban = pop_layout.urban.groupby(pop_layout.ct).sum() + + # distribution of urban 
population within a country + pop_layout["urban_ct_fraction"] = pop_layout.urban / pop_layout.ct.map(ct_urban.get) + + # fraction of node that is urban + urban_fraction = pop_layout.urban / pop_layout[["rural", "urban"]].sum(axis=1) + + # maximum potential of urban demand covered by district heating + central_fraction = snakemake.config["sector"]["district_heating"]["potential"] + + # district heating share at each node + dist_fraction_node = ( + district_heat_share * pop_layout["urban_ct_fraction"] / pop_layout["fraction"] + ) + + # if district heating share larger than urban fraction -> set urban + # fraction to district heating share + urban_fraction = pd.concat([urban_fraction, dist_fraction_node], axis=1).max(axis=1) + + # difference of max potential and today's share of district heating + diff = (urban_fraction * central_fraction) - dist_fraction_node + progress = get( + snakemake.config["sector"]["district_heating"]["progress"], investment_year + ) + dist_fraction_node += diff * progress + logger.info( + f"Increase district heating share by a progress factor of {progress:.2%} " + f"resulting in new average share of {dist_fraction_node.mean():.2%}" + ) + + df = pd.DataFrame( + { + "original district heat share": district_heat_share, + "district fraction of node": dist_fraction_node, + "urban fraction": urban_fraction, + }, + dtype=float, + ) + + df.to_csv(snakemake.output.district_heat_share) diff --git a/scripts/build_electricity_demand.py b/scripts/build_electricity_demand.py index f7b6cddd..47befb8a 100755 --- a/scripts/build_electricity_demand.py +++ b/scripts/build_electricity_demand.py @@ -1,15 +1,13 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020 @JanFrederickUnnewehr, The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020 @JanFrederickUnnewehr, 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ -This rule downloads the load data from `Open Power System Data Time series. 
- +This rule downloads the load data from `Open Power System Data Time series `_. For all countries in -the network, the per country load timeseries with suffix -``_load_actual_entsoe_transparency`` are extracted from the dataset. After -filling small gaps linearly and large gaps by copying time-slice of a given -period, the load data is exported to a ``.csv`` file. +the network, the per country load timeseries are extracted from the dataset. +After filling small gaps linearly and large gaps by copying time-slice of a +given period, the load data is exported to a ``.csv`` file. Relevant Settings ----------------- @@ -19,9 +17,7 @@ Relevant Settings snapshots: load: - interpolate_limit: - time_shift_for_large_gaps: - manual_adjustments: + interpolate_limit: time_shift_for_large_gaps: manual_adjustments: .. seealso:: @@ -31,25 +27,25 @@ Relevant Settings Inputs ------ -- ``resources/load_raw.csv``: +- ``data/electricity_demand_raw.csv``: Outputs ------- -- ``resources/load.csv``: +- ``resources/electricity_demand.csv``: """ import logging -logger = logging.getLogger(__name__) -import dateutil import numpy as np import pandas as pd -from _helpers import configure_logging +from _helpers import configure_logging, set_scenario_config from pandas import Timedelta as Delta +logger = logging.getLogger(__name__) -def load_timeseries(fn, years, countries, powerstatistics=True): + +def load_timeseries(fn, years, countries): """ Read load data from OPSD time-series package version 2020-10-06. @@ -62,29 +58,15 @@ def load_timeseries(fn, years, countries, powerstatistics=True): File name or url location (file format .csv) countries : listlike Countries for which to read load data. - powerstatistics: bool - Whether the electricity consumption data of the ENTSOE power - statistics (if true) or of the ENTSOE transparency map (if false) - should be parsed. 
Returns ------- load : pd.DataFrame Load time-series with UTC timestamps x ISO-2 countries """ - logger.info(f"Retrieving load data from '{fn}'.") - - pattern = "power_statistics" if powerstatistics else "transparency" - pattern = f"_load_actual_entsoe_{pattern}" - - def rename(s): - return s[: -len(pattern)] - return ( - pd.read_csv(fn, index_col=0, parse_dates=[0]) + pd.read_csv(fn, index_col=0, parse_dates=[0], date_format="%Y-%m-%dT%H:%M:%SZ") .tz_localize(None) - .filter(like=pattern) - .rename(columns=rename) .dropna(how="all", axis=0) .rename(columns={"GB_UKM": "GB"}) .filter(items=countries) @@ -149,17 +131,18 @@ def copy_timeslice(load, cntry, start, stop, delta, fn_load=None): ].values elif fn_load is not None: duration = pd.date_range(freq="h", start=start - delta, end=stop - delta) - load_raw = load_timeseries(fn_load, duration, [cntry], powerstatistics) + load_raw = load_timeseries(fn_load, duration, [cntry]) load.loc[start:stop, cntry] = load_raw.loc[ start - delta : stop - delta, cntry ].values -def manual_adjustment(load, fn_load, powerstatistics, countries): +def manual_adjustment(load, fn_load, countries): """ Adjust gaps manual for load data from OPSD time-series package. - 1. For the ENTSOE power statistics load data (if powerstatistics is True) + 1. For years later than 2015 for which the load data is mainly taken from the + ENTSOE power statistics Kosovo (KV) and Albania (AL) do not exist in the data set. Kosovo gets the same load curve as Serbia and Albania the same as Macdedonia, both scaled @@ -167,7 +150,8 @@ def manual_adjustment(load, fn_load, powerstatistics, countries): IEA Data browser [0] for the year 2013. - 2. For the ENTSOE transparency load data (if powerstatistics is False) + 2. For years earlier than 2015 for which the load data is mainly taken from the + ENTSOE transparency platforms Albania (AL) and Macedonia (MK) do not exist in the data set. 
Both get the same load curve as Montenegro, scaled by the corresponding ratio of total energy @@ -183,9 +167,6 @@ def manual_adjustment(load, fn_load, powerstatistics, countries): ---------- load : pd.DataFrame Load time-series with UTC timestamps x ISO-2 countries - powerstatistics: bool - Whether argument load comprises the electricity consumption data of - the ENTSOE power statistics or of the ENTSOE transparency map load_fn: str File name or url location (file format .csv) @@ -195,88 +176,72 @@ def manual_adjustment(load, fn_load, powerstatistics, countries): Manual adjusted and interpolated load time-series with UTC timestamps x ISO-2 countries """ - if powerstatistics: - if "MK" in load.columns: - if "AL" not in load.columns or load.AL.isnull().values.all(): - load["AL"] = load["MK"] * (4.1 / 7.4) - if "RS" in load.columns: - if "KV" not in load.columns or load.KV.isnull().values.all(): - load["KV"] = load["RS"] * (4.8 / 27.0) - copy_timeslice( - load, "GR", "2015-08-11 21:00", "2015-08-15 20:00", Delta(weeks=1) - ) - copy_timeslice( - load, "AT", "2018-12-31 22:00", "2019-01-01 22:00", Delta(days=2) - ) - copy_timeslice( - load, "CH", "2010-01-19 07:00", "2010-01-19 22:00", Delta(days=1) - ) - copy_timeslice( - load, "CH", "2010-03-28 00:00", "2010-03-28 21:00", Delta(days=1) - ) - # is a WE, so take WE before - copy_timeslice( - load, "CH", "2010-10-08 13:00", "2010-10-10 21:00", Delta(weeks=1) - ) - copy_timeslice( - load, "CH", "2010-11-04 04:00", "2010-11-04 22:00", Delta(days=1) - ) - copy_timeslice( - load, "NO", "2010-12-09 11:00", "2010-12-09 18:00", Delta(days=1) - ) - # whole january missing - copy_timeslice( - load, - "GB", - "2010-01-01 00:00", - "2010-01-31 23:00", - Delta(days=-365), - fn_load, - ) - # 1.1. 
at midnight gets special treatment - copy_timeslice( - load, - "IE", - "2016-01-01 00:00", - "2016-01-01 01:00", - Delta(days=-366), - fn_load, - ) - copy_timeslice( - load, - "PT", - "2016-01-01 00:00", - "2016-01-01 01:00", - Delta(days=-366), - fn_load, - ) - copy_timeslice( - load, - "GB", - "2016-01-01 00:00", - "2016-01-01 01:00", - Delta(days=-366), - fn_load, - ) - - else: + if "AL" not in load and "AL" in countries: if "ME" in load: - if "AL" not in load and "AL" in countries: - load["AL"] = load.ME * (5.7 / 2.9) - if "MK" not in load and "MK" in countries: + load["AL"] = load.ME * (5.7 / 2.9) + elif "MK" in load: + load["AL"] = load["MK"] * (4.1 / 7.4) + + if "MK" in countries: + if "MK" not in load or load.MK.isnull().sum() > len(load) / 2: + if "ME" in load: load["MK"] = load.ME * (6.7 / 2.9) - if "BA" not in load and "BA" in countries: - load["BA"] = load.HR * (11.0 / 16.2) - copy_timeslice( - load, "BG", "2018-10-27 21:00", "2018-10-28 22:00", Delta(weeks=1) - ) - copy_timeslice( - load, "LU", "2019-01-02 11:00", "2019-01-05 05:00", Delta(weeks=-1) - ) - copy_timeslice( - load, "LU", "2019-02-05 20:00", "2019-02-06 19:00", Delta(weeks=-1) - ) + + if "BA" not in load and "BA" in countries: + if "ME" in load: + load["BA"] = load.HR * (11.0 / 16.2) + + if "KV" not in load or load.KV.isnull().values.all(): + if "RS" in load: + load["KV"] = load["RS"] * (4.8 / 27.0) + + copy_timeslice(load, "GR", "2015-08-11 21:00", "2015-08-15 20:00", Delta(weeks=1)) + copy_timeslice(load, "AT", "2018-12-31 22:00", "2019-01-01 22:00", Delta(days=2)) + copy_timeslice(load, "CH", "2010-01-19 07:00", "2010-01-19 22:00", Delta(days=1)) + copy_timeslice(load, "CH", "2010-03-28 00:00", "2010-03-28 21:00", Delta(days=1)) + # is a WE, so take WE before + copy_timeslice(load, "CH", "2010-10-08 13:00", "2010-10-10 21:00", Delta(weeks=1)) + copy_timeslice(load, "CH", "2010-11-04 04:00", "2010-11-04 22:00", Delta(days=1)) + copy_timeslice(load, "NO", "2010-12-09 11:00", "2010-12-09 
18:00", Delta(days=1)) + # whole january missing + copy_timeslice( + load, + "GB", + "2010-01-01 00:00", + "2010-01-31 23:00", + Delta(days=-365), + fn_load, + ) + # 1.1. at midnight gets special treatment + copy_timeslice( + load, + "IE", + "2016-01-01 00:00", + "2016-01-01 01:00", + Delta(days=-366), + fn_load, + ) + copy_timeslice( + load, + "PT", + "2016-01-01 00:00", + "2016-01-01 01:00", + Delta(days=-366), + fn_load, + ) + copy_timeslice( + load, + "GB", + "2016-01-01 00:00", + "2016-01-01 01:00", + Delta(days=-366), + fn_load, + ) + + copy_timeslice(load, "BG", "2018-10-27 21:00", "2018-10-28 22:00", Delta(weeks=1)) + copy_timeslice(load, "LU", "2019-01-02 11:00", "2019-01-05 05:00", Delta(weeks=-1)) + copy_timeslice(load, "LU", "2019-02-05 20:00", "2019-02-06 19:00", Delta(weeks=-1)) if "UA" in countries: copy_timeslice( @@ -296,15 +261,15 @@ if __name__ == "__main__": snakemake = mock_snakemake("build_electricity_demand") configure_logging(snakemake) + set_scenario_config(snakemake) - powerstatistics = snakemake.params.load["power_statistics"] interpolate_limit = snakemake.params.load["interpolate_limit"] countries = snakemake.params.countries snapshots = pd.date_range(freq="h", **snakemake.params.snapshots) years = slice(snapshots[0], snapshots[-1]) time_shift = snakemake.params.load["time_shift_for_large_gaps"] - load = load_timeseries(snakemake.input[0], years, countries, powerstatistics) + load = load_timeseries(snakemake.input[0], years, countries) if "UA" in countries: # attach load of UA (best data only for entsoe transparency) @@ -321,7 +286,7 @@ if __name__ == "__main__": load["MD"] = 6.2e6 * (load_ua / load_ua.sum()) if snakemake.params.load["manual_adjustments"]: - load = manual_adjustment(load, snakemake.input[0], powerstatistics, countries) + load = manual_adjustment(load, snakemake.input[0], countries) if load.empty: logger.warning("Build electricity demand time series is empty.") diff --git a/scripts/build_electricity_prices.py 
b/scripts/build_electricity_prices.py index 353ea7e3..f9b964bd 100644 --- a/scripts/build_electricity_prices.py +++ b/scripts/build_electricity_prices.py @@ -1,13 +1,13 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT import logging import pandas as pd -from _helpers import configure_logging +from _helpers import configure_logging, set_scenario_config from entsoe import EntsoePandasClient from entsoe.exceptions import NoMatchingDataError @@ -19,6 +19,7 @@ if __name__ == "__main__": snakemake = mock_snakemake("build_cross_border_flows") configure_logging(snakemake) + set_scenario_config(snakemake) api_key = snakemake.config["private"]["keys"]["entsoe_api"] client = EntsoePandasClient(api_key=api_key) diff --git a/scripts/build_electricity_production.py b/scripts/build_electricity_production.py index beb859bd..b81c6b45 100644 --- a/scripts/build_electricity_production.py +++ b/scripts/build_electricity_production.py @@ -1,13 +1,13 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT import logging import pandas as pd -from _helpers import configure_logging +from _helpers import configure_logging, set_scenario_config from entsoe import EntsoePandasClient from entsoe.exceptions import NoMatchingDataError @@ -39,6 +39,7 @@ if __name__ == "__main__": snakemake = mock_snakemake("build_electricity_production") configure_logging(snakemake) + set_scenario_config(snakemake) api_key = snakemake.config["private"]["keys"]["entsoe_api"] client = EntsoePandasClient(api_key=api_key) @@ -58,7 +59,7 @@ if __name__ == "__main__": gen = client.query_generation(country, start=start, end=end, nett=True) gen = gen.tz_localize(None).resample("1h").mean() gen = 
gen.loc[start.tz_localize(None) : end.tz_localize(None)] - gen = gen.rename(columns=carrier_grouper).groupby(level=0, axis=1).sum() + gen = gen.rename(columns=carrier_grouper).T.groupby(level=0).sum().T generation.append(gen) except NoMatchingDataError: unavailable_countries.append(country) diff --git a/scripts/build_energy_totals.py b/scripts/build_energy_totals.py index 6f9585c1..1ffc4ae2 100644 --- a/scripts/build_energy_totals.py +++ b/scripts/build_energy_totals.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -7,21 +7,19 @@ Build total energy demands per country using JRC IDEES, eurostat, and EEA data. """ import logging - -logger = logging.getLogger(__name__) - import multiprocessing as mp +import os from functools import partial import country_converter as coco import geopandas as gpd import numpy as np import pandas as pd -from _helpers import mute_print +from _helpers import configure_logging, mute_print, set_scenario_config from tqdm import tqdm cc = coco.CountryConverter() - +logger = logging.getLogger(__name__) idx = pd.IndexSlice @@ -39,54 +37,6 @@ def reverse(dictionary): return {v: k for k, v in dictionary.items()} -eurostat_codes = { - "EU28": "EU", - "EA19": "EA", - "Belgium": "BE", - "Bulgaria": "BG", - "Czech Republic": "CZ", - "Denmark": "DK", - "Germany": "DE", - "Estonia": "EE", - "Ireland": "IE", - "Greece": "GR", - "Spain": "ES", - "France": "FR", - "Croatia": "HR", - "Italy": "IT", - "Cyprus": "CY", - "Latvia": "LV", - "Lithuania": "LT", - "Luxembourg": "LU", - "Hungary": "HU", - "Malta": "MA", - "Netherlands": "NL", - "Austria": "AT", - "Poland": "PL", - "Portugal": "PT", - "Romania": "RO", - "Slovenia": "SI", - "Slovakia": "SK", - "Finland": "FI", - "Sweden": "SE", - "United Kingdom": "GB", - "Iceland": "IS", - "Norway": "NO", - "Montenegro": "ME", - "FYR of Macedonia": "MK", - 
"Albania": "AL", - "Serbia": "RS", - "Turkey": "TU", - "Bosnia and Herzegovina": "BA", - "Kosovo\n(UNSCR 1244/99)": "KO", # 2017 version - # 2016 version - "Kosovo\n(under United Nations Security Council Resolution 1244/99)": "KO", - "Moldova": "MO", - "Ukraine": "UK", - "Switzerland": "CH", -} - - idees_rename = {"GR": "EL", "GB": "UK"} eu28 = cc.EU28as("ISO2").ISO2.tolist() @@ -119,39 +69,57 @@ to_ipcc = { } -def build_eurostat(input_eurostat, countries, report_year, year): +def build_eurostat(input_eurostat, countries, year): """ Return multi-index for all countries' energy data in TWh/a. """ - filenames = { - 2016: f"/{year}-Energy-Balances-June2016edition.xlsx", - 2017: f"/{year}-ENERGY-BALANCES-June2017edition.xlsx", - } - - with mute_print(): - dfs = pd.read_excel( - input_eurostat + filenames[report_year], - sheet_name=None, - skiprows=1, + df = {} + countries = {idees_rename.get(country, country) for country in countries} - {"CH"} + for country in countries: + filename = ( + f"{input_eurostat}/{country}-Energy-balance-sheets-April-2023-edition.xlsb" + ) + sheet = pd.read_excel( + filename, + engine="pyxlsb", + sheet_name=str(year), + skiprows=4, index_col=list(range(4)), ) + df[country] = sheet + df = pd.concat(df, axis=0) - # sorted_index necessary for slicing - lookup = eurostat_codes - labelled_dfs = { - lookup[df.columns[0]]: df - for df in dfs.values() - if lookup[df.columns[0]] in countries + # drop columns with all NaNs + unnamed_cols = df.columns[df.columns.astype(str).str.startswith("Unnamed")] + df.drop(unnamed_cols, axis=1, inplace=True) + df.drop(year, axis=1, inplace=True) + + # make numeric values where possible + df.replace("Z", 0, inplace=True) + df = df.apply(pd.to_numeric, errors="coerce") + df = df.select_dtypes(include=[np.number]) + + # write 'International aviation' to the 2nd level of the multiindex + int_avia = df.index.get_level_values(2) == "International aviation" + temp = df.loc[int_avia] + temp.index = pd.MultiIndex.from_frame( 
+ temp.index.to_frame().fillna("International aviation") + ) + df = pd.concat([temp, df.loc[~int_avia]]) + + # Renaming some indices + index_rename = { + "Households": "Residential", + "Commercial & public services": "Services", + "Domestic navigation": "Domestic Navigation", + "International maritime bunkers": "Bunkers", } - df = pd.concat(labelled_dfs, sort=True).sort_index() + columns_rename = {"Total": "Total all products", "UK": "GB"} + df.rename(index=index_rename, columns=columns_rename, inplace=True) + df.sort_index(inplace=True) + df.index.names = [None] * len(df.index.names) - # drop non-numeric and country columns - non_numeric_cols = df.columns[df.dtypes != float] - country_cols = df.columns.intersection(lookup.keys()) - to_drop = non_numeric_cols.union(country_cols) - df.drop(to_drop, axis=1, inplace=True) - - # convert ktoe/a to TWh/a + # convert to TWh/a from ktoe/a df *= 11.63 / 1e3 return df @@ -189,12 +157,12 @@ def idees_per_country(ct, year, base_dir): ct_totals["total residential water"] = df.at["Water heating"] assert df.index[23] == "Electricity" - ct_totals["electricity residential water"] = df[23] + ct_totals["electricity residential water"] = df.iloc[23] ct_totals["total residential cooking"] = df["Cooking"] assert df.index[30] == "Electricity" - ct_totals["electricity residential cooking"] = df[30] + ct_totals["electricity residential cooking"] = df.iloc[30] df = pd.read_excel(fn_residential, "RES_summary", index_col=0)[year] @@ -202,13 +170,13 @@ def idees_per_country(ct, year, base_dir): ct_totals["total residential"] = df[row] assert df.index[47] == "Electricity" - ct_totals["electricity residential"] = df[47] + ct_totals["electricity residential"] = df.iloc[47] assert df.index[46] == "Derived heat" - ct_totals["derived heat residential"] = df[46] + ct_totals["derived heat residential"] = df.iloc[46] assert df.index[50] == "Thermal uses" - ct_totals["thermal uses residential"] = df[50] + ct_totals["thermal uses residential"] = 
df.iloc[50] # services @@ -222,12 +190,12 @@ def idees_per_country(ct, year, base_dir): ct_totals["total services water"] = df["Hot water"] assert df.index[24] == "Electricity" - ct_totals["electricity services water"] = df[24] + ct_totals["electricity services water"] = df.iloc[24] ct_totals["total services cooking"] = df["Catering"] assert df.index[31] == "Electricity" - ct_totals["electricity services cooking"] = df[31] + ct_totals["electricity services cooking"] = df.iloc[31] df = pd.read_excel(fn_tertiary, "SER_summary", index_col=0)[year] @@ -235,13 +203,13 @@ def idees_per_country(ct, year, base_dir): ct_totals["total services"] = df[row] assert df.index[50] == "Electricity" - ct_totals["electricity services"] = df[50] + ct_totals["electricity services"] = df.iloc[50] assert df.index[49] == "Derived heat" - ct_totals["derived heat services"] = df[49] + ct_totals["derived heat services"] = df.iloc[49] assert df.index[53] == "Thermal uses" - ct_totals["thermal uses services"] = df[53] + ct_totals["thermal uses services"] = df.iloc[53] # agriculture, forestry and fishing @@ -282,28 +250,28 @@ def idees_per_country(ct, year, base_dir): ct_totals["total two-wheel"] = df["Powered 2-wheelers (Gasoline)"] assert df.index[19] == "Passenger cars" - ct_totals["total passenger cars"] = df[19] + ct_totals["total passenger cars"] = df.iloc[19] assert df.index[30] == "Battery electric vehicles" - ct_totals["electricity passenger cars"] = df[30] + ct_totals["electricity passenger cars"] = df.iloc[30] assert df.index[31] == "Motor coaches, buses and trolley buses" - ct_totals["total other road passenger"] = df[31] + ct_totals["total other road passenger"] = df.iloc[31] assert df.index[39] == "Battery electric vehicles" - ct_totals["electricity other road passenger"] = df[39] + ct_totals["electricity other road passenger"] = df.iloc[39] assert df.index[41] == "Light duty vehicles" - ct_totals["total light duty road freight"] = df[41] + ct_totals["total light duty road 
freight"] = df.iloc[41] assert df.index[49] == "Battery electric vehicles" - ct_totals["electricity light duty road freight"] = df[49] + ct_totals["electricity light duty road freight"] = df.iloc[49] row = "Heavy duty vehicles (Diesel oil incl. biofuels)" ct_totals["total heavy duty road freight"] = df[row] assert df.index[61] == "Passenger cars" - ct_totals["passenger car efficiency"] = df[61] + ct_totals["passenger car efficiency"] = df.iloc[61] df = pd.read_excel(fn_transport, "TrRail_ene", index_col=0)[year] @@ -312,39 +280,39 @@ def idees_per_country(ct, year, base_dir): ct_totals["electricity rail"] = df["Electricity"] assert df.index[15] == "Passenger transport" - ct_totals["total rail passenger"] = df[15] + ct_totals["total rail passenger"] = df.iloc[15] assert df.index[16] == "Metro and tram, urban light rail" assert df.index[19] == "Electric" assert df.index[20] == "High speed passenger trains" - ct_totals["electricity rail passenger"] = df[[16, 19, 20]].sum() + ct_totals["electricity rail passenger"] = df.iloc[[16, 19, 20]].sum() assert df.index[21] == "Freight transport" - ct_totals["total rail freight"] = df[21] + ct_totals["total rail freight"] = df.iloc[21] assert df.index[23] == "Electric" - ct_totals["electricity rail freight"] = df[23] + ct_totals["electricity rail freight"] = df.iloc[23] df = pd.read_excel(fn_transport, "TrAvia_ene", index_col=0)[year] assert df.index[6] == "Passenger transport" - ct_totals["total aviation passenger"] = df[6] + ct_totals["total aviation passenger"] = df.iloc[6] assert df.index[10] == "Freight transport" - ct_totals["total aviation freight"] = df[10] + ct_totals["total aviation freight"] = df.iloc[10] assert df.index[7] == "Domestic" - ct_totals["total domestic aviation passenger"] = df[7] + ct_totals["total domestic aviation passenger"] = df.iloc[7] assert df.index[8] == "International - Intra-EU" assert df.index[9] == "International - Extra-EU" - ct_totals["total international aviation passenger"] = df[[8, 
9]].sum() + ct_totals["total international aviation passenger"] = df.iloc[[8, 9]].sum() assert df.index[11] == "Domestic and International - Intra-EU" - ct_totals["total domestic aviation freight"] = df[11] + ct_totals["total domestic aviation freight"] = df.iloc[11] assert df.index[12] == "International - Extra-EU" - ct_totals["total international aviation freight"] = df[12] + ct_totals["total international aviation freight"] = df.iloc[12] ct_totals["total domestic aviation"] = ( ct_totals["total domestic aviation freight"] @@ -364,7 +332,7 @@ def idees_per_country(ct, year, base_dir): df = pd.read_excel(fn_transport, "TrRoad_act", index_col=0)[year] assert df.index[85] == "Passenger cars" - ct_totals["passenger cars"] = df[85] + ct_totals["passenger cars"] = df.iloc[85] return pd.Series(ct_totals, name=ct) @@ -394,13 +362,6 @@ def build_idees(countries, year): # convert TWh/100km to kWh/km totals.loc["passenger car efficiency"] *= 10 - # district heating share - district_heat = totals.loc[ - ["derived heat residential", "derived heat services"] - ].sum() - total_heat = totals.loc[["thermal uses residential", "thermal uses services"]].sum() - totals.loc["district heat share"] = district_heat.div(total_heat) - return totals.T @@ -479,7 +440,7 @@ def build_energy_totals(countries, eurostat, swiss, idees): # The main heating source for about 73 per cent of the households is based on electricity # => 26% is non-electric - if "NO" in df: + if "NO" in df.index: elec_fraction = 0.73 no_norway = df.drop("NO") @@ -575,16 +536,36 @@ def build_energy_totals(countries, eurostat, swiss, idees): ratio = df.at["BA", "total residential"] / df.at["RS", "total residential"] df.loc["BA", missing] = ratio * df.loc["RS", missing] + return df + + +def build_district_heat_share(countries, idees): + # district heating share + district_heat = idees[["derived heat residential", "derived heat services"]].sum( + axis=1 + ) + total_heat = idees[["thermal uses residential", "thermal uses 
services"]].sum( + axis=1 + ) + + district_heat_share = district_heat / total_heat + + district_heat_share = district_heat_share.reindex(countries) + # Missing district heating share - dh_share = pd.read_csv( - snakemake.input.district_heat_share, index_col=0, usecols=[0, 1] + dh_share = ( + pd.read_csv(snakemake.input.district_heat_share, index_col=0, usecols=[0, 1]) + .div(100) + .squeeze() ) # make conservative assumption and take minimum from both data sets - df["district heat share"] = pd.concat( - [df["district heat share"], dh_share.reindex(index=df.index) / 100], axis=1 + district_heat_share = pd.concat( + [district_heat_share, dh_share.reindex_like(district_heat_share)], axis=1 ).min(axis=1) - return df + district_heat_share.name = "district heat share" + + return district_heat_share def build_eea_co2(input_co2, year=1990, emissions_scope="CO2"): @@ -641,8 +622,8 @@ def build_eea_co2(input_co2, year=1990, emissions_scope="CO2"): return emissions / 1e3 -def build_eurostat_co2(input_eurostat, countries, report_year, year=1990): - eurostat = build_eurostat(input_eurostat, countries, report_year, year) +def build_eurostat_co2(input_eurostat, countries, year=1990): + eurostat = build_eurostat(input_eurostat, countries, year) specific_emissions = pd.Series(index=eurostat.columns, dtype=float) @@ -664,12 +645,7 @@ def build_co2_totals(countries, eea_co2, eurostat_co2): for ct in pd.Index(countries).intersection(["BA", "RS", "AL", "ME", "MK"]): mappings = { - "electricity": ( - ct, - "+", - "Conventional Thermal Power Stations", - "of which From Coal", - ), + "electricity": (ct, "+", "Electricity & heat generation", np.nan), "residential non-elec": (ct, "+", "+", "Residential"), "services non-elec": (ct, "+", "+", "Services"), "road non-elec": (ct, "+", "+", "Road"), @@ -677,12 +653,12 @@ def build_co2_totals(countries, eea_co2, eurostat_co2): "domestic navigation": (ct, "+", "+", "Domestic Navigation"), "international navigation": (ct, "-", "Bunkers"), "domestic 
aviation": (ct, "+", "+", "Domestic aviation"), - "international aviation": (ct, "+", "+", "International aviation"), + "international aviation": (ct, "-", "International aviation"), # does not include industrial process emissions or fuel processing/refining - "industrial non-elec": (ct, "+", "Industry"), + "industrial non-elec": (ct, "+", "Industry sector"), # does not include non-energy emissions "agriculture": (eurostat_co2.index.get_level_values(0) == ct) - & eurostat_co2.index.isin(["Agriculture / Forestry", "Fishing"], level=3), + & eurostat_co2.index.isin(["Agriculture & forestry", "Fishing"], level=3), } for i, mi in mappings.items(): @@ -727,13 +703,139 @@ def build_transport_data(countries, population, idees): return transport_data +def rescale_idees_from_eurostat( + idees_countries, energy, eurostat, input_eurostat, countries +): + """ + Takes JRC IDEES data from 2015 and rescales it by the ratio of the eurostat + data and the 2015 eurostat data. + + missing data: ['passenger car efficiency', 'passenger cars'] + """ + main_cols = ["Total all products", "Electricity"] + # read in the eurostat data for 2015 + eurostat_2015 = build_eurostat(input_eurostat, countries, 2015)[main_cols] + eurostat_year = eurostat[main_cols] + # calculate the ratio of the two data sets + ratio = eurostat_year / eurostat_2015 + ratio = ratio.droplevel([1, 4]) + cols_rename = {"Total all products": "total", "Electricity": "ele"} + index_rename = {v: k for k, v in idees_rename.items()} + ratio.rename(columns=cols_rename, index=index_rename, inplace=True) + + mappings = { + "Residential": { + "total": [ + "total residential space", + "total residential water", + "total residential cooking", + "total residential", + "derived heat residential", + "thermal uses residential", + ], + "elec": [ + "electricity residential space", + "electricity residential water", + "electricity residential cooking", + "electricity residential", + ], + }, + "Services": { + "total": [ + "total services 
space", + "total services water", + "total services cooking", + "total services", + "derived heat services", + "thermal uses services", + ], + "elec": [ + "electricity services space", + "electricity services water", + "electricity services cooking", + "electricity services", + ], + }, + "Agriculture & forestry": { + "total": [ + "total agriculture heat", + "total agriculture machinery", + "total agriculture", + ], + "elec": [ + "total agriculture electricity", + ], + }, + "Road": { + "total": [ + "total road", + "total passenger cars", + "total other road passenger", + "total light duty road freight", + ], + "elec": [ + "electricity road", + "electricity passenger cars", + "electricity other road passenger", + "electricity light duty road freight", + ], + }, + "Rail": { + "total": [ + "total rail", + "total rail passenger", + "total rail freight", + ], + "elec": [ + "electricity rail", + "electricity rail passenger", + "electricity rail freight", + ], + }, + } + + avia_inter = [ + "total aviation passenger", + "total aviation freight", + "total international aviation passenger", + "total international aviation freight", + "total international aviation", + ] + avia_domestic = [ + "total domestic aviation passenger", + "total domestic aviation freight", + "total domestic aviation", + ] + navigation = [ + "total domestic navigation", + ] + + for country in idees_countries: + for sector, mapping in mappings.items(): + sector_ratio = ratio.loc[(country, slice(None), sector)] + + energy.loc[country, mapping["total"]] *= sector_ratio["total"].iloc[0] + energy.loc[country, mapping["elec"]] *= sector_ratio["ele"].iloc[0] + + avi_d = ratio.loc[(country, slice(None), "Domestic aviation"), "total"] + avi_i = ratio.loc[(country, "International aviation", slice(None)), "total"] + energy.loc[country, avia_inter] *= avi_i.iloc[0] + energy.loc[country, avia_domestic] *= avi_d.iloc[0] + + nav = ratio.loc[(country, slice(None), "Domestic Navigation"), "total"] + energy.loc[country, 
navigation] *= nav.iloc[0] + + return energy + + if __name__ == "__main__": if "snakemake" not in globals(): from _helpers import mock_snakemake snakemake = mock_snakemake("build_energy_totals") - logging.basicConfig(level=snakemake.config["logging"]["level"]) + configure_logging(snakemake) + set_scenario_config(snakemake) params = snakemake.params.energy @@ -744,21 +846,32 @@ if __name__ == "__main__": idees_countries = pd.Index(countries).intersection(eu28) data_year = params["energy_totals_year"] - report_year = snakemake.params.energy["eurostat_report_year"] input_eurostat = snakemake.input.eurostat - eurostat = build_eurostat(input_eurostat, countries, report_year, data_year) + eurostat = build_eurostat(input_eurostat, countries, data_year) swiss = build_swiss(data_year) - idees = build_idees(idees_countries, data_year) + # data from idees only exists from 2000-2015. read in latest data and rescale later + idees = build_idees(idees_countries, min(2015, data_year)) energy = build_energy_totals(countries, eurostat, swiss, idees) + + if data_year > 2015: + logger.info("Data year is after 2015. 
Rescaling IDEES data based on eurostat.") + energy = rescale_idees_from_eurostat( + idees_countries, energy, eurostat, input_eurostat, countries + ) + energy.to_csv(snakemake.output.energy_name) + # use rescaled idees data to calculate district heat share + district_heat_share = build_district_heat_share( + countries, energy.loc[idees_countries] + ) + district_heat_share.to_csv(snakemake.output.district_heat_share) + base_year_emissions = params["base_emissions_year"] emissions_scope = snakemake.params.energy["emissions"] eea_co2 = build_eea_co2(snakemake.input.co2, base_year_emissions, emissions_scope) - eurostat_co2 = build_eurostat_co2( - input_eurostat, countries, report_year, base_year_emissions - ) + eurostat_co2 = build_eurostat_co2(input_eurostat, countries, base_year_emissions) co2 = build_co2_totals(countries, eea_co2, eurostat_co2) co2.to_csv(snakemake.output.co2_name) diff --git a/scripts/build_existing_heating_distribution.py b/scripts/build_existing_heating_distribution.py new file mode 100644 index 00000000..eb2361c2 --- /dev/null +++ b/scripts/build_existing_heating_distribution.py @@ -0,0 +1,132 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Builds table of existing heat generation capacities for initial planning +horizon. 
+""" +import country_converter as coco +import numpy as np +import pandas as pd +from _helpers import set_scenario_config + +cc = coco.CountryConverter() + + +def build_existing_heating(): + # retrieve existing heating capacities + + # Add existing heating capacities, data comes from the study + # "Mapping and analyses of the current and future (2020 - 2030) + # heating/cooling fuel deployment (fossil/renewables) " + # https://energy.ec.europa.eu/publications/mapping-and-analyses-current-and-future-2020-2030-heatingcooling-fuel-deployment-fossilrenewables-1_en + # file: "WP2_DataAnnex_1_BuildingTechs_ForPublication_201603.xls" -> "existing_heating_raw.csv". + # data is for buildings only (i.e. NOT district heating) and represents the year 2012 + # TODO start from original file + + existing_heating = pd.read_csv( + snakemake.input.existing_heating, index_col=0, header=0 + ) + + # data for Albania, Montenegro and Macedonia not included in database + existing_heating.loc["Albania"] = np.nan + existing_heating.loc["Montenegro"] = np.nan + existing_heating.loc["Macedonia"] = np.nan + + existing_heating.fillna(0.0, inplace=True) + + # convert GW to MW + existing_heating *= 1e3 + + existing_heating.index = cc.convert(existing_heating.index, to="iso2") + + # coal and oil boilers are assimilated to oil boilers + existing_heating["oil boiler"] = ( + existing_heating["oil boiler"] + existing_heating["coal boiler"] + ) + existing_heating.drop(["coal boiler"], axis=1, inplace=True) + + # distribute technologies to nodes by population + pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) + + nodal_heating = existing_heating.loc[pop_layout.ct] + nodal_heating.index = pop_layout.index + nodal_heating = nodal_heating.multiply(pop_layout.fraction, axis=0) + + district_heat_info = pd.read_csv(snakemake.input.district_heat_share, index_col=0) + dist_fraction = district_heat_info["district fraction of node"] + urban_fraction = district_heat_info["urban 
fraction"] + + energy_layout = pd.read_csv( + snakemake.input.clustered_pop_energy_layout, index_col=0 + ) + + uses = ["space", "water"] + sectors = ["residential", "services"] + + nodal_sectoral_totals = pd.DataFrame(dtype=float) + + for sector in sectors: + nodal_sectoral_totals[sector] = energy_layout[ + [f"total {sector} {use}" for use in uses] + ].sum(axis=1) + + nodal_sectoral_fraction = nodal_sectoral_totals.div( + nodal_sectoral_totals.sum(axis=1), axis=0 + ) + + nodal_heat_name_fraction = pd.DataFrame(index=district_heat_info.index, dtype=float) + + nodal_heat_name_fraction["urban central"] = 0.0 + + for sector in sectors: + nodal_heat_name_fraction[f"{sector} rural"] = nodal_sectoral_fraction[ + sector + ] * (1 - urban_fraction) + nodal_heat_name_fraction[f"{sector} urban decentral"] = ( + nodal_sectoral_fraction[sector] * urban_fraction + ) + + nodal_heat_name_tech = pd.concat( + { + name: nodal_heating.multiply(nodal_heat_name_fraction[name], axis=0) + for name in nodal_heat_name_fraction.columns + }, + axis=1, + names=["heat name", "technology"], + ) + + # move all ground HPs to rural, all air to urban + + for sector in sectors: + nodal_heat_name_tech[(f"{sector} rural", "ground heat pump")] += ( + nodal_heat_name_tech[("urban central", "ground heat pump")] + * nodal_sectoral_fraction[sector] + + nodal_heat_name_tech[(f"{sector} urban decentral", "ground heat pump")] + ) + nodal_heat_name_tech[(f"{sector} urban decentral", "ground heat pump")] = 0.0 + + nodal_heat_name_tech[ + (f"{sector} urban decentral", "air heat pump") + ] += nodal_heat_name_tech[(f"{sector} rural", "air heat pump")] + nodal_heat_name_tech[(f"{sector} rural", "air heat pump")] = 0.0 + + nodal_heat_name_tech[("urban central", "ground heat pump")] = 0.0 + + nodal_heat_name_tech.to_csv(snakemake.output.existing_heating_distribution) + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + 
"build_existing_heating_distribution", + simpl="", + clusters=48, + planning_horizons=2050, + ) + set_scenario_config(snakemake) + + build_existing_heating() diff --git a/scripts/build_gas_input_locations.py b/scripts/build_gas_input_locations.py index a3b945ab..6543d950 100644 --- a/scripts/build_gas_input_locations.py +++ b/scripts/build_gas_input_locations.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2021-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2021-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -9,12 +9,13 @@ production sites with data from SciGRID_gas and Global Energy Monitor. import logging -logger = logging.getLogger(__name__) - import geopandas as gpd import pandas as pd +from _helpers import configure_logging, set_scenario_config from cluster_gas_network import load_bus_regions +logger = logging.getLogger(__name__) + def read_scigrid_gas(fn): df = gpd.read_file(fn) @@ -23,13 +24,15 @@ def read_scigrid_gas(fn): return df -def build_gem_lng_data(lng_fn): - df = pd.read_excel(lng_fn[0], sheet_name="LNG terminals - data") +def build_gem_lng_data(fn): + df = pd.read_excel(fn[0], sheet_name="LNG terminals - data") df = df.set_index("ComboID") - remove_status = ["Cancelled"] - remove_country = ["Cyprus", "Turkey"] - remove_terminal = ["Puerto de la Luz LNG Terminal", "Gran Canaria LNG Terminal"] + remove_country = ["Cyprus", "Turkey"] # noqa: F841 + remove_terminal = [ # noqa: F841 + "Puerto de la Luz LNG Terminal", + "Gran Canaria LNG Terminal", + ] df = df.query( "Status != 'Cancelled' \ @@ -42,9 +45,50 @@ def build_gem_lng_data(lng_fn): return gpd.GeoDataFrame(df, geometry=geometry, crs="EPSG:4326") -def build_gas_input_locations(lng_fn, entry_fn, prod_fn, countries): +def build_gem_prod_data(fn): + df = pd.read_excel(fn[0], sheet_name="Gas extraction - main") + df = df.set_index("GEM Unit ID") + + remove_country = ["Cyprus", "Türkiye"] # noqa: F841 + remove_fuel_type = ["oil"] # noqa: F841 + + df 
= df.query( + "Status != 'shut in' \ + & 'Fuel type' != 'oil' \ + & Country != @remove_country \ + & ~Latitude.isna() \ + & ~Longitude.isna()" + ).copy() + + p = pd.read_excel(fn[0], sheet_name="Gas extraction - production") + p = p.set_index("GEM Unit ID") + p = p[p["Fuel description"] == "gas"] + + capacities = pd.DataFrame(index=df.index) + for key in ["production", "production design capacity", "reserves"]: + cap = ( + p.loc[p["Production/reserves"] == key, "Quantity (converted)"] + .groupby("GEM Unit ID") + .sum() + .reindex(df.index) + ) + # assume capacity such that 3% of reserves can be extracted per year (25% quantile) + annualization_factor = 0.03 if key == "reserves" else 1.0 + capacities[key] = cap * annualization_factor + + df["mcm_per_year"] = ( + capacities["production"] + .combine_first(capacities["production design capacity"]) + .combine_first(capacities["reserves"]) + ) + + geometry = gpd.points_from_xy(df["Longitude"], df["Latitude"]) + return gpd.GeoDataFrame(df, geometry=geometry, crs="EPSG:4326") + + +def build_gas_input_locations(gem_fn, entry_fn, sto_fn, countries): # LNG terminals - lng = build_gem_lng_data(lng_fn) + lng = build_gem_lng_data(gem_fn) # Entry points from outside the model scope entry = read_scigrid_gas(entry_fn) @@ -55,25 +99,30 @@ def build_gas_input_locations(lng_fn, entry_fn, prod_fn, countries): | (entry.from_country == "NO") # malformed datapoint # entries from NO to GB ] + sto = read_scigrid_gas(sto_fn) + remove_country = ["RU", "UA", "TR", "BY"] # noqa: F841 + sto = sto.query("country_code not in @remove_country") + # production sites inside the model scope - prod = read_scigrid_gas(prod_fn) - prod = prod.loc[ - (prod.geometry.y > 35) & (prod.geometry.x < 30) & (prod.country_code != "DE") - ] + prod = build_gem_prod_data(gem_fn) mcm_per_day_to_mw = 437.5 # MCM/day to MWh/h + mcm_per_year_to_mw = 1.199 # MCM/year to MWh/h mtpa_to_mw = 1649.224 # mtpa to MWh/h - lng["p_nom"] = lng["CapacityInMtpa"] * mtpa_to_mw - 
entry["p_nom"] = entry["max_cap_from_to_M_m3_per_d"] * mcm_per_day_to_mw - prod["p_nom"] = prod["max_supply_M_m3_per_d"] * mcm_per_day_to_mw + mcm_to_gwh = 11.36 # MCM to GWh + lng["capacity"] = lng["CapacityInMtpa"] * mtpa_to_mw + entry["capacity"] = entry["max_cap_from_to_M_m3_per_d"] * mcm_per_day_to_mw + prod["capacity"] = prod["mcm_per_year"] * mcm_per_year_to_mw + sto["capacity"] = sto["max_cushionGas_M_m3"] * mcm_to_gwh lng["type"] = "lng" entry["type"] = "pipeline" prod["type"] = "production" + sto["type"] = "storage" - sel = ["geometry", "p_nom", "type"] + sel = ["geometry", "capacity", "type"] - return pd.concat([prod[sel], entry[sel], lng[sel]], ignore_index=True) + return pd.concat([prod[sel], entry[sel], lng[sel], sto[sel]], ignore_index=True) if __name__ == "__main__": @@ -83,10 +132,11 @@ if __name__ == "__main__": snakemake = mock_snakemake( "build_gas_input_locations", simpl="", - clusters="37", + clusters="128", ) - logging.basicConfig(level=snakemake.config["logging"]["level"]) + configure_logging(snakemake) + set_scenario_config(snakemake) regions = load_bus_regions( snakemake.input.regions_onshore, snakemake.input.regions_offshore @@ -104,9 +154,9 @@ if __name__ == "__main__": countries = regions.index.str[:2].unique().str.replace("GB", "UK") gas_input_locations = build_gas_input_locations( - snakemake.input.lng, + snakemake.input.gem, snakemake.input.entry, - snakemake.input.production, + snakemake.input.storage, countries, ) @@ -116,9 +166,13 @@ if __name__ == "__main__": gas_input_nodes.to_file(snakemake.output.gas_input_nodes, driver="GeoJSON") + ensure_columns = ["lng", "pipeline", "production", "storage"] gas_input_nodes_s = ( - gas_input_nodes.groupby(["bus", "type"])["p_nom"].sum().unstack() + gas_input_nodes.groupby(["bus", "type"])["capacity"] + .sum() + .unstack() + .reindex(columns=ensure_columns) ) - gas_input_nodes_s.columns.name = "p_nom" + gas_input_nodes_s.columns.name = "capacity" 
gas_input_nodes_s.to_csv(snakemake.output.gas_input_nodes_simplified) diff --git a/scripts/build_gas_network.py b/scripts/build_gas_network.py index 92e686cd..5e9a5c9a 100644 --- a/scripts/build_gas_network.py +++ b/scripts/build_gas_network.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -9,13 +9,14 @@ Preprocess gas network based on data from bthe SciGRID_gas project import logging -logger = logging.getLogger(__name__) - import geopandas as gpd import pandas as pd +from _helpers import configure_logging, set_scenario_config from pypsa.geo import haversine_pts from shapely.geometry import Point +logger = logging.getLogger(__name__) + def diameter_to_capacity(pipe_diameter_mm): """ @@ -114,12 +115,10 @@ def prepare_dataset( df["p_nom_diameter"] = df.diameter_mm.apply(diameter_to_capacity) ratio = df.p_nom / df.p_nom_diameter not_nordstream = df.max_pressure_bar < 220 - df.p_nom.update( - df.p_nom_diameter.where( - (df.p_nom <= 500) - | ((ratio > correction_threshold_p_nom) & not_nordstream) - | ((ratio < 1 / correction_threshold_p_nom) & not_nordstream) - ) + df["p_nom"] = df.p_nom_diameter.where( + (df.p_nom <= 500) + | ((ratio > correction_threshold_p_nom) & not_nordstream) + | ((ratio < 1 / correction_threshold_p_nom) & not_nordstream) ) # lines which have way too discrepant line lengths @@ -130,12 +129,10 @@ def prepare_dataset( axis=1, ) ratio = df.eval("length / length_haversine") - df["length"].update( - df.length_haversine.where( - (df["length"] < 20) - | (ratio > correction_threshold_length) - | (ratio < 1 / correction_threshold_length) - ) + df["length"] = df.length_haversine.where( + (df["length"] < 20) + | (ratio > correction_threshold_length) + | (ratio < 1 / correction_threshold_length) ) return df @@ -147,7 +144,8 @@ if __name__ == "__main__": snakemake = mock_snakemake("build_gas_network") - 
logging.basicConfig(level=snakemake.config["logging"]["level"]) + configure_logging(snakemake) + set_scenario_config(snakemake) gas_network = load_dataset(snakemake.input.gas_network) diff --git a/scripts/build_hourly_heat_demand.py b/scripts/build_hourly_heat_demand.py new file mode 100644 index 00000000..a1dacd51 --- /dev/null +++ b/scripts/build_hourly_heat_demand.py @@ -0,0 +1,64 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Build hourly heat demand time series from daily ones. +""" + +from itertools import product + +import pandas as pd +import xarray as xr +from _helpers import generate_periodic_profiles, set_scenario_config + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "build_hourly_heat_demands", + scope="total", + simpl="", + clusters=48, + ) + set_scenario_config(snakemake) + + snapshots = pd.date_range(freq="h", **snakemake.params.snapshots) + + daily_space_heat_demand = ( + xr.open_dataarray(snakemake.input.heat_demand) + .to_pandas() + .reindex(index=snapshots, method="ffill") + ) + + intraday_profiles = pd.read_csv(snakemake.input.heat_profile, index_col=0) + + sectors = ["residential", "services"] + uses = ["water", "space"] + + heat_demand = {} + for sector, use in product(sectors, uses): + weekday = list(intraday_profiles[f"{sector} {use} weekday"]) + weekend = list(intraday_profiles[f"{sector} {use} weekend"]) + weekly_profile = weekday * 5 + weekend * 2 + intraday_year_profile = generate_periodic_profiles( + daily_space_heat_demand.index.tz_localize("UTC"), + nodes=daily_space_heat_demand.columns, + weekly_profile=weekly_profile, + ) + + if use == "space": + heat_demand[f"{sector} {use}"] = ( + daily_space_heat_demand * intraday_year_profile + ) + else: + heat_demand[f"{sector} {use}"] = intraday_year_profile + + heat_demand = pd.concat(heat_demand, axis=1, names=["sector 
use", "node"]) + + heat_demand.index.name = "snapshots" + + ds = heat_demand.stack().to_xarray() + + ds.to_netcdf(snakemake.output.heat_demand) diff --git a/scripts/build_hydro_profile.py b/scripts/build_hydro_profile.py index 65cc22b7..933feee8 100644 --- a/scripts/build_hydro_profile.py +++ b/scripts/build_hydro_profile.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -65,7 +65,7 @@ import atlite import country_converter as coco import geopandas as gpd import pandas as pd -from _helpers import configure_logging +from _helpers import configure_logging, set_scenario_config cc = coco.CountryConverter() @@ -131,6 +131,7 @@ if __name__ == "__main__": snakemake = mock_snakemake("build_hydro_profile") configure_logging(snakemake) + set_scenario_config(snakemake) params_hydro = snakemake.params.hydro cutout = atlite.Cutout(snakemake.input.cutout) diff --git a/scripts/build_industrial_distribution_key.py b/scripts/build_industrial_distribution_key.py index b86d47c2..7cba0af5 100644 --- a/scripts/build_industrial_distribution_key.py +++ b/scripts/build_industrial_distribution_key.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -7,17 +7,15 @@ Build spatial distribution of industries from Hotmaps database. 
""" import logging - -logger = logging.getLogger(__name__) - import uuid from itertools import product import country_converter as coco import geopandas as gpd import pandas as pd -from packaging.version import Version, parse +from _helpers import configure_logging, set_scenario_config +logger = logging.getLogger(__name__) cc = coco.CountryConverter() @@ -32,7 +30,7 @@ def locate_missing_industrial_sites(df): try: from geopy.extra.rate_limiter import RateLimiter from geopy.geocoders import Nominatim - except: + except ImportError: raise ModuleNotFoundError( "Optional dependency 'geopy' not found." "Install via 'conda install -c conda-forge geopy'" @@ -86,12 +84,7 @@ def prepare_hotmaps_database(regions): gdf = gpd.GeoDataFrame(df, geometry="coordinates", crs="EPSG:4326") - kws = ( - dict(op="within") - if parse(gpd.__version__) < Version("0.10") - else dict(predicate="within") - ) - gdf = gpd.sjoin(gdf, regions, how="inner", **kws) + gdf = gpd.sjoin(gdf, regions, how="inner", predicate="within") gdf.rename(columns={"index_right": "bus"}, inplace=True) gdf["country"] = gdf.bus.str[:2] @@ -101,7 +94,7 @@ def prepare_hotmaps_database(regions): # get all duplicated entries duplicated_i = gdf.index[gdf.index.duplicated()] # convert from raw data country name to iso-2-code - code = cc.convert(gdf.loc[duplicated_i, "Country"], to="iso2") + code = cc.convert(gdf.loc[duplicated_i, "Country"], to="iso2") # noqa: F841 # screen out malformed country allocation gdf_filtered = gdf.loc[duplicated_i].query("country == @code") # concat not duplicated and filtered gdf @@ -130,7 +123,7 @@ def build_nodal_distribution_key(hotmaps, regions, countries): if not facilities.empty: emissions = facilities["Emissions_ETS_2014"].fillna( - hotmaps["Emissions_EPRTR_2014"] + hotmaps["Emissions_EPRTR_2014"].dropna() ) if emissions.sum() == 0: key = pd.Series(1 / len(facilities), facilities.index) @@ -156,8 +149,8 @@ if __name__ == "__main__": simpl="", clusters=128, ) - - 
logging.basicConfig(level=snakemake.config["logging"]["level"]) + configure_logging(snakemake) + set_scenario_config(snakemake) countries = snakemake.params.countries diff --git a/scripts/build_industrial_energy_demand_per_country_today.py b/scripts/build_industrial_energy_demand_per_country_today.py index d1c672f1..8129177a 100644 --- a/scripts/build_industrial_energy_demand_per_country_today.py +++ b/scripts/build_industrial_energy_demand_per_country_today.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -11,6 +11,7 @@ from functools import partial import country_converter as coco import pandas as pd +from _helpers import set_scenario_config from tqdm import tqdm cc = coco.CountryConverter() @@ -73,7 +74,7 @@ def industrial_energy_demand_per_country(country, year, jrc_dir): def get_subsector_data(sheet): df = df_dict[sheet][year].groupby(fuels).sum() - df["ammonia"] = 0.0 + df["hydrogen"] = 0.0 df["other"] = df["all"] - df.loc[df.index != "all"].sum() @@ -94,51 +95,50 @@ def industrial_energy_demand_per_country(country, year, jrc_dir): return df -def add_ammonia_energy_demand(demand): - # MtNH3/a - fn = snakemake.input.ammonia_production - ammonia = pd.read_csv(fn, index_col=0)[str(year)] / 1e3 +def separate_basic_chemicals(demand, production): - def get_ammonia_by_fuel(x): - fuels = { - "gas": params["MWh_CH4_per_tNH3_SMR"], - "electricity": params["MWh_elec_per_tNH3_SMR"], + ammonia = pd.DataFrame( + { + "hydrogen": production["Ammonia"] * params["MWh_H2_per_tNH3_electrolysis"], + "electricity": production["Ammonia"] + * params["MWh_elec_per_tNH3_electrolysis"], } - - return pd.Series({k: x * v for k, v in fuels.items()}) - - ammonia_by_fuel = ammonia.apply(get_ammonia_by_fuel).T - ammonia_by_fuel = ammonia_by_fuel.unstack().reindex( - index=demand.index, fill_value=0.0 - ) - - ammonia = 
pd.DataFrame({"ammonia": ammonia * params["MWh_NH3_per_tNH3"]}).T + ).T + chlorine = pd.DataFrame( + { + "hydrogen": production["Chlorine"] * params["MWh_H2_per_tCl"], + "electricity": production["Chlorine"] * params["MWh_elec_per_tCl"], + } + ).T + methanol = pd.DataFrame( + { + "gas": production["Methanol"] * params["MWh_CH4_per_tMeOH"], + "electricity": production["Methanol"] * params["MWh_elec_per_tMeOH"], + } + ).T demand["Ammonia"] = ammonia.unstack().reindex(index=demand.index, fill_value=0.0) + demand["Chlorine"] = chlorine.unstack().reindex(index=demand.index, fill_value=0.0) + demand["Methanol"] = methanol.unstack().reindex(index=demand.index, fill_value=0.0) - demand["Basic chemicals (without ammonia)"] = ( - demand["Basic chemicals"] - ammonia_by_fuel + demand["HVC"] = ( + demand["Basic chemicals"] + - demand["Ammonia"] + - demand["Methanol"] + - demand["Chlorine"] ) - demand["Basic chemicals (without ammonia)"].clip(lower=0, inplace=True) - demand.drop(columns="Basic chemicals", inplace=True) + demand["HVC"].clip(lower=0, inplace=True) + return demand -def add_non_eu28_industrial_energy_demand(countries, demand): +def add_non_eu28_industrial_energy_demand(countries, demand, production): non_eu28 = countries.difference(eu28) if non_eu28.empty: return demand - # output in MtMaterial/a - fn = snakemake.input.industrial_production_per_country - production = pd.read_csv(fn, index_col=0) / 1e3 - - # recombine HVC, Chlorine and Methanol to Basic chemicals (without ammonia) - chemicals = ["HVC", "Chlorine", "Methanol"] - production["Basic chemicals (without ammonia)"] = production[chemicals].sum(axis=1) - production.drop(columns=chemicals, inplace=True) eu28_production = production.loc[countries.intersection(eu28)].sum() eu28_energy = demand.groupby(level=1).sum() @@ -175,6 +175,7 @@ if __name__ == "__main__": from _helpers import mock_snakemake snakemake = mock_snakemake("build_industrial_energy_demand_per_country_today") + set_scenario_config(snakemake) 
params = snakemake.params.industry year = params.get("reference_year", 2015) @@ -182,9 +183,15 @@ if __name__ == "__main__": demand = industrial_energy_demand(countries.intersection(eu28), year) - demand = add_ammonia_energy_demand(demand) + # output in MtMaterial/a + production = ( + pd.read_csv(snakemake.input.industrial_production_per_country, index_col=0) + / 1e3 + ) - demand = add_non_eu28_industrial_energy_demand(countries, demand) + demand = separate_basic_chemicals(demand, production) + + demand = add_non_eu28_industrial_energy_demand(countries, demand, production) # for format compatibility demand = demand.stack(dropna=False).unstack(level=[0, 2]) diff --git a/scripts/build_industrial_energy_demand_per_node.py b/scripts/build_industrial_energy_demand_per_node.py index 55c10c5d..ce72ea7a 100644 --- a/scripts/build_industrial_energy_demand_per_node.py +++ b/scripts/build_industrial_energy_demand_per_node.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -7,6 +7,7 @@ Build industrial energy demand per model region. 
""" import pandas as pd +from _helpers import set_scenario_config if __name__ == "__main__": if "snakemake" not in globals(): @@ -18,24 +19,33 @@ if __name__ == "__main__": clusters=48, planning_horizons=2030, ) + set_scenario_config(snakemake) - # import EU ratios df as csv + # import ratios fn = snakemake.input.industry_sector_ratios - industry_sector_ratios = pd.read_csv(fn, index_col=0) + sector_ratios = pd.read_csv(fn, header=[0, 1], index_col=0) - # material demand per node and industry (kton/a) + # material demand per node and industry (Mton/a) fn = snakemake.input.industrial_production_per_node - nodal_production = pd.read_csv(fn, index_col=0) + nodal_production = pd.read_csv(fn, index_col=0) / 1e3 # energy demand today to get current electricity fn = snakemake.input.industrial_energy_demand_per_node_today nodal_today = pd.read_csv(fn, index_col=0) - # final energy consumption per node and industry (TWh/a) - nodal_df = nodal_production.dot(industry_sector_ratios.T) + nodal_sector_ratios = pd.concat( + {node: sector_ratios[node[:2]] for node in nodal_production.index}, axis=1 + ) - # convert GWh to TWh and ktCO2 to MtCO2 - nodal_df *= 0.001 + nodal_production_stacked = nodal_production.stack() + nodal_production_stacked.index.names = [None, None] + + # final energy consumption per node and industry (TWh/a) + nodal_df = ( + (nodal_sector_ratios.multiply(nodal_production_stacked)) + .T.groupby(level=0) + .sum() + ) rename_sectors = { "elec": "electricity", diff --git a/scripts/build_industrial_energy_demand_per_node_today.py b/scripts/build_industrial_energy_demand_per_node_today.py index d845e704..8b2b70a0 100644 --- a/scripts/build_industrial_energy_demand_per_node_today.py +++ b/scripts/build_industrial_energy_demand_per_node_today.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -10,6 +10,7 @@ from 
itertools import product import numpy as np import pandas as pd +from _helpers import set_scenario_config # map JRC/our sectors to hotmaps sector, where mapping exist sector_mapping = { @@ -75,5 +76,6 @@ if __name__ == "__main__": simpl="", clusters=48, ) + set_scenario_config(snakemake) build_nodal_industrial_energy_demand() diff --git a/scripts/build_industrial_production_per_country.py b/scripts/build_industrial_production_per_country.py index 74cb1949..5c14b065 100644 --- a/scripts/build_industrial_production_per_country.py +++ b/scripts/build_industrial_production_per_country.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -7,18 +7,16 @@ Build industrial production per country. """ import logging -from functools import partial - -logger = logging.getLogger(__name__) - import multiprocessing as mp +from functools import partial import country_converter as coco import numpy as np import pandas as pd -from _helpers import mute_print +from _helpers import configure_logging, mute_print, set_scenario_config from tqdm import tqdm +logger = logging.getLogger(__name__) cc = coco.CountryConverter() tj_to_ktoe = 0.0238845 @@ -99,33 +97,18 @@ fields = { "Other Industrial Sectors": "Physical output (index)", } -eb_names = { - "NO": "Norway", - "AL": "Albania", - "BA": "Bosnia and Herzegovina", - "MK": "FYR of Macedonia", - "GE": "Georgia", - "IS": "Iceland", - "KO": "Kosovo", - "MD": "Moldova", - "ME": "Montenegro", - "RS": "Serbia", - "UA": "Ukraine", - "TR": "Turkey", -} - eb_sectors = { - "Iron & steel industry": "Iron and steel", - "Chemical and Petrochemical industry": "Chemicals Industry", - "Non-ferrous metal industry": "Non-metallic mineral products", - "Paper, Pulp and Print": "Pulp, paper and printing", - "Food and Tabacco": "Food, beverages and tobacco", - "Non-metallic Minerals (Glass, pottery & building 
mat. Industry)": "Non Ferrous Metals", - "Transport Equipment": "Transport Equipment", + "Iron & steel": "Iron and steel", + "Chemical & petrochemical": "Chemicals Industry", + "Non-ferrous metals": "Non-metallic mineral products", + "Paper, pulp & printing": "Pulp, paper and printing", + "Food, beverages & tobacco": "Food, beverages and tobacco", + "Non-metallic minerals": "Non Ferrous Metals", + "Transport equipment": "Transport Equipment", "Machinery": "Machinery Equipment", - "Textile and Leather": "Textiles and leather", - "Wood and Wood Products": "Wood and wood products", - "Non-specified (Industry)": "Other Industrial Sectors", + "Textile & leather": "Textiles and leather", + "Wood & wood products": "Wood and wood products", + "Not elsewhere specified (industry)": "Other Industrial Sectors", } # TODO: this should go in a csv in `data` @@ -162,12 +145,15 @@ def get_energy_ratio(country, eurostat_dir, jrc_dir, year): e_country = e_switzerland * tj_to_ktoe else: # estimate physical output, energy consumption in the sector and country - fn = f"{eurostat_dir}/{eb_names[country]}.XLSX" - with mute_print(): - df = pd.read_excel( - fn, sheet_name="2016", index_col=2, header=0, skiprows=1 - ).squeeze("columns") - e_country = df.loc[eb_sectors.keys(), "Total all products"].rename(eb_sectors) + fn = f"{eurostat_dir}/{country}-Energy-balance-sheets-April-2023-edition.xlsb" + df = pd.read_excel( + fn, + sheet_name=str(min(2021, year)), + index_col=2, + header=0, + skiprows=4, + ) + e_country = df.loc[eb_sectors.keys(), "Total"].rename(eb_sectors) fn = f"{jrc_dir}/JRC-IDEES-2015_Industry_EU28.xlsx" @@ -263,7 +249,11 @@ def separate_basic_chemicals(demand, year): demand["Basic chemicals"].clip(lower=0.0, inplace=True) # assume HVC, methanol, chlorine production proportional to non-ammonia basic chemicals - distribution_key = demand["Basic chemicals"] / demand["Basic chemicals"].sum() + distribution_key = ( + demand["Basic chemicals"] + / 
params["basic_chemicals_without_NH3_production_today"] + / 1e3 + ) demand["HVC"] = params["HVC_production_today"] * 1e3 * distribution_key demand["Chlorine"] = params["chlorine_production_today"] * 1e3 * distribution_key demand["Methanol"] = params["methanol_production_today"] * 1e3 * distribution_key @@ -276,8 +266,8 @@ if __name__ == "__main__": from _helpers import mock_snakemake snakemake = mock_snakemake("build_industrial_production_per_country") - - logging.basicConfig(level=snakemake.config["logging"]["level"]) + configure_logging(snakemake) + set_scenario_config(snakemake) countries = snakemake.params.countries diff --git a/scripts/build_industrial_production_per_country_tomorrow.py b/scripts/build_industrial_production_per_country_tomorrow.py index ffed5195..a8b6c312 100644 --- a/scripts/build_industrial_production_per_country_tomorrow.py +++ b/scripts/build_industrial_production_per_country_tomorrow.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -7,6 +7,7 @@ Build future industrial production per country. 
""" import pandas as pd +from _helpers import set_scenario_config from prepare_sector_network import get if __name__ == "__main__": @@ -14,6 +15,7 @@ if __name__ == "__main__": from _helpers import mock_snakemake snakemake = mock_snakemake("build_industrial_production_per_country_tomorrow") + set_scenario_config(snakemake) params = snakemake.params.industry diff --git a/scripts/build_industrial_production_per_node.py b/scripts/build_industrial_production_per_node.py index 7b69948a..1eeecbae 100644 --- a/scripts/build_industrial_production_per_node.py +++ b/scripts/build_industrial_production_per_node.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -9,6 +9,7 @@ Build industrial production per model region. from itertools import product import pandas as pd +from _helpers import set_scenario_config # map JRC/our sectors to hotmaps sector, where mapping exist sector_mapping = { @@ -72,5 +73,6 @@ if __name__ == "__main__": simpl="", clusters=48, ) + set_scenario_config(snakemake) build_nodal_industrial_production() diff --git a/scripts/build_industry_sector_ratios.py b/scripts/build_industry_sector_ratios.py index 45705002..b3ef9321 100644 --- a/scripts/build_industry_sector_ratios.py +++ b/scripts/build_industry_sector_ratios.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -7,7 +7,7 @@ Build specific energy consumption by carrier and industries. 
""" import pandas as pd -from _helpers import mute_print +from _helpers import mute_print, set_scenario_config # GWh/ktoe OR MWh/toe toe_to_MWh = 11.630 @@ -408,15 +408,15 @@ def chemicals_industry(): df.loc["methane", sector] -= ammonia_total * params["MWh_CH4_per_tNH3_SMR"] df.loc["elec", sector] -= ammonia_total * params["MWh_elec_per_tNH3_SMR"] - # subtract chlorine demand + # subtract chlorine demand (in MtCl/a) chlorine_total = params["chlorine_production_today"] - df.loc["hydrogen", sector] -= chlorine_total * params["MWh_H2_per_tCl"] - df.loc["elec", sector] -= chlorine_total * params["MWh_elec_per_tCl"] + df.loc["hydrogen", sector] -= chlorine_total * params["MWh_H2_per_tCl"] * 1e3 + df.loc["elec", sector] -= chlorine_total * params["MWh_elec_per_tCl"] * 1e3 - # subtract methanol demand + # subtract methanol demand (in MtMeOH/a) methanol_total = params["methanol_production_today"] - df.loc["methane", sector] -= methanol_total * params["MWh_CH4_per_tMeOH"] - df.loc["elec", sector] -= methanol_total * params["MWh_elec_per_tMeOH"] + df.loc["methane", sector] -= methanol_total * params["MWh_CH4_per_tMeOH"] * 1e3 + df.loc["elec", sector] -= methanol_total * params["MWh_elec_per_tMeOH"] * 1e3 # MWh/t material df.loc[sources, sector] = df.loc[sources, sector] / s_out @@ -1464,6 +1464,7 @@ if __name__ == "__main__": from _helpers import mock_snakemake snakemake = mock_snakemake("build_industry_sector_ratios") + set_scenario_config(snakemake) # TODO make params option year = 2015 diff --git a/scripts/build_industry_sector_ratios_intermediate.py b/scripts/build_industry_sector_ratios_intermediate.py new file mode 100644 index 00000000..14e09505 --- /dev/null +++ b/scripts/build_industry_sector_ratios_intermediate.py @@ -0,0 +1,81 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Build specific energy consumption by carrier and industries and by country, +that interpolates between the 
current average energy consumption (from +2015-2020) and the ideal future best-in-class consumption. +""" + +import pandas as pd +from prepare_sector_network import get + + +def build_industry_sector_ratios_intermediate(): + + # in TWh/a + demand = pd.read_csv( + snakemake.input.industrial_energy_demand_per_country_today, + header=[0, 1], + index_col=0, + ) + + # in Mt/a + production = ( + pd.read_csv(snakemake.input.industrial_production_per_country, index_col=0) + / 1e3 + ).stack() + production.index.names = [None, None] + + # in MWh/t + future_sector_ratios = pd.read_csv( + snakemake.input.industry_sector_ratios, index_col=0 + ) + + today_sector_ratios = demand.div(production, axis=1) + + today_sector_ratios.dropna(how="all", axis=1, inplace=True) + + rename = { + "waste": "biomass", + "electricity": "elec", + "solid": "coke", + "gas": "methane", + "other": "biomass", + "liquid": "naphtha", + } + today_sector_ratios = today_sector_ratios.rename(rename).groupby(level=0).sum() + + fraction_future = get(params["sector_ratios_fraction_future"], year) + + intermediate_sector_ratios = {} + for ct, group in today_sector_ratios.T.groupby(level=0): + today_sector_ratios_ct = ( + group.droplevel(0) + .T.reindex_like(future_sector_ratios) + .fillna(future_sector_ratios) + ) + intermediate_sector_ratios[ct] = ( + today_sector_ratios_ct * (1 - fraction_future) + + future_sector_ratios * fraction_future + ) + intermediate_sector_ratios = pd.concat(intermediate_sector_ratios, axis=1) + + intermediate_sector_ratios.to_csv(snakemake.output.industry_sector_ratios) + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "build_industry_sector_ratios_intermediate", + planning_horizons="2030", + ) + + year = int(snakemake.wildcards.planning_horizons[-4:]) + + params = snakemake.params.industry + + build_industry_sector_ratios_intermediate() diff --git a/scripts/build_line_rating.py 
b/scripts/build_line_rating.py index 032ba39c..5b4642d1 100755 --- a/scripts/build_line_rating.py +++ b/scripts/build_line_rating.py @@ -50,7 +50,6 @@ With a heat balance considering the maximum temperature threshold of the transmi the maximal possible capacity factor "s_max_pu" for each transmission line at each time step is calculated. """ -import logging import re import atlite @@ -59,7 +58,7 @@ import numpy as np import pandas as pd import pypsa import xarray as xr -from _helpers import configure_logging +from _helpers import configure_logging, set_scenario_config from shapely.geometry import LineString as Line from shapely.geometry import Point @@ -99,7 +98,7 @@ def calculate_line_rating(n, cutout): ------- xarray DataArray object with maximal power. """ - relevant_lines = n.lines[(n.lines["underground"] == False)] + relevant_lines = n.lines[~n.lines["underground"]].copy() buses = relevant_lines[["bus0", "bus1"]].values x = n.buses.x y = n.buses.y @@ -119,7 +118,7 @@ def calculate_line_rating(n, cutout): .apply(lambda x: int(re.findall(r"(\d+)-bundle", x)[0])) ) # Set default number of bundles per line - relevant_lines["n_bundle"].fillna(1, inplace=True) + relevant_lines["n_bundle"] = relevant_lines["n_bundle"].fillna(1) R *= relevant_lines["n_bundle"] R = calculate_resistance(T=353, R_ref=R) Imax = cutout.line_rating(shapes, R, D=0.0218, Ts=353, epsilon=0.8, alpha=0.8) @@ -145,9 +144,12 @@ if __name__ == "__main__": opts="Co2L-4H", ) configure_logging(snakemake) + set_scenario_config(snakemake) + + snapshots = snakemake.params.snapshots n = pypsa.Network(snakemake.input.base_network) - time = pd.date_range(freq="h", **snakemake.config["snapshots"]) + time = pd.date_range(freq="h", **snapshots) cutout = atlite.Cutout(snakemake.input.cutout).sel(time=time) da = calculate_line_rating(n, cutout) diff --git a/scripts/build_monthly_prices.py b/scripts/build_monthly_prices.py index c2e88972..d35243c3 100644 --- a/scripts/build_monthly_prices.py +++ 
b/scripts/build_monthly_prices.py @@ -1,16 +1,13 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ -Created on Tue May 16 10:37:35 2023. - -This script extracts monthly fuel prices of oil, gas, coal and lignite, -as well as CO2 prices - +This script extracts monthly fuel prices of oil, gas, coal and lignite, as well +as CO2 prices. Inputs ------ @@ -46,7 +43,7 @@ Data was accessed at 16.5.2023 import logging import pandas as pd -from _helpers import configure_logging +from _helpers import configure_logging, set_scenario_config logger = logging.getLogger(__name__) @@ -114,6 +111,7 @@ if __name__ == "__main__": snakemake = mock_snakemake("build_monthly_prices") configure_logging(snakemake) + set_scenario_config(snakemake) fuel_price = get_fuel_price() fuel_price.to_csv(snakemake.output.fuel_price) diff --git a/scripts/build_natura_raster.py b/scripts/build_natura_raster.py index 8fdb4ea3..2e2fcbbf 100644 --- a/scripts/build_natura_raster.py +++ b/scripts/build_natura_raster.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -46,7 +46,7 @@ import logging import atlite import geopandas as gpd import rasterio as rio -from _helpers import configure_logging +from _helpers import configure_logging, set_scenario_config from rasterio.features import geometry_mask from rasterio.warp import transform_bounds @@ -92,6 +92,7 @@ if __name__ == "__main__": snakemake = mock_snakemake("build_natura_raster") configure_logging(snakemake) + set_scenario_config(snakemake) cutouts = snakemake.input.cutouts xs, Xs, ys, Ys = zip(*(determine_cutout_xXyY(cutout) for cutout in cutouts)) diff --git a/scripts/build_population_layouts.py 
b/scripts/build_population_layouts.py index e864d925..5d7fbd4e 100644 --- a/scripts/build_population_layouts.py +++ b/scripts/build_population_layouts.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -8,14 +8,14 @@ Build mapping between cutout grid cells and population (total, urban, rural). import logging -logger = logging.getLogger(__name__) - - import atlite import geopandas as gpd import numpy as np import pandas as pd import xarray as xr +from _helpers import configure_logging, set_scenario_config + +logger = logging.getLogger(__name__) if __name__ == "__main__": if "snakemake" not in globals(): @@ -23,7 +23,8 @@ if __name__ == "__main__": snakemake = mock_snakemake("build_population_layouts") - logging.basicConfig(level=snakemake.config["logging"]["level"]) + configure_logging(snakemake) + set_scenario_config(snakemake) cutout = atlite.Cutout(snakemake.input.cutout) @@ -34,7 +35,7 @@ if __name__ == "__main__": nuts3 = gpd.read_file(snakemake.input.nuts3_shapes).set_index("index") # Indicator matrix NUTS3 -> grid cells - I = atlite.cutout.compute_indicatormatrix(nuts3.geometry, grid_cells) + I = atlite.cutout.compute_indicatormatrix(nuts3.geometry, grid_cells) # noqa: E741 # Indicator matrix grid_cells -> NUTS3; inprinciple Iinv*I is identity # but imprecisions mean not perfect @@ -84,7 +85,8 @@ if __name__ == "__main__": # correct for imprecision of Iinv*I pop_ct = nuts3.loc[nuts3.country == ct, "pop"].sum() - pop_cells_ct *= pop_ct / pop_cells_ct.sum() + if pop_cells_ct.sum() != 0: + pop_cells_ct *= pop_ct / pop_cells_ct.sum() # The first low density grid cells to reach rural fraction are rural asc_density_i = density_cells_ct.sort_values().index diff --git a/scripts/build_population_weighted_energy_totals.py b/scripts/build_population_weighted_energy_totals.py index 879e3b9b..56133dc0 100644 --- 
a/scripts/build_population_weighted_energy_totals.py +++ b/scripts/build_population_weighted_energy_totals.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -7,6 +7,7 @@ Distribute country-level energy demands by population. """ import pandas as pd +from _helpers import set_scenario_config if __name__ == "__main__": if "snakemake" not in globals(): @@ -17,6 +18,7 @@ if __name__ == "__main__": simpl="", clusters=48, ) + set_scenario_config(snakemake) pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) diff --git a/scripts/build_powerplants.py b/scripts/build_powerplants.py index d6553663..66a01624 100755 --- a/scripts/build_powerplants.py +++ b/scripts/build_powerplants.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -10,6 +10,7 @@ Retrieves conventional powerplant capacities and locations from these to buses and creates a ``.csv`` file. It is possible to amend the powerplant database with custom entries provided in ``data/custom_powerplants.csv``. +Lastly, for every substation, powerplants with zero-initial capacity can be added for certain fuel types automatically. Relevant Settings ----------------- @@ -19,6 +20,7 @@ Relevant Settings electricity: powerplants_filter: custom_powerplants: + everywhere_powerplants: .. seealso:: Documentation of the configuration file ``config/config.yaml`` at @@ -44,6 +46,7 @@ Description ----------- The configuration options ``electricity: powerplants_filter`` and ``electricity: custom_powerplants`` can be used to control whether data should be retrieved from the original powerplants database or from custom amendmends. These specify `pandas.query `_ commands. 
+In addition the configuration option ``electricity: everywhere_powerplants`` can be used to place powerplants with zero-initial capacity of certain fuel types at all substations. 1. Adding all powerplants from custom: @@ -73,14 +76,22 @@ The configuration options ``electricity: powerplants_filter`` and ``electricity: powerplants_filter: Country not in ['Germany'] and YearCommissioned <= 2015 custom_powerplants: YearCommissioned <= 2015 + +4. Adding powerplants at all substations for 4 conventional carrier types: + + .. code:: yaml + + everywhere_powerplants: ['Natural Gas', 'Coal', 'nuclear', 'OCGT'] """ +import itertools import logging +import numpy as np import pandas as pd import powerplantmatching as pm import pypsa -from _helpers import configure_logging +from _helpers import configure_logging, set_scenario_config from powerplantmatching.export import map_country_bus logger = logging.getLogger(__name__) @@ -97,6 +108,45 @@ def add_custom_powerplants(ppl, custom_powerplants, custom_ppl_query=False): ) +def add_everywhere_powerplants(ppl, substations, everywhere_powerplants): + # Create a dataframe with "everywhere_powerplants" of stated carriers at the location of all substations + everywhere_ppl = ( + pd.DataFrame( + itertools.product(substations.index.values, everywhere_powerplants), + columns=["substation_index", "Fueltype"], + ).merge( + substations[["x", "y", "country"]], + left_on="substation_index", + right_index=True, + ) + ).drop(columns="substation_index") + + # PPL uses different columns names compared to substations dataframe -> rename + everywhere_ppl = everywhere_ppl.rename( + columns={"x": "lon", "y": "lat", "country": "Country"} + ) + + # Add default values for the powerplants + everywhere_ppl["Name"] = ( + "Automatically added everywhere-powerplant " + everywhere_ppl.Fueltype + ) + everywhere_ppl["Set"] = "PP" + everywhere_ppl["Technology"] = everywhere_ppl["Fueltype"] + everywhere_ppl["Capacity"] = 0.0 + + # Assign plausible values for the 
commissioning and decommissioning years + # required for multi-year models + everywhere_ppl["DateIn"] = ppl["DateIn"].min() + everywhere_ppl["DateOut"] = ppl["DateOut"].max() + + # NaN values for efficiency will be replaced by the generic efficiency by attach_conventional_generators(...) in add_electricity.py later + everywhere_ppl["Efficiency"] = np.nan + + return pd.concat( + [ppl, everywhere_ppl], sort=False, ignore_index=True, verify_integrity=True + ) + + def replace_natural_gas_technology(df): mapping = {"Steam Turbine": "CCGT", "Combustion Engine": "OCGT"} tech = df.Technology.replace(mapping).fillna("CCGT") @@ -115,6 +165,7 @@ if __name__ == "__main__": snakemake = mock_snakemake("build_powerplants") configure_logging(snakemake) + set_scenario_config(snakemake) n = pypsa.Network(snakemake.input.base_network) countries = snakemake.params.countries @@ -149,6 +200,11 @@ if __name__ == "__main__": if countries_wo_ppl := set(countries) - set(ppl.Country.unique()): logging.warning(f"No powerplants known in: {', '.join(countries_wo_ppl)}") + # Add "everywhere powerplants" to all bus locations + ppl = add_everywhere_powerplants( + ppl, n.buses.query("substation_lv"), snakemake.params.everywhere_powerplants + ) + substations = n.buses.query("substation_lv") ppl = ppl.dropna(subset=["lat", "lon"]) ppl = map_country_bus(ppl, substations) diff --git a/scripts/build_renewable_profiles.py b/scripts/build_renewable_profiles.py index 3a1c525e..fd97aef1 100644 --- a/scripts/build_renewable_profiles.py +++ b/scripts/build_renewable_profiles.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -26,20 +26,9 @@ Relevant settings renewable: {technology}: - cutout: - corine: - grid_codes: - distance: - natura: - max_depth: - max_shore_distance: - min_shore_distance: - capacity_per_sqkm: - correction_factor: - 
potential: - min_p_max_pu: - clip_p_max_pu: - resource: + cutout: corine: luisa: grid_codes: distance: natura: max_depth: + max_shore_distance: min_shore_distance: capacity_per_sqkm: + correction_factor: min_p_max_pu: clip_p_max_pu: resource: .. seealso:: Documentation of the configuration file ``config/config.yaml`` at @@ -48,21 +37,37 @@ Relevant settings Inputs ------ -- ``data/bundle/corine/g250_clc06_V18_5.tif``: `CORINE Land Cover (CLC) `_ inventory on `44 classes `_ of land use (e.g. forests, arable land, industrial, urban areas). +- ``data/bundle/corine/g250_clc06_V18_5.tif``: `CORINE Land Cover (CLC) + `_ inventory on `44 + classes `_ of + land use (e.g. forests, arable land, industrial, urban areas) at 100m + resolution. .. image:: img/corine.png :scale: 33 % -- ``data/bundle/GEBCO_2014_2D.nc``: A `bathymetric `_ data set with a global terrain model for ocean and land at 15 arc-second intervals by the `General Bathymetric Chart of the Oceans (GEBCO) `_. +- ``data/LUISA_basemap_020321_50m.tif``: `LUISA Base Map + `_ land + coverage dataset at 50m resolution similar to CORINE. For codes in relation to + CORINE land cover, see `Annex 1 of the technical documentation + `_. + +- ``data/bundle/GEBCO_2014_2D.nc``: A `bathymetric + `_ data set with a global terrain + model for ocean and land at 15 arc-second intervals by the `General + Bathymetric Chart of the Oceans (GEBCO) + `_. .. 
image:: img/gebco_2019_grid_image.jpg :scale: 50 % - **Source:** `GEBCO `_ + **Source:** `GEBCO + `_ - ``resources/natura.tiff``: confer :ref:`natura` - ``resources/offshore_shapes.geojson``: confer :ref:`shapes` -- ``resources/regions_onshore.geojson``: (if not offshore wind), confer :ref:`busregions` +- ``resources/regions_onshore.geojson``: (if not offshore wind), confer + :ref:`busregions` - ``resources/regions_offshore.geojson``: (if offshore wind), :ref:`busregions` - ``"cutouts/" + params["renewable"][{technology}]['cutout']``: :ref:`cutout` - ``networks/base.nc``: :ref:`base` @@ -128,25 +133,26 @@ Description This script functions at two main spatial resolutions: the resolution of the network nodes and their `Voronoi cells `_, and the resolution of the -cutout grid cells for the weather data. Typically the weather data grid is -finer than the network nodes, so we have to work out the distribution of -generators across the grid cells within each Voronoi cell. This is done by -taking account of a combination of the available land at each grid cell and the -capacity factor there. +cutout grid cells for the weather data. Typically the weather data grid is finer +than the network nodes, so we have to work out the distribution of generators +across the grid cells within each Voronoi cell. This is done by taking account +of a combination of the available land at each grid cell and the capacity factor +there. First the script computes how much of the technology can be installed at each -cutout grid cell and each node using the `GLAES -`_ library. This uses the CORINE land use data, -Natura2000 nature reserves and GEBCO bathymetry data. +cutout grid cell and each node using the `atlite +`_ library. This uses the CORINE land use data, +LUISA land use data, Natura2000 nature reserves, GEBCO bathymetry data, and +shipping lanes. .. 
image:: img/eligibility.png :scale: 50 % :align: center -To compute the layout of generators in each node's Voronoi cell, the -installable potential in each grid cell is multiplied with the capacity factor -at each grid cell. This is done since we assume more generators are installed -at cells with a higher capacity factor. +To compute the layout of generators in each node's Voronoi cell, the installable +potential in each grid cell is multiplied with the capacity factor at each grid +cell. This is done since we assume more generators are installed at cells with a +higher capacity factor. .. image:: img/offwinddc-gridcell.png :scale: 50 % @@ -164,20 +170,14 @@ at cells with a higher capacity factor. :scale: 50 % :align: center -This layout is then used to compute the generation availability time series -from the weather data cutout from ``atlite``. +This layout is then used to compute the generation availability time series from +the weather data cutout from ``atlite``. -Two methods are available to compute the maximal installable potential for the -node (`p_nom_max`): ``simple`` and ``conservative``: - -- ``simple`` adds up the installable potentials of the individual grid cells. - If the model comes close to this limit, then the time series may slightly - overestimate production since it is assumed the geographical distribution is - proportional to capacity factor. - -- ``conservative`` assertains the nodal limit by increasing capacities - proportional to the layout until the limit of an individual grid cell is - reached. +The maximal installable potential for the node (`p_nom_max`) is computed by +adding up the installable potentials of the individual grid cells. If the model +comes close to this limit, then the time series may slightly overestimate +production since it is assumed the geographical distribution is proportional to +capacity factor. 
""" import functools import logging @@ -188,7 +188,7 @@ import geopandas as gpd import numpy as np import pandas as pd import xarray as xr -from _helpers import configure_logging +from _helpers import configure_logging, set_scenario_config from dask.distributed import Client from pypsa.geo import haversine from shapely.geometry import LineString @@ -200,20 +200,25 @@ if __name__ == "__main__": if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake("build_renewable_profiles", technology="solar") + snakemake = mock_snakemake("build_renewable_profiles", technology="offwind-dc") configure_logging(snakemake) + set_scenario_config(snakemake) nprocesses = int(snakemake.threads) noprogress = snakemake.config["run"].get("disable_progressbar", True) noprogress = noprogress or not snakemake.config["atlite"]["show_progress"] params = snakemake.params.renewable[snakemake.wildcards.technology] resource = params["resource"] # pv panel params / wind turbine params + + tech = next(t for t in ["panel", "turbine"] if t in resource) + models = resource[tech] + if not isinstance(models, dict): + models = {0: models} + resource[tech] = models[next(iter(models))] + correction_factor = params.get("correction_factor", 1.0) capacity_per_sqkm = params["capacity_per_sqkm"] - p_nom_max_meth = params.get("potential", "conservative") - - if isinstance(params.get("corine", {}), list): - params["corine"] = {"grid_codes": params["corine"]} + snapshots = snakemake.params.snapshots if correction_factor != 1.0: logger.info(f"correction_factor is set as {correction_factor}") @@ -223,7 +228,7 @@ if __name__ == "__main__": else: client = None - sns = pd.date_range(freq="h", **snakemake.config["snapshots"]) + sns = pd.date_range(freq="h", **snapshots) cutout = atlite.Cutout(snakemake.input.cutout).sel(time=sns) regions = gpd.read_file(snakemake.input.regions) assert not regions.empty, ( @@ -240,16 +245,29 @@ if __name__ == "__main__": if params["natura"]: 
excluder.add_raster(snakemake.input.natura, nodata=0, allow_no_overlap=True) - corine = params.get("corine", {}) - if "grid_codes" in corine: - codes = corine["grid_codes"] - excluder.add_raster(snakemake.input.corine, codes=codes, invert=True, crs=3035) - if corine.get("distance", 0.0) > 0.0: - codes = corine["distance_grid_codes"] - buffer = corine["distance"] - excluder.add_raster( - snakemake.input.corine, codes=codes, buffer=buffer, crs=3035 - ) + for dataset in ["corine", "luisa"]: + kwargs = {"nodata": 0} if dataset == "luisa" else {} + settings = params.get(dataset, {}) + if not settings: + continue + if dataset == "luisa" and res > 50: + logger.info( + "LUISA data is available at 50m resolution, " + f"but coarser {res}m resolution is used." + ) + if isinstance(settings, list): + settings = {"grid_codes": settings} + if "grid_codes" in settings: + codes = settings["grid_codes"] + excluder.add_raster( + snakemake.input[dataset], codes=codes, invert=True, crs=3035, **kwargs + ) + if settings.get("distance", 0.0) > 0.0: + codes = settings["distance_grid_codes"] + buffer = settings["distance"] + excluder.add_raster( + snakemake.input[dataset], codes=codes, buffer=buffer, crs=3035, **kwargs + ) if params.get("ship_threshold"): shipping_threshold = ( @@ -277,15 +295,14 @@ if __name__ == "__main__": snakemake.input.country_shapes, buffer=buffer, invert=True ) + logger.info("Calculate landuse availability...") + start = time.time() + kwargs = dict(nprocesses=nprocesses, disable_progressbar=noprogress) - if noprogress: - logger.info("Calculate landuse availabilities...") - start = time.time() - availability = cutout.availabilitymatrix(regions, excluder, **kwargs) - duration = time.time() - start - logger.info(f"Completed availability calculation ({duration:2.2f}s)") - else: - availability = cutout.availabilitymatrix(regions, excluder, **kwargs) + availability = cutout.availabilitymatrix(regions, excluder, **kwargs) + + duration = time.time() - start + 
logger.info(f"Completed landuse availability calculation ({duration:2.2f}s)") # For Moldova and Ukraine: Overwrite parts not covered by Corine with # externally determined available areas @@ -304,28 +321,53 @@ if __name__ == "__main__": func = getattr(cutout, resource.pop("method")) if client is not None: resource["dask_kwargs"] = {"scheduler": client} + + logger.info("Calculate average capacity factor...") + start = time.time() + capacity_factor = correction_factor * func(capacity_factor=True, **resource) layout = capacity_factor * area * capacity_per_sqkm - profile, capacities = func( - matrix=availability.stack(spatial=["y", "x"]), - layout=layout, - index=buses, - per_unit=True, - return_capacity=True, - **resource, - ) - logger.info(f"Calculating maximal capacity per bus (method '{p_nom_max_meth}')") - if p_nom_max_meth == "simple": - p_nom_max = capacity_per_sqkm * availability @ area - elif p_nom_max_meth == "conservative": - max_cap_factor = capacity_factor.where(availability != 0).max(["x", "y"]) - p_nom_max = capacities / max_cap_factor - else: - raise AssertionError( - 'Config key `potential` should be one of "simple" ' - f'(default) or "conservative", not "{p_nom_max_meth}"' + duration = time.time() - start + logger.info(f"Completed average capacity factor calculation ({duration:2.2f}s)") + + profiles = [] + capacities = [] + for year, model in models.items(): + + logger.info( + f"Calculate weighted capacity factor time series for model {model}..." 
) + start = time.time() + + resource[tech] = model + + profile, capacity = func( + matrix=availability.stack(spatial=["y", "x"]), + layout=layout, + index=buses, + per_unit=True, + return_capacity=True, + **resource, + ) + + dim = {"year": [year]} + profile = profile.expand_dims(dim) + capacity = capacity.expand_dims(dim) + + profiles.append(profile.rename("profile")) + capacities.append(capacity.rename("weight")) + + duration = time.time() - start + logger.info( + f"Completed weighted capacity factor time series calculation for model {model} ({duration:2.2f}s)" + ) + + profiles = xr.merge(profiles) + capacities = xr.merge(capacities) + + logger.info("Calculating maximal capacity per bus") + p_nom_max = capacity_per_sqkm * availability @ area logger.info("Calculate average distances.") layoutmatrix = (layout * availability).stack(spatial=["y", "x"]) @@ -349,8 +391,8 @@ if __name__ == "__main__": ds = xr.merge( [ - (correction_factor * profile).rename("profile"), - capacities.rename("weight"), + correction_factor * profiles, + capacities, p_nom_max.rename("p_nom_max"), potential.rename("potential"), average_distance.rename("average_distance"), @@ -370,9 +412,13 @@ if __name__ == "__main__": ds["underwater_fraction"] = xr.DataArray(underwater_fraction, [buses]) # select only buses with some capacity and minimal capacity factor + mean_profile = ds["profile"].mean("time") + if "year" in ds.indexes: + mean_profile = mean_profile.max("year") + ds = ds.sel( bus=( - (ds["profile"].mean("time") > params.get("min_p_max_pu", 0.0)) + (mean_profile > params.get("min_p_max_pu", 0.0)) & (ds["p_nom_max"] > params.get("min_p_nom_max", 0.0)) ) ) diff --git a/scripts/build_retro_cost.py b/scripts/build_retro_cost.py old mode 100644 new mode 100755 index f5313c21..52f545e9 --- a/scripts/build_retro_cost.py +++ b/scripts/build_retro_cost.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# 
SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -68,6 +68,7 @@ The script has the following structure: """ import pandas as pd import xarray as xr +from _helpers import set_scenario_config # (i) --- FIXED PARAMETER / STANDARD VALUES ----------------------------------- @@ -297,8 +298,8 @@ def prepare_building_stock_data(): errors="ignore", ) - u_values.subsector.replace(rename_sectors, inplace=True) - u_values.btype.replace(rename_sectors, inplace=True) + u_values["subsector"] = u_values.subsector.replace(rename_sectors) + u_values["btype"] = u_values.btype.replace(rename_sectors) # for missing weighting of surfaces of building types assume MFH u_values["assumed_subsector"] = u_values.subsector @@ -306,8 +307,8 @@ def prepare_building_stock_data(): ~u_values.subsector.isin(rename_sectors.values()), "assumed_subsector" ] = "MFH" - u_values.country_code.replace({"UK": "GB"}, inplace=True) - u_values.bage.replace({"Berfore 1945": "Before 1945"}, inplace=True) + u_values["country_code"] = u_values.country_code.replace({"UK": "GB"}) + u_values["bage"] = u_values.bage.replace({"Berfore 1945": "Before 1945"}) u_values = u_values[~u_values.bage.isna()] u_values.set_index(["country_code", "subsector", "bage", "type"], inplace=True) @@ -533,16 +534,16 @@ def prepare_temperature_data(): """ temperature = xr.open_dataarray(snakemake.input.air_temperature).to_pandas() d_heat = ( - temperature.groupby(temperature.columns.str[:2], axis=1) + temperature.T.groupby(temperature.columns.str[:2]) .mean() - .resample("1D") + .T.resample("1D") .mean() < t_threshold ).sum() temperature_average_d_heat = ( - temperature.groupby(temperature.columns.str[:2], axis=1) + temperature.T.groupby(temperature.columns.str[:2]) .mean() - .apply( + .T.apply( lambda x: get_average_temperature_during_heating_season(x, t_threshold=15) ) ) @@ -554,7 +555,7 @@ def prepare_temperature_data(): # windows 
--------------------------------------------------------------- -def window_limit(l, window_assumptions): +def window_limit(l, window_assumptions): # noqa: E741 """ Define limit u value from which on window is retrofitted. """ @@ -567,7 +568,7 @@ def window_limit(l, window_assumptions): return m * l + a -def u_retro_window(l, window_assumptions): +def u_retro_window(l, window_assumptions): # noqa: E741 """ Define retrofitting value depending on renovation strength. """ @@ -580,7 +581,7 @@ def u_retro_window(l, window_assumptions): return max(m * l + a, 0.8) -def window_cost(u, cost_retro, window_assumptions): +def window_cost(u, cost_retro, window_assumptions): # noqa: E741 """ Get costs for new windows depending on u value. """ @@ -600,33 +601,40 @@ def window_cost(u, cost_retro, window_assumptions): return window_cost -def calculate_costs(u_values, l, cost_retro, window_assumptions): +def calculate_costs(u_values, l, cost_retro, window_assumptions): # noqa: E741 """ Returns costs for a given retrofitting strength weighted by the average surface/volume ratio of the component for each building type. 
""" return u_values.apply( lambda x: ( - cost_retro.loc[x.name[3], "cost_var"] - * 100 - * float(l) - * l_weight.loc[x.name[3]][0] - + cost_retro.loc[x.name[3], "cost_fix"] - ) - * x.A_element - / x.A_C_Ref - if x.name[3] != "Window" - else ( - (window_cost(x[f"new_U_{l}"], cost_retro, window_assumptions) * x.A_element) + ( + cost_retro.loc[x.name[3], "cost_var"] + * 100 + * float(l) + * l_weight.loc[x.name[3]].iloc[0] + + cost_retro.loc[x.name[3], "cost_fix"] + ) + * x.A_element / x.A_C_Ref - ) - if x.value > window_limit(float(l), window_assumptions) - else 0, + if x.name[3] != "Window" + else ( + ( + ( + window_cost(x[f"new_U_{l}"], cost_retro, window_assumptions) + * x.A_element + ) + / x.A_C_Ref + ) + if x.value > window_limit(float(l), window_assumptions) + else 0 + ) + ), axis=1, ) -def calculate_new_u(u_values, l, l_weight, window_assumptions, k=0.035): +def calculate_new_u(u_values, l, l_weight, window_assumptions, k=0.035): # noqa: E741 """ Calculate U-values after building retrofitting, depending on the old U-values (u_values). 
This is for simple insulation measuers, adding an @@ -648,12 +656,14 @@ def calculate_new_u(u_values, l, l_weight, window_assumptions, k=0.035): k: thermal conductivity """ return u_values.apply( - lambda x: k / ((k / x.value) + (float(l) * l_weight.loc[x.name[3]])) - if x.name[3] != "Window" - else ( - min(x.value, u_retro_window(float(l), window_assumptions)) - if x.value > window_limit(float(l), window_assumptions) - else x.value + lambda x: ( + k / ((k / x.value) + (float(l) * l_weight.loc[x.name[3]])) + if x.name[3] != "Window" + else ( + min(x.value, u_retro_window(float(l), window_assumptions)) + if x.value > window_limit(float(l), window_assumptions) + else x.value + ) ), axis=1, ) @@ -720,6 +730,7 @@ def map_to_lstrength(l_strength, df): .swaplevel(axis=1) .dropna(axis=1) ) + return pd.concat([df.drop([2, 3], axis=1, level=1), l_strength_df], axis=1) @@ -745,7 +756,7 @@ def calculate_heat_losses(u_values, data_tabula, l_strength, temperature_factor) """ # (1) by transmission # calculate new U values of building elements due to additional insulation - for l in l_strength: + for l in l_strength: # noqa: E741 u_values[f"new_U_{l}"] = calculate_new_u( u_values, l, l_weight, window_assumptions ) @@ -800,6 +811,7 @@ def calculate_heat_losses(u_values, data_tabula, l_strength, temperature_factor) * data_tabula.A_envelope / data_tabula.A_C_Ref ) + heat_transfer_perm2 = pd.concat( [ heat_transfer_perm2, @@ -836,9 +848,9 @@ def calculate_heat_losses(u_values, data_tabula, l_strength, temperature_factor) F_red_temp = map_to_lstrength(l_strength, F_red_temp) Q_ht = ( - heat_transfer_perm2.groupby(level=1, axis=1) + heat_transfer_perm2.T.groupby(level=1) .sum() - .mul(F_red_temp.droplevel(0, axis=1)) + .T.mul(F_red_temp.droplevel(0, axis=1)) .mul(temperature_factor.reindex(heat_transfer_perm2.index, level=0), axis=0) ) @@ -878,7 +890,7 @@ def calculate_gain_utilisation_factor(heat_transfer_perm2, Q_ht, Q_gain): Calculates gain utilisation factor nu. 
""" # time constant of the building tau [h] = c_m [Wh/(m^2K)] * 1 /(H_tr_e+H_tb*H_ve) [m^2 K /W] - tau = c_m / heat_transfer_perm2.groupby(level=1, axis=1).sum() + tau = c_m / heat_transfer_perm2.T.groupby(axis=1).sum().T alpha = alpha_H_0 + (tau / tau_H_0) # heat balance ratio gamma = (1 / Q_ht).mul(Q_gain.sum(axis=1), axis=0) @@ -1042,6 +1054,7 @@ if __name__ == "__main__": ll="v1.0", sector_opts="Co2L0-168H-T-H-B-I-solar3-dist1", ) + set_scenario_config(snakemake) # ******** config ********************************************************* diff --git a/scripts/build_salt_cavern_potentials.py b/scripts/build_salt_cavern_potentials.py index ed039772..f2c2ce8f 100644 --- a/scripts/build_salt_cavern_potentials.py +++ b/scripts/build_salt_cavern_potentials.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -24,6 +24,7 @@ onshore (>50km from sea), offshore (Figure 7). import geopandas as gpd import pandas as pd +from _helpers import set_scenario_config def concat_gdf(gdf_list, crs="EPSG:4326"): @@ -77,6 +78,8 @@ if __name__ == "__main__": "build_salt_cavern_potentials", simpl="", clusters="37" ) + set_scenario_config(snakemake) + fn_onshore = snakemake.input.regions_onshore fn_offshore = snakemake.input.regions_offshore diff --git a/scripts/build_sequestration_potentials.py b/scripts/build_sequestration_potentials.py index f6ad3526..106c1271 100644 --- a/scripts/build_sequestration_potentials.py +++ b/scripts/build_sequestration_potentials.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2023-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -10,6 +10,7 @@ database_en>`_. 
import geopandas as gpd import pandas as pd +from _helpers import set_scenario_config def area(gdf): @@ -39,6 +40,8 @@ if __name__ == "__main__": "build_sequestration_potentials", simpl="", clusters="181" ) + set_scenario_config(snakemake) + cf = snakemake.params.sequestration_potential gdf = gpd.read_file(snakemake.input.sequestration_potential[0]) diff --git a/scripts/build_shapes.py b/scripts/build_shapes.py index 35bae147..fd64411a 100644 --- a/scripts/build_shapes.py +++ b/scripts/build_shapes.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -77,7 +77,7 @@ import geopandas as gpd import numpy as np import pandas as pd import pycountry as pyc -from _helpers import configure_logging +from _helpers import configure_logging, set_scenario_config from shapely.geometry import MultiPolygon, Polygon logger = logging.getLogger(__name__) @@ -158,7 +158,7 @@ def country_cover(country_shapes, eez_shapes=None): shapes = pd.concat([shapes, eez_shapes]) europe_shape = shapes.unary_union if isinstance(europe_shape, MultiPolygon): - europe_shape = max(europe_shape, key=attrgetter("area")) + europe_shape = max(europe_shape.geoms, key=attrgetter("area")) return Polygon(shell=europe_shape.exterior) @@ -254,6 +254,7 @@ if __name__ == "__main__": snakemake = mock_snakemake("build_shapes") configure_logging(snakemake) + set_scenario_config(snakemake) country_shapes = countries(snakemake.input.naturalearth, snakemake.params.countries) country_shapes.reset_index().to_file(snakemake.output.country_shapes) diff --git a/scripts/build_ship_raster.py b/scripts/build_ship_raster.py index 90e006b0..c8285180 100644 --- a/scripts/build_ship_raster.py +++ b/scripts/build_ship_raster.py @@ -42,11 +42,11 @@ Description """ import logging -import os import zipfile +from pathlib import Path import rioxarray -from _helpers import configure_logging 
+from _helpers import configure_logging, set_scenario_config from build_natura_raster import determine_cutout_xXyY logger = logging.getLogger(__name__) @@ -57,16 +57,19 @@ if __name__ == "__main__": snakemake = mock_snakemake("build_ship_raster") configure_logging(snakemake) + set_scenario_config(snakemake) cutouts = snakemake.input.cutouts xs, Xs, ys, Ys = zip(*(determine_cutout_xXyY(cutout) for cutout in cutouts)) with zipfile.ZipFile(snakemake.input.ship_density) as zip_f: - zip_f.extract("shipdensity_global.tif") - with rioxarray.open_rasterio("shipdensity_global.tif") as ship_density: - ship_density = ship_density.drop(["band"]).sel( - x=slice(min(xs), max(Xs)), y=slice(max(Ys), min(ys)) - ) - ship_density.rio.to_raster(snakemake.output[0]) + resources = Path(snakemake.output[0]).parent + fn = "shipdensity_global.tif" + zip_f.extract(fn, resources) + with rioxarray.open_rasterio(resources / fn) as ship_density: + ship_density = ship_density.drop_vars(["band"]).sel( + x=slice(min(xs), max(Xs)), y=slice(max(Ys), min(ys)) + ) + ship_density.rio.to_raster(snakemake.output[0]) - os.remove("shipdensity_global.tif") + (resources / fn).unlink() diff --git a/scripts/build_shipping_demand.py b/scripts/build_shipping_demand.py index 8000c66c..60d36ac7 100644 --- a/scripts/build_shipping_demand.py +++ b/scripts/build_shipping_demand.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2023-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -11,6 +11,7 @@ import json import geopandas as gpd import pandas as pd +from _helpers import set_scenario_config if __name__ == "__main__": if "snakemake" not in globals(): @@ -21,6 +22,7 @@ if __name__ == "__main__": simpl="", clusters=48, ) + set_scenario_config(snakemake) scope = gpd.read_file(snakemake.input.scope).geometry[0] regions = gpd.read_file(snakemake.input.regions).set_index("name") diff --git 
a/scripts/build_solar_thermal_profiles.py b/scripts/build_solar_thermal_profiles.py index d285691a..3c460709 100644 --- a/scripts/build_solar_thermal_profiles.py +++ b/scripts/build_solar_thermal_profiles.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -11,6 +11,7 @@ import geopandas as gpd import numpy as np import pandas as pd import xarray as xr +from _helpers import set_scenario_config from dask.distributed import Client, LocalCluster if __name__ == "__main__": @@ -22,6 +23,7 @@ if __name__ == "__main__": simpl="", clusters=48, ) + set_scenario_config(snakemake) nprocesses = int(snakemake.threads) cluster = LocalCluster(n_workers=nprocesses, threads_per_worker=1) @@ -33,10 +35,7 @@ if __name__ == "__main__": cutout = atlite.Cutout(snakemake.input.cutout).sel(time=time) clustered_regions = ( - gpd.read_file(snakemake.input.regions_onshore) - .set_index("name") - .buffer(0) - .squeeze() + gpd.read_file(snakemake.input.regions_onshore).set_index("name").buffer(0) ) I = cutout.indicatormatrix(clustered_regions) diff --git a/scripts/build_temperature_profiles.py b/scripts/build_temperature_profiles.py index 9db37c25..5b736a26 100644 --- a/scripts/build_temperature_profiles.py +++ b/scripts/build_temperature_profiles.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -11,6 +11,7 @@ import geopandas as gpd import numpy as np import pandas as pd import xarray as xr +from _helpers import set_scenario_config from dask.distributed import Client, LocalCluster if __name__ == "__main__": @@ -22,6 +23,7 @@ if __name__ == "__main__": simpl="", clusters=48, ) + set_scenario_config(snakemake) nprocesses = int(snakemake.threads) cluster = LocalCluster(n_workers=nprocesses, 
threads_per_worker=1) @@ -31,13 +33,10 @@ if __name__ == "__main__": cutout = atlite.Cutout(snakemake.input.cutout).sel(time=time) clustered_regions = ( - gpd.read_file(snakemake.input.regions_onshore) - .set_index("name") - .buffer(0) - .squeeze() + gpd.read_file(snakemake.input.regions_onshore).set_index("name").buffer(0) ) - I = cutout.indicatormatrix(clustered_regions) + I = cutout.indicatormatrix(clustered_regions) # noqa: E741 pop_layout = xr.open_dataarray(snakemake.input.pop_layout) diff --git a/scripts/build_transport_demand.py b/scripts/build_transport_demand.py index 0bcfb7ed..de561e3f 100644 --- a/scripts/build_transport_demand.py +++ b/scripts/build_transport_demand.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -8,10 +8,14 @@ improvements due to drivetrain changes, time series for electric vehicle availability and demand-side management constraints. """ +import logging + import numpy as np import pandas as pd import xarray as xr -from _helpers import generate_periodic_profiles +from _helpers import configure_logging, generate_periodic_profiles, set_scenario_config + +logger = logging.getLogger(__name__) def build_nodal_transport_data(fn, pop_layout): @@ -130,6 +134,12 @@ def bev_availability_profile(fn, snapshots, nodes, options): traffic.mean() - traffic.min() ) + if not avail[avail < 0].empty: + logger.warning( + "The BEV availability weekly profile has negative values which can " + "lead to infeasibility." 
+ ) + return generate_periodic_profiles( dt_index=snapshots, nodes=nodes, @@ -160,6 +170,8 @@ if __name__ == "__main__": simpl="", clusters=48, ) + configure_logging(snakemake) + set_scenario_config(snakemake) pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) diff --git a/scripts/cluster_gas_network.py b/scripts/cluster_gas_network.py index e7554dff..19585aa9 100755 --- a/scripts/cluster_gas_network.py +++ b/scripts/cluster_gas_network.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -8,14 +8,14 @@ Cluster gas transmission network to clustered model regions. import logging -logger = logging.getLogger(__name__) - import geopandas as gpd import pandas as pd -from packaging.version import Version, parse +from _helpers import configure_logging, set_scenario_config from pypsa.geo import haversine_pts from shapely import wkt +logger = logging.getLogger(__name__) + def concat_gdf(gdf_list, crs="EPSG:4326"): """ @@ -41,12 +41,9 @@ def build_clustered_gas_network(df, bus_regions, length_factor=1.25): for i in [0, 1]: gdf = gpd.GeoDataFrame(geometry=df[f"point{i}"], crs="EPSG:4326") - kws = ( - dict(op="within") - if parse(gpd.__version__) < Version("0.10") - else dict(predicate="within") - ) - bus_mapping = gpd.sjoin(gdf, bus_regions, how="left", **kws).index_right + bus_mapping = gpd.sjoin( + gdf, bus_regions, how="left", predicate="within" + ).index_right bus_mapping = bus_mapping.groupby(bus_mapping.index).first() df[f"bus{i}"] = bus_mapping @@ -75,10 +72,10 @@ def build_clustered_gas_network(df, bus_regions, length_factor=1.25): return df -def reindex_pipes(df): +def reindex_pipes(df, prefix="gas pipeline"): def make_index(x): connector = " <-> " if x.bidirectional else " -> " - return "gas pipeline " + x.bus0 + connector + x.bus1 + return prefix + " " + x.bus0 + connector + x.bus1 df.index 
= df.apply(make_index, axis=1) @@ -109,8 +106,8 @@ if __name__ == "__main__": from _helpers import mock_snakemake snakemake = mock_snakemake("cluster_gas_network", simpl="", clusters="37") - - logging.basicConfig(level=snakemake.config["logging"]["level"]) + configure_logging(snakemake) + set_scenario_config(snakemake) fn = snakemake.input.cleaned_gas_network df = pd.read_csv(fn, index_col=0) diff --git a/scripts/cluster_network.py b/scripts/cluster_network.py index 28f08396..757184b2 100644 --- a/scripts/cluster_network.py +++ b/scripts/cluster_network.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -122,17 +122,20 @@ Exemplary unsolved network clustered to 37 nodes: """ import logging +import os import warnings from functools import reduce import geopandas as gpd +import linopy import matplotlib.pyplot as plt import numpy as np import pandas as pd -import pyomo.environ as po import pypsa import seaborn as sns -from _helpers import configure_logging, update_p_nom_max +from _helpers import configure_logging, set_scenario_config, update_p_nom_max +from add_electricity import load_costs +from packaging.version import Version, parse from pypsa.clustering.spatial import ( busmap_by_greedy_modularity, busmap_by_hac, @@ -140,12 +143,10 @@ from pypsa.clustering.spatial import ( get_clustering_from_busmap, ) +PD_GE_2_2 = parse(pd.__version__) >= Version("2.2") + warnings.filterwarnings(action="ignore", category=UserWarning) - -from add_electricity import load_costs - idx = pd.IndexSlice - logger = logging.getLogger(__name__) @@ -217,7 +218,7 @@ def get_feature_for_hac(n, buses_i=None, feature=None): return feature_data -def distribute_clusters(n, n_clusters, focus_weights=None, solver_name="cbc"): +def distribute_clusters(n, n_clusters, focus_weights=None, solver_name="scip"): """ Determine the number of clusters per 
country. """ @@ -257,31 +258,22 @@ def distribute_clusters(n, n_clusters, focus_weights=None, solver_name="cbc"): L.sum(), 1.0, rtol=1e-3 ), f"Country weights L must sum up to 1.0 when distributing clusters. Is {L.sum()}." - m = po.ConcreteModel() - - def n_bounds(model, *n_id): - return (1, N[n_id]) - - m.n = po.Var(list(L.index), bounds=n_bounds, domain=po.Integers) - m.tot = po.Constraint(expr=(po.summation(m.n) == n_clusters)) - m.objective = po.Objective( - expr=sum((m.n[i] - L.loc[i] * n_clusters) ** 2 for i in L.index), - sense=po.minimize, + m = linopy.Model() + clusters = m.add_variables( + lower=1, upper=N, coords=[L.index], name="n", integer=True ) - - opt = po.SolverFactory(solver_name) - if solver_name == "appsi_highs" or not opt.has_capability("quadratic_objective"): - logger.warning( - f"The configured solver `{solver_name}` does not support quadratic objectives. Falling back to `ipopt`." + m.add_constraints(clusters.sum() == n_clusters, name="tot") + # leave out constant in objective (L * n_clusters) ** 2 + m.objective = (clusters * clusters - 2 * clusters * L * n_clusters).sum() + if solver_name == "gurobi": + logging.getLogger("gurobipy").propagate = False + elif solver_name not in ["scip", "cplex"]: + logger.info( + f"The configured solver `{solver_name}` does not support quadratic objectives. Falling back to `scip`." ) - opt = po.SolverFactory("ipopt") - - results = opt.solve(m) - assert ( - results["Solver"][0]["Status"] == "ok" - ), f"Solver returned non-optimally: {results}" - - return pd.Series(m.n.get_values(), index=L.index).round().astype(int) + solver_name = "scip" + m.solve(solver_name=solver_name) + return m.solution["n"].to_series().astype(int) def busmap_for_n_clusters( @@ -373,9 +365,11 @@ def busmap_for_n_clusters( f"`algorithm` must be one of 'kmeans' or 'hac'. Is {algorithm}." 
) + compat_kws = dict(include_groups=False) if PD_GE_2_2 else {} + return ( n.buses.groupby(["country", "sub_network"], group_keys=False) - .apply(busmap_for_country) + .apply(busmap_for_country, **compat_kws) .squeeze() .rename("busmap") ) @@ -388,7 +382,7 @@ def clustering_for_n_clusters( aggregate_carriers=None, line_length_factor=1.25, aggregation_strategies=dict(), - solver_name="cbc", + solver_name="scip", algorithm="hac", feature=None, extended_link_costs=0, @@ -462,10 +456,10 @@ if __name__ == "__main__": snakemake = mock_snakemake("cluster_network", simpl="", clusters="37") configure_logging(snakemake) + set_scenario_config(snakemake) params = snakemake.params solver_name = snakemake.config["solving"]["solver"]["name"] - solver_name = "appsi_highs" if solver_name == "highs" else solver_name n = pypsa.Network(snakemake.input.network) @@ -500,7 +494,9 @@ if __name__ == "__main__": gens.efficiency, bins=[0, low, high, 1], labels=labels ).astype(str) carriers += [f"{c} {label} efficiency" for label in labels] - n.generators.carrier.update(gens.carrier + " " + suffix + " efficiency") + n.generators.update( + {"carrier": gens.carrier + " " + suffix + " efficiency"} + ) aggregate_carriers = carriers if n_clusters == len(n.buses): diff --git a/scripts/copy_config.py b/scripts/copy_config.py index a549d893..0a5fe10b 100644 --- a/scripts/copy_config.py +++ b/scripts/copy_config.py @@ -1,15 +1,14 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ Copy used configuration files and important scripts for archiving. 
""" -from pathlib import Path -from shutil import copy import yaml +from _helpers import set_scenario_config if __name__ == "__main__": if "snakemake" not in globals(): @@ -17,6 +16,8 @@ if __name__ == "__main__": snakemake = mock_snakemake("copy_config") + set_scenario_config(snakemake) + with open(snakemake.output[0], "w") as yaml_file: yaml.dump( snakemake.config, diff --git a/scripts/determine_availability_matrix_MD_UA.py b/scripts/determine_availability_matrix_MD_UA.py index 8d10f45d..80c04083 100644 --- a/scripts/determine_availability_matrix_MD_UA.py +++ b/scripts/determine_availability_matrix_MD_UA.py @@ -1,7 +1,10 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT +""" +Create land elibility analysis for Ukraine and Moldova with different datasets. +""" import functools import logging @@ -12,7 +15,7 @@ import fiona import geopandas as gpd import matplotlib.pyplot as plt import numpy as np -from _helpers import configure_logging +from _helpers import configure_logging, set_scenario_config from atlite.gis import shape_availability from rasterio.plot import show @@ -35,6 +38,7 @@ if __name__ == "__main__": "determine_availability_matrix_MD_UA", technology="solar" ) configure_logging(snakemake) + set_scenario_config(snakemake) nprocesses = None # snakemake.config["atlite"].get("nprocesses") noprogress = not snakemake.config["atlite"].get("show_progress", True) diff --git a/scripts/make_summary.py b/scripts/make_summary.py index fb13e91e..18642afc 100644 --- a/scripts/make_summary.py +++ b/scripts/make_summary.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -8,18 +8,16 @@ capacity factors, curtailment, energy balances, prices and other metrics. 
""" import logging - -logger = logging.getLogger(__name__) - import sys import numpy as np import pandas as pd import pypsa +from _helpers import configure_logging, set_scenario_config from prepare_sector_network import prepare_costs idx = pd.IndexSlice - +logger = logging.getLogger(__name__) opt_name = {"Store": "e", "Line": "s", "Transformer": "s"} @@ -509,21 +507,15 @@ def calculate_weighted_prices(n, label, weighted_prices): if carrier in ["H2", "gas"]: load = pd.DataFrame(index=n.snapshots, columns=buses, data=0.0) - elif carrier[:5] == "space": - load = heat_demand_df[buses.str[:2]].rename( - columns=lambda i: str(i) + suffix - ) else: - load = n.loads_t.p_set[buses] + load = n.loads_t.p_set[buses.intersection(n.loads.index)] for tech in value: names = n.links.index[n.links.index.to_series().str[-len(tech) :] == tech] if not names.empty: load += ( - n.links_t.p0[names] - .groupby(n.links.loc[names, "bus0"], axis=1) - .sum() + n.links_t.p0[names].T.groupby(n.links.loc[names, "bus0"]).sum().T ) # Add H2 Store when charging @@ -563,14 +555,16 @@ def calculate_market_values(n, label, market_values): dispatch = ( n.generators_t.p[gens] - .groupby(n.generators.loc[gens, "bus"], axis=1) + .T.groupby(n.generators.loc[gens, "bus"]) .sum() - .reindex(columns=buses, fill_value=0.0) + .T.reindex(columns=buses, fill_value=0.0) ) - revenue = dispatch * n.buses_t.marginal_price[buses] - market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum() + if total_dispatch := dispatch.sum().sum(): + market_values.at[tech, label] = revenue.sum().sum() / total_dispatch + else: + market_values.at[tech, label] = np.nan ## Now do market value of links ## @@ -586,14 +580,17 @@ def calculate_market_values(n, label, market_values): dispatch = ( n.links_t["p" + i][links] - .groupby(n.links.loc[links, "bus" + i], axis=1) + .T.groupby(n.links.loc[links, "bus" + i]) .sum() - .reindex(columns=buses, fill_value=0.0) + .T.reindex(columns=buses, fill_value=0.0) ) revenue = dispatch 
* n.buses_t.marginal_price[buses] - market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum() + if total_dispatch := dispatch.sum().sum(): + market_values.at[tech, label] = revenue.sum().sum() / total_dispatch + else: + market_values.at[tech, label] = np.nan return market_values @@ -677,7 +674,8 @@ if __name__ == "__main__": snakemake = mock_snakemake("make_summary") - logging.basicConfig(level=snakemake.config["logging"]["level"]) + configure_logging(snakemake) + set_scenario_config(snakemake) networks_dict = { (cluster, ll, opt + sector_opt, planning_horizon): "results/" diff --git a/scripts/make_summary_perfect.py b/scripts/make_summary_perfect.py index c387c6cf..76bd4ad0 100644 --- a/scripts/make_summary_perfect.py +++ b/scripts/make_summary_perfect.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -12,15 +12,13 @@ other metrics. 
import numpy as np import pandas as pd import pypsa -from make_summary import ( - assign_carriers, - assign_locations, - calculate_cfs, - calculate_nodal_cfs, - calculate_nodal_costs, -) +from _helpers import set_scenario_config +from make_summary import calculate_cfs # noqa: F401 +from make_summary import calculate_nodal_cfs # noqa: F401 +from make_summary import calculate_nodal_costs # noqa: F401 +from make_summary import assign_carriers, assign_locations from prepare_sector_network import prepare_costs -from pypsa.descriptors import get_active_assets, nominal_attrs +from pypsa.descriptors import get_active_assets from six import iteritems idx = pd.IndexSlice @@ -249,8 +247,9 @@ def calculate_energy(n, label, energy): .groupby(level=0) .sum() .multiply(c.df.sign) - .groupby(c.df.carrier, axis=1) + .T.groupby(c.df.carrier) .sum() + .T ) else: c_energies = pd.DataFrame( @@ -268,7 +267,7 @@ def calculate_energy(n, label, energy): totals[no_bus] = float( n.component_attrs[c.name].loc["p" + port, "default"] ) - c_energies -= totals.groupby(c.df.carrier, axis=1).sum() + c_energies -= totals.T.groupby(c.df.carrier).sum().T c_energies = pd.concat([c_energies.T], keys=[c.list_name]) @@ -379,9 +378,8 @@ def calculate_supply_energy(n, label, supply_energy): .groupby(level=0) .sum() .multiply(c.df.loc[items, "sign"]) - .groupby(c.df.loc[items, "carrier"], axis=1) + .T.groupby(c.df.loc[items, "carrier"]) .sum() - .T ) s = pd.concat([s], keys=[c.list_name]) s = pd.concat([s], keys=[i]) @@ -398,16 +396,9 @@ def calculate_supply_energy(n, label, supply_energy): if len(items) == 0: continue - s = ( - (-1) - * c.pnl["p" + end] - .reindex(items, axis=1) - .multiply(n.snapshot_weightings.objective, axis=0) - .groupby(level=0) - .sum() - .groupby(c.df.loc[items, "carrier"], axis=1) - .sum() - ).T + s = (-1) * c.pnl["p" + end].reindex(items, axis=1).multiply( + n.snapshot_weightings.objective, axis=0 + ).groupby(level=0).sum().T.groupby(c.df.loc[items, "carrier"]).sum() s.index = 
s.index + end s = pd.concat([s], keys=[c.list_name]) s = pd.concat([s], keys=[i]) @@ -502,7 +493,7 @@ def calculate_weighted_prices(n, label, weighted_prices): else: suffix = " " + carrier - buses = n.buses.index[n.buses.index.str[2:] == suffix] + buses = n.buses.index[n.buses.index.str[5:] == suffix] if buses.empty: continue @@ -513,14 +504,14 @@ def calculate_weighted_prices(n, label, weighted_prices): else n.loads_t.p_set.reindex(buses, axis=1) ) for tech in value: - names = n.links.index[n.links.index.to_series().str[-len(tech) :] == tech] + names = n.links.index[ + n.links.index.to_series().str[-len(tech) - 5 : -5] == tech + ] if names.empty: continue - load += ( - n.links_t.p0[names].groupby(n.links.loc[names, "bus0"], axis=1).sum() - ) + load += n.links_t.p0[names].T.groupby(n.links.loc[names, "bus0"]).sum().T # Add H2 Store when charging # if carrier == "H2": @@ -528,9 +519,12 @@ def calculate_weighted_prices(n, label, weighted_prices): # stores[stores > 0.] = 0. # load += -stores - weighted_prices.loc[carrier, label] = ( - load * n.buses_t.marginal_price[buses] - ).sum().sum() / load.sum().sum() + if total_load := load.sum().sum(): + weighted_prices.loc[carrier, label] = ( + load * n.buses_t.marginal_price[buses] + ).sum().sum() / total_load + else: + weighted_prices.loc[carrier, label] = np.nan if carrier[:5] == "space": print(load * n.buses_t.marginal_price[buses]) @@ -558,14 +552,17 @@ def calculate_market_values(n, label, market_values): dispatch = ( n.generators_t.p[gens] - .groupby(n.generators.loc[gens, "bus"], axis=1) + .T.groupby(n.generators.loc[gens, "bus"]) .sum() - .reindex(columns=buses, fill_value=0.0) + .T.reindex(columns=buses, fill_value=0.0) ) revenue = dispatch * n.buses_t.marginal_price[buses] - market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum() + if total_dispatch := dispatch.sum().sum(): + market_values.at[tech, label] = revenue.sum().sum() / total_dispatch + else: + market_values.at[tech, label] = np.nan ## 
Now do market value of links ## @@ -581,14 +578,17 @@ def calculate_market_values(n, label, market_values): dispatch = ( n.links_t["p" + i][links] - .groupby(n.links.loc[links, "bus" + i], axis=1) + .T.groupby(n.links.loc[links, "bus" + i]) .sum() - .reindex(columns=buses, fill_value=0.0) + .T.reindex(columns=buses, fill_value=0.0) ) revenue = dispatch * n.buses_t.marginal_price[buses] - market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum() + if total_dispatch := dispatch.sum().sum(): + market_values.at[tech, label] = revenue.sum().sum() / total_dispatch + else: + market_values.at[tech, label] = np.nan return market_values @@ -647,7 +647,7 @@ def calculate_co2_emissions(n, label, df): emitted = n.generators_t.p[gens.index].mul(em_pu) emitted_grouped = ( - emitted.groupby(level=0).sum().groupby(n.generators.carrier, axis=1).sum().T + emitted.groupby(level=0).sum().T.groupby(n.generators.carrier).sum() ) df = df.reindex(emitted_grouped.index.union(df.index)) @@ -723,6 +723,7 @@ if __name__ == "__main__": from _helpers import mock_snakemake snakemake = mock_snakemake("make_summary_perfect") + set_scenario_config(snakemake) run = snakemake.config["run"]["name"] if run != "": diff --git a/scripts/plot_gas_network.py b/scripts/plot_gas_network.py new file mode 100644 index 00000000..26186d51 --- /dev/null +++ b/scripts/plot_gas_network.py @@ -0,0 +1,253 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Creates map of optimised gas network, storage and selected other +infrastructure. 
+""" + +import logging + +import geopandas as gpd +import matplotlib.pyplot as plt +import pandas as pd +import pypsa +from _helpers import configure_logging, set_scenario_config +from plot_power_network import assign_location, load_projection +from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches + +logger = logging.getLogger(__name__) + + +def plot_ch4_map(n): + # if "gas pipeline" not in n.links.carrier.unique(): + # return + + assign_location(n) + + bus_size_factor = 8e7 + linewidth_factor = 1e4 + # MW below which not drawn + line_lower_threshold = 1e3 + + # Drop non-electric buses so they don't clutter the plot + n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) + + fossil_gas_i = n.generators[n.generators.carrier == "gas"].index + fossil_gas = ( + n.generators_t.p.loc[:, fossil_gas_i] + .mul(n.snapshot_weightings.generators, axis=0) + .sum() + .groupby(n.generators.loc[fossil_gas_i, "bus"]) + .sum() + / bus_size_factor + ) + fossil_gas.rename(index=lambda x: x.replace(" gas", ""), inplace=True) + fossil_gas = fossil_gas.reindex(n.buses.index).fillna(0) + # make a fake MultiIndex so that area is correct for legend + fossil_gas.index = pd.MultiIndex.from_product([fossil_gas.index, ["fossil gas"]]) + + methanation_i = n.links.query("carrier == 'Sabatier'").index + methanation = ( + abs( + n.links_t.p1.loc[:, methanation_i].mul( + n.snapshot_weightings.generators, axis=0 + ) + ) + .sum() + .groupby(n.links.loc[methanation_i, "bus1"]) + .sum() + / bus_size_factor + ) + methanation = ( + methanation.groupby(methanation.index) + .sum() + .rename(index=lambda x: x.replace(" gas", "")) + ) + # make a fake MultiIndex so that area is correct for legend + methanation.index = pd.MultiIndex.from_product([methanation.index, ["methanation"]]) + + biogas_i = n.stores[n.stores.carrier == "biogas"].index + biogas = ( + n.stores_t.p.loc[:, biogas_i] + .mul(n.snapshot_weightings.generators, axis=0) + .sum() + 
.groupby(n.stores.loc[biogas_i, "bus"]) + .sum() + / bus_size_factor + ) + biogas = ( + biogas.groupby(biogas.index) + .sum() + .rename(index=lambda x: x.replace(" biogas", "")) + ) + # make a fake MultiIndex so that area is correct for legend + biogas.index = pd.MultiIndex.from_product([biogas.index, ["biogas"]]) + + bus_sizes = pd.concat([fossil_gas, methanation, biogas]) + bus_sizes.sort_index(inplace=True) + + to_remove = n.links.index[~n.links.carrier.str.contains("gas pipeline")] + n.links.drop(to_remove, inplace=True) + + link_widths_rem = n.links.p_nom_opt / linewidth_factor + link_widths_rem[n.links.p_nom_opt < line_lower_threshold] = 0.0 + + link_widths_orig = n.links.p_nom / linewidth_factor + link_widths_orig[n.links.p_nom < line_lower_threshold] = 0.0 + + max_usage = n.links_t.p0[n.links.index].abs().max(axis=0) + link_widths_used = max_usage / linewidth_factor + link_widths_used[max_usage < line_lower_threshold] = 0.0 + + tech_colors = snakemake.params.plotting["tech_colors"] + + pipe_colors = { + "gas pipeline": "#f08080", + "gas pipeline new": "#c46868", + "gas pipeline (in 2020)": "lightgrey", + "gas pipeline (available)": "#e8d1d1", + } + + link_color_used = n.links.carrier.map(pipe_colors) + + n.links.bus0 = n.links.bus0.str.replace(" gas", "") + n.links.bus1 = n.links.bus1.str.replace(" gas", "") + + bus_colors = { + "fossil gas": tech_colors["fossil gas"], + "methanation": tech_colors["methanation"], + "biogas": "seagreen", + } + + fig, ax = plt.subplots(figsize=(7, 6), subplot_kw={"projection": proj}) + + n.plot( + bus_sizes=bus_sizes, + bus_colors=bus_colors, + link_colors=pipe_colors["gas pipeline (in 2020)"], + link_widths=link_widths_orig, + branch_components=["Link"], + ax=ax, + **map_opts, + ) + + n.plot( + ax=ax, + bus_sizes=0.0, + link_colors=pipe_colors["gas pipeline (available)"], + link_widths=link_widths_rem, + branch_components=["Link"], + color_geomap=False, + boundaries=map_opts["boundaries"], + ) + + n.plot( + ax=ax, + 
bus_sizes=0.0, + link_colors=link_color_used, + link_widths=link_widths_used, + branch_components=["Link"], + color_geomap=False, + boundaries=map_opts["boundaries"], + ) + + sizes = [100, 10] + labels = [f"{s} TWh" for s in sizes] + sizes = [s / bus_size_factor * 1e6 for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0, 1.03), + labelspacing=0.8, + frameon=False, + handletextpad=1, + title="gas sources", + ) + + add_legend_circles( + ax, + sizes, + labels, + srid=n.srid, + patch_kw=dict(facecolor="lightgrey"), + legend_kw=legend_kw, + ) + + sizes = [50, 10] + labels = [f"{s} GW" for s in sizes] + scale = 1e3 / linewidth_factor + sizes = [s * scale for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0.25, 1.03), + frameon=False, + labelspacing=0.8, + handletextpad=1, + title="gas pipeline", + ) + + add_legend_lines( + ax, + sizes, + labels, + patch_kw=dict(color="lightgrey"), + legend_kw=legend_kw, + ) + + colors = list(pipe_colors.values()) + list(bus_colors.values()) + labels = list(pipe_colors.keys()) + list(bus_colors.keys()) + + # legend on the side + # legend_kw = dict( + # bbox_to_anchor=(1.47, 1.04), + # frameon=False, + # ) + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0, 1.24), + ncol=2, + frameon=False, + ) + + add_legend_patches( + ax, + colors, + labels, + legend_kw=legend_kw, + ) + + fig.savefig(snakemake.output.map, bbox_inches="tight") + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "plot_gas_network", + simpl="", + opts="", + clusters="37", + ll="v1.0", + sector_opts="4380H-T-H-B-I-A-dist1", + ) + + configure_logging(snakemake) + set_scenario_config(snakemake) + + n = pypsa.Network(snakemake.input.network) + + regions = gpd.read_file(snakemake.input.regions).set_index("name") + + map_opts = snakemake.params.plotting["map"] + + if map_opts["boundaries"] is None: + map_opts["boundaries"] = 
regions.total_bounds[[0, 2, 1, 3]] + [-1, 1, -1, 1] + + proj = load_projection(snakemake.params.plotting) + + plot_ch4_map(n) diff --git a/scripts/plot_hydrogen_network.py b/scripts/plot_hydrogen_network.py new file mode 100644 index 00000000..b4585fb2 --- /dev/null +++ b/scripts/plot_hydrogen_network.py @@ -0,0 +1,273 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Creates map of optimised hydrogen network, storage and selected other +infrastructure. +""" + +import logging + +import geopandas as gpd +import matplotlib.pyplot as plt +import pandas as pd +import pypsa +from _helpers import configure_logging, set_scenario_config +from plot_power_network import assign_location, load_projection +from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches + +logger = logging.getLogger(__name__) + + +def group_pipes(df, drop_direction=False): + """ + Group pipes which connect same buses and return overall capacity. 
+ """ + df = df.copy() + if drop_direction: + positive_order = df.bus0 < df.bus1 + df_p = df[positive_order] + swap_buses = {"bus0": "bus1", "bus1": "bus0"} + df_n = df[~positive_order].rename(columns=swap_buses) + df = pd.concat([df_p, df_n]) + + # there are pipes for each investment period rename to AC buses name for plotting + df["index_orig"] = df.index + df.index = df.apply( + lambda x: f"H2 pipeline {x.bus0.replace(' H2', '')} -> {x.bus1.replace(' H2', '')}", + axis=1, + ) + return df.groupby(level=0).agg( + {"p_nom_opt": "sum", "bus0": "first", "bus1": "first", "index_orig": "first"} + ) + + +def plot_h2_map(n, regions): + # if "H2 pipeline" not in n.links.carrier.unique(): + # return + + assign_location(n) + + h2_storage = n.stores.query("carrier == 'H2'") + regions["H2"] = ( + h2_storage.rename(index=h2_storage.bus.map(n.buses.location)) + .e_nom_opt.groupby(level=0) + .sum() + .div(1e6) + ) # TWh + regions["H2"] = regions["H2"].where(regions["H2"] > 0.1) + + bus_size_factor = 1e5 + linewidth_factor = 7e3 + # MW below which not drawn + line_lower_threshold = 750 + + # Drop non-electric buses so they don't clutter the plot + n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) + + carriers = ["H2 Electrolysis", "H2 Fuel Cell"] + + elec = n.links[n.links.carrier.isin(carriers)].index + + bus_sizes = ( + n.links.loc[elec, "p_nom_opt"].groupby([n.links["bus0"], n.links.carrier]).sum() + / bus_size_factor + ) + + # make a fake MultiIndex so that area is correct for legend + bus_sizes.rename(index=lambda x: x.replace(" H2", ""), level=0, inplace=True) + # drop all links which are not H2 pipelines + n.links.drop( + n.links.index[~n.links.carrier.str.contains("H2 pipeline")], inplace=True + ) + + h2_new = n.links[n.links.carrier == "H2 pipeline"] + h2_retro = n.links[n.links.carrier == "H2 pipeline retrofitted"] + + if snakemake.params.foresight == "myopic": + # sum capacitiy for pipelines from different investment periods + h2_new = 
group_pipes(h2_new) + + if not h2_retro.empty: + h2_retro = ( + group_pipes(h2_retro, drop_direction=True) + .reindex(h2_new.index) + .fillna(0) + ) + + if not h2_retro.empty: + if snakemake.params.foresight != "myopic": + positive_order = h2_retro.bus0 < h2_retro.bus1 + h2_retro_p = h2_retro[positive_order] + swap_buses = {"bus0": "bus1", "bus1": "bus0"} + h2_retro_n = h2_retro[~positive_order].rename(columns=swap_buses) + h2_retro = pd.concat([h2_retro_p, h2_retro_n]) + + h2_retro["index_orig"] = h2_retro.index + h2_retro.index = h2_retro.apply( + lambda x: f"H2 pipeline {x.bus0.replace(' H2', '')} -> {x.bus1.replace(' H2', '')}", + axis=1, + ) + + retro_w_new_i = h2_retro.index.intersection(h2_new.index) + h2_retro_w_new = h2_retro.loc[retro_w_new_i] + + retro_wo_new_i = h2_retro.index.difference(h2_new.index) + h2_retro_wo_new = h2_retro.loc[retro_wo_new_i] + h2_retro_wo_new.index = h2_retro_wo_new.index_orig + + to_concat = [h2_new, h2_retro_w_new, h2_retro_wo_new] + h2_total = pd.concat(to_concat).p_nom_opt.groupby(level=0).sum() + + else: + h2_total = h2_new.p_nom_opt + + link_widths_total = h2_total / linewidth_factor + + n.links.rename(index=lambda x: x.split("-2")[0], inplace=True) + n.links = n.links.groupby(level=0).first() + link_widths_total = link_widths_total.reindex(n.links.index).fillna(0.0) + link_widths_total[n.links.p_nom_opt < line_lower_threshold] = 0.0 + + retro = n.links.p_nom_opt.where( + n.links.carrier == "H2 pipeline retrofitted", other=0.0 + ) + link_widths_retro = retro / linewidth_factor + link_widths_retro[n.links.p_nom_opt < line_lower_threshold] = 0.0 + + n.links.bus0 = n.links.bus0.str.replace(" H2", "") + n.links.bus1 = n.links.bus1.str.replace(" H2", "") + + regions = regions.to_crs(proj.proj4_init) + + fig, ax = plt.subplots(figsize=(7, 6), subplot_kw={"projection": proj}) + + color_h2_pipe = "#b3f3f4" + color_retrofit = "#499a9c" + + bus_colors = {"H2 Electrolysis": "#ff29d9", "H2 Fuel Cell": "#805394"} + + n.plot( + 
geomap=True, + bus_sizes=bus_sizes, + bus_colors=bus_colors, + link_colors=color_h2_pipe, + link_widths=link_widths_total, + branch_components=["Link"], + ax=ax, + **map_opts, + ) + + n.plot( + geomap=True, + bus_sizes=0, + link_colors=color_retrofit, + link_widths=link_widths_retro, + branch_components=["Link"], + ax=ax, + **map_opts, + ) + + regions.plot( + ax=ax, + column="H2", + cmap="Blues", + linewidths=0, + legend=True, + vmax=6, + vmin=0, + legend_kwds={ + "label": "Hydrogen Storage [TWh]", + "shrink": 0.7, + "extend": "max", + }, + ) + + sizes = [50, 10] + labels = [f"{s} GW" for s in sizes] + sizes = [s / bus_size_factor * 1e3 for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0, 1), + labelspacing=0.8, + handletextpad=0, + frameon=False, + ) + + add_legend_circles( + ax, + sizes, + labels, + srid=n.srid, + patch_kw=dict(facecolor="lightgrey"), + legend_kw=legend_kw, + ) + + sizes = [30, 10] + labels = [f"{s} GW" for s in sizes] + scale = 1e3 / linewidth_factor + sizes = [s * scale for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0.23, 1), + frameon=False, + labelspacing=0.8, + handletextpad=1, + ) + + add_legend_lines( + ax, + sizes, + labels, + patch_kw=dict(color="lightgrey"), + legend_kw=legend_kw, + ) + + colors = [bus_colors[c] for c in carriers] + [color_h2_pipe, color_retrofit] + labels = carriers + ["H2 pipeline (total)", "H2 pipeline (repurposed)"] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0, 1.13), + ncol=2, + frameon=False, + ) + + add_legend_patches(ax, colors, labels, legend_kw=legend_kw) + + ax.set_facecolor("white") + + fig.savefig(snakemake.output.map, bbox_inches="tight") + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "plot_hydrogen_network", + simpl="", + opts="", + clusters="37", + ll="v1.0", + sector_opts="4380H-T-H-B-I-A-dist1", + ) + + configure_logging(snakemake) + 
set_scenario_config(snakemake) + + n = pypsa.Network(snakemake.input.network) + + regions = gpd.read_file(snakemake.input.regions).set_index("name") + + map_opts = snakemake.params.plotting["map"] + + if map_opts["boundaries"] is None: + map_opts["boundaries"] = regions.total_bounds[[0, 2, 1, 3]] + [-1, 1, -1, 1] + + proj = load_projection(snakemake.params.plotting) + + plot_h2_map(n, regions) diff --git a/scripts/plot_network.py b/scripts/plot_network.py deleted file mode 100644 index f44bb6de..00000000 --- a/scripts/plot_network.py +++ /dev/null @@ -1,1106 +0,0 @@ -# -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors -# -# SPDX-License-Identifier: MIT -""" -Creates plots for optimised network topologies, including electricity, gas and -hydrogen networks, and regional generation, storage and conversion capacities -built. - -This rule plots a map of the network with technology capacities at the -nodes. -""" - -import logging - -logger = logging.getLogger(__name__) - -import cartopy.crs as ccrs -import geopandas as gpd -import matplotlib.pyplot as plt -import pandas as pd -import pypsa -from make_summary import assign_carriers -from plot_summary import preferred_order, rename_techs -from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches - -plt.style.use(["ggplot"]) - - -def rename_techs_tyndp(tech): - tech = rename_techs(tech) - if "heat pump" in tech or "resistive heater" in tech: - return "power-to-heat" - elif tech in ["H2 Electrolysis", "methanation", "helmeth", "H2 liquefaction"]: - return "power-to-gas" - elif tech == "H2": - return "H2 storage" - elif tech in ["NH3", "Haber-Bosch", "ammonia cracker", "ammonia store"]: - return "ammonia" - elif tech in ["OCGT", "CHP", "gas boiler", "H2 Fuel Cell"]: - return "gas-to-power/heat" - # elif "solar" in tech: - # return "solar" - elif tech in ["Fischer-Tropsch", "methanolisation"]: - return "power-to-liquid" - elif "offshore wind" in tech: - return "offshore 
wind" - elif "CC" in tech or "sequestration" in tech: - return "CCS" - else: - return tech - - -def assign_location(n): - for c in n.iterate_components(n.one_port_components | n.branch_components): - ifind = pd.Series(c.df.index.str.find(" ", start=4), c.df.index) - for i in ifind.value_counts().index: - # these have already been assigned defaults - if i == -1: - continue - names = ifind.index[ifind == i] - c.df.loc[names, "location"] = names.str[:i] - - -def plot_map( - network, - components=["links", "stores", "storage_units", "generators"], - bus_size_factor=1.7e10, - transmission=False, - with_legend=True, -): - tech_colors = snakemake.params.plotting["tech_colors"] - - n = network.copy() - assign_location(n) - # Drop non-electric buses so they don't clutter the plot - n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) - - costs = pd.DataFrame(index=n.buses.index) - - for comp in components: - df_c = getattr(n, comp) - - if df_c.empty: - continue - - df_c["nice_group"] = df_c.carrier.map(rename_techs_tyndp) - - attr = "e_nom_opt" if comp == "stores" else "p_nom_opt" - - costs_c = ( - (df_c.capital_cost * df_c[attr]) - .groupby([df_c.location, df_c.nice_group]) - .sum() - .unstack() - .fillna(0.0) - ) - costs = pd.concat([costs, costs_c], axis=1) - - logger.debug(f"{comp}, {costs}") - - costs = costs.groupby(costs.columns, axis=1).sum() - - costs.drop(list(costs.columns[(costs == 0.0).all()]), axis=1, inplace=True) - - new_columns = preferred_order.intersection(costs.columns).append( - costs.columns.difference(preferred_order) - ) - costs = costs[new_columns] - - for item in new_columns: - if item not in tech_colors: - logger.warning(f"{item} not in config/plotting/tech_colors") - - costs = costs.stack() # .sort_index() - - # hack because impossible to drop buses... 
- eu_location = snakemake.params.plotting.get("eu_node_location", dict(x=-5.5, y=46)) - n.buses.loc["EU gas", "x"] = eu_location["x"] - n.buses.loc["EU gas", "y"] = eu_location["y"] - - n.links.drop( - n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")], - inplace=True, - ) - - # drop non-bus - to_drop = costs.index.levels[0].symmetric_difference(n.buses.index) - if len(to_drop) != 0: - logger.info(f"Dropping non-buses {to_drop.tolist()}") - costs.drop(to_drop, level=0, inplace=True, axis=0, errors="ignore") - - # make sure they are removed from index - costs.index = pd.MultiIndex.from_tuples(costs.index.values) - - threshold = 100e6 # 100 mEUR/a - carriers = costs.groupby(level=1).sum() - carriers = carriers.where(carriers > threshold).dropna() - carriers = list(carriers.index) - - # PDF has minimum width, so set these to zero - line_lower_threshold = 500.0 - line_upper_threshold = 1e4 - linewidth_factor = 4e3 - ac_color = "rosybrown" - dc_color = "darkseagreen" - - title = "added grid" - - if snakemake.wildcards["ll"] == "v1.0": - # should be zero - line_widths = n.lines.s_nom_opt - n.lines.s_nom - link_widths = n.links.p_nom_opt - n.links.p_nom - if transmission: - line_widths = n.lines.s_nom_opt - link_widths = n.links.p_nom_opt - linewidth_factor = 2e3 - line_lower_threshold = 0.0 - title = "current grid" - else: - line_widths = n.lines.s_nom_opt - n.lines.s_nom_min - link_widths = n.links.p_nom_opt - n.links.p_nom_min - if transmission: - line_widths = n.lines.s_nom_opt - link_widths = n.links.p_nom_opt - title = "total grid" - - line_widths = line_widths.clip(line_lower_threshold, line_upper_threshold) - link_widths = link_widths.clip(line_lower_threshold, line_upper_threshold) - - line_widths = line_widths.replace(line_lower_threshold, 0) - link_widths = link_widths.replace(line_lower_threshold, 0) - - fig, ax = plt.subplots(subplot_kw={"projection": ccrs.EqualEarth()}) - fig.set_size_inches(7, 6) - - n.plot( - bus_sizes=costs / 
bus_size_factor, - bus_colors=tech_colors, - line_colors=ac_color, - link_colors=dc_color, - line_widths=line_widths / linewidth_factor, - link_widths=link_widths / linewidth_factor, - ax=ax, - **map_opts, - ) - - sizes = [20, 10, 5] - labels = [f"{s} bEUR/a" for s in sizes] - sizes = [s / bus_size_factor * 1e9 for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0.01, 1.06), - labelspacing=0.8, - frameon=False, - handletextpad=0, - title="system cost", - ) - - add_legend_circles( - ax, - sizes, - labels, - srid=n.srid, - patch_kw=dict(facecolor="lightgrey"), - legend_kw=legend_kw, - ) - - sizes = [10, 5] - labels = [f"{s} GW" for s in sizes] - scale = 1e3 / linewidth_factor - sizes = [s * scale for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0.27, 1.06), - frameon=False, - labelspacing=0.8, - handletextpad=1, - title=title, - ) - - add_legend_lines( - ax, sizes, labels, patch_kw=dict(color="lightgrey"), legend_kw=legend_kw - ) - - legend_kw = dict( - bbox_to_anchor=(1.52, 1.04), - frameon=False, - ) - - if with_legend: - colors = [tech_colors[c] for c in carriers] + [ac_color, dc_color] - labels = carriers + ["HVAC line", "HVDC link"] - - add_legend_patches( - ax, - colors, - labels, - legend_kw=legend_kw, - ) - - fig.savefig(snakemake.output.map, transparent=True, bbox_inches="tight") - - -def group_pipes(df, drop_direction=False): - """ - Group pipes which connect same buses and return overall capacity. 
- """ - if drop_direction: - positive_order = df.bus0 < df.bus1 - df_p = df[positive_order] - swap_buses = {"bus0": "bus1", "bus1": "bus0"} - df_n = df[~positive_order].rename(columns=swap_buses) - df = pd.concat([df_p, df_n]) - - # there are pipes for each investment period rename to AC buses name for plotting - df.index = df.apply( - lambda x: f"H2 pipeline {x.bus0.replace(' H2', '')} -> {x.bus1.replace(' H2', '')}", - axis=1, - ) - return df.groupby(level=0).agg({"p_nom_opt": sum, "bus0": "first", "bus1": "first"}) - - -def plot_h2_map(network, regions): - n = network.copy() - if "H2 pipeline" not in n.links.carrier.unique(): - return - - assign_location(n) - - h2_storage = n.stores.query("carrier == 'H2'") - regions["H2"] = h2_storage.rename( - index=h2_storage.bus.map(n.buses.location) - ).e_nom_opt.div( - 1e6 - ) # TWh - regions["H2"] = regions["H2"].where(regions["H2"] > 0.1) - - bus_size_factor = 1e5 - linewidth_factor = 7e3 - # MW below which not drawn - line_lower_threshold = 750 - - # Drop non-electric buses so they don't clutter the plot - n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) - - carriers = ["H2 Electrolysis", "H2 Fuel Cell"] - - elec = n.links[n.links.carrier.isin(carriers)].index - - bus_sizes = ( - n.links.loc[elec, "p_nom_opt"].groupby([n.links["bus0"], n.links.carrier]).sum() - / bus_size_factor - ) - - # make a fake MultiIndex so that area is correct for legend - bus_sizes.rename(index=lambda x: x.replace(" H2", ""), level=0, inplace=True) - # drop all links which are not H2 pipelines - n.links.drop( - n.links.index[~n.links.carrier.str.contains("H2 pipeline")], inplace=True - ) - - h2_new = n.links[n.links.carrier == "H2 pipeline"] - h2_retro = n.links[n.links.carrier == "H2 pipeline retrofitted"] - - if snakemake.params.foresight == "myopic": - # sum capacitiy for pipelines from different investment periods - h2_new = group_pipes(h2_new) - - if not h2_retro.empty: - h2_retro = ( - group_pipes(h2_retro, 
drop_direction=True) - .reindex(h2_new.index) - .fillna(0) - ) - - if not h2_retro.empty: - positive_order = h2_retro.bus0 < h2_retro.bus1 - h2_retro_p = h2_retro[positive_order] - swap_buses = {"bus0": "bus1", "bus1": "bus0"} - h2_retro_n = h2_retro[~positive_order].rename(columns=swap_buses) - h2_retro = pd.concat([h2_retro_p, h2_retro_n]) - - h2_retro["index_orig"] = h2_retro.index - h2_retro.index = h2_retro.apply( - lambda x: f"H2 pipeline {x.bus0.replace(' H2', '')} -> {x.bus1.replace(' H2', '')}", - axis=1, - ) - - retro_w_new_i = h2_retro.index.intersection(h2_new.index) - h2_retro_w_new = h2_retro.loc[retro_w_new_i] - - retro_wo_new_i = h2_retro.index.difference(h2_new.index) - h2_retro_wo_new = h2_retro.loc[retro_wo_new_i] - h2_retro_wo_new.index = h2_retro_wo_new.index_orig - - to_concat = [h2_new, h2_retro_w_new, h2_retro_wo_new] - h2_total = pd.concat(to_concat).p_nom_opt.groupby(level=0).sum() - - else: - h2_total = h2_new.p_nom_opt - - link_widths_total = h2_total / linewidth_factor - - n.links.rename(index=lambda x: x.split("-2")[0], inplace=True) - n.links = n.links.groupby(level=0).first() - link_widths_total = link_widths_total.reindex(n.links.index).fillna(0.0) - link_widths_total[n.links.p_nom_opt < line_lower_threshold] = 0.0 - - retro = n.links.p_nom_opt.where( - n.links.carrier == "H2 pipeline retrofitted", other=0.0 - ) - link_widths_retro = retro / linewidth_factor - link_widths_retro[n.links.p_nom_opt < line_lower_threshold] = 0.0 - - n.links.bus0 = n.links.bus0.str.replace(" H2", "") - n.links.bus1 = n.links.bus1.str.replace(" H2", "") - - proj = ccrs.EqualEarth() - regions = regions.to_crs(proj.proj4_init) - - fig, ax = plt.subplots(figsize=(7, 6), subplot_kw={"projection": proj}) - - color_h2_pipe = "#b3f3f4" - color_retrofit = "#499a9c" - - bus_colors = {"H2 Electrolysis": "#ff29d9", "H2 Fuel Cell": "#805394"} - - n.plot( - geomap=True, - bus_sizes=bus_sizes, - bus_colors=bus_colors, - link_colors=color_h2_pipe, - 
link_widths=link_widths_total, - branch_components=["Link"], - ax=ax, - **map_opts, - ) - - n.plot( - geomap=True, - bus_sizes=0, - link_colors=color_retrofit, - link_widths=link_widths_retro, - branch_components=["Link"], - ax=ax, - **map_opts, - ) - - regions.plot( - ax=ax, - column="H2", - cmap="Blues", - linewidths=0, - legend=True, - vmax=6, - vmin=0, - legend_kwds={ - "label": "Hydrogen Storage [TWh]", - "shrink": 0.7, - "extend": "max", - }, - ) - - sizes = [50, 10] - labels = [f"{s} GW" for s in sizes] - sizes = [s / bus_size_factor * 1e3 for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0, 1), - labelspacing=0.8, - handletextpad=0, - frameon=False, - ) - - add_legend_circles( - ax, - sizes, - labels, - srid=n.srid, - patch_kw=dict(facecolor="lightgrey"), - legend_kw=legend_kw, - ) - - sizes = [30, 10] - labels = [f"{s} GW" for s in sizes] - scale = 1e3 / linewidth_factor - sizes = [s * scale for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0.23, 1), - frameon=False, - labelspacing=0.8, - handletextpad=1, - ) - - add_legend_lines( - ax, - sizes, - labels, - patch_kw=dict(color="lightgrey"), - legend_kw=legend_kw, - ) - - colors = [bus_colors[c] for c in carriers] + [color_h2_pipe, color_retrofit] - labels = carriers + ["H2 pipeline (total)", "H2 pipeline (repurposed)"] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0, 1.13), - ncol=2, - frameon=False, - ) - - add_legend_patches(ax, colors, labels, legend_kw=legend_kw) - - ax.set_facecolor("white") - - fig.savefig( - snakemake.output.map.replace("-costs-all", "-h2_network"), bbox_inches="tight" - ) - - -def plot_ch4_map(network): - n = network.copy() - - if "gas pipeline" not in n.links.carrier.unique(): - return - - assign_location(n) - - bus_size_factor = 8e7 - linewidth_factor = 1e4 - # MW below which not drawn - line_lower_threshold = 1e3 - - # Drop non-electric buses so they don't clutter the plot - 
n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) - - fossil_gas_i = n.generators[n.generators.carrier == "gas"].index - fossil_gas = ( - n.generators_t.p.loc[:, fossil_gas_i] - .mul(n.snapshot_weightings.generators, axis=0) - .sum() - .groupby(n.generators.loc[fossil_gas_i, "bus"]) - .sum() - / bus_size_factor - ) - fossil_gas.rename(index=lambda x: x.replace(" gas", ""), inplace=True) - fossil_gas = fossil_gas.reindex(n.buses.index).fillna(0) - # make a fake MultiIndex so that area is correct for legend - fossil_gas.index = pd.MultiIndex.from_product([fossil_gas.index, ["fossil gas"]]) - - methanation_i = n.links[n.links.carrier.isin(["helmeth", "Sabatier"])].index - methanation = ( - abs( - n.links_t.p1.loc[:, methanation_i].mul( - n.snapshot_weightings.generators, axis=0 - ) - ) - .sum() - .groupby(n.links.loc[methanation_i, "bus1"]) - .sum() - / bus_size_factor - ) - methanation = ( - methanation.groupby(methanation.index) - .sum() - .rename(index=lambda x: x.replace(" gas", "")) - ) - # make a fake MultiIndex so that area is correct for legend - methanation.index = pd.MultiIndex.from_product([methanation.index, ["methanation"]]) - - biogas_i = n.stores[n.stores.carrier == "biogas"].index - biogas = ( - n.stores_t.p.loc[:, biogas_i] - .mul(n.snapshot_weightings.generators, axis=0) - .sum() - .groupby(n.stores.loc[biogas_i, "bus"]) - .sum() - / bus_size_factor - ) - biogas = ( - biogas.groupby(biogas.index) - .sum() - .rename(index=lambda x: x.replace(" biogas", "")) - ) - # make a fake MultiIndex so that area is correct for legend - biogas.index = pd.MultiIndex.from_product([biogas.index, ["biogas"]]) - - bus_sizes = pd.concat([fossil_gas, methanation, biogas]) - bus_sizes.sort_index(inplace=True) - - to_remove = n.links.index[~n.links.carrier.str.contains("gas pipeline")] - n.links.drop(to_remove, inplace=True) - - link_widths_rem = n.links.p_nom_opt / linewidth_factor - link_widths_rem[n.links.p_nom_opt < line_lower_threshold] = 0.0 - - 
link_widths_orig = n.links.p_nom / linewidth_factor - link_widths_orig[n.links.p_nom < line_lower_threshold] = 0.0 - - max_usage = n.links_t.p0.abs().max(axis=0) - link_widths_used = max_usage / linewidth_factor - link_widths_used[max_usage < line_lower_threshold] = 0.0 - - tech_colors = snakemake.params.plotting["tech_colors"] - - pipe_colors = { - "gas pipeline": "#f08080", - "gas pipeline new": "#c46868", - "gas pipeline (in 2020)": "lightgrey", - "gas pipeline (available)": "#e8d1d1", - } - - link_color_used = n.links.carrier.map(pipe_colors) - - n.links.bus0 = n.links.bus0.str.replace(" gas", "") - n.links.bus1 = n.links.bus1.str.replace(" gas", "") - - bus_colors = { - "fossil gas": tech_colors["fossil gas"], - "methanation": tech_colors["methanation"], - "biogas": "seagreen", - } - - fig, ax = plt.subplots(figsize=(7, 6), subplot_kw={"projection": ccrs.EqualEarth()}) - - n.plot( - bus_sizes=bus_sizes, - bus_colors=bus_colors, - link_colors=pipe_colors["gas pipeline (in 2020)"], - link_widths=link_widths_orig, - branch_components=["Link"], - ax=ax, - **map_opts, - ) - - n.plot( - ax=ax, - bus_sizes=0.0, - link_colors=pipe_colors["gas pipeline (available)"], - link_widths=link_widths_rem, - branch_components=["Link"], - color_geomap=False, - boundaries=map_opts["boundaries"], - ) - - n.plot( - ax=ax, - bus_sizes=0.0, - link_colors=link_color_used, - link_widths=link_widths_used, - branch_components=["Link"], - color_geomap=False, - boundaries=map_opts["boundaries"], - ) - - sizes = [100, 10] - labels = [f"{s} TWh" for s in sizes] - sizes = [s / bus_size_factor * 1e6 for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0, 1.03), - labelspacing=0.8, - frameon=False, - handletextpad=1, - title="gas sources", - ) - - add_legend_circles( - ax, - sizes, - labels, - srid=n.srid, - patch_kw=dict(facecolor="lightgrey"), - legend_kw=legend_kw, - ) - - sizes = [50, 10] - labels = [f"{s} GW" for s in sizes] - scale = 1e3 / linewidth_factor - sizes = 
[s * scale for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0.25, 1.03), - frameon=False, - labelspacing=0.8, - handletextpad=1, - title="gas pipeline", - ) - - add_legend_lines( - ax, - sizes, - labels, - patch_kw=dict(color="lightgrey"), - legend_kw=legend_kw, - ) - - colors = list(pipe_colors.values()) + list(bus_colors.values()) - labels = list(pipe_colors.keys()) + list(bus_colors.keys()) - - # legend on the side - # legend_kw = dict( - # bbox_to_anchor=(1.47, 1.04), - # frameon=False, - # ) - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0, 1.24), - ncol=2, - frameon=False, - ) - - add_legend_patches( - ax, - colors, - labels, - legend_kw=legend_kw, - ) - - fig.savefig( - snakemake.output.map.replace("-costs-all", "-ch4_network"), bbox_inches="tight" - ) - - -def plot_map_without(network): - n = network.copy() - assign_location(n) - - # Drop non-electric buses so they don't clutter the plot - n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) - - fig, ax = plt.subplots(figsize=(7, 6), subplot_kw={"projection": ccrs.EqualEarth()}) - - # PDF has minimum width, so set these to zero - line_lower_threshold = 200.0 - line_upper_threshold = 1e4 - linewidth_factor = 3e3 - ac_color = "rosybrown" - dc_color = "darkseagreen" - - # hack because impossible to drop buses... 
- if "EU gas" in n.buses.index: - eu_location = snakemake.params.plotting.get( - "eu_node_location", dict(x=-5.5, y=46) - ) - n.buses.loc["EU gas", "x"] = eu_location["x"] - n.buses.loc["EU gas", "y"] = eu_location["y"] - - to_drop = n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")] - n.links.drop(to_drop, inplace=True) - - if snakemake.wildcards["ll"] == "v1.0": - line_widths = n.lines.s_nom - link_widths = n.links.p_nom - else: - line_widths = n.lines.s_nom_min - link_widths = n.links.p_nom_min - - line_widths = line_widths.clip(line_lower_threshold, line_upper_threshold) - link_widths = link_widths.clip(line_lower_threshold, line_upper_threshold) - - line_widths = line_widths.replace(line_lower_threshold, 0) - link_widths = link_widths.replace(line_lower_threshold, 0) - - n.plot( - bus_colors="k", - line_colors=ac_color, - link_colors=dc_color, - line_widths=line_widths / linewidth_factor, - link_widths=link_widths / linewidth_factor, - ax=ax, - **map_opts, - ) - - handles = [] - labels = [] - - for s in (10, 5): - handles.append( - plt.Line2D([0], [0], color=ac_color, linewidth=s * 1e3 / linewidth_factor) - ) - labels.append(f"{s} GW") - l1_1 = ax.legend( - handles, - labels, - loc="upper left", - bbox_to_anchor=(0.05, 1.01), - frameon=False, - labelspacing=0.8, - handletextpad=1.5, - title="Today's transmission", - ) - ax.add_artist(l1_1) - - fig.savefig(snakemake.output.today, transparent=True, bbox_inches="tight") - - -def plot_series(network, carrier="AC", name="test"): - n = network.copy() - assign_location(n) - assign_carriers(n) - - buses = n.buses.index[n.buses.carrier.str.contains(carrier)] - - supply = pd.DataFrame(index=n.snapshots) - for c in n.iterate_components(n.branch_components): - n_port = 4 if c.name == "Link" else 2 - for i in range(n_port): - supply = pd.concat( - ( - supply, - ( - -1 - * c.pnl[f"p{str(i)}"] - .loc[:, c.df.index[c.df[f"bus{str(i)}"].isin(buses)]] - .groupby(c.df.carrier, axis=1) - .sum() - ), - ), - 
axis=1, - ) - - for c in n.iterate_components(n.one_port_components): - comps = c.df.index[c.df.bus.isin(buses)] - supply = pd.concat( - ( - supply, - ((c.pnl["p"].loc[:, comps]).multiply(c.df.loc[comps, "sign"])) - .groupby(c.df.carrier, axis=1) - .sum(), - ), - axis=1, - ) - - supply = supply.groupby(rename_techs_tyndp, axis=1).sum() - - both = supply.columns[(supply < 0.0).any() & (supply > 0.0).any()] - - positive_supply = supply[both] - negative_supply = supply[both] - - positive_supply[positive_supply < 0.0] = 0.0 - negative_supply[negative_supply > 0.0] = 0.0 - - supply[both] = positive_supply - - suffix = " charging" - - negative_supply.columns = negative_supply.columns + suffix - - supply = pd.concat((supply, negative_supply), axis=1) - - # 14-21.2 for flaute - # 19-26.1 for flaute - - start = "2013-02-19" - stop = "2013-02-26" - - threshold = 10e3 - - to_drop = supply.columns[(abs(supply) < threshold).all()] - - if len(to_drop) != 0: - logger.info(f"Dropping {to_drop.tolist()} from supply") - supply.drop(columns=to_drop, inplace=True) - - supply.index.name = None - - supply = supply / 1e3 - - supply.rename( - columns={"electricity": "electric demand", "heat": "heat demand"}, inplace=True - ) - supply.columns = supply.columns.str.replace("residential ", "") - supply.columns = supply.columns.str.replace("services ", "") - supply.columns = supply.columns.str.replace("urban decentral ", "decentral ") - - preferred_order = pd.Index( - [ - "electric demand", - "transmission lines", - "hydroelectricity", - "hydro reservoir", - "run of river", - "pumped hydro storage", - "CHP", - "onshore wind", - "offshore wind", - "solar PV", - "solar thermal", - "building retrofitting", - "ground heat pump", - "air heat pump", - "resistive heater", - "OCGT", - "gas boiler", - "gas", - "natural gas", - "methanation", - "hydrogen storage", - "battery storage", - "hot water storage", - ] - ) - - new_columns = preferred_order.intersection(supply.columns).append( - 
supply.columns.difference(preferred_order) - ) - - supply = supply.groupby(supply.columns, axis=1).sum() - fig, ax = plt.subplots() - fig.set_size_inches((8, 5)) - - ( - supply.loc[start:stop, new_columns].plot( - ax=ax, - kind="area", - stacked=True, - linewidth=0.0, - color=[ - snakemake.params.plotting["tech_colors"][i.replace(suffix, "")] - for i in new_columns - ], - ) - ) - - handles, labels = ax.get_legend_handles_labels() - - handles.reverse() - labels.reverse() - - new_handles = [] - new_labels = [] - - for i, item in enumerate(labels): - if "charging" not in item: - new_handles.append(handles[i]) - new_labels.append(labels[i]) - - ax.legend(new_handles, new_labels, ncol=3, loc="upper left", frameon=False) - ax.set_xlim([start, stop]) - ax.set_ylim([-1300, 1900]) - ax.grid(True) - ax.set_ylabel("Power [GW]") - fig.tight_layout() - - fig.savefig( - "{}/{RDIR}maps/series-{}-{}-{}-{}-{}.pdf".format( - "results", - snakemake.params.RDIR, - snakemake.wildcards["ll"], - carrier, - start, - stop, - name, - ), - transparent=True, - ) - - -def plot_map_perfect( - network, - components=["Link", "Store", "StorageUnit", "Generator"], - bus_size_factor=1.7e10, -): - n = network.copy() - assign_location(n) - # Drop non-electric buses so they don't clutter the plot - n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) - # investment periods - investments = n.snapshots.levels[0] - - costs = {} - for comp in components: - df_c = n.df(comp) - if df_c.empty: - continue - df_c["nice_group"] = df_c.carrier.map(rename_techs_tyndp) - - attr = "e_nom_opt" if comp == "Store" else "p_nom_opt" - - active = pd.concat( - [n.get_active_assets(comp, inv_p).rename(inv_p) for inv_p in investments], - axis=1, - ).astype(int) - capital_cost = n.df(comp)[attr] * n.df(comp).capital_cost - capital_cost_t = ( - (active.mul(capital_cost, axis=0)) - .groupby([n.df(comp).location, n.df(comp).nice_group]) - .sum() - ) - - capital_cost_t.drop("load", level=1, inplace=True, 
errors="ignore") - - costs[comp] = capital_cost_t - - costs = pd.concat(costs).groupby(level=[1, 2]).sum() - costs.drop(costs[costs.sum(axis=1) == 0].index, inplace=True) - - new_columns = preferred_order.intersection(costs.index.levels[1]).append( - costs.index.levels[1].difference(preferred_order) - ) - costs = costs.reindex(new_columns, level=1) - - for item in new_columns: - if item not in snakemake.config["plotting"]["tech_colors"]: - print( - "Warning!", - item, - "not in config/plotting/tech_colors, assign random color", - ) - snakemake.config["plotting"]["tech_colors"] = "pink" - - n.links.drop( - n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")], - inplace=True, - ) - - # drop non-bus - to_drop = costs.index.levels[0].symmetric_difference(n.buses.index) - if len(to_drop) != 0: - print("dropping non-buses", to_drop) - costs.drop(to_drop, level=0, inplace=True, axis=0, errors="ignore") - - # make sure they are removed from index - costs.index = pd.MultiIndex.from_tuples(costs.index.values) - - # PDF has minimum width, so set these to zero - line_lower_threshold = 500.0 - line_upper_threshold = 1e4 - linewidth_factor = 2e3 - ac_color = "gray" - dc_color = "m" - - line_widths = n.lines.s_nom_opt - link_widths = n.links.p_nom_opt - linewidth_factor = 2e3 - line_lower_threshold = 0.0 - title = "Today's transmission" - - line_widths[line_widths < line_lower_threshold] = 0.0 - link_widths[link_widths < line_lower_threshold] = 0.0 - - line_widths[line_widths > line_upper_threshold] = line_upper_threshold - link_widths[link_widths > line_upper_threshold] = line_upper_threshold - - for year in costs.columns: - fig, ax = plt.subplots(subplot_kw={"projection": ccrs.PlateCarree()}) - fig.set_size_inches(7, 6) - fig.suptitle(year) - - n.plot( - bus_sizes=costs[year] / bus_size_factor, - bus_colors=snakemake.config["plotting"]["tech_colors"], - line_colors=ac_color, - link_colors=dc_color, - line_widths=line_widths / linewidth_factor, - 
link_widths=link_widths / linewidth_factor, - ax=ax, - **map_opts, - ) - - sizes = [20, 10, 5] - labels = [f"{s} bEUR/a" for s in sizes] - sizes = [s / bus_size_factor * 1e9 for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0.01, 1.06), - labelspacing=0.8, - frameon=False, - handletextpad=0, - title="system cost", - ) - - add_legend_circles( - ax, - sizes, - labels, - srid=n.srid, - patch_kw=dict(facecolor="lightgrey"), - legend_kw=legend_kw, - ) - - sizes = [10, 5] - labels = [f"{s} GW" for s in sizes] - scale = 1e3 / linewidth_factor - sizes = [s * scale for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0.27, 1.06), - frameon=False, - labelspacing=0.8, - handletextpad=1, - title=title, - ) - - add_legend_lines( - ax, sizes, labels, patch_kw=dict(color="lightgrey"), legend_kw=legend_kw - ) - - legend_kw = dict( - bbox_to_anchor=(1.52, 1.04), - frameon=False, - ) - - fig.savefig( - snakemake.output[f"map_{year}"], transparent=True, bbox_inches="tight" - ) - - -if __name__ == "__main__": - if "snakemake" not in globals(): - from _helpers import mock_snakemake - - snakemake = mock_snakemake( - "plot_network", - simpl="", - opts="", - clusters="37", - ll="v1.0", - sector_opts="4380H-T-H-B-I-A-solar+p3-dist1", - ) - - logging.basicConfig(level=snakemake.config["logging"]["level"]) - - n = pypsa.Network(snakemake.input.network) - - regions = gpd.read_file(snakemake.input.regions).set_index("name") - - map_opts = snakemake.params.plotting["map"] - - if map_opts["boundaries"] is None: - map_opts["boundaries"] = regions.total_bounds[[0, 2, 1, 3]] + [-1, 1, -1, 1] - - if snakemake.params["foresight"] == "perfect": - plot_map_perfect( - n, - components=["Link", "Store", "StorageUnit", "Generator"], - bus_size_factor=2e10, - ) - else: - plot_map( - n, - components=["generators", "links", "stores", "storage_units"], - bus_size_factor=2e10, - transmission=False, - ) - - plot_h2_map(n, regions) - plot_ch4_map(n) - 
plot_map_without(n) - - # plot_series(n, carrier="AC", name=suffix) - # plot_series(n, carrier="heat", name=suffix) diff --git a/scripts/plot_power_network.py b/scripts/plot_power_network.py new file mode 100644 index 00000000..6db53bcc --- /dev/null +++ b/scripts/plot_power_network.py @@ -0,0 +1,273 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Creates plots for optimised power network topologies and regional generation, +storage and conversion capacities built. +""" + +import logging + +import cartopy.crs as ccrs +import geopandas as gpd +import matplotlib.pyplot as plt +import pandas as pd +import pypsa +from _helpers import configure_logging, set_scenario_config +from plot_summary import preferred_order, rename_techs +from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches + +logger = logging.getLogger(__name__) + + +def rename_techs_tyndp(tech): + tech = rename_techs(tech) + if "heat pump" in tech or "resistive heater" in tech: + return "power-to-heat" + elif tech in ["H2 Electrolysis", "methanation", "H2 liquefaction"]: + return "power-to-gas" + elif tech == "H2": + return "H2 storage" + elif tech in ["NH3", "Haber-Bosch", "ammonia cracker", "ammonia store"]: + return "ammonia" + elif tech in ["OCGT", "CHP", "gas boiler", "H2 Fuel Cell"]: + return "gas-to-power/heat" + # elif "solar" in tech: + # return "solar" + elif tech in ["Fischer-Tropsch", "methanolisation"]: + return "power-to-liquid" + elif "offshore wind" in tech: + return "offshore wind" + elif "CC" in tech or "sequestration" in tech: + return "CCS" + else: + return tech + + +def assign_location(n): + for c in n.iterate_components(n.one_port_components | n.branch_components): + ifind = pd.Series(c.df.index.str.find(" ", start=4), c.df.index) + for i in ifind.value_counts().index: + # these have already been assigned defaults + if i == -1: + continue + names = ifind.index[ifind == i] + 
c.df.loc[names, "location"] = names.str[:i] + + +def load_projection(plotting_params): + proj_kwargs = plotting_params.get("projection", dict(name="EqualEarth")) + proj_func = getattr(ccrs, proj_kwargs.pop("name")) + return proj_func(**proj_kwargs) + + +def plot_map( + n, + components=["links", "stores", "storage_units", "generators"], + bus_size_factor=2e10, + transmission=False, + with_legend=True, +): + tech_colors = snakemake.params.plotting["tech_colors"] + + assign_location(n) + # Drop non-electric buses so they don't clutter the plot + n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) + + costs = pd.DataFrame(index=n.buses.index) + + for comp in components: + df_c = getattr(n, comp) + + if df_c.empty: + continue + + df_c["nice_group"] = df_c.carrier.map(rename_techs_tyndp) + + attr = "e_nom_opt" if comp == "stores" else "p_nom_opt" + + costs_c = ( + (df_c.capital_cost * df_c[attr]) + .groupby([df_c.location, df_c.nice_group]) + .sum() + .unstack() + .fillna(0.0) + ) + costs = pd.concat([costs, costs_c], axis=1) + + logger.debug(f"{comp}, {costs}") + + costs = costs.T.groupby(costs.columns).sum().T + + costs.drop(list(costs.columns[(costs == 0.0).all()]), axis=1, inplace=True) + + new_columns = preferred_order.intersection(costs.columns).append( + costs.columns.difference(preferred_order) + ) + costs = costs[new_columns] + + for item in new_columns: + if item not in tech_colors: + logger.warning(f"{item} not in config/plotting/tech_colors") + + costs = costs.stack() # .sort_index() + + # hack because impossible to drop buses... 
+ eu_location = snakemake.params.plotting.get("eu_node_location", dict(x=-5.5, y=46)) + n.buses.loc["EU gas", "x"] = eu_location["x"] + n.buses.loc["EU gas", "y"] = eu_location["y"] + + n.links.drop( + n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")], + inplace=True, + ) + + # drop non-bus + to_drop = costs.index.levels[0].symmetric_difference(n.buses.index) + if len(to_drop) != 0: + logger.info(f"Dropping non-buses {to_drop.tolist()}") + costs.drop(to_drop, level=0, inplace=True, axis=0, errors="ignore") + + # make sure they are removed from index + costs.index = pd.MultiIndex.from_tuples(costs.index.values) + + threshold = 100e6 # 100 mEUR/a + carriers = costs.groupby(level=1).sum() + carriers = carriers.where(carriers > threshold).dropna() + carriers = list(carriers.index) + + # PDF has minimum width, so set these to zero + line_lower_threshold = 500.0 + line_upper_threshold = 1e4 + linewidth_factor = 4e3 + ac_color = "rosybrown" + dc_color = "darkseagreen" + + title = "added grid" + + if snakemake.wildcards["ll"] == "v1.0": + # should be zero + line_widths = n.lines.s_nom_opt - n.lines.s_nom + link_widths = n.links.p_nom_opt - n.links.p_nom + if transmission: + line_widths = n.lines.s_nom_opt + link_widths = n.links.p_nom_opt + linewidth_factor = 2e3 + line_lower_threshold = 0.0 + title = "current grid" + else: + line_widths = n.lines.s_nom_opt - n.lines.s_nom_min + link_widths = n.links.p_nom_opt - n.links.p_nom_min + if transmission: + line_widths = n.lines.s_nom_opt + link_widths = n.links.p_nom_opt + title = "total grid" + + line_widths = line_widths.clip(line_lower_threshold, line_upper_threshold) + link_widths = link_widths.clip(line_lower_threshold, line_upper_threshold) + + line_widths = line_widths.replace(line_lower_threshold, 0) + link_widths = link_widths.replace(line_lower_threshold, 0) + + fig, ax = plt.subplots(subplot_kw={"projection": proj}) + fig.set_size_inches(7, 6) + + n.plot( + bus_sizes=costs / bus_size_factor, + 
bus_colors=tech_colors, + line_colors=ac_color, + link_colors=dc_color, + line_widths=line_widths / linewidth_factor, + link_widths=link_widths / linewidth_factor, + ax=ax, + **map_opts, + ) + + sizes = [20, 10, 5] + labels = [f"{s} bEUR/a" for s in sizes] + sizes = [s / bus_size_factor * 1e9 for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0.01, 1.06), + labelspacing=0.8, + frameon=False, + handletextpad=0, + title="system cost", + ) + + add_legend_circles( + ax, + sizes, + labels, + srid=n.srid, + patch_kw=dict(facecolor="lightgrey"), + legend_kw=legend_kw, + ) + + sizes = [10, 5] + labels = [f"{s} GW" for s in sizes] + scale = 1e3 / linewidth_factor + sizes = [s * scale for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0.27, 1.06), + frameon=False, + labelspacing=0.8, + handletextpad=1, + title=title, + ) + + add_legend_lines( + ax, sizes, labels, patch_kw=dict(color="lightgrey"), legend_kw=legend_kw + ) + + legend_kw = dict( + bbox_to_anchor=(1.52, 1.04), + frameon=False, + ) + + if with_legend: + colors = [tech_colors[c] for c in carriers] + [ac_color, dc_color] + labels = carriers + ["HVAC line", "HVDC link"] + + add_legend_patches( + ax, + colors, + labels, + legend_kw=legend_kw, + ) + + fig.savefig(snakemake.output.map, bbox_inches="tight") + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "plot_power_network", + simpl="", + opts="", + clusters="37", + ll="v1.0", + sector_opts="4380H-T-H-B-I-A-dist1", + ) + + configure_logging(snakemake) + set_scenario_config(snakemake) + + n = pypsa.Network(snakemake.input.network) + + regions = gpd.read_file(snakemake.input.regions).set_index("name") + + map_opts = snakemake.params.plotting["map"] + + if map_opts["boundaries"] is None: + map_opts["boundaries"] = regions.total_bounds[[0, 2, 1, 3]] + [-1, 1, -1, 1] + + proj = load_projection(snakemake.params.plotting) + + plot_map(n) 
diff --git a/scripts/plot_power_network_clustered.py b/scripts/plot_power_network_clustered.py new file mode 100644 index 00000000..0c3dc635 --- /dev/null +++ b/scripts/plot_power_network_clustered.py @@ -0,0 +1,79 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2023-2024 PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Plot clustered electricity transmission network. +""" + +import geopandas as gpd +import matplotlib.pyplot as plt +import pypsa +from _helpers import set_scenario_config +from matplotlib.lines import Line2D +from plot_power_network import load_projection +from pypsa.plot import add_legend_lines + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "plot_power_network_clustered", + clusters=128, + configfiles=["../../config/config.test.yaml"], + ) + set_scenario_config(snakemake) + + lw_factor = 2e3 + + n = pypsa.Network(snakemake.input.network) + + regions = gpd.read_file(snakemake.input.regions_onshore).set_index("name") + + proj = load_projection(snakemake.params.plotting) + + fig, ax = plt.subplots(figsize=(8, 8), subplot_kw={"projection": proj}) + regions.to_crs(proj.proj4_init).plot( + ax=ax, facecolor="none", edgecolor="lightgray", linewidth=0.75 + ) + n.plot( + ax=ax, + margin=0.06, + line_widths=n.lines.s_nom / lw_factor, + link_colors=n.links.p_nom.apply( + lambda x: "darkseagreen" if x > 0 else "skyblue" + ), + link_widths=2.0, + ) + + sizes = [10, 20] + labels = [f"HVAC ({s} GW)" for s in sizes] + scale = 1e3 / lw_factor + sizes = [s * scale for s in sizes] + + legend_kw = dict( + loc=[0.25, 0.9], + frameon=False, + labelspacing=0.5, + handletextpad=1, + fontsize=13, + ) + + add_legend_lines( + ax, sizes, labels, patch_kw=dict(color="rosybrown"), legend_kw=legend_kw + ) + + handles = [ + Line2D([0], [0], color="darkseagreen", lw=2), + Line2D([0], [0], color="skyblue", lw=2), + ] + plt.legend( + handles, + ["HVDC existing", "HVDC 
planned"], + frameon=False, + loc=[0.0, 0.9], + fontsize=13, + ) + + plt.savefig(snakemake.output.map, bbox_inches="tight") diff --git a/scripts/plot_power_network_perfect.py b/scripts/plot_power_network_perfect.py new file mode 100644 index 00000000..f7506a00 --- /dev/null +++ b/scripts/plot_power_network_perfect.py @@ -0,0 +1,200 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Creates plots for optimised power network topologies and regional generation, +storage and conversion capacities built for the perfect foresight scenario. +""" + +import logging + +import geopandas as gpd +import matplotlib.pyplot as plt +import pandas as pd +import pypsa +from _helpers import configure_logging, set_scenario_config +from plot_power_network import assign_location, load_projection, rename_techs_tyndp +from plot_summary import preferred_order +from pypsa.plot import add_legend_circles, add_legend_lines + +logger = logging.getLogger(__name__) + + +def plot_map_perfect( + n, + components=["Link", "Store", "StorageUnit", "Generator"], + bus_size_factor=2e10, +): + assign_location(n) + # Drop non-electric buses so they don't clutter the plot + n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) + # investment periods + investments = n.snapshots.levels[0] + + costs = {} + for comp in components: + df_c = n.df(comp) + if df_c.empty: + continue + df_c["nice_group"] = df_c.carrier.map(rename_techs_tyndp) + + attr = "e_nom_opt" if comp == "Store" else "p_nom_opt" + + active = pd.concat( + [n.get_active_assets(comp, inv_p).rename(inv_p) for inv_p in investments], + axis=1, + ).astype(int) + capital_cost = n.df(comp)[attr] * n.df(comp).capital_cost + capital_cost_t = ( + (active.mul(capital_cost, axis=0)) + .groupby([n.df(comp).location, n.df(comp).nice_group]) + .sum() + ) + + capital_cost_t.drop("load", level=1, inplace=True, errors="ignore") + + costs[comp] = capital_cost_t + + costs = 
pd.concat(costs).groupby(level=[1, 2]).sum() + costs.drop(costs[costs.sum(axis=1) == 0].index, inplace=True) + + new_columns = preferred_order.intersection(costs.index.levels[1]).append( + costs.index.levels[1].difference(preferred_order) + ) + costs = costs.reindex(new_columns, level=1) + + for item in new_columns: + if item not in snakemake.config["plotting"]["tech_colors"]: + print( + "Warning!", + item, + "not in config/plotting/tech_colors, assign random color", + ) + snakemake.config["plotting"]["tech_colors"] = "pink" + + n.links.drop( + n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")], + inplace=True, + ) + + # drop non-bus + to_drop = costs.index.levels[0].symmetric_difference(n.buses.index) + if len(to_drop) != 0: + print("dropping non-buses", to_drop) + costs.drop(to_drop, level=0, inplace=True, axis=0, errors="ignore") + + # make sure they are removed from index + costs.index = pd.MultiIndex.from_tuples(costs.index.values) + + # PDF has minimum width, so set these to zero + line_lower_threshold = 500.0 + line_upper_threshold = 1e4 + linewidth_factor = 2e3 + ac_color = "gray" + dc_color = "m" + + line_widths = n.lines.s_nom_opt + link_widths = n.links.p_nom_opt + linewidth_factor = 2e3 + line_lower_threshold = 0.0 + title = "Today's transmission" + + line_widths[line_widths < line_lower_threshold] = 0.0 + link_widths[link_widths < line_lower_threshold] = 0.0 + + line_widths[line_widths > line_upper_threshold] = line_upper_threshold + link_widths[link_widths > line_upper_threshold] = line_upper_threshold + + for year in costs.columns: + fig, ax = plt.subplots(subplot_kw={"projection": proj}) + fig.set_size_inches(7, 6) + fig.suptitle(year) + + n.plot( + bus_sizes=costs[year] / bus_size_factor, + bus_colors=snakemake.config["plotting"]["tech_colors"], + line_colors=ac_color, + link_colors=dc_color, + line_widths=line_widths / linewidth_factor, + link_widths=link_widths / linewidth_factor, + ax=ax, + **map_opts, + ) + + sizes = [20, 10, 
5] + labels = [f"{s} bEUR/a" for s in sizes] + sizes = [s / bus_size_factor * 1e9 for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0.01, 1.06), + labelspacing=0.8, + frameon=False, + handletextpad=0, + title="system cost", + ) + + add_legend_circles( + ax, + sizes, + labels, + srid=n.srid, + patch_kw=dict(facecolor="lightgrey"), + legend_kw=legend_kw, + ) + + sizes = [10, 5] + labels = [f"{s} GW" for s in sizes] + scale = 1e3 / linewidth_factor + sizes = [s * scale for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0.27, 1.06), + frameon=False, + labelspacing=0.8, + handletextpad=1, + title=title, + ) + + add_legend_lines( + ax, sizes, labels, patch_kw=dict(color="lightgrey"), legend_kw=legend_kw + ) + + legend_kw = dict( + bbox_to_anchor=(1.52, 1.04), + frameon=False, + ) + + fig.savefig(snakemake.output[f"map_{year}"], bbox_inches="tight") + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "plot_power_network_perfect", + simpl="", + opts="", + clusters="37", + ll="v1.0", + sector_opts="4380H-T-H-B-I-A-dist1", + ) + + configure_logging(snakemake) + set_scenario_config(snakemake) + + n = pypsa.Network(snakemake.input.network) + + regions = gpd.read_file(snakemake.input.regions).set_index("name") + + map_opts = snakemake.params.plotting["map"] + + if map_opts["boundaries"] is None: + map_opts["boundaries"] = regions.total_bounds[[0, 2, 1, 3]] + [-1, 1, -1, 1] + + proj = load_projection(snakemake.params.plotting) + + plot_map_perfect(n) diff --git a/scripts/plot_statistics.py b/scripts/plot_statistics.py index b2728931..738fa618 100644 --- a/scripts/plot_statistics.py +++ b/scripts/plot_statistics.py @@ -1,13 +1,13 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT import 
matplotlib.pyplot as plt import pypsa import seaborn as sns -from _helpers import configure_logging +from _helpers import configure_logging, set_scenario_config sns.set_theme("paper", style="whitegrid") @@ -24,6 +24,7 @@ if __name__ == "__main__": ll="v1.0", ) configure_logging(snakemake) + set_scenario_config(snakemake) n = pypsa.Network(snakemake.input.network) @@ -58,7 +59,7 @@ if __name__ == "__main__": fig, ax = plt.subplots() ds = n.statistics.installed_capacity().dropna() ds = ds.drop("Line") - ds = ds.drop(("Generator", "Load")) + ds = ds.drop(("Generator", "Load"), errors="ignore") ds = ds / 1e3 ds.attrs["unit"] = "GW" plot_static_per_carrier(ds, ax) @@ -67,7 +68,7 @@ if __name__ == "__main__": fig, ax = plt.subplots() ds = n.statistics.optimal_capacity() ds = ds.drop("Line") - ds = ds.drop(("Generator", "Load")) + ds = ds.drop(("Generator", "Load"), errors="ignore") ds = ds / 1e3 ds.attrs["unit"] = "GW" plot_static_per_carrier(ds, ax) diff --git a/scripts/plot_summary.py b/scripts/plot_summary.py index 5804e785..bfe9995f 100644 --- a/scripts/plot_summary.py +++ b/scripts/plot_summary.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -8,17 +8,15 @@ Creates plots from summary CSV files. 
import logging -logger = logging.getLogger(__name__) - import matplotlib.gridspec as gridspec import matplotlib.pyplot as plt -import numpy as np import pandas as pd - -plt.style.use("ggplot") - +from _helpers import configure_logging, set_scenario_config from prepare_sector_network import co2_emissions_year +logger = logging.getLogger(__name__) +plt.style.use("ggplot") + # consolidate and rename def rename_techs(label): @@ -121,7 +119,6 @@ preferred_order = pd.Index( "gas boiler", "gas", "natural gas", - "helmeth", "methanation", "ammonia", "hydrogen storage", @@ -155,7 +152,7 @@ def plot_costs(): df = df.drop(to_drop) - logger.info(f"Total system cost of {round(df.sum()[0])} EUR billion per year") + logger.info(f"Total system cost of {round(df.sum().iloc[0])} EUR billion per year") new_index = preferred_order.intersection(df.index).append( df.index.difference(preferred_order) @@ -215,7 +212,7 @@ def plot_energy(): df = df.drop(to_drop) - logger.info(f"Total energy of {round(df.sum()[0])} TWh/a") + logger.info(f"Total energy of {round(df.sum().iloc[0])} TWh/a") if df.empty: fig, ax = plt.subplots(figsize=(12, 8)) @@ -285,9 +282,14 @@ def plot_balances(): # remove trailing link ports df.index = [ - i[:-1] - if ((i not in ["co2", "NH3", "H2"]) and (i[-1:] in ["0", "1", "2", "3"])) - else i + ( + i[:-1] + if ( + (i not in ["co2", "NH3", "H2"]) + and (i[-1:] in ["0", "1", "2", "3", "4"]) + ) + else i + ) for i in df.index ] @@ -305,7 +307,9 @@ def plot_balances(): df = df.drop(to_drop) - logger.debug(f"Total energy balance for {v} of {round(df.sum()[0],2)} {units}") + logger.debug( + f"Total energy balance for {v} of {round(df.sum().iloc[0],2)} {units}" + ) if df.empty: continue @@ -424,13 +428,13 @@ def historical_emissions(countries): ) emissions = co2_totals.loc["electricity"] - if "T" in opts: + if options["transport"]: emissions += co2_totals.loc[[i + " non-elec" for i in ["rail", "road"]]].sum() - if "H" in opts: + if options["heating"]: emissions += 
co2_totals.loc[ [i + " non-elec" for i in ["residential", "services"]] ].sum() - if "I" in opts: + if options["industry"]: emissions += co2_totals.loc[ [ "industrial non-elec", @@ -444,7 +448,7 @@ def historical_emissions(countries): return emissions -def plot_carbon_budget_distribution(input_eurostat): +def plot_carbon_budget_distribution(input_eurostat, options): """ Plot historical carbon emissions in the EU and decarbonization path. """ @@ -458,7 +462,6 @@ def plot_carbon_budget_distribution(input_eurostat): plt.rcParams["ytick.labelsize"] = 20 emissions_scope = snakemake.params.emissions_scope - report_year = snakemake.params.eurostat_report_year input_co2 = snakemake.input.co2 # historic emissions @@ -466,9 +469,8 @@ def plot_carbon_budget_distribution(input_eurostat): e_1990 = co2_emissions_year( countries, input_eurostat, - opts, + options, emissions_scope, - report_year, input_co2, year=1990, ) @@ -569,7 +571,8 @@ if __name__ == "__main__": snakemake = mock_snakemake("plot_summary") - logging.basicConfig(level=snakemake.config["logging"]["level"]) + configure_logging(snakemake) + set_scenario_config(snakemake) n_header = 4 @@ -579,7 +582,9 @@ if __name__ == "__main__": plot_balances() - for sector_opts in snakemake.params.sector_opts: - opts = sector_opts.split("-") - if any("cb" in o for o in opts) or snakemake.config["foresight"] == "perfect": - plot_carbon_budget_distribution(snakemake.input.eurostat) + co2_budget = snakemake.params["co2_budget"] + if ( + isinstance(co2_budget, str) and co2_budget.startswith("cb") + ) or snakemake.params["foresight"] == "perfect": + options = snakemake.params.sector + plot_carbon_budget_distribution(snakemake.input.eurostat, options) diff --git a/scripts/plot_validation_cross_border_flows.py b/scripts/plot_validation_cross_border_flows.py index 65f4f8c7..8de7d8a1 100644 --- a/scripts/plot_validation_cross_border_flows.py +++ b/scripts/plot_validation_cross_border_flows.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- 
coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -9,7 +9,7 @@ import matplotlib.pyplot as plt import pandas as pd import pypsa import seaborn as sns -from _helpers import configure_logging +from _helpers import configure_logging, set_scenario_config sns.set_theme("paper", style="whitegrid") @@ -187,6 +187,7 @@ if __name__ == "__main__": ll="v1.0", ) configure_logging(snakemake) + set_scenario_config(snakemake) countries = snakemake.params.countries diff --git a/scripts/plot_validation_electricity_prices.py b/scripts/plot_validation_electricity_prices.py index 2a187b9f..9efd6c46 100644 --- a/scripts/plot_validation_electricity_prices.py +++ b/scripts/plot_validation_electricity_prices.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -8,8 +8,7 @@ import matplotlib.pyplot as plt import pandas as pd import pypsa import seaborn as sns -from _helpers import configure_logging -from pypsa.statistics import get_bus_and_carrier +from _helpers import configure_logging, set_scenario_config sns.set_theme("paper", style="whitegrid") @@ -25,6 +24,7 @@ if __name__ == "__main__": ll="v1.0", ) configure_logging(snakemake) + set_scenario_config(snakemake) n = pypsa.Network(snakemake.input.network) n.loads.carrier = "load" diff --git a/scripts/plot_validation_electricity_production.py b/scripts/plot_validation_electricity_production.py index 5c5569d0..5a68cfa5 100644 --- a/scripts/plot_validation_electricity_production.py +++ b/scripts/plot_validation_electricity_production.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: 
MIT @@ -8,7 +8,7 @@ import matplotlib.pyplot as plt import pandas as pd import pypsa import seaborn as sns -from _helpers import configure_logging +from _helpers import configure_logging, set_scenario_config from pypsa.statistics import get_bus_and_carrier sns.set_theme("paper", style="whitegrid") @@ -35,6 +35,7 @@ if __name__ == "__main__": ll="v1.0", ) configure_logging(snakemake) + set_scenario_config(snakemake) n = pypsa.Network(snakemake.input.network) n.loads.carrier = "load" @@ -45,6 +46,12 @@ if __name__ == "__main__": header=[0, 1], parse_dates=True, ) + subset_technologies = ["Geothermal", "Nuclear", "Biomass", "Lignite", "Oil", "Coal"] + lowercase_technologies = [ + technology.lower() if technology in subset_technologies else technology + for technology in historic.columns.levels[1] + ] + historic.columns = historic.columns.set_levels(lowercase_technologies, level=1) colors = n.carriers.set_index("nice_name").color.where( lambda s: s != "", "lightgrey" diff --git a/scripts/prepare_links_p_nom.py b/scripts/prepare_links_p_nom.py index 4b915d22..7c1ed211 100644 --- a/scripts/prepare_links_p_nom.py +++ b/scripts/prepare_links_p_nom.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -40,7 +40,7 @@ Description import logging import pandas as pd -from _helpers import configure_logging +from _helpers import configure_logging, set_scenario_config logger = logging.getLogger(__name__) @@ -69,6 +69,7 @@ if __name__ == "__main__": snakemake = mock_snakemake("prepare_links_p_nom", simpl="") configure_logging(snakemake) + set_scenario_config(snakemake) links_p_nom = pd.read_html( "https://en.wikipedia.org/wiki/List_of_HVDC_projects", header=0, match="SwePol" diff --git a/scripts/prepare_network.py b/scripts/prepare_network.py index 90d6ed2e..0ef8d9aa 100755 --- a/scripts/prepare_network.py +++ 
b/scripts/prepare_network.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -58,12 +58,15 @@ Description """ import logging -import re import numpy as np import pandas as pd import pypsa -from _helpers import configure_logging +from _helpers import ( + configure_logging, + set_scenario_config, + update_config_from_wildcards, +) from add_electricity import load_costs, update_transmission_costs from pypsa.descriptors import expand_series @@ -72,6 +75,28 @@ idx = pd.IndexSlice logger = logging.getLogger(__name__) +def maybe_adjust_costs_and_potentials(n, adjustments): + if not adjustments: + return + + for attr, carrier_factor in adjustments.items(): + for carrier, factor in carrier_factor.items(): + # beware if factor is 0 and p_nom_max is np.inf, 0*np.inf is nan + if carrier == "AC": # lines do not have carrier + n.lines[attr] *= factor + continue + comps = { + "p_nom_max": {"Generator", "Link", "StorageUnit"}, + "e_nom_max": {"Store"}, + "capital_cost": {"Generator", "Link", "StorageUnit", "Store"}, + "marginal_cost": {"Generator", "Link", "StorageUnit", "Store"}, + } + for c in n.iterate_components(comps[attr]): + sel = c.df.index[c.df.carrier == carrier] + c.df.loc[sel, attr] *= factor + logger.info(f"changing {attr} for {carrier} by factor {factor}") + + def add_co2limit(n, co2limit, Nyears=1.0): n.add( "GlobalConstraint", @@ -195,7 +220,7 @@ def apply_time_segmentation(n, segments, solver_name="cbc"): logger.info(f"Aggregating time series to {segments} segments.") try: import tsam.timeseriesaggregation as tsam - except: + except ImportError: raise ModuleNotFoundError( "Optional dependency 'tsam' not found." 
"Install via 'pip install tsam'" ) @@ -266,12 +291,12 @@ def set_line_nom_max( n.lines["s_nom_max"] = n.lines["s_nom"] + s_nom_max_ext if np.isfinite(p_nom_max_ext) and p_nom_max_ext > 0: - logger.info(f"Limiting line extensions to {p_nom_max_ext} MW") + logger.info(f"Limiting link extensions to {p_nom_max_ext} MW") hvdc = n.links.index[n.links.carrier == "DC"] n.links.loc[hvdc, "p_nom_max"] = n.links.loc[hvdc, "p_nom"] + p_nom_max_ext - n.lines.s_nom_max.clip(upper=s_nom_max_set, inplace=True) - n.links.p_nom_max.clip(upper=p_nom_max_set, inplace=True) + n.lines["s_nom_max"] = n.lines.s_nom_max.clip(upper=s_nom_max_set) + n.links["p_nom_max"] = n.links.p_nom_max.clip(upper=p_nom_max_set) if __name__ == "__main__": @@ -279,11 +304,11 @@ if __name__ == "__main__": from _helpers import mock_snakemake snakemake = mock_snakemake( - "prepare_network", simpl="", clusters="37", ll="v1.0", opts="Ept" + "prepare_network", simpl="", clusters="37", ll="v1.0", opts="Co2L-4H" ) configure_logging(snakemake) - - opts = snakemake.wildcards.opts.split("-") + set_scenario_config(snakemake) + update_config_from_wildcards(snakemake.config, snakemake.wildcards) n = pypsa.Network(snakemake.input[0]) Nyears = n.snapshot_weightings.objective.sum() / 8760.0 @@ -296,77 +321,36 @@ if __name__ == "__main__": set_line_s_max_pu(n, snakemake.params.lines["s_max_pu"]) - for o in opts: - m = re.match(r"^\d+h$", o, re.IGNORECASE) - if m is not None: - n = average_every_nhours(n, m.group(0)) - break + # temporal averaging + time_resolution = snakemake.params.time_resolution + is_string = isinstance(time_resolution, str) + if is_string and time_resolution.lower().endswith("h"): + n = average_every_nhours(n, time_resolution) - for o in opts: - m = re.match(r"^\d+seg$", o, re.IGNORECASE) - if m is not None: - solver_name = snakemake.config["solving"]["solver"]["name"] - n = apply_time_segmentation(n, m.group(0)[:-3], solver_name) - break + # segments with package tsam + if is_string and 
time_resolution.lower().endswith("seg"): + solver_name = snakemake.config["solving"]["solver"]["name"] + segments = int(time_resolution.replace("seg", "")) + n = apply_time_segmentation(n, segments, solver_name) - for o in opts: - if "Co2L" in o: - m = re.findall("[0-9]*\.?[0-9]+$", o) - if len(m) > 0: - co2limit = float(m[0]) * snakemake.params.co2base - add_co2limit(n, co2limit, Nyears) - logger.info("Setting CO2 limit according to wildcard value.") - else: - add_co2limit(n, snakemake.params.co2limit, Nyears) - logger.info("Setting CO2 limit according to config value.") - break + if snakemake.params.co2limit_enable: + add_co2limit(n, snakemake.params.co2limit, Nyears) - for o in opts: - if "CH4L" in o: - m = re.findall("[0-9]*\.?[0-9]+$", o) - if len(m) > 0: - limit = float(m[0]) * 1e6 - add_gaslimit(n, limit, Nyears) - logger.info("Setting gas usage limit according to wildcard value.") - else: - add_gaslimit(n, snakemake.params.gaslimit, Nyears) - logger.info("Setting gas usage limit according to config value.") - break + if snakemake.params.gaslimit_enable: + add_gaslimit(n, snakemake.params.gaslimit, Nyears) - for o in opts: - if "+" not in o: - continue - oo = o.split("+") - suptechs = map(lambda c: c.split("-", 2)[0], n.carriers.index) - if oo[0].startswith(tuple(suptechs)): - carrier = oo[0] - # handles only p_nom_max as stores and lines have no potentials - attr_lookup = {"p": "p_nom_max", "c": "capital_cost", "m": "marginal_cost"} - attr = attr_lookup[oo[1][0]] - factor = float(oo[1][1:]) - if carrier == "AC": # lines do not have carrier - n.lines[attr] *= factor - else: - comps = {"Generator", "Link", "StorageUnit", "Store"} - for c in n.iterate_components(comps): - sel = c.df.carrier.str.contains(carrier) - c.df.loc[sel, attr] *= factor + maybe_adjust_costs_and_potentials(n, snakemake.params["adjustments"]) - for o in opts: - if "Ept" in o: - logger.info( - "Setting time dependent emission prices according spot market price" - ) - 
add_dynamic_emission_prices(n) - elif "Ep" in o: - m = re.findall("[0-9]*\.?[0-9]+$", o) - if len(m) > 0: - logger.info("Setting emission prices according to wildcard value.") - add_emission_prices(n, dict(co2=float(m[0]))) - else: - logger.info("Setting emission prices according to config value.") - add_emission_prices(n, snakemake.params.costs["emission_prices"]) - break + emission_prices = snakemake.params.costs["emission_prices"] + if emission_prices["co2_monthly_prices"]: + logger.info( + "Setting time dependent emission prices according spot market price" + ) + add_dynamic_emission_prices(n) + elif emission_prices["enable"]: + add_emission_prices( + n, dict(co2=snakemake.params.costs["emission_prices"]["co2"]) + ) ll_type, factor = snakemake.wildcards.ll[0], snakemake.wildcards.ll[1:] set_transmission_limit(n, ll_type, factor, costs, Nyears) @@ -379,10 +363,9 @@ if __name__ == "__main__": p_nom_max_ext=snakemake.params.links.get("max_extension", np.inf), ) - if "ATK" in opts: - enforce_autarky(n) - elif "ATKc" in opts: - enforce_autarky(n, only_crossborder=True) + if snakemake.params.autarky["enable"]: + only_crossborder = snakemake.params.autarky["by_country"] + enforce_autarky(n, only_crossborder=only_crossborder) n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards))) n.export_to_netcdf(snakemake.output[0]) diff --git a/scripts/prepare_perfect_foresight.py b/scripts/prepare_perfect_foresight.py index 00f23fab..f7e8495e 100644 --- a/scripts/prepare_perfect_foresight.py +++ b/scripts/prepare_perfect_foresight.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -12,7 +12,11 @@ import re import numpy as np import pandas as pd import pypsa -from _helpers import update_config_with_sector_opts +from _helpers import ( + configure_logging, + set_scenario_config, + update_config_from_wildcards, 
+) from add_existing_baseyear import add_build_year_to_new_assets from pypsa.descriptors import expand_series from pypsa.io import import_components_from_dataframe @@ -56,7 +60,9 @@ def get_investment_weighting(time_weighting, r=0.01): end = time_weighting.cumsum() start = time_weighting.cumsum().shift().fillna(0) return pd.concat([start, end], axis=1).apply( - lambda x: sum(get_social_discount(t, r) for t in range(int(x[0]), int(x[1]))), + lambda x: sum( + get_social_discount(t, r) for t in range(int(x.iloc[0]), int(x.iloc[1])) + ), axis=1, ) @@ -162,15 +168,17 @@ def concat_networks(years): add_build_year_to_new_assets(network, year) # static ---------------------------------- - # (1) add buses and carriers - for component in network.iterate_components(["Bus", "Carrier"]): - df_year = component.df - # get missing assets - missing = get_missing(df_year, n, component.list_name) - import_components_from_dataframe(n, missing, component.name) - # (2) add generators, links, stores and loads for component in network.iterate_components( - ["Generator", "Link", "Store", "Load", "Line", "StorageUnit"] + [ + "Bus", + "Carrier", + "Generator", + "Link", + "Store", + "Load", + "Line", + "StorageUnit", + ] ): df_year = component.df.copy() missing = get_missing(df_year, n, component.list_name) @@ -186,7 +194,7 @@ def concat_networks(years): pnl = getattr(n, component.list_name + "_t") for k in iterkeys(component.pnl): pnl_year = component.pnl[k].copy().reindex(snapshots, level=1) - if pnl_year.empty and ~(component.name == "Load" and k == "p_set"): + if pnl_year.empty and (not (component.name == "Load" and k == "p_set")): continue if component.name == "Load": static_load = network.loads.loc[network.loads.p_set != 0] @@ -199,8 +207,13 @@ def concat_networks(years): pnl[k].loc[pnl_year.index, pnl_year.columns] = pnl_year else: - # this is to avoid adding multiple times assets with - # infinite lifetime as ror + # For components that aren't new, we just extend + # time-varying 
data from the previous investment + # period. + if i > 0: + pnl[k].loc[(year,)] = pnl[k].loc[(years[i - 1],)].values + + # Now, add time-varying data for new components. cols = pnl_year.columns.difference(pnl[k].columns) pnl[k] = pd.concat([pnl[k], pnl_year[cols]], axis=1) @@ -214,7 +227,7 @@ def concat_networks(years): # set investment periods n.investment_periods = n.snapshots.levels[0] # weighting of the investment period -> assuming last period same weighting as the period before - time_w = n.investment_periods.to_series().diff().shift(-1).fillna(method="ffill") + time_w = n.investment_periods.to_series().diff().shift(-1).ffill() n.investment_period_weightings["years"] = time_w # set objective weightings objective_w = get_investment_weighting( @@ -295,17 +308,14 @@ def set_all_phase_outs(n): n.mremove("Link", remove_i) -def set_carbon_constraints(n, opts): +def set_carbon_constraints(n): """ Add global constraints for carbon emissions. """ - budget = None - for o in opts: - # other budgets - m = re.match(r"^\d+p\d$", o, re.IGNORECASE) - if m is not None: - budget = snakemake.config["co2_budget"][m.group(0)] * 1e9 - if budget != None: + budget = snakemake.config["co2_budget"] + if budget and isinstance(budget, float): + budget *= 1e9 # convert to t CO2 + logger.info(f"add carbon budget of {budget}") n.add( "GlobalConstraint", @@ -332,7 +342,7 @@ def set_carbon_constraints(n, opts): ) # set minimum CO2 emission constraint to avoid too fast reduction - if "co2min" in opts: + if "co2min" in snakemake.wildcards.sector_opts.split("-"): emissions_1990 = 4.53693 emissions_2019 = 3.344096 target_2030 = 0.45 * emissions_1990 @@ -428,7 +438,7 @@ def apply_time_segmentation_perfect( """ try: import tsam.timeseriesaggregation as tsam - except: + except ImportError: raise ModuleNotFoundError( "Optional dependency 'tsam' not found." 
"Install via 'pip install tsam'" ) @@ -478,21 +488,6 @@ def apply_time_segmentation_perfect( return n -def set_temporal_aggregation_SEG(n, opts, solver_name): - """ - Aggregate network temporally with tsam. - """ - for o in opts: - # segments with package tsam - m = re.match(r"^(\d+)seg$", o, re.IGNORECASE) - if m is not None: - segments = int(m[1]) - logger.info(f"Use temporal segmentation with {segments} segments") - n = apply_time_segmentation_perfect(n, segments, solver_name=solver_name) - break - return n - - if __name__ == "__main__": if "snakemake" not in globals(): from _helpers import mock_snakemake @@ -503,17 +498,15 @@ if __name__ == "__main__": opts="", clusters="37", ll="v1.5", - sector_opts="1p7-4380H-T-H-B-I-A-solar+p3-dist1", + sector_opts="1p7-4380H-T-H-B-I-A-dist1", ) + configure_logging(snakemake) + set_scenario_config(snakemake) - update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts) + update_config_from_wildcards(snakemake.config, snakemake.wildcards) # parameters ----------------------------------------------------------- years = snakemake.config["scenario"]["planning_horizons"] - opts = snakemake.wildcards.sector_opts.split("-") - social_discountrate = snakemake.config["costs"]["social_discountrate"] - for o in opts: - if "sdr" in o: - social_discountrate = float(o.replace("sdr", "")) / 100 + social_discountrate = snakemake.params.costs["social_discountrate"] logger.info( f"Concat networks of investment period {years} with social discount rate of {social_discountrate * 100}%" @@ -523,9 +516,10 @@ if __name__ == "__main__": n = concat_networks(years) # temporal aggregate - opts = snakemake.wildcards.sector_opts.split("-") solver_name = snakemake.config["solving"]["solver"]["name"] - n = set_temporal_aggregation_SEG(n, opts, solver_name) + segments = snakemake.params.time_resolution + if isinstance(segments, (int, float)): + n = apply_time_segmentation_perfect(n, segments, solver_name=solver_name) # adjust global 
constraints lv limit if the same for all years n = adjust_lvlimit(n) @@ -541,8 +535,7 @@ if __name__ == "__main__": add_H2_boilers(n) # set carbon constraints - opts = snakemake.wildcards.sector_opts.split("-") - n = set_carbon_constraints(n, opts) + n = set_carbon_constraints(n) # export network n.export_to_netcdf(snakemake.output[0]) diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py old mode 100644 new mode 100755 index d5ca27a7..2e8bf6fd --- a/scripts/prepare_sector_network.py +++ b/scripts/prepare_sector_network.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -9,33 +9,30 @@ technologies for the buildings, transport and industry sectors. import logging import os -import re from itertools import product +from types import SimpleNamespace import networkx as nx import numpy as np import pandas as pd import pypsa import xarray as xr -from _helpers import generate_periodic_profiles, update_config_with_sector_opts -from add_electricity import calculate_annuity, sanitize_carriers +from _helpers import ( + configure_logging, + set_scenario_config, + update_config_from_wildcards, +) +from add_electricity import calculate_annuity, sanitize_carriers, sanitize_locations from build_energy_totals import build_co2_totals, build_eea_co2, build_eurostat_co2 from networkx.algorithms import complement from networkx.algorithms.connectivity.edge_augmentation import k_edge_augmentation +from prepare_network import maybe_adjust_costs_and_potentials from pypsa.geo import haversine_pts from pypsa.io import import_components_from_dataframe from scipy.stats import beta -logger = logging.getLogger(__name__) - -from types import SimpleNamespace - spatial = SimpleNamespace() - -from packaging.version import Version, parse - -pd_version = parse(pd.__version__) -agg_group_kwargs = dict(numeric_only=False) 
if pd_version >= Version("1.3") else {} +logger = logging.getLogger(__name__) def define_spatial(nodes, options): @@ -95,12 +92,17 @@ def define_spatial(nodes, options): spatial.gas.industry = nodes + " gas for industry" spatial.gas.industry_cc = nodes + " gas for industry CC" spatial.gas.biogas_to_gas = nodes + " biogas to gas" + spatial.gas.biogas_to_gas_cc = nodes + "biogas to gas CC" else: spatial.gas.nodes = ["EU gas"] spatial.gas.locations = ["EU"] spatial.gas.biogas = ["EU biogas"] spatial.gas.industry = ["gas for industry"] spatial.gas.biogas_to_gas = ["EU biogas to gas"] + if options.get("biomass_spatial", options["biomass_transport"]): + spatial.gas.biogas_to_gas_cc = nodes + " biogas to gas CC" + else: + spatial.gas.biogas_to_gas_cc = ["EU biogas to gas CC"] if options.get("co2_spatial", options["co2network"]): spatial.gas.industry_cc = nodes + " gas for industry CC" else: @@ -127,15 +129,43 @@ def define_spatial(nodes, options): spatial.h2.locations = nodes # methanol + + # beware: unlike other carriers, uses locations rather than locations+carriername + # this allows to avoid separation between nodes and locations + spatial.methanol = SimpleNamespace() + spatial.methanol.nodes = ["EU methanol"] spatial.methanol.locations = ["EU"] + if options["regional_methanol_demand"]: + spatial.methanol.demand_locations = nodes + spatial.methanol.shipping = nodes + " shipping methanol" + else: + spatial.methanol.demand_locations = ["EU"] + spatial.methanol.shipping = ["EU shipping methanol"] + # oil spatial.oil = SimpleNamespace() + spatial.oil.nodes = ["EU oil"] spatial.oil.locations = ["EU"] + if options["regional_oil_demand"]: + spatial.oil.demand_locations = nodes + spatial.oil.naphtha = nodes + " naphtha for industry" + spatial.oil.kerosene = nodes + " kerosene for aviation" + spatial.oil.shipping = nodes + " shipping oil" + spatial.oil.agriculture_machinery = nodes + " agriculture machinery oil" + spatial.oil.land_transport = nodes + " land transport oil" + 
else: + spatial.oil.demand_locations = ["EU"] + spatial.oil.naphtha = ["EU naphtha for industry"] + spatial.oil.kerosene = ["EU kerosene for aviation"] + spatial.oil.shipping = ["EU shipping oil"] + spatial.oil.agriculture_machinery = ["EU agriculture machinery oil"] + spatial.oil.land_transport = ["EU land transport oil"] + # uranium spatial.uranium = SimpleNamespace() spatial.uranium.nodes = ["EU uranium"] @@ -146,6 +176,13 @@ def define_spatial(nodes, options): spatial.coal.nodes = ["EU coal"] spatial.coal.locations = ["EU"] + if options["regional_coal_demand"]: + spatial.coal.demand_locations = nodes + spatial.coal.industry = nodes + " coal for industry" + else: + spatial.coal.demand_locations = ["EU"] + spatial.coal.industry = ["EU coal for industry"] + # lignite spatial.lignite = SimpleNamespace() spatial.lignite.nodes = ["EU lignite"] @@ -154,18 +191,16 @@ def define_spatial(nodes, options): return spatial -from types import SimpleNamespace - spatial = SimpleNamespace() -def emission_sectors_from_opts(opts): +def determine_emission_sectors(options): sectors = ["electricity"] - if "T" in opts: + if options["transport"]: sectors += ["rail non-elec", "road non-elec"] - if "H" in opts: + if options["heating"]: sectors += ["residential non-elec", "services non-elec"] - if "I" in opts: + if options["industry"]: sectors += [ "industrial non-elec", "industrial processes", @@ -174,7 +209,7 @@ def emission_sectors_from_opts(opts): "domestic navigation", "international navigation", ] - if "A" in opts: + if options["agriculture"]: sectors += ["agriculture"] return sectors @@ -184,11 +219,36 @@ def get(item, investment_year=None): """ Check whether item depends on investment year. """ - return item[investment_year] if isinstance(item, dict) else item + if not isinstance(item, dict): + return item + elif investment_year in item.keys(): + return item[investment_year] + else: + logger.warning( + f"Investment key {investment_year} not found in dictionary {item}." 
+ ) + keys = sorted(item.keys()) + if investment_year < keys[0]: + logger.warning(f"Lower than minimum key. Taking minimum key {keys[0]}") + return item[keys[0]] + elif investment_year > keys[-1]: + logger.warning(f"Higher than maximum key. Taking maximum key {keys[0]}") + return item[keys[-1]] + else: + logger.warning( + "Interpolate linearly between the next lower and next higher year." + ) + lower_key = max(k for k in keys if k < investment_year) + higher_key = min(k for k in keys if k > investment_year) + lower = item[lower_key] + higher = item[higher_key] + return lower + (higher - lower) * (investment_year - lower_key) / ( + higher_key - lower_key + ) def co2_emissions_year( - countries, input_eurostat, opts, emissions_scope, report_year, input_co2, year + countries, input_eurostat, options, emissions_scope, input_co2, year ): """ Calculate CO2 emissions in one specific year (e.g. 1990 or 2018). @@ -198,15 +258,13 @@ def co2_emissions_year( # TODO: read Eurostat data from year > 2014 # this only affects the estimation of CO2 emissions for BA, RS, AL, ME, MK if year > 2014: - eurostat_co2 = build_eurostat_co2( - input_eurostat, countries, report_year, year=2014 - ) + eurostat_co2 = build_eurostat_co2(input_eurostat, countries, 2014) else: - eurostat_co2 = build_eurostat_co2(input_eurostat, countries, report_year, year) + eurostat_co2 = build_eurostat_co2(input_eurostat, countries, year) co2_totals = build_co2_totals(countries, eea_co2, eurostat_co2) - sectors = emission_sectors_from_opts(opts) + sectors = determine_emission_sectors(options) co2_emissions = co2_totals.loc[countries, sectors].sum().sum() @@ -217,11 +275,10 @@ def co2_emissions_year( # TODO: move to own rule with sector-opts wildcard? -def build_carbon_budget(o, input_eurostat, fn, emissions_scope, report_year): +def build_carbon_budget(o, input_eurostat, fn, emissions_scope, input_co2, options): """ Distribute carbon budget following beta or exponential transition path. """ - # opts? 
if "be" in o: # beta decay @@ -237,9 +294,8 @@ def build_carbon_budget(o, input_eurostat, fn, emissions_scope, report_year): e_1990 = co2_emissions_year( countries, input_eurostat, - opts, + options, emissions_scope, - report_year, input_co2, year=1990, ) @@ -248,14 +304,15 @@ def build_carbon_budget(o, input_eurostat, fn, emissions_scope, report_year): e_0 = co2_emissions_year( countries, input_eurostat, - opts, + options, emissions_scope, - report_year, input_co2, year=2018, ) planning_horizons = snakemake.params.planning_horizons + if not isinstance(planning_horizons, list): + planning_horizons = [planning_horizons] t_0 = planning_horizons[0] if "be" in o: @@ -390,8 +447,15 @@ def update_wind_solar_costs(n, costs): # code adapted from pypsa-eur/scripts/add_electricity.py for connection in ["dc", "ac"]: tech = "offwind-" + connection - profile = snakemake.input["profile_offwind_" + connection] + if tech not in n.generators.carrier.values: + continue + profile = snakemake.input["profile_offwind-" + connection] with xr.open_dataset(profile) as ds: + + # if-statement for compatibility with old profiles + if "year" in ds.indexes: + ds = ds.sel(year=ds.year.min(), drop=True) + underwater_fraction = ds["underwater_fraction"].to_pandas() connection_cost = ( snakemake.params.length_factor @@ -425,13 +489,13 @@ def update_wind_solar_costs(n, costs): logger.info( "Added connection cost of {:0.0f}-{:0.0f} Eur/MW/a to {}".format( - connection_cost[0].min(), connection_cost[0].max(), tech + connection_cost.min(), connection_cost.max(), tech ) ) - n.generators.loc[ - n.generators.carrier == tech, "capital_cost" - ] = capital_cost.rename(index=lambda node: node + " " + tech) + n.generators.loc[n.generators.carrier == tech, "capital_cost"] = ( + capital_cost.rename(index=lambda node: node + " " + tech) + ) def add_carrier_buses(n, carrier, nodes=None): @@ -452,10 +516,11 @@ def add_carrier_buses(n, carrier, nodes=None): n.add("Carrier", carrier) unit = "MWh_LHV" if carrier == 
"gas" else "MWh_th" + # preliminary value for non-gas carriers to avoid zeros + capital_cost = costs.at["gas storage", "fixed"] if carrier == "gas" else 0.02 n.madd("Bus", nodes, location=location, carrier=carrier, unit=unit) - # capital cost could be corrected to e.g. 0.2 EUR/kWh * annuity and O&M n.madd( "Store", nodes + " Store", @@ -463,8 +528,7 @@ def add_carrier_buses(n, carrier, nodes=None): e_nom_extendable=True, e_cyclic=True, carrier=carrier, - capital_cost=0.2 - * costs.at[carrier, "discount rate"], # preliminary value to avoid zeros + capital_cost=capital_cost, ) n.madd( @@ -517,7 +581,18 @@ def patch_electricity_network(n): n.loads_t.p_set.rename(lambda x: x.strip(), axis=1, inplace=True) -def add_co2_tracking(n, options): +def add_eu_bus(n, x=-5.5, y=46): + """ + Add EU bus to the network. + + This cosmetic bus serves as a reference point for the location of + the EU buses in the plots and summaries. + """ + n.add("Bus", "EU", location="EU", x=x, y=y, carrier="none") + n.add("Carrier", "none") + + +def add_co2_tracking(n, costs, options): # minus sign because opposite to how fossil fuels used: # CH4 burning puts CH4 down, atmosphere up n.add("Carrier", "co2", co2_emissions=-1.0) @@ -535,7 +610,7 @@ def add_co2_tracking(n, options): bus="co2 atmosphere", ) - # this tracks CO2 stored, e.g. underground + # add CO2 tanks n.madd( "Bus", spatial.co2.nodes, @@ -544,6 +619,39 @@ def add_co2_tracking(n, options): unit="t_co2", ) + n.madd( + "Store", + spatial.co2.nodes, + e_nom_extendable=True, + capital_cost=costs.at["CO2 storage tank", "fixed"], + carrier="co2 stored", + e_cyclic=True, + bus=spatial.co2.nodes, + ) + n.add("Carrier", "co2 stored") + + # this tracks CO2 sequestered, e.g. 
underground + sequestration_buses = pd.Index(spatial.co2.nodes).str.replace( + " stored", " sequestered" + ) + n.madd( + "Bus", + sequestration_buses, + location=spatial.co2.locations, + carrier="co2 sequestered", + unit="t_co2", + ) + + n.madd( + "Link", + sequestration_buses, + bus0=spatial.co2.nodes, + bus1=sequestration_buses, + carrier="co2 sequestered", + efficiency=1.0, + p_nom_extendable=True, + ) + if options["regional_co2_sequestration_potential"]["enable"]: upper_limit = ( options["regional_co2_sequestration_potential"]["max_size"] * 1e3 @@ -559,22 +667,22 @@ def add_co2_tracking(n, options): .mul(1e6) / annualiser ) # t - e_nom_max = e_nom_max.rename(index=lambda x: x + " co2 stored") + e_nom_max = e_nom_max.rename(index=lambda x: x + " co2 sequestered") else: e_nom_max = np.inf n.madd( "Store", - spatial.co2.nodes, + sequestration_buses, e_nom_extendable=True, e_nom_max=e_nom_max, capital_cost=options["co2_sequestration_cost"], - carrier="co2 stored", - bus=spatial.co2.nodes, + bus=sequestration_buses, lifetime=options["co2_sequestration_lifetime"], + carrier="co2 sequestered", ) - n.add("Carrier", "co2 stored") + n.add("Carrier", "co2 sequestered") if options["co2_vent"]: n.madd( @@ -603,6 +711,8 @@ def add_co2_network(n, costs): * co2_links.length ) capital_cost = cost_onshore + cost_submarine + cost_factor = snakemake.config["sector"]["co2_network_cost_factor"] + capital_cost *= cost_factor n.madd( "Link", @@ -646,38 +756,38 @@ def add_dac(n, costs): heat_buses = n.buses.index[n.buses.carrier.isin(heat_carriers)] locations = n.buses.location[heat_buses] - efficiency2 = -( + electricity_input = ( costs.at["direct air capture", "electricity-input"] + costs.at["direct air capture", "compression-electricity-input"] - ) - efficiency3 = -( + ) # MWh_el / tCO2 + heat_input = ( costs.at["direct air capture", "heat-input"] - costs.at["direct air capture", "compression-heat-output"] - ) + ) # MWh_th / tCO2 n.madd( "Link", heat_buses.str.replace(" heat", " 
DAC"), - bus0="co2 atmosphere", - bus1=spatial.co2.df.loc[locations, "nodes"].values, - bus2=locations.values, - bus3=heat_buses, + bus0=locations.values, + bus1=heat_buses, + bus2="co2 atmosphere", + bus3=spatial.co2.df.loc[locations, "nodes"].values, carrier="DAC", - capital_cost=costs.at["direct air capture", "fixed"], - efficiency=1.0, - efficiency2=efficiency2, - efficiency3=efficiency3, + capital_cost=costs.at["direct air capture", "fixed"] / electricity_input, + efficiency=-heat_input / electricity_input, + efficiency2=-1 / electricity_input, + efficiency3=1 / electricity_input, p_nom_extendable=True, lifetime=costs.at["direct air capture", "lifetime"], ) -def add_co2limit(n, nyears=1.0, limit=0.0): +def add_co2limit(n, options, nyears=1.0, limit=0.0): logger.info(f"Adding CO2 budget limit as per unit of 1990 levels of {limit}") countries = snakemake.params.countries - sectors = emission_sectors_from_opts(opts) + sectors = determine_emission_sectors(options) # convert Mt to tCO2 co2_totals = 1e6 * pd.read_csv(snakemake.input.co2_totals_name, index_col=0) @@ -691,6 +801,7 @@ def add_co2limit(n, nyears=1.0, limit=0.0): "CO2Limit", carrier_attribute="co2_emissions", sense="<=", + type="co2_atmosphere", constant=co2_limit, ) @@ -805,14 +916,13 @@ def add_ammonia(n, costs): bus2=nodes + " H2", p_nom_extendable=True, carrier="Haber-Bosch", - efficiency=1 - / ( - cf_industry["MWh_elec_per_tNH3_electrolysis"] - / cf_industry["MWh_NH3_per_tNH3"] - ), # output: MW_NH3 per MW_elec - efficiency2=-cf_industry["MWh_H2_per_tNH3_electrolysis"] - / cf_industry["MWh_elec_per_tNH3_electrolysis"], # input: MW_H2 per MW_elec - capital_cost=costs.at["Haber-Bosch", "fixed"], + efficiency=1 / costs.at["Haber-Bosch", "electricity-input"], + efficiency2=-costs.at["Haber-Bosch", "hydrogen-input"] + / costs.at["Haber-Bosch", "electricity-input"], + capital_cost=costs.at["Haber-Bosch", "fixed"] + / costs.at["Haber-Bosch", "electricity-input"], + marginal_cost=costs.at["Haber-Bosch", 
"VOM"] + / costs.at["Haber-Bosch", "electricity-input"], lifetime=costs.at["Haber-Bosch", "lifetime"], ) @@ -844,47 +954,6 @@ def add_ammonia(n, costs): ) -def add_wave(n, wave_cost_factor): - # TODO: handle in Snakefile - wave_fn = "data/WindWaveWEC_GLTB.xlsx" - - # in kW - capacity = pd.Series({"Attenuator": 750, "F2HB": 1000, "MultiPA": 600}) - - # in EUR/MW - annuity_factor = calculate_annuity(25, 0.07) + 0.03 - costs = ( - 1e6 - * wave_cost_factor - * annuity_factor - * pd.Series({"Attenuator": 2.5, "F2HB": 2, "MultiPA": 1.5}) - ) - - sheets = pd.read_excel( - wave_fn, - sheet_name=["FirthForth", "Hebrides"], - usecols=["Attenuator", "F2HB", "MultiPA"], - index_col=0, - skiprows=[0], - parse_dates=True, - ) - - wave = pd.concat( - [sheets[l].divide(capacity, axis=1) for l in locations], keys=locations, axis=1 - ) - - for wave_type in costs.index: - n.add( - "Generator", - "Hebrides " + wave_type, - bus="GB4 0", # TODO this location is hardcoded - p_nom_extendable=True, - carrier="wave", - capital_cost=costs[wave_type], - p_max_pu=wave["Hebrides", wave_type], - ) - - def insert_electricity_distribution_grid(n, costs): # TODO pop_layout? # TODO options? 
@@ -982,6 +1051,7 @@ def insert_electricity_distribution_grid(n, costs): "Store", nodes + " home battery", bus=nodes + " home battery", + location=nodes, e_cyclic=True, e_nom_extendable=True, carrier="home battery", @@ -1023,7 +1093,7 @@ def insert_gas_distribution_costs(n, costs): f"Inserting gas distribution grid with investment cost factor of {f_costs}" ) - capital_cost = costs.loc["electricity distribution grid"]["fixed"] * f_costs + capital_cost = costs.at["electricity distribution grid", "fixed"] * f_costs # gas boilers gas_b = n.links.index[ @@ -1100,6 +1170,7 @@ def add_storage_and_grids(n, costs): efficiency=costs.at["OCGT", "efficiency"], capital_cost=costs.at["OCGT", "fixed"] * costs.at["OCGT", "efficiency"], # NB: fixed cost is per MWel + marginal_cost=costs.at["OCGT", "VOM"], lifetime=costs.at["OCGT", "lifetime"], ) @@ -1160,7 +1231,7 @@ def add_storage_and_grids(n, costs): if options["gas_network"]: logger.info( - "Add natural gas infrastructure, incl. LNG terminals, production and entry-points." + "Add natural gas infrastructure, incl. LNG terminals, production, storage and entry-points." 
) if options["H2_retrofit"]: @@ -1205,10 +1276,25 @@ def add_storage_and_grids(n, costs): remove_i = n.generators[gas_i & internal_i].index n.generators.drop(remove_i, inplace=True) - p_nom = gas_input_nodes.sum(axis=1).rename(lambda x: x + " gas") + input_types = ["lng", "pipeline", "production"] + p_nom = gas_input_nodes[input_types].sum(axis=1).rename(lambda x: x + " gas") n.generators.loc[gas_i, "p_nom_extendable"] = False n.generators.loc[gas_i, "p_nom"] = p_nom + # add existing gas storage capacity + gas_i = n.stores.carrier == "gas" + e_nom = ( + gas_input_nodes["storage"] + .rename(lambda x: x + " gas Store") + .reindex(n.stores.index) + .fillna(0.0) + * 1e3 + ) # MWh_LHV + e_nom.clip( + upper=e_nom.quantile(0.98), inplace=True + ) # limit extremely large storage + n.stores.loc[gas_i, "e_nom_min"] = e_nom + # add candidates for new gas pipelines to achieve full connectivity G = nx.Graph() @@ -1343,6 +1429,7 @@ def add_storage_and_grids(n, costs): bus2=spatial.co2.nodes, p_nom_extendable=True, carrier="Sabatier", + p_min_pu=options.get("min_part_load_methanation", 0), efficiency=costs.at["methanation", "efficiency"], efficiency2=-costs.at["methanation", "efficiency"] * costs.at["gas", "CO2 intensity"], @@ -1351,23 +1438,6 @@ def add_storage_and_grids(n, costs): lifetime=costs.at["methanation", "lifetime"], ) - if options["helmeth"]: - n.madd( - "Link", - spatial.nodes, - suffix=" helmeth", - bus0=nodes, - bus1=spatial.gas.nodes, - bus2=spatial.co2.nodes, - carrier="helmeth", - p_nom_extendable=True, - efficiency=costs.at["helmeth", "efficiency"], - efficiency2=-costs.at["helmeth", "efficiency"] - * costs.at["gas", "CO2 intensity"], - capital_cost=costs.at["helmeth", "fixed"], - lifetime=costs.at["helmeth", "lifetime"], - ) - if options.get("coal_cc"): n.madd( "Link", @@ -1430,7 +1500,6 @@ def add_land_transport(n, costs): # TODO options? 
logger.info("Add land transport") - nhours = n.snapshot_weightings.generators.sum() transport = pd.read_csv( snakemake.input.transport_demand, index_col=0, parse_dates=True @@ -1467,8 +1536,8 @@ def add_land_transport(n, costs): n.madd( "Bus", nodes, - location=nodes, suffix=" EV battery", + location=nodes, carrier="Li ion", unit="MWh_el", ) @@ -1560,67 +1629,65 @@ def add_land_transport(n, costs): ice_efficiency = options["transport_internal_combustion_efficiency"] - n.madd( - "Load", - nodes, - suffix=" land transport oil", - bus=spatial.oil.nodes, - carrier="land transport oil", - p_set=ice_share / ice_efficiency * transport[nodes], - ) - - co2 = ( + p_set_land_transport_oil = ( ice_share / ice_efficiency - * transport[nodes].sum().sum() - / nhours - * costs.at["oil", "CO2 intensity"] + * transport[nodes].rename(columns=lambda x: x + " land transport oil") ) - n.add( + if not options["regional_oil_demand"]: + p_set_land_transport_oil = p_set_land_transport_oil.sum(axis=1).to_frame( + name="EU land transport oil" + ) + + n.madd( + "Bus", + spatial.oil.land_transport, + location=spatial.oil.demand_locations, + carrier="land transport oil", + unit="land transport", + ) + + n.madd( "Load", - "land transport oil emissions", - bus="co2 atmosphere", - carrier="land transport oil emissions", - p_set=-co2, + spatial.oil.land_transport, + bus=spatial.oil.land_transport, + carrier="land transport oil", + p_set=p_set_land_transport_oil, + ) + + n.madd( + "Link", + spatial.oil.land_transport, + bus0=spatial.oil.nodes, + bus1=spatial.oil.land_transport, + bus2="co2 atmosphere", + carrier="land transport oil", + efficiency2=costs.at["oil", "CO2 intensity"], + p_nom_extendable=True, ) def build_heat_demand(n): - # copy forward the daily average heat demand into each hour, so it can be multiplied by the intraday profile - daily_space_heat_demand = ( - xr.open_dataarray(snakemake.input.heat_demand_total) - .to_pandas() - .reindex(index=n.snapshots, method="ffill") + 
heat_demand_shape = ( + xr.open_dataset(snakemake.input.hourly_heat_demand_total) + .to_dataframe() + .unstack(level=1) ) - intraday_profiles = pd.read_csv(snakemake.input.heat_profile, index_col=0) - sectors = ["residential", "services"] uses = ["water", "space"] heat_demand = {} electric_heat_supply = {} for sector, use in product(sectors, uses): - weekday = list(intraday_profiles[f"{sector} {use} weekday"]) - weekend = list(intraday_profiles[f"{sector} {use} weekend"]) - weekly_profile = weekday * 5 + weekend * 2 - intraday_year_profile = generate_periodic_profiles( - daily_space_heat_demand.index.tz_localize("UTC"), - nodes=daily_space_heat_demand.columns, - weekly_profile=weekly_profile, - ) + name = f"{sector} {use}" - if use == "space": - heat_demand_shape = daily_space_heat_demand * intraday_year_profile - else: - heat_demand_shape = intraday_year_profile - - heat_demand[f"{sector} {use}"] = ( - heat_demand_shape / heat_demand_shape.sum() + heat_demand[name] = ( + heat_demand_shape[name] / heat_demand_shape[name].sum() ).multiply(pop_weighted_energy_totals[f"total {sector} {use}"]) * 1e6 - electric_heat_supply[f"{sector} {use}"] = ( - heat_demand_shape / heat_demand_shape.sum() + electric_heat_supply[name] = ( + heat_demand_shape[name] / heat_demand_shape[name].sum() ).multiply(pop_weighted_energy_totals[f"electricity {sector} {use}"]) * 1e6 heat_demand = pd.concat(heat_demand, axis=1) @@ -1630,7 +1697,7 @@ def build_heat_demand(n): electric_nodes = n.loads.index[n.loads.carrier == "electricity"] n.loads_t.p_set[electric_nodes] = ( n.loads_t.p_set[electric_nodes] - - electric_heat_supply.groupby(level=1, axis=1).sum()[electric_nodes] + - electric_heat_supply.T.groupby(level=1).sum().T[electric_nodes] ) return heat_demand @@ -1643,7 +1710,11 @@ def add_heat(n, costs): heat_demand = build_heat_demand(n) - nodes, dist_fraction, urban_fraction = create_nodes_for_heat_sector() + overdim_factor = options["overdimension_individual_heating"] + + district_heat_info 
= pd.read_csv(snakemake.input.district_heat_share, index_col=0) + dist_fraction = district_heat_info["district fraction of node"] + urban_fraction = district_heat_info["urban fraction"] # NB: must add costs of central heating afterwards (EUR 400 / kWpeak, 50a, 1% FOM from Fraunhofer ISE) @@ -1683,12 +1754,17 @@ def add_heat(n, costs): for name in heat_systems: name_type = "central" if name == "urban central" else "decentral" + if name == "urban central": + nodes = dist_fraction.index[dist_fraction > 0] + else: + nodes = pop_layout.index + n.add("Carrier", name + " heat") n.madd( "Bus", - nodes[name] + f" {name} heat", - location=nodes[name], + nodes + f" {name} heat", + location=nodes, carrier=name + " heat", unit="MWh_th", ) @@ -1696,8 +1772,9 @@ def add_heat(n, costs): if name == "urban central" and options.get("central_heat_vent"): n.madd( "Generator", - nodes[name] + f" {name} heat vent", - location=nodes[name], + nodes + f" {name} heat vent", + bus=nodes + f" {name} heat", + location=nodes, carrier=name + " heat vent", p_nom_extendable=True, p_max_pu=0, @@ -1710,11 +1787,11 @@ def add_heat(n, costs): for sector in sectors: # heat demand weighting if "rural" in name: - factor = 1 - urban_fraction[nodes[name]] + factor = 1 - urban_fraction[nodes] elif "urban central" in name: - factor = dist_fraction[nodes[name]] + factor = dist_fraction[nodes] elif "urban decentral" in name: - factor = urban_fraction[nodes[name]] - dist_fraction[nodes[name]] + factor = urban_fraction[nodes] - dist_fraction[nodes] else: raise NotImplementedError( f" {name} not in " f"heat systems: {heat_systems}" @@ -1723,15 +1800,17 @@ def add_heat(n, costs): if sector in name: heat_load = ( heat_demand[[sector + " water", sector + " space"]] - .groupby(level=1, axis=1) - .sum()[nodes[name]] + .T.groupby(level=1) + .sum() + .T[nodes] .multiply(factor) ) if name == "urban central": heat_load = ( - heat_demand.groupby(level=1, axis=1) - .sum()[nodes[name]] + heat_demand.T.groupby(level=1) + 
.sum() + .T[nodes] .multiply( factor * (1 + options["district_heating"]["district_heating_loss"]) ) @@ -1739,54 +1818,56 @@ def add_heat(n, costs): n.madd( "Load", - nodes[name], + nodes, suffix=f" {name} heat", - bus=nodes[name] + f" {name} heat", + bus=nodes + f" {name} heat", carrier=name + " heat", p_set=heat_load, ) ## Add heat pumps - heat_pump_type = "air" if "urban" in name else "ground" + heat_pump_types = ["air"] if "urban" in name else ["ground", "air"] - costs_name = f"{name_type} {heat_pump_type}-sourced heat pump" - efficiency = ( - cop[heat_pump_type][nodes[name]] - if options["time_dep_hp_cop"] - else costs.at[costs_name, "efficiency"] - ) + for heat_pump_type in heat_pump_types: + costs_name = f"{name_type} {heat_pump_type}-sourced heat pump" + efficiency = ( + cop[heat_pump_type][nodes] + if options["time_dep_hp_cop"] + else costs.at[costs_name, "efficiency"] + ) - n.madd( - "Link", - nodes[name], - suffix=f" {name} {heat_pump_type} heat pump", - bus0=nodes[name], - bus1=nodes[name] + f" {name} heat", - carrier=f"{name} {heat_pump_type} heat pump", - efficiency=efficiency, - capital_cost=costs.at[costs_name, "efficiency"] - * costs.at[costs_name, "fixed"], - p_nom_extendable=True, - lifetime=costs.at[costs_name, "lifetime"], - ) + n.madd( + "Link", + nodes, + suffix=f" {name} {heat_pump_type} heat pump", + bus0=nodes, + bus1=nodes + f" {name} heat", + carrier=f"{name} {heat_pump_type} heat pump", + efficiency=efficiency, + capital_cost=costs.at[costs_name, "efficiency"] + * costs.at[costs_name, "fixed"] + * overdim_factor, + p_nom_extendable=True, + lifetime=costs.at[costs_name, "lifetime"], + ) if options["tes"]: n.add("Carrier", name + " water tanks") n.madd( "Bus", - nodes[name] + f" {name} water tanks", - location=nodes[name], + nodes + f" {name} water tanks", + location=nodes, carrier=name + " water tanks", unit="MWh_th", ) n.madd( "Link", - nodes[name] + f" {name} water tanks charger", - bus0=nodes[name] + f" {name} heat", - bus1=nodes[name] 
+ f" {name} water tanks", + nodes + f" {name} water tanks charger", + bus0=nodes + f" {name} heat", + bus1=nodes + f" {name} water tanks", efficiency=costs.at["water tank charger", "efficiency"], carrier=name + " water tanks charger", p_nom_extendable=True, @@ -1794,29 +1875,20 @@ def add_heat(n, costs): n.madd( "Link", - nodes[name] + f" {name} water tanks discharger", - bus0=nodes[name] + f" {name} water tanks", - bus1=nodes[name] + f" {name} heat", + nodes + f" {name} water tanks discharger", + bus0=nodes + f" {name} water tanks", + bus1=nodes + f" {name} heat", carrier=name + " water tanks discharger", efficiency=costs.at["water tank discharger", "efficiency"], p_nom_extendable=True, ) - if isinstance(options["tes_tau"], dict): - tes_time_constant_days = options["tes_tau"][name_type] - else: - logger.warning( - "Deprecated: a future version will require you to specify 'tes_tau' ", - "for 'decentral' and 'central' separately.", - ) - tes_time_constant_days = ( - options["tes_tau"] if name_type == "decentral" else 180.0 - ) + tes_time_constant_days = options["tes_tau"][name_type] n.madd( "Store", - nodes[name] + f" {name} water tanks", - bus=nodes[name] + f" {name} water tanks", + nodes + f" {name} water tanks", + bus=nodes + f" {name} water tanks", e_cyclic=True, e_nom_extendable=True, carrier=name + " water tanks", @@ -1825,34 +1897,39 @@ def add_heat(n, costs): lifetime=costs.at[name_type + " water tank storage", "lifetime"], ) - if options["boilers"]: + if options["resistive_heaters"]: key = f"{name_type} resistive heater" n.madd( "Link", - nodes[name] + f" {name} resistive heater", - bus0=nodes[name], - bus1=nodes[name] + f" {name} heat", + nodes + f" {name} resistive heater", + bus0=nodes, + bus1=nodes + f" {name} heat", carrier=name + " resistive heater", efficiency=costs.at[key, "efficiency"], - capital_cost=costs.at[key, "efficiency"] * costs.at[key, "fixed"], + capital_cost=costs.at[key, "efficiency"] + * costs.at[key, "fixed"] + * overdim_factor, 
p_nom_extendable=True, lifetime=costs.at[key, "lifetime"], ) + if options["boilers"]: key = f"{name_type} gas boiler" n.madd( "Link", - nodes[name] + f" {name} gas boiler", + nodes + f" {name} gas boiler", p_nom_extendable=True, - bus0=spatial.gas.df.loc[nodes[name], "nodes"].values, - bus1=nodes[name] + f" {name} heat", + bus0=spatial.gas.df.loc[nodes, "nodes"].values, + bus1=nodes + f" {name} heat", bus2="co2 atmosphere", carrier=name + " gas boiler", efficiency=costs.at[key, "efficiency"], efficiency2=costs.at["gas", "CO2 intensity"], - capital_cost=costs.at[key, "efficiency"] * costs.at[key, "fixed"], + capital_cost=costs.at[key, "efficiency"] + * costs.at[key, "fixed"] + * overdim_factor, lifetime=costs.at[key, "lifetime"], ) @@ -1861,13 +1938,14 @@ def add_heat(n, costs): n.madd( "Generator", - nodes[name], + nodes, suffix=f" {name} solar thermal collector", - bus=nodes[name] + f" {name} heat", + bus=nodes + f" {name} heat", carrier=name + " solar thermal", p_nom_extendable=True, - capital_cost=costs.at[name_type + " solar thermal", "fixed"], - p_max_pu=solar_thermal[nodes[name]], + capital_cost=costs.at[name_type + " solar thermal", "fixed"] + * overdim_factor, + p_max_pu=solar_thermal[nodes], lifetime=costs.at[name_type + " solar thermal", "lifetime"], ) @@ -1875,10 +1953,10 @@ def add_heat(n, costs): # add gas CHP; biomass CHP is added in biomass section n.madd( "Link", - nodes[name] + " urban central gas CHP", - bus0=spatial.gas.df.loc[nodes[name], "nodes"].values, - bus1=nodes[name], - bus2=nodes[name] + " urban central heat", + nodes + " urban central gas CHP", + bus0=spatial.gas.df.loc[nodes, "nodes"].values, + bus1=nodes, + bus2=nodes + " urban central heat", bus3="co2 atmosphere", carrier="urban central gas CHP", p_nom_extendable=True, @@ -1894,12 +1972,12 @@ def add_heat(n, costs): n.madd( "Link", - nodes[name] + " urban central gas CHP CC", - bus0=spatial.gas.df.loc[nodes[name], "nodes"].values, - bus1=nodes[name], - bus2=nodes[name] + " urban 
central heat", + nodes + " urban central gas CHP CC", + bus0=spatial.gas.df.loc[nodes, "nodes"].values, + bus1=nodes, + bus2=nodes + " urban central heat", bus3="co2 atmosphere", - bus4=spatial.co2.df.loc[nodes[name], "nodes"].values, + bus4=spatial.co2.df.loc[nodes, "nodes"].values, carrier="urban central gas CHP CC", p_nom_extendable=True, capital_cost=costs.at["central gas CHP", "fixed"] @@ -1931,11 +2009,11 @@ def add_heat(n, costs): if options["chp"] and options["micro_chp"] and name != "urban central": n.madd( "Link", - nodes[name] + f" {name} micro gas CHP", + nodes + f" {name} micro gas CHP", p_nom_extendable=True, - bus0=spatial.gas.df.loc[nodes[name], "nodes"].values, - bus1=nodes[name], - bus2=nodes[name] + f" {name} heat", + bus0=spatial.gas.df.loc[nodes, "nodes"].values, + bus1=nodes, + bus2=nodes + f" {name} heat", bus3="co2 atmosphere", carrier=name + " micro gas CHP", efficiency=costs.at["micro CHP", "efficiency"], @@ -1948,13 +2026,6 @@ def add_heat(n, costs): if options["retrofitting"]["retro_endogen"]: logger.info("Add retrofitting endogenously") - # resample heat demand temporal 'heat_demand_r' depending on in config - # specified temporal resolution, to not overestimate retrofitting - hours = list(filter(re.compile(r"^\d+h$", re.IGNORECASE).search, opts)) - if len(hours) == 0: - hours = [n.snapshots[1] - n.snapshots[0]] - heat_demand_r = heat_demand.resample(hours[0]).mean() - # retrofitting data 'retro_data' with 'costs' [EUR/m^2] and heat # demand 'dE' [per unit of original heat demand] for each country and # different retrofitting strengths [additional insulation thickness in m] @@ -1972,12 +2043,12 @@ def add_heat(n, costs): # share of space heat demand 'w_space' of total heat demand w_space = {} for sector in sectors: - w_space[sector] = heat_demand_r[sector + " space"] / ( - heat_demand_r[sector + " space"] + heat_demand_r[sector + " water"] + w_space[sector] = heat_demand[sector + " space"] / ( + heat_demand[sector + " space"] + 
heat_demand[sector + " water"] ) w_space["tot"] = ( - heat_demand_r["services space"] + heat_demand_r["residential space"] - ) / heat_demand_r.groupby(level=[1], axis=1).sum() + heat_demand["services space"] + heat_demand["residential space"] + ) / heat_demand.T.groupby(level=[1]).sum().T for name in n.loads[ n.loads.carrier.isin([x + " heat" for x in heat_systems]) @@ -1986,18 +2057,28 @@ def add_heat(n, costs): ct = pop_layout.loc[node, "ct"] # weighting 'f' depending on the size of the population at the node - f = urban_fraction[node] if "urban" in name else (1 - urban_fraction[node]) + if "urban central" in name: + f = dist_fraction[node] + elif "urban decentral" in name: + f = urban_fraction[node] - dist_fraction[node] + else: + f = 1 - urban_fraction[node] if f == 0: continue # get sector name ("residential"/"services"/or both "tot" for urban central) - sec = [x if x in name else "tot" for x in sectors][0] + if "urban central" in name: + sec = "tot" + if "residential" in name: + sec = "residential" + if "services" in name: + sec = "services" # get floor aread at node and region (urban/rural) in m^2 floor_area_node = ( pop_layout.loc[node].fraction * floor_area.loc[ct, "value"] * 10**6 ).loc[sec] * f # total heat demand at node [MWh] - demand = n.loads_t.p_set[name].resample(hours[0]).mean() + demand = n.loads_t.p_set[name] # space heat demand at node [MWh] space_heat_demand = demand * w_space[sec][node] @@ -2034,14 +2115,15 @@ def add_heat(n, costs): strengths = strengths.drop(s) # reindex normed time profile of space heat demand back to hourly resolution - space_pu = space_pu.reindex(index=heat_demand.index).fillna(method="ffill") + space_pu = space_pu.reindex(index=heat_demand.index).ffill() # add for each retrofitting strength a generator with heat generation profile following the profile of the heat demand for strength in strengths: + node_name = " ".join(name.split(" ")[2::]) n.madd( "Generator", [node], - suffix=" retrofitting " + strength + " " + 
name[6::], + suffix=" retrofitting " + strength + " " + node_name, bus=name, carrier="retrofitting", p_nom_extendable=True, @@ -2055,50 +2137,6 @@ def add_heat(n, costs): ) -def create_nodes_for_heat_sector(): - # TODO pop_layout - - # rural are areas with low heating density and individual heating - # urban are areas with high heating density - # urban can be split into district heating (central) and individual heating (decentral) - - ct_urban = pop_layout.urban.groupby(pop_layout.ct).sum() - # distribution of urban population within a country - pop_layout["urban_ct_fraction"] = pop_layout.urban / pop_layout.ct.map(ct_urban.get) - - sectors = ["residential", "services"] - - nodes = {} - urban_fraction = pop_layout.urban / pop_layout[["rural", "urban"]].sum(axis=1) - - for sector in sectors: - nodes[sector + " rural"] = pop_layout.index - nodes[sector + " urban decentral"] = pop_layout.index - - district_heat_share = pop_weighted_energy_totals["district heat share"] - - # maximum potential of urban demand covered by district heating - central_fraction = options["district_heating"]["potential"] - # district heating share at each node - dist_fraction_node = ( - district_heat_share * pop_layout["urban_ct_fraction"] / pop_layout["fraction"] - ) - nodes["urban central"] = dist_fraction_node.index - # if district heating share larger than urban fraction -> set urban - # fraction to district heating share - urban_fraction = pd.concat([urban_fraction, dist_fraction_node], axis=1).max(axis=1) - # difference of max potential and today's share of district heating - diff = (urban_fraction * central_fraction) - dist_fraction_node - progress = get(options["district_heating"]["progress"], investment_year) - dist_fraction_node += diff * progress - logger.info( - f"Increase district heating share by a progress factor of {progress:.2%} " - f"resulting in new average share of {dist_fraction_node.mean():.2%}" - ) - - return nodes, dist_fraction_node, urban_fraction - - def 
add_biomass(n, costs): logger.info("Add biomass") @@ -2165,12 +2203,41 @@ def add_biomass(n, costs): bus1=spatial.gas.nodes, bus2="co2 atmosphere", carrier="biogas to gas", - capital_cost=costs.loc["biogas upgrading", "fixed"], - marginal_cost=costs.loc["biogas upgrading", "VOM"], + capital_cost=costs.at["biogas", "fixed"] + + costs.at["biogas upgrading", "fixed"], + marginal_cost=costs.at["biogas upgrading", "VOM"], + efficiency=costs.at["biogas", "efficiency"], efficiency2=-costs.at["gas", "CO2 intensity"], p_nom_extendable=True, ) + if options.get("biogas_upgrading_cc"): + # Assuming for costs that the CO2 from upgrading is pure, such as in amine scrubbing. I.e., with and without CC is + # equivalent. Adding biomass CHP capture because biogas is often small-scale and decentral so further + # from e.g. CO2 grid or buyers. This is a proxy for the added cost for e.g. a raw biogas pipeline to a central upgrading facility + n.madd( + "Link", + spatial.gas.biogas_to_gas_cc, + bus0=spatial.gas.biogas, + bus1=spatial.gas.nodes, + bus2=spatial.co2.nodes, + bus3="co2 atmosphere", + carrier="biogas to gas CC", + capital_cost=costs.at["biogas CC", "fixed"] + + costs.at["biogas upgrading", "fixed"] + + costs.at["biomass CHP capture", "fixed"] + * costs.at["biogas CC", "CO2 stored"], + marginal_cost=costs.at["biogas CC", "VOM"] + + costs.at["biogas upgrading", "VOM"], + efficiency=costs.at["biogas CC", "efficiency"], + efficiency2=costs.at["biogas CC", "CO2 stored"] + * costs.at["biogas CC", "capture rate"], + efficiency3=-costs.at["gas", "CO2 intensity"] + - costs.at["biogas CC", "CO2 stored"] + * costs.at["biogas CC", "capture rate"], + p_nom_extendable=True, + ) + if options["biomass_transport"]: # add biomass transport transport_costs = pd.read_csv( @@ -2220,6 +2287,14 @@ def add_biomass(n, costs): marginal_cost=costs.at["solid biomass", "fuel"] + bus_transport_costs * average_distance, ) + n.add( + "GlobalConstraint", + "biomass limit", + carrier_attribute="solid 
biomass", + sense="<=", + constant=biomass_potentials["solid biomass"].sum(), + type="operational_limit", + ) # AC buses with district heating urban_central = n.buses.index[n.buses.carrier == "urban central heat"] @@ -2279,7 +2354,7 @@ def add_biomass(n, costs): if options["biomass_boiler"]: # TODO: Add surcharge for pellets - nodes_heat = create_nodes_for_heat_sector()[0] + nodes = pop_layout.index for name in [ "residential rural", "services rural", @@ -2288,14 +2363,16 @@ def add_biomass(n, costs): ]: n.madd( "Link", - nodes_heat[name] + f" {name} biomass boiler", + nodes + f" {name} biomass boiler", p_nom_extendable=True, - bus0=spatial.biomass.df.loc[nodes_heat[name], "nodes"].values, - bus1=nodes_heat[name] + f" {name} heat", + bus0=spatial.biomass.df.loc[nodes, "nodes"].values, + bus1=nodes + f" {name} heat", carrier=name + " biomass boiler", efficiency=costs.at["biomass boiler", "efficiency"], capital_cost=costs.at["biomass boiler", "efficiency"] - * costs.at["biomass boiler", "fixed"], + * costs.at["biomass boiler", "fixed"] + * options["overdimension_individual_heating"], + marginal_cost=costs.at["biomass boiler", "pelletizing cost"], lifetime=costs.at["biomass boiler", "lifetime"], ) @@ -2315,7 +2392,7 @@ def add_biomass(n, costs): + costs.at["BtL", "CO2 stored"], p_nom_extendable=True, capital_cost=costs.at["BtL", "fixed"], - marginal_cost=costs.at["BtL", "efficiency"] * costs.loc["BtL", "VOM"], + marginal_cost=costs.at["BtL", "efficiency"] * costs.at["BtL", "VOM"], ) # TODO: Update with energy penalty @@ -2336,7 +2413,7 @@ def add_biomass(n, costs): p_nom_extendable=True, capital_cost=costs.at["BtL", "fixed"] + costs.at["biomass CHP capture", "fixed"] * costs.at["BtL", "CO2 stored"], - marginal_cost=costs.at["BtL", "efficiency"] * costs.loc["BtL", "VOM"], + marginal_cost=costs.at["BtL", "efficiency"] * costs.at["BtL", "VOM"], ) # BioSNG from solid biomass @@ -2355,7 +2432,7 @@ def add_biomass(n, costs): + costs.at["BioSNG", "CO2 stored"], 
p_nom_extendable=True, capital_cost=costs.at["BioSNG", "fixed"], - marginal_cost=costs.at["BioSNG", "efficiency"] * costs.loc["BioSNG", "VOM"], + marginal_cost=costs.at["BioSNG", "efficiency"] * costs.at["BioSNG", "VOM"], ) # TODO: Update with energy penalty for CC @@ -2379,7 +2456,7 @@ def add_biomass(n, costs): capital_cost=costs.at["BioSNG", "fixed"] + costs.at["biomass CHP capture", "fixed"] * costs.at["BioSNG", "CO2 stored"], - marginal_cost=costs.at["BioSNG", "efficiency"] * costs.loc["BioSNG", "VOM"], + marginal_cost=costs.at["BioSNG", "efficiency"] * costs.at["BioSNG", "VOM"], ) @@ -2431,9 +2508,14 @@ def add_industry(n, costs): efficiency=1.0, ) + if len(spatial.biomass.industry_cc) <= 1 and len(spatial.co2.nodes) > 1: + link_names = nodes + " " + spatial.biomass.industry_cc + else: + link_names = spatial.biomass.industry_cc + n.madd( "Link", - spatial.biomass.industry_cc, + link_names, bus0=spatial.biomass.nodes, bus1=spatial.biomass.industry, bus2="co2 atmosphere", @@ -2612,6 +2694,8 @@ def add_industry(n, costs): p_min_pu=options.get("min_part_load_methanolisation", 0), capital_cost=costs.at["methanolisation", "fixed"] * options["MWh_MeOH_per_MWh_H2"], # EUR/MW_H2/a + marginal_cost=options["MWh_MeOH_per_MWh_H2"] + * costs.at["methanolisation", "VOM"], lifetime=costs.at["methanolisation", "lifetime"], efficiency=options["MWh_MeOH_per_MWh_H2"], efficiency2=-options["MWh_MeOH_per_MWh_H2"] / options["MWh_MeOH_per_MWh_e"], @@ -2621,48 +2705,44 @@ def add_industry(n, costs): efficiency = ( options["shipping_oil_efficiency"] / options["shipping_methanol_efficiency"] ) - p_set_methanol = shipping_methanol_share * p_set.sum() * efficiency + + p_set_methanol = ( + shipping_methanol_share + * p_set.rename(lambda x: x + " shipping methanol") + * efficiency + ) + + if not options["regional_methanol_demand"]: + p_set_methanol = p_set_methanol.sum() + + n.madd( + "Bus", + spatial.methanol.shipping, + location=spatial.methanol.demand_locations, + carrier="shipping 
methanol", + unit="MWh_LHV", + ) n.madd( "Load", - spatial.methanol.nodes, - suffix=" shipping methanol", - bus=spatial.methanol.nodes, + spatial.methanol.shipping, + bus=spatial.methanol.shipping, carrier="shipping methanol", p_set=p_set_methanol, ) - # CO2 intensity methanol based on stoichiometric calculation with 22.7 GJ/t methanol (32 g/mol), CO2 (44 g/mol), 277.78 MWh/TJ = 0.218 t/MWh - co2 = p_set_methanol / options["MWh_MeOH_per_tCO2"] - - n.add( - "Load", - "shipping methanol emissions", - bus="co2 atmosphere", - carrier="shipping methanol emissions", - p_set=-co2, - ) - - if shipping_oil_share: - p_set_oil = shipping_oil_share * p_set.sum() - n.madd( - "Load", - spatial.oil.nodes, - suffix=" shipping oil", - bus=spatial.oil.nodes, - carrier="shipping oil", - p_set=p_set_oil, - ) - - co2 = p_set_oil * costs.at["oil", "CO2 intensity"] - - n.add( - "Load", - "shipping oil emissions", - bus="co2 atmosphere", - carrier="shipping oil emissions", - p_set=-co2, + "Link", + spatial.methanol.shipping, + bus0=spatial.methanol.nodes, + bus1=spatial.methanol.shipping, + bus2="co2 atmosphere", + carrier="shipping methanol", + p_nom_extendable=True, + efficiency2=1 + / options[ + "MWh_MeOH_per_tCO2" + ], # CO2 intensity methanol based on stoichiometric calculation with 22.7 GJ/t methanol (32 g/mol), CO2 (44 g/mol), 277.78 MWh/TJ = 0.218 t/MWh ) if "oil" not in n.buses.carrier.unique(): @@ -2678,7 +2758,8 @@ def add_industry(n, costs): # could correct to e.g. 
0.001 EUR/kWh * annuity and O&M n.madd( "Store", - [oil_bus + " Store" for oil_bus in spatial.oil.nodes], + spatial.oil.nodes, + suffix=" Store", bus=spatial.oil.nodes, e_nom_extendable=True, e_cyclic=True, @@ -2695,8 +2776,41 @@ def add_industry(n, costs): marginal_cost=costs.at["oil", "fuel"], ) + if shipping_oil_share: + p_set_oil = shipping_oil_share * p_set.rename(lambda x: x + " shipping oil") + + if not options["regional_oil_demand"]: + p_set_oil = p_set_oil.sum() + + n.madd( + "Bus", + spatial.oil.shipping, + location=spatial.oil.demand_locations, + carrier="shipping oil", + unit="MWh_LHV", + ) + + n.madd( + "Load", + spatial.oil.shipping, + bus=spatial.oil.shipping, + carrier="shipping oil", + p_set=p_set_oil, + ) + + n.madd( + "Link", + spatial.oil.shipping, + bus0=spatial.oil.nodes, + bus1=spatial.oil.shipping, + bus2="co2 atmosphere", + carrier="shipping oil", + p_nom_extendable=True, + efficiency2=costs.at["oil", "CO2 intensity"], + ) + if options["oil_boilers"]: - nodes_heat = create_nodes_for_heat_sector()[0] + nodes = pop_layout.index for name in [ "residential rural", @@ -2706,16 +2820,17 @@ def add_industry(n, costs): ]: n.madd( "Link", - nodes_heat[name] + f" {name} oil boiler", + nodes + f" {name} oil boiler", p_nom_extendable=True, bus0=spatial.oil.nodes, - bus1=nodes_heat[name] + f" {name} heat", + bus1=nodes + f" {name} heat", bus2="co2 atmosphere", carrier=f"{name} oil boiler", efficiency=costs.at["decentral oil boiler", "efficiency"], efficiency2=costs.at["oil", "CO2 intensity"], capital_cost=costs.at["decentral oil boiler", "efficiency"] - * costs.at["decentral oil boiler", "fixed"], + * costs.at["decentral oil boiler", "fixed"] + * options["overdimension_individual_heating"], lifetime=costs.at["decentral oil boiler", "lifetime"], ) @@ -2729,6 +2844,8 @@ def add_industry(n, costs): efficiency=costs.at["Fischer-Tropsch", "efficiency"], capital_cost=costs.at["Fischer-Tropsch", "fixed"] * costs.at["Fischer-Tropsch", "efficiency"], # 
EUR/MW_H2/a + marginal_cost=costs.at["Fischer-Tropsch", "efficiency"] + * costs.at["Fischer-Tropsch", "VOM"], efficiency2=-costs.at["oil", "CO2 intensity"] * costs.at["Fischer-Tropsch", "efficiency"], p_nom_extendable=True, @@ -2736,53 +2853,101 @@ def add_industry(n, costs): lifetime=costs.at["Fischer-Tropsch", "lifetime"], ) + # naphtha demand_factor = options.get("HVC_demand_factor", 1) - p_set = demand_factor * industrial_demand.loc[nodes, "naphtha"].sum() / nhours if demand_factor != 1: logger.warning(f"Changing HVC demand by {demand_factor*100-100:+.2f}%.") - n.madd( - "Load", - ["naphtha for industry"], - bus=spatial.oil.nodes, - carrier="naphtha for industry", - p_set=p_set, - ) - - demand_factor = options.get("aviation_demand_factor", 1) - all_aviation = ["total international aviation", "total domestic aviation"] - p_set = ( + p_set_plastics = ( demand_factor - * pop_weighted_energy_totals.loc[nodes, all_aviation].sum(axis=1).sum() - * 1e6 + * industrial_demand.loc[nodes, "naphtha"].rename( + lambda x: x + " naphtha for industry" + ) / nhours ) + + if not options["regional_oil_demand"]: + p_set_plastics = p_set_plastics.sum() + + n.madd( + "Bus", + spatial.oil.naphtha, + location=spatial.oil.demand_locations, + carrier="naphtha for industry", + unit="MWh_LHV", + ) + + n.madd( + "Load", + spatial.oil.naphtha, + bus=spatial.oil.naphtha, + carrier="naphtha for industry", + p_set=p_set_plastics, + ) + + # some CO2 from naphtha are process emissions from steam cracker + # rest of CO2 released to atmosphere either in waste-to-energy or decay + process_co2_per_naphtha = ( + industrial_demand.loc[nodes, "process emission from feedstock"].sum() + / industrial_demand.loc[nodes, "naphtha"].sum() + ) + emitted_co2_per_naphtha = costs.at["oil", "CO2 intensity"] - process_co2_per_naphtha + + n.madd( + "Link", + spatial.oil.naphtha, + bus0=spatial.oil.nodes, + bus1=spatial.oil.naphtha, + bus2="co2 atmosphere", + bus3=spatial.co2.process_emissions, + carrier="naphtha for 
industry", + p_nom_extendable=True, + efficiency2=emitted_co2_per_naphtha, + efficiency3=process_co2_per_naphtha, + ) + + # aviation + demand_factor = options.get("aviation_demand_factor", 1) if demand_factor != 1: logger.warning(f"Changing aviation demand by {demand_factor*100-100:+.2f}%.") + all_aviation = ["total international aviation", "total domestic aviation"] + + p_set = ( + demand_factor + * pop_weighted_energy_totals.loc[nodes, all_aviation].sum(axis=1) + * 1e6 + / nhours + ).rename(lambda x: x + " kerosene for aviation") + + if not options["regional_oil_demand"]: + p_set = p_set.sum() + + n.madd( + "Bus", + spatial.oil.kerosene, + location=spatial.oil.demand_locations, + carrier="kerosene for aviation", + unit="MWh_LHV", + ) + n.madd( "Load", - ["kerosene for aviation"], - bus=spatial.oil.nodes, + spatial.oil.kerosene, + bus=spatial.oil.kerosene, carrier="kerosene for aviation", p_set=p_set, ) - # NB: CO2 gets released again to atmosphere when plastics decay or kerosene is burned - # except for the process emissions when naphtha is used for petrochemicals, which can be captured with other industry process emissions - # tco2 per hour - co2_release = ["naphtha for industry", "kerosene for aviation"] - co2 = ( - n.loads.loc[co2_release, "p_set"].sum() * costs.at["oil", "CO2 intensity"] - - industrial_demand.loc[nodes, "process emission from feedstock"].sum() / nhours - ) - - n.add( - "Load", - "oil emissions", - bus="co2 atmosphere", - carrier="oil emissions", - p_set=-co2, + n.madd( + "Link", + spatial.oil.kerosene, + bus0=spatial.oil.nodes, + bus1=spatial.oil.kerosene, + bus2="co2 atmosphere", + carrier="kerosene for aviation", + p_nom_extendable=True, + efficiency2=costs.at["oil", "CO2 intensity"], ) # TODO simplify bus expression @@ -2791,9 +2956,11 @@ def add_industry(n, costs): nodes, suffix=" low-temperature heat for industry", bus=[ - node + " urban central heat" - if node + " urban central heat" in n.buses.index - else node + " services urban 
decentral heat" + ( + node + " urban central heat" + if node + " urban central heat" in n.buses.index + else node + " services urban decentral heat" + ) for node in nodes ], carrier="low-temperature heat for industry", @@ -2833,19 +3000,16 @@ def add_industry(n, costs): unit="t_co2", ) - sel = ["process emission", "process emission from feedstock"] if options["co2_spatial"] or options["co2network"]: p_set = ( - -industrial_demand.loc[nodes, sel] - .sum(axis=1) - .rename(index=lambda x: x + " process emissions") + -industrial_demand.loc[nodes, "process emission"].rename( + index=lambda x: x + " process emissions" + ) / nhours ) else: - p_set = -industrial_demand.loc[nodes, sel].sum(axis=1).sum() / nhours + p_set = -industrial_demand.loc[nodes, "process emission"].sum() / nhours - # this should be process emissions fossil+feedstock - # then need load on atmosphere for feedstock emissions that are currently going to atmosphere via Link Fischer-Tropsch demand n.madd( "Load", spatial.co2.process_emissions, @@ -2910,32 +3074,61 @@ def add_industry(n, costs): mwh_coal_per_mwh_coke = 1.366 # from eurostat energy balance p_set = ( - industrial_demand["coal"].sum() - + mwh_coal_per_mwh_coke * industrial_demand["coke"].sum() + industrial_demand["coal"] + + mwh_coal_per_mwh_coke * industrial_demand["coke"] ) / nhours + p_set.rename(lambda x: x + " coal for industry", inplace=True) + + if not options["regional_coal_demand"]: + p_set = p_set.sum() + + n.madd( + "Bus", + spatial.coal.industry, + location=spatial.coal.demand_locations, + carrier="coal for industry", + unit="MWh_LHV", + ) + n.madd( "Load", - spatial.coal.nodes, - suffix=" for industry", - bus=spatial.coal.nodes, + spatial.coal.industry, + bus=spatial.coal.industry, carrier="coal for industry", p_set=p_set, ) + n.madd( + "Link", + spatial.coal.industry, + bus0=spatial.coal.nodes, + bus1=spatial.coal.industry, + bus2="co2 atmosphere", + carrier="coal for industry", + p_nom_extendable=True, + 
efficiency2=costs.at["coal", "CO2 intensity"], + ) + def add_waste_heat(n): # TODO options? logger.info("Add possibility to use industrial waste heat in district heating") + cf_industry = snakemake.params.industry # AC buses with district heating urban_central = n.buses.index[n.buses.carrier == "urban central heat"] if not urban_central.empty: urban_central = urban_central.str[: -len(" urban central heat")] + link_carriers = n.links.carrier.unique() + # TODO what is the 0.95 and should it be a config option? - if options["use_fischer_tropsch_waste_heat"]: + if ( + options["use_fischer_tropsch_waste_heat"] + and "Fischer-Tropsch" in link_carriers + ): n.links.loc[urban_central + " Fischer-Tropsch", "bus3"] = ( urban_central + " urban central heat" ) @@ -2943,8 +3136,48 @@ def add_waste_heat(n): 0.95 - n.links.loc[urban_central + " Fischer-Tropsch", "efficiency"] ) + if options["use_methanation_waste_heat"] and "Sabatier" in link_carriers: + n.links.loc[urban_central + " Sabatier", "bus3"] = ( + urban_central + " urban central heat" + ) + n.links.loc[urban_central + " Sabatier", "efficiency3"] = ( + 0.95 - n.links.loc[urban_central + " Sabatier", "efficiency"] + ) + + # DEA quotes 15% of total input (11% of which are high-value heat) + if options["use_haber_bosch_waste_heat"] and "Haber-Bosch" in link_carriers: + n.links.loc[urban_central + " Haber-Bosch", "bus3"] = ( + urban_central + " urban central heat" + ) + total_energy_input = ( + cf_industry["MWh_H2_per_tNH3_electrolysis"] + + cf_industry["MWh_elec_per_tNH3_electrolysis"] + ) / cf_industry["MWh_NH3_per_tNH3"] + electricity_input = ( + cf_industry["MWh_elec_per_tNH3_electrolysis"] + / cf_industry["MWh_NH3_per_tNH3"] + ) + n.links.loc[urban_central + " Haber-Bosch", "efficiency3"] = ( + 0.15 * total_energy_input / electricity_input + ) + + if ( + options["use_methanolisation_waste_heat"] + and "methanolisation" in link_carriers + ): + n.links.loc[urban_central + " methanolisation", "bus4"] = ( + urban_central + 
" urban central heat" + ) + n.links.loc[urban_central + " methanolisation", "efficiency4"] = ( + costs.at["methanolisation", "heat-output"] + / costs.at["methanolisation", "hydrogen-input"] + ) + # TODO integrate usable waste heat efficiency into technology-data from DEA - if options.get("use_electrolysis_waste_heat", False): + if ( + options.get("use_electrolysis_waste_heat", False) + and "H2 Electrolysis" in link_carriers + ): n.links.loc[urban_central + " H2 Electrolysis", "bus2"] = ( urban_central + " urban central heat" ) @@ -2952,7 +3185,7 @@ def add_waste_heat(n): 0.84 - n.links.loc[urban_central + " H2 Electrolysis", "efficiency"] ) - if options["use_fuel_cell_waste_heat"]: + if options["use_fuel_cell_waste_heat"] and "H2 Fuel Cell" in link_carriers: n.links.loc[urban_central + " H2 Fuel Cell", "bus2"] = ( urban_central + " urban central heat" ) @@ -3006,9 +3239,9 @@ def add_agriculture(n, costs): f"Total agriculture machinery shares sum up to {total_share:.2%}, corresponding to increased or decreased demand assumptions." 
) - machinery_nodal_energy = pop_weighted_energy_totals.loc[ - nodes, "total agriculture machinery" - ] + machinery_nodal_energy = ( + pop_weighted_energy_totals.loc[nodes, "total agriculture machinery"] * 1e6 + ) if electric_share > 0: efficiency_gain = ( @@ -3022,36 +3255,44 @@ def add_agriculture(n, costs): suffix=" agriculture machinery electric", bus=nodes, carrier="agriculture machinery electric", - p_set=electric_share - / efficiency_gain - * machinery_nodal_energy - * 1e6 - / nhours, + p_set=electric_share / efficiency_gain * machinery_nodal_energy / nhours, ) if oil_share > 0: + p_set = ( + oil_share + * machinery_nodal_energy.rename(lambda x: x + " agriculture machinery oil") + / nhours + ) + + if not options["regional_oil_demand"]: + p_set = p_set.sum() + + n.madd( + "Bus", + spatial.oil.agriculture_machinery, + location=spatial.oil.demand_locations, + carrier="agriculture machinery oil", + unit="MWh_LHV", + ) + n.madd( "Load", - ["agriculture machinery oil"], - bus=spatial.oil.nodes, + spatial.oil.agriculture_machinery, + bus=spatial.oil.agriculture_machinery, carrier="agriculture machinery oil", - p_set=oil_share * machinery_nodal_energy.sum() * 1e6 / nhours, + p_set=p_set, ) - co2 = ( - oil_share - * machinery_nodal_energy.sum() - * 1e6 - / nhours - * costs.at["oil", "CO2 intensity"] - ) - - n.add( - "Load", - "agriculture machinery oil emissions", - bus="co2 atmosphere", - carrier="agriculture machinery oil emissions", - p_set=-co2, + n.madd( + "Link", + spatial.oil.agriculture_machinery, + bus0=spatial.oil.nodes, + bus1=spatial.oil.agriculture_machinery, + bus2="co2 atmosphere", + carrier="agriculture machinery oil", + p_nom_extendable=True, + efficiency2=costs.at["oil", "CO2 intensity"], ) @@ -3072,46 +3313,6 @@ def remove_h2_network(n): n.stores.drop("EU H2 Store", inplace=True) -def maybe_adjust_costs_and_potentials(n, opts): - for o in opts: - if "+" not in o: - continue - oo = o.split("+") - carrier_list = np.hstack( - ( - 
n.generators.carrier.unique(), - n.links.carrier.unique(), - n.stores.carrier.unique(), - n.storage_units.carrier.unique(), - ) - ) - suptechs = map(lambda c: c.split("-", 2)[0], carrier_list) - if oo[0].startswith(tuple(suptechs)): - carrier = oo[0] - attr_lookup = {"p": "p_nom_max", "e": "e_nom_max", "c": "capital_cost"} - attr = attr_lookup[oo[1][0]] - factor = float(oo[1][1:]) - # beware if factor is 0 and p_nom_max is np.inf, 0*np.inf is nan - if carrier == "AC": # lines do not have carrier - n.lines[attr] *= factor - else: - if attr == "p_nom_max": - comps = {"Generator", "Link", "StorageUnit"} - elif attr == "e_nom_max": - comps = {"Store"} - else: - comps = {"Generator", "Link", "StorageUnit", "Store"} - for c in n.iterate_components(comps): - if carrier == "solar": - sel = c.df.carrier.str.contains( - carrier - ) & ~c.df.carrier.str.contains("solar rooftop") - else: - sel = c.df.carrier.str.contains(carrier) - c.df.loc[sel, attr] *= factor - logger.info(f"changing {attr} for {carrier} by factor {factor}") - - def limit_individual_line_extension(n, maxext): logger.info(f"Limiting new HVAC and HVDC extensions to {maxext} MW") n.lines["s_nom_max"] = n.lines["s_nom"] + maxext @@ -3120,24 +3321,24 @@ def limit_individual_line_extension(n, maxext): aggregate_dict = { - "p_nom": "sum", - "s_nom": "sum", + "p_nom": pd.Series.sum, + "s_nom": pd.Series.sum, "v_nom": "max", "v_mag_pu_max": "min", "v_mag_pu_min": "max", - "p_nom_max": "sum", - "s_nom_max": "sum", - "p_nom_min": "sum", - "s_nom_min": "sum", + "p_nom_max": pd.Series.sum, + "s_nom_max": pd.Series.sum, + "p_nom_min": pd.Series.sum, + "s_nom_min": pd.Series.sum, "v_ang_min": "max", "v_ang_max": "min", "terrain_factor": "mean", "num_parallel": "sum", "p_set": "sum", "e_initial": "sum", - "e_nom": "sum", - "e_nom_max": "sum", - "e_nom_min": "sum", + "e_nom": pd.Series.sum, + "e_nom_max": pd.Series.sum, + "e_nom_min": pd.Series.sum, "state_of_charge_initial": "sum", "state_of_charge_set": "sum", "inflow": 
"sum", @@ -3194,18 +3395,16 @@ def cluster_heat_buses(n): # cluster heat nodes # static dataframe agg = define_clustering(df.columns, aggregate_dict) - df = df.groupby(level=0).agg(agg, **agg_group_kwargs) + df = df.groupby(level=0).agg(agg, numeric_only=False) # time-varying data pnl = c.pnl agg = define_clustering(pd.Index(pnl.keys()), aggregate_dict) for k in pnl.keys(): - pnl[k].rename( - columns=lambda x: x.replace("residential ", "").replace( - "services ", "" - ), - inplace=True, - ) - pnl[k] = pnl[k].groupby(level=0, axis=1).agg(agg[k], **agg_group_kwargs) + + def renamer(s): + return s.replace("residential ", "").replace("services ", "") + + pnl[k] = pnl[k].T.groupby(renamer).agg(agg[k], numeric_only=False).T # remove unclustered assets of service/residential to_drop = c.df.index.difference(df.index) @@ -3231,7 +3430,7 @@ def apply_time_segmentation( """ try: import tsam.timeseriesaggregation as tsam - except: + except ImportError: raise ModuleNotFoundError( "Optional dependency 'tsam' not found." "Install via 'pip install tsam'" ) @@ -3269,6 +3468,7 @@ def apply_time_segmentation( sn_weightings = pd.Series( weightings, index=snapshots, name="weightings", dtype="float64" ) + logger.info(f"Distribution of snapshot durations:\n{weightings.value_counts()}") n.set_snapshots(sn_weightings.index) n.snapshot_weightings = n.snapshot_weightings.mul(sn_weightings, axis=0) @@ -3282,34 +3482,84 @@ def apply_time_segmentation( return n -def set_temporal_aggregation(n, opts, solver_name): +def set_temporal_aggregation(n, resolution, solver_name): """ Aggregate network temporally. 
""" - for o in opts: - # temporal averaging - m = re.match(r"^\d+h$", o, re.IGNORECASE) - if m is not None: - n = average_every_nhours(n, m.group(0)) - break - # representative snapshots - m = re.match(r"(^\d+)sn$", o, re.IGNORECASE) - if m is not None: - sn = int(m[1]) - logger.info(f"Use every {sn} snapshot as representative") - n.set_snapshots(n.snapshots[::sn]) - n.snapshot_weightings *= sn - break - # segments with package tsam - m = re.match(r"^(\d+)seg$", o, re.IGNORECASE) - if m is not None: - segments = int(m[1]) - logger.info(f"Use temporal segmentation with {segments} segments") - n = apply_time_segmentation(n, segments, solver_name=solver_name) - break + if not resolution: + return n + + # representative snapshots + if "sn" in resolution.lower(): + sn = int(resolution[:-2]) + logger.info("Use every %s snapshot as representative", sn) + n.set_snapshots(n.snapshots[::sn]) + n.snapshot_weightings *= sn + + # segments with package tsam + elif "seg" in resolution.lower(): + segments = int(resolution[:-3]) + logger.info("Use temporal segmentation with %s segments", segments) + n = apply_time_segmentation(n, segments, solver_name=solver_name) + + # temporal averaging + elif "h" in resolution.lower(): + logger.info("Aggregate to frequency %s", resolution) + n = average_every_nhours(n, resolution) + return n +def lossy_bidirectional_links(n, carrier, efficiencies={}): + "Split bidirectional links into two unidirectional links to include transmission losses." 
+ + carrier_i = n.links.query("carrier == @carrier").index + + if ( + not any((v != 1.0) or (v >= 0) for v in efficiencies.values()) + or carrier_i.empty + ): + return + + efficiency_static = efficiencies.get("efficiency_static", 1) + efficiency_per_1000km = efficiencies.get("efficiency_per_1000km", 1) + compression_per_1000km = efficiencies.get("compression_per_1000km", 0) + + logger.info( + f"Specified losses for {carrier} transmission " + f"(static: {efficiency_static}, per 1000km: {efficiency_per_1000km}, compression per 1000km: {compression_per_1000km}). " + "Splitting bidirectional links." + ) + + n.links.loc[carrier_i, "p_min_pu"] = 0 + n.links.loc[carrier_i, "efficiency"] = ( + efficiency_static + * efficiency_per_1000km ** (n.links.loc[carrier_i, "length"] / 1e3) + ) + rev_links = ( + n.links.loc[carrier_i].copy().rename({"bus0": "bus1", "bus1": "bus0"}, axis=1) + ) + rev_links["length_original"] = rev_links["length"] + rev_links["capital_cost"] = 0 + rev_links["length"] = 0 + rev_links["reversed"] = True + rev_links.index = rev_links.index.map(lambda x: x + "-reversed") + + n.links = pd.concat([n.links, rev_links], sort=False) + n.links["reversed"] = n.links["reversed"].fillna(False) + n.links["length_original"] = n.links["length_original"].fillna(n.links.length) + + # do compression losses after concatenation to take electricity consumption at bus0 in either direction + carrier_i = n.links.query("carrier == @carrier").index + if compression_per_1000km > 0: + n.links.loc[carrier_i, "bus2"] = n.links.loc[carrier_i, "bus0"].map( + n.buses.location + ) # electricity + n.links.loc[carrier_i, "efficiency2"] = ( + -compression_per_1000km * n.links.loc[carrier_i, "length_original"] / 1e3 + ) + + if __name__ == "__main__": if "snakemake" not in globals(): from _helpers import mock_snakemake @@ -3319,20 +3569,18 @@ if __name__ == "__main__": configfiles="test/config.overnight.yaml", simpl="", opts="", - clusters="5", - ll="v1.5", - 
sector_opts="CO2L0-24H-T-H-B-I-A-solar+p3-dist1", + clusters="37", + ll="v1.0", + sector_opts="CO2L0-24H-T-H-B-I-A-dist1", planning_horizons="2030", ) - logging.basicConfig(level=snakemake.config["logging"]["level"]) - - update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts) + configure_logging(snakemake) + set_scenario_config(snakemake) + update_config_from_wildcards(snakemake.config, snakemake.wildcards) options = snakemake.params.sector - opts = snakemake.wildcards.sector_opts.split("-") - investment_year = int(snakemake.wildcards.planning_horizons[-4:]) n = pypsa.Network(snakemake.input.network) @@ -3362,59 +3610,42 @@ if __name__ == "__main__": for carrier in conventional: add_carrier_buses(n, carrier) - add_co2_tracking(n, options) + add_eu_bus(n) + + add_co2_tracking(n, costs, options) add_generation(n, costs) add_storage_and_grids(n, costs) - # TODO merge with opts cost adjustment below - for o in opts: - if o[:4] == "wave": - wave_cost_factor = float(o[4:].replace("p", ".").replace("m", "-")) - logger.info( - f"Including wave generators with cost factor of {wave_cost_factor}" - ) - add_wave(n, wave_cost_factor) - if o[:4] == "dist": - options["electricity_distribution_grid"] = True - options["electricity_distribution_grid_cost_factor"] = float( - o[4:].replace("p", ".").replace("m", "-") - ) - if o == "biomasstransport": - options["biomass_transport"] = True - - if "nodistrict" in opts: - options["district_heating"]["progress"] = 0.0 - - if "T" in opts: + if options["transport"]: add_land_transport(n, costs) - if "H" in opts: + if options["heating"]: add_heat(n, costs) - if "B" in opts: + if options["biomass"]: add_biomass(n, costs) if options["ammonia"]: add_ammonia(n, costs) - if "I" in opts: + if options["industry"]: add_industry(n, costs) - if "I" in opts and "H" in opts: + if options["heating"]: add_waste_heat(n) - if "A" in opts: # requires H and I + if options["agriculture"]: # requires H and I add_agriculture(n, costs) if 
options["dac"]: add_dac(n, costs) - if "decentral" in opts: + if not options["electricity_transmission_grid"]: decentral(n) - if "noH2network" in opts: + if not options["H2_network"]: remove_h2_network(n) if options["co2network"]: @@ -3424,51 +3655,37 @@ if __name__ == "__main__": add_allam(n, costs) solver_name = snakemake.config["solving"]["solver"]["name"] - n = set_temporal_aggregation(n, opts, solver_name) + resolution = snakemake.params.time_resolution + n = set_temporal_aggregation(n, resolution, solver_name) - limit_type = "config" - limit = get(snakemake.params.co2_budget, investment_year) - for o in opts: - if "cb" not in o: - continue - limit_type = "carbon budget" + co2_budget = snakemake.params.co2_budget + if isinstance(co2_budget, str) and co2_budget.startswith("cb"): fn = "results/" + snakemake.params.RDIR + "/csvs/carbon_budget_distribution.csv" if not os.path.exists(fn): emissions_scope = snakemake.params.emissions_scope - report_year = snakemake.params.eurostat_report_year input_co2 = snakemake.input.co2 build_carbon_budget( - o, + co2_budget, snakemake.input.eurostat, fn, emissions_scope, - report_year, input_co2, + options, ) co2_cap = pd.read_csv(fn, index_col=0).squeeze() limit = co2_cap.loc[investment_year] - break - for o in opts: - if "Co2L" not in o: - continue - limit_type = "wildcard" - limit = o[o.find("Co2L") + 4 :] - limit = float(limit.replace("p", ".").replace("m", "-")) - break - logger.info(f"Add CO2 limit from {limit_type}") - add_co2limit(n, nyears, limit) + else: + limit = get(co2_budget, investment_year) + add_co2limit(n, options, nyears, limit) - for o in opts: - if not o[:10] == "linemaxext": - continue - maxext = float(o[10:]) * 1e3 + maxext = snakemake.params["lines"]["max_extension"] + if maxext is not None: limit_individual_line_extension(n, maxext) - break if options["electricity_distribution_grid"]: insert_electricity_distribution_grid(n, costs) - maybe_adjust_costs_and_potentials(n, opts) + 
maybe_adjust_costs_and_potentials(n, snakemake.params["adjustments"]) if options["gas_distribution_grid"]: insert_gas_distribution_costs(n, costs) @@ -3476,6 +3693,18 @@ if __name__ == "__main__": if options["electricity_grid_connection"]: add_electricity_grid_connection(n, costs) + for k, v in options["transmission_efficiency"].items(): + lossy_bidirectional_links(n, k, v) + + # Workaround: Remove lines with conflicting (and unrealistic) properties + # cf. https://github.com/PyPSA/pypsa-eur/issues/444 + if snakemake.config["solving"]["options"]["transmission_losses"]: + idx = n.lines.query("num_parallel == 0").index + logger.info( + f"Removing {len(idx)} line(s) with properties conflicting with transmission losses functionality." + ) + n.mremove("Line", idx) + first_year_myopic = (snakemake.params.foresight in ["myopic", "perfect"]) and ( snakemake.params.planning_horizons[0] == investment_year ) @@ -3486,5 +3715,6 @@ if __name__ == "__main__": n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards))) sanitize_carriers(n, snakemake.config) + sanitize_locations(n) n.export_to_netcdf(snakemake.output[0]) diff --git a/scripts/retrieve_cost_data.py b/scripts/retrieve_cost_data.py new file mode 100644 index 00000000..eb1ef041 --- /dev/null +++ b/scripts/retrieve_cost_data.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Retrieve cost data from ``technology-data``. +""" + +import logging +from pathlib import Path + +from _helpers import configure_logging, progress_retrieve, set_scenario_config + +logger = logging.getLogger(__name__) + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake("retrieve_cost_data", year=2030) + rootpath = ".." + else: + rootpath = "." 
+ configure_logging(snakemake) + set_scenario_config(snakemake) + + version = snakemake.params.version + baseurl = ( + f"https://raw.githubusercontent.com/PyPSA/technology-data/{version}/outputs/" + ) + filepath = Path(snakemake.output[0]) + url = baseurl + filepath.name + + print(url) + + to_fn = Path(rootpath) / filepath + + print(to_fn) + + logger.info(f"Downloading technology data from '{url}'.") + disable_progress = snakemake.config["run"].get("disable_progressbar", False) + progress_retrieve(url, to_fn, disable=disable_progress) + + logger.info(f"Technology data available at at {to_fn}") diff --git a/scripts/retrieve_databundle.py b/scripts/retrieve_databundle.py index 75d8519e..996bbeab 100644 --- a/scripts/retrieve_databundle.py +++ b/scripts/retrieve_databundle.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2019-2022 Fabian Hofmann (TUB, FIAS) -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -36,7 +36,12 @@ import logging import tarfile from pathlib import Path -from _helpers import configure_logging, progress_retrieve +from _helpers import ( + configure_logging, + progress_retrieve, + set_scenario_config, + validate_checksum, +) logger = logging.getLogger(__name__) @@ -49,9 +54,8 @@ if __name__ == "__main__": rootpath = ".." else: rootpath = "." 
- configure_logging( - snakemake - ) # TODO Make logging compatible with progressbar (see PR #102) + configure_logging(snakemake) + set_scenario_config(snakemake) if snakemake.config["tutorial"]: url = "https://zenodo.org/record/3517921/files/pypsa-eur-tutorial-data-bundle.tar.xz" @@ -65,6 +69,8 @@ if __name__ == "__main__": disable_progress = snakemake.config["run"].get("disable_progressbar", False) progress_retrieve(url, tarball_fn, disable=disable_progress) + validate_checksum(tarball_fn, url) + logger.info("Extracting databundle.") tarfile.open(tarball_fn).extractall(to_fn) diff --git a/scripts/retrieve_electricity_demand.py b/scripts/retrieve_electricity_demand.py new file mode 100644 index 00000000..94077fdf --- /dev/null +++ b/scripts/retrieve_electricity_demand.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: 2023-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Retrieve electricity prices from OPSD. +""" + +import logging + +import pandas as pd + +logger = logging.getLogger(__name__) + +from _helpers import configure_logging, set_scenario_config + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake("retrieve_electricity_demand") + rootpath = ".." + else: + rootpath = "." 
+ configure_logging(snakemake) + set_scenario_config(snakemake) + + url = "https://data.open-power-system-data.org/time_series/{version}/time_series_60min_singleindex.csv" + + df1, df2 = [ + pd.read_csv(url.format(version=version), index_col=0) + for version in snakemake.params.versions + ] + combined = pd.concat([df1, df2[df2.index > df1.index[-1]]]) + + pattern = "_load_actual_entsoe_transparency" + transparency = combined.filter(like=pattern).rename( + columns=lambda x: x.replace(pattern, "") + ) + pattern = "_load_actual_entsoe_power_statistics" + powerstatistics = combined.filter(like=pattern).rename( + columns=lambda x: x.replace(pattern, "") + ) + + res = transparency.fillna(powerstatistics) + + res.to_csv(snakemake.output[0]) diff --git a/scripts/retrieve_eurostat_data.py b/scripts/retrieve_eurostat_data.py new file mode 100644 index 00000000..4b4cea4a --- /dev/null +++ b/scripts/retrieve_eurostat_data.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2024- The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Retrieve and extract eurostat energy balances data. +""" + + +import logging +import zipfile +from pathlib import Path + +from _helpers import configure_logging, progress_retrieve, set_scenario_config + +logger = logging.getLogger(__name__) + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake("retrieve_eurostat_data") + rootpath = ".." + else: + rootpath = "." 
+ configure_logging(snakemake) + set_scenario_config(snakemake) + + disable_progress = snakemake.config["run"].get("disable_progressbar", False) + url_eurostat = "https://ec.europa.eu/eurostat/documents/38154/4956218/Balances-December2022.zip/f7cf0d19-5c0f-60ad-4e48-098a5ddd6e48?t=1671184070589" + tarball_fn = Path(f"{rootpath}/data/eurostat/eurostat_2023.zip") + to_fn = Path( + f"{rootpath}/data/eurostat/eurostat-energy_balances-april_2023_edition/" + ) + + logger.info(f"Downloading Eurostat data from '{url_eurostat}'.") + progress_retrieve(url_eurostat, tarball_fn, disable=disable_progress) + + logger.info("Extracting Eurostat data.") + with zipfile.ZipFile(tarball_fn, "r") as zip_ref: + zip_ref.extractall(to_fn) + + logger.info(f"Eurostat data available in '{to_fn}'.") diff --git a/scripts/retrieve_gas_infrastructure_data.py b/scripts/retrieve_gas_infrastructure_data.py index 42b726db..8d7d0e08 100644 --- a/scripts/retrieve_gas_infrastructure_data.py +++ b/scripts/retrieve_gas_infrastructure_data.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2021-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2021-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -11,7 +11,12 @@ import logging import zipfile from pathlib import Path -from _helpers import progress_retrieve +from _helpers import ( + configure_logging, + progress_retrieve, + set_scenario_config, + validate_checksum, +) logger = logging.getLogger(__name__) @@ -24,6 +29,8 @@ if __name__ == "__main__": rootpath = ".." else: rootpath = "." 
+ configure_logging(snakemake) + set_scenario_config(snakemake) url = "https://zenodo.org/record/4767098/files/IGGIELGN.zip" @@ -35,6 +42,8 @@ if __name__ == "__main__": disable_progress = snakemake.config["run"].get("disable_progressbar", False) progress_retrieve(url, zip_fn, disable=disable_progress) + validate_checksum(zip_fn, url) + logger.info("Extracting databundle.") zipfile.ZipFile(zip_fn).extractall(to_fn) diff --git a/scripts/retrieve_irena.py b/scripts/retrieve_irena.py index 7b123475..04e48db1 100644 --- a/scripts/retrieve_irena.py +++ b/scripts/retrieve_irena.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2023 Thomas Gilon (Climact) -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -26,7 +26,7 @@ This rule downloads the existing capacities from `IRENASTAT solve_opts["clip_p_max_pu"], other=0.0, inplace=True) @@ -391,11 +391,11 @@ def prepare_network( if foresight == "perfect": n = add_land_use_constraint_perfect(n) if snakemake.params["sector"]["limit_max_growth"]["enable"]: - n = add_max_growth(n, config) + n = add_max_growth(n) - if n.stores.carrier.eq("co2 stored").any(): + if n.stores.carrier.eq("co2 sequestered").any(): limit = co2_sequestration_potential - add_co2_sequestration_limit(n, config, limit=limit) + add_co2_sequestration_limit(n, limit=limit) return n @@ -416,7 +416,7 @@ def add_CCL_constraints(n, config): Example ------- scenario: - opts: [Co2L-CCL-24H] + opts: [Co2L-CCL-24h] electricity: agg_p_nom_limits: data/agg_p_nom_minmax.csv """ @@ -461,7 +461,7 @@ def add_EQ_constraints(n, o, scaling=1e-1): Example ------- scenario: - opts: [Co2L-EQ0.7-24H] + opts: [Co2L-EQ0.7-24h] Require each country or node to on average produce a minimal share of its total electricity consumption itself. 
Example: EQ0.7c demands each country @@ -525,7 +525,7 @@ def add_BAU_constraints(n, config): Example ------- scenario: - opts: [Co2L-BAU-24H] + opts: [Co2L-BAU-24h] electricity: BAU_mincapacities: solar: 0 @@ -562,7 +562,7 @@ def add_SAFE_constraints(n, config): config.yaml requires to specify opts: scenario: - opts: [Co2L-SAFE-24H] + opts: [Co2L-SAFE-24h] electricity: SAFE_reservemargin: 0.1 Which sets a reserve margin of 10% above the peak demand. @@ -570,7 +570,7 @@ def add_SAFE_constraints(n, config): peakdemand = n.loads_t.p_set.sum(axis=1).max() margin = 1.0 + config["electricity"]["SAFE_reservemargin"] reserve_margin = peakdemand * margin - conventional_carriers = config["electricity"]["conventional_carriers"] + conventional_carriers = config["electricity"]["conventional_carriers"] # noqa: F841 ext_gens_i = n.generators.query( "carrier in @conventional_carriers & p_nom_extendable" ).index @@ -687,6 +687,37 @@ def add_battery_constraints(n): n.model.add_constraints(lhs == 0, name="Link-charger_ratio") +def add_lossy_bidirectional_link_constraints(n): + if not n.links.p_nom_extendable.any() or "reversed" not in n.links.columns: + return + + n.links["reversed"] = n.links.reversed.fillna(0).astype(bool) + carriers = n.links.loc[n.links.reversed, "carrier"].unique() # noqa: F841 + + forward_i = n.links.query( + "carrier in @carriers and ~reversed and p_nom_extendable" + ).index + + def get_backward_i(forward_i): + return pd.Index( + [ + ( + re.sub(r"-(\d{4})$", r"-reversed-\1", s) + if re.search(r"-\d{4}$", s) + else s + "-reversed" + ) + for s in forward_i + ] + ) + + backward_i = get_backward_i(forward_i) + + lhs = n.model["Link-p_nom"].loc[backward_i] + rhs = n.model["Link-p_nom"].loc[forward_i] + + n.model.add_constraints(lhs == rhs, name="Link-bidirectional_sync") + + def add_chp_constraints(n): electric = ( n.links.index.str.contains("urban central") @@ -745,9 +776,13 @@ def add_pipe_retrofit_constraint(n): """ Add constraint for retrofitting existing CH4 
pipelines to H2 pipelines. """ - gas_pipes_i = n.links.query("carrier == 'gas pipeline' and p_nom_extendable").index + if "reversed" not in n.links.columns: + n.links["reversed"] = False + gas_pipes_i = n.links.query( + "carrier == 'gas pipeline' and p_nom_extendable and ~reversed" + ).index h2_retrofitted_i = n.links.query( - "carrier == 'H2 pipeline retrofitted' and p_nom_extendable" + "carrier == 'H2 pipeline retrofitted' and p_nom_extendable and ~reversed" ).index if h2_retrofitted_i.empty or gas_pipes_i.empty: @@ -762,6 +797,29 @@ def add_pipe_retrofit_constraint(n): n.model.add_constraints(lhs == rhs, name="Link-pipe_retrofit") +def add_co2_atmosphere_constraint(n, snapshots): + glcs = n.global_constraints[n.global_constraints.type == "co2_atmosphere"] + + if glcs.empty: + return + for name, glc in glcs.iterrows(): + carattr = glc.carrier_attribute + emissions = n.carriers.query(f"{carattr} != 0")[carattr] + + if emissions.empty: + continue + + # stores + n.stores["carrier"] = n.stores.bus.map(n.buses.carrier) + stores = n.stores.query("carrier in @emissions.index and not e_cyclic") + if not stores.empty: + last_i = snapshots[-1] + lhs = n.model["Store-e"].loc[last_i, stores.index] + rhs = glc.constant + + n.model.add_constraints(lhs <= rhs, name=f"GlobalConstraint-{name}") + + def extra_functionality(n, snapshots): """ Collects supplementary constraints which will be passed to @@ -771,29 +829,43 @@ def extra_functionality(n, snapshots): location to add them. The arguments ``opts`` and ``snakemake.config`` are expected to be attached to the network. 
""" - opts = n.opts config = n.config - if "BAU" in opts and n.generators.p_nom_extendable.any(): + constraints = config["solving"].get("constraints", {}) + if constraints["BAU"] and n.generators.p_nom_extendable.any(): add_BAU_constraints(n, config) - if "SAFE" in opts and n.generators.p_nom_extendable.any(): + if constraints["SAFE"] and n.generators.p_nom_extendable.any(): add_SAFE_constraints(n, config) - if "CCL" in opts and n.generators.p_nom_extendable.any(): + if constraints["CCL"] and n.generators.p_nom_extendable.any(): add_CCL_constraints(n, config) + reserve = config["electricity"].get("operational_reserve", {}) if reserve.get("activate"): add_operational_reserve_margin(n, snapshots, config) - for o in opts: - if "EQ" in o: - add_EQ_constraints(n, o) + + if EQ_o := constraints["EQ"]: + add_EQ_constraints(n, EQ_o.replace("EQ", "")) + add_battery_constraints(n) + add_lossy_bidirectional_link_constraints(n) add_pipe_retrofit_constraint(n) if n._multi_invest: add_carbon_constraint(n, snapshots) add_carbon_budget_constraint(n, snapshots) add_retrofit_gas_boiler_constraint(n, snapshots) + else: + add_co2_atmosphere_constraint(n, snapshots) + + if snakemake.params.custom_extra_functionality: + source_path = snakemake.params.custom_extra_functionality + assert os.path.exists(source_path), f"{source_path} does not exist" + sys.path.append(os.path.dirname(source_path)) + module_name = os.path.splitext(os.path.basename(source_path))[0] + module = importlib.import_module(module_name) + custom_extra_functionality = getattr(module, module_name) + custom_extra_functionality(n, snapshots, snakemake) -def solve_network(n, config, solving, opts="", **kwargs): +def solve_network(n, config, solving, **kwargs): set_of_options = solving["solver"]["options"] cf_solving = solving["options"] @@ -808,6 +880,10 @@ def solve_network(n, config, solving, opts="", **kwargs): "linearized_unit_commitment", False ) kwargs["assign_all_duals"] = cf_solving.get("assign_all_duals", False) + 
kwargs["io_api"] = cf_solving.get("io_api", None) + + if kwargs["solver_name"] == "gurobi": + logging.getLogger("gurobipy").setLevel(logging.CRITICAL) rolling_horizon = cf_solving.pop("rolling_horizon", False) skip_iterations = cf_solving.pop("skip_iterations", False) @@ -817,7 +893,6 @@ def solve_network(n, config, solving, opts="", **kwargs): # add to network for extra_functionality n.config = config - n.opts = opts if rolling_horizon: kwargs["horizon"] = cf_solving.get("horizon", 365) @@ -839,6 +914,9 @@ def solve_network(n, config, solving, opts="", **kwargs): f"Solving status '{status}' with termination condition '{condition}'" ) if "infeasible" in condition: + labels = n.model.compute_infeasibilities() + logger.info(f"Labels:\n{labels}") + n.model.print_infeasibilities() raise RuntimeError("Solving status 'infeasible'") return n @@ -849,25 +927,19 @@ if __name__ == "__main__": from _helpers import mock_snakemake snakemake = mock_snakemake( - "solve_sector_network_perfect", + "solve_sector_network", configfiles="../config/test/config.perfect.yaml", simpl="", opts="", - clusters="5", - ll="v1.5", - sector_opts="8760H-T-H-B-I-A-solar+p3-dist1", + clusters="37", + ll="v1.0", + sector_opts="CO2L0-1H-T-H-B-I-A-dist1", planning_horizons="2030", ) configure_logging(snakemake) - if "sector_opts" in snakemake.wildcards.keys(): - update_config_with_sector_opts( - snakemake.config, snakemake.wildcards.sector_opts - ) + set_scenario_config(snakemake) + update_config_from_wildcards(snakemake.config, snakemake.wildcards) - opts = snakemake.wildcards.opts - if "sector_opts" in snakemake.wildcards.keys(): - opts += "-" + snakemake.wildcards.sector_opts - opts = [o for o in opts.split("-") if o != ""] solve_opts = snakemake.params.solving["options"] np.random.seed(solve_opts.get("seed", 123)) @@ -890,7 +962,6 @@ if __name__ == "__main__": n, config=snakemake.config, solving=snakemake.params.solving, - opts=opts, log_fn=snakemake.log.solver, ) diff --git 
a/scripts/solve_operations_network.py b/scripts/solve_operations_network.py index dca49d02..bd4ce7aa 100644 --- a/scripts/solve_operations_network.py +++ b/scripts/solve_operations_network.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -12,7 +12,11 @@ import logging import numpy as np import pypsa -from _helpers import configure_logging, update_config_with_sector_opts +from _helpers import ( + configure_logging, + set_scenario_config, + update_config_from_wildcards, +) from solve_network import prepare_network, solve_network logger = logging.getLogger(__name__) @@ -34,10 +38,9 @@ if __name__ == "__main__": ) configure_logging(snakemake) - update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts) + set_scenario_config(snakemake) + update_config_from_wildcards(snakemake.config, snakemake.wildcards) - opts = f"{snakemake.wildcards.opts}-{snakemake.wildcards.sector_opts}".split("-") - opts = [o for o in opts if o != ""] solve_opts = snakemake.params.options np.random.seed(solve_opts.get("seed", 123)) @@ -46,9 +49,7 @@ if __name__ == "__main__": n.optimize.fix_optimal_capacities() n = prepare_network(n, solve_opts, config=snakemake.config) - n = solve_network( - n, config=snakemake.config, opts=opts, log_fn=snakemake.log.solver - ) + n = solve_network(n, config=snakemake.config, log_fn=snakemake.log.solver) n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards))) n.export_to_netcdf(snakemake.output[0]) diff --git a/test.sh b/test.sh new file mode 100755 index 00000000..d6007750 --- /dev/null +++ b/test.sh @@ -0,0 +1,9 @@ +# SPDX-FileCopyrightText: : 2021-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: CC0-1.0 + +snakemake -call solve_elec_networks --configfile config/test/config.electricity.yaml --rerun-triggers=mtime && \ +snakemake -call all --configfile 
config/test/config.overnight.yaml --rerun-triggers=mtime && \ +snakemake -call all --configfile config/test/config.myopic.yaml --rerun-triggers=mtime && \ +snakemake -call all --configfile config/test/config.perfect.yaml --rerun-triggers=mtime && \ +snakemake -call all --configfile config/test/config.scenarios.yaml --rerun-triggers=mtime -n